summaryrefslogtreecommitdiffstats
path: root/fluent-bit/lib/librdkafka-2.1.0
diff options
context:
space:
mode:
Diffstat (limited to 'fluent-bit/lib/librdkafka-2.1.0')
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp103
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el10
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.formatignore18
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.gdbmacros19
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE34
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.gitignore33
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml43
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml364
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md1218
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt291
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md46
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md183
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md425
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/Doxyfile2375
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/INTRODUCTION.md2069
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE25
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.cjson22
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.crc32c28
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.fnv1a18
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.hdrhistogram27
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.lz426
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.murmur225
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.pycrc23
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.queue31
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.regexp5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.snappy36
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.tinycthread26
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSE.wingetopt49
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/LICENSES.txt392
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/Makefile124
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/README.md198
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/README.win3226
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/STATISTICS.md624
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/configure214
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/configure.self331
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/.gitignore6
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/changelog111
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/compat1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/control71
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/copyright99
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/gbp.conf9
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka++1.install1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.examples2
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.install9
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.docs5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.install1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.symbols135
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/debian/rules19
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/source/format1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/debian/watch2
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/dev-conf.sh123
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore19
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt40
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/Makefile137
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/README.md38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c338
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c260
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c233
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c373
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/globals.json11
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c344
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp961
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c359
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c330
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/misc.c287
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp249
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/producer.c251
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp228
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c617
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp467
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp264
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c853
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp679
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c1780
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c668
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c665
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp395
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/lds-gen.py73
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mainpage.doxy40
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/.gitignore1
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/mklove/Makefile.base329
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.atomics144
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.base2484
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.builtin70
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cc186
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cxx8
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.fileversion65
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.gitversion29
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.good_cflags18
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.host132
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.lib49
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libcurl99
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libsasl236
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libssl147
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libzstd58
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.parseversion95
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.pic16
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.socket20
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.zlib61
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/README.md8
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch11
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch56
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/RELEASE.md311
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/alpine/build-alpine.sh38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/archlinux/PKGBUILD30
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Config.cmake.in37
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindLZ4.cmake38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindZSTD.cmake27
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/LICENSE.FindZstd178
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/README.md38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/config.h.in52
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/parseversion.cmake60
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/rdkafka.pc.in12
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_32_test.c8
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_64_test.c8
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/c11threads_test.c14
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/crc32c_hw_test.c27
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/dlopen_test.c11
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/libsasl2_test.c7
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c6
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c7
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rand_r_test.c7
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rdkafka_setup.cmake122
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/regex_test.c10
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/strndup_test.c5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_32_test.c8
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_64_test.c8
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cp/README.md14
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/cp/check_features.c64
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-deb.sh34
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-packages.sh43
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-rpm.sh38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/.gitignore6
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/changelog66
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/compat1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/control49
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/copyright84
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/docs5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/gbp.conf9
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.dirs2
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.examples2
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.install6
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.substvars1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka.dsc16
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1-dbg.substvars1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.dirs1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.install2
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postinst.debhelper5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postrm.debhelper5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.symbols64
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/debian/rules19
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/source/format1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/debian/watch2
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/get_version.py21
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/README.md15
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/brew-update-pr.sh31
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh52
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw.sh21
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/run-tests.sh6
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/semaphoreci-build.sh38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/travis-before-install.sh20
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/.gitignore7
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/README.md78
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/nuget/artifact.py177
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/nuget/cleanup-s3.py143
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zipbin0 -> 679055 bytes
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zipbin0 -> 516022 bytes
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zipbin0 -> 662837 bytes
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zipbin0 -> 621912 bytes
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nuget.sh21
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nugetpackage.py286
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/nuget/packaging.py448
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/nuget/push-to-nuget.sh21
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/nuget/release.py167
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/requirements.txt3
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/staticpackage.py178
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.nuspec21
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.props18
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.targets19
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/__init__.py0
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/zfile.py98
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/.gitignore7
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/Makefile92
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/README.md23
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/el7-x86_64.cfg40
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/librdkafka.spec118
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/rpm/mock-on-docker.sh97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/.gitignore2
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/Makefile25
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/README.md8
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/run-test.sh49
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test-on-docker.sh56
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.c77
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.cpp34
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-deb-package.sh64
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-debian.sh65
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-manylinux.sh68
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-release-artifacts.sh138
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/distro-build.sh38
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/gh-release-checksums.py39
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/rdutcoverage.sh25
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/packaging/tools/requirements.txt2
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/packaging/tools/style-format.sh148
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/service.yml18
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/CMakeLists.txt90
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConfImpl.cpp84
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConsumerImpl.cpp244
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/HandleImpl.cpp425
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/HeadersImpl.cpp48
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/KafkaConsumerImpl.cpp296
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/Makefile55
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/MessageImpl.cpp38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/MetadataImpl.cpp170
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/ProducerImpl.cpp197
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/QueueImpl.cpp70
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/README.md16
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/RdKafka.cpp59
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicImpl.cpp124
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicPartitionImpl.cpp57
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp.h3764
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp_int.h1628
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt364
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/Makefile97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c2834
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h398
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c430
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h38
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh66
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.pngbin0 -> 93796 bytes
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/lz4.c2498
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/lz4.h774
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c1899
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h623
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h47
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c1615
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h413
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/queue.h850
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rd.h436
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c255
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h203
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h226
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h259
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c210
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h250
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c1880
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h373
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c114
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h170
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rddl.c179
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rddl.h43
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h174
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h67
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c113
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h35
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c120
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h46
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c721
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h87
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c511
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h83
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h159
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c5026
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h9340
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c6668
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h482
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c968
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h73
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c1065
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h212
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c278
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h120
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c221
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c5867
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h607
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c530
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h1407
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c552
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h61
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c5969
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h383
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c4362
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h650
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c623
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h132
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c228
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h80
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c426
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h118
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c460
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h102
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c1145
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h41
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c220
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h76
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c807
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h144
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h1054
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c819
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h104
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c450
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h49
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c1468
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h212
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c836
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c2585
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h373
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c687
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c2218
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h538
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c2517
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h583
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h62
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h82
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c1794
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c1445
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c1548
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h135
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c928
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h778
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c4301
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h1058
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c228
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h70
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c213
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h41
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h655
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h120
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c1085
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h1171
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c138
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c5378
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h463
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c123
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c522
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h63
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c720
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h89
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c1825
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h52
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c604
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h37
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c142
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c973
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c548
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c1841
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h57
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c3428
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c278
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c384
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h114
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c1900
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h311
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c1295
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h94
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h100
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c3249
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h171
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c226
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h57
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c546
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h421
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c89
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h41
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c487
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h487
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c167
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h35
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdports.c61
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdports.h38
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h250
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c70
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h43
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c156
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h43
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h57
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c629
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h93
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h404
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h309
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h86
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c529
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h230
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c134
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h165
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h382
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c1187
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h372
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/regexp.c1347
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/regexp.h41
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/snappy.c1866
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/snappy.h62
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h138
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json444
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c932
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h503
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c175
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h208
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h58
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore15
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c72
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c98
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c244
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c173
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c865
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c133
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c163
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c136
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c179
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c99
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c576
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c537
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c473
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c512
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c172
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c166
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c142
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c332
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c289
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c162
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c71
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c212
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c147
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c541
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c79
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c198
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c589
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c119
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c509
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c377
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c73
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c86
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c85
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c120
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c284
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c251
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c96
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c252
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c77
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c93
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c459
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c65
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c283
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c162
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c124
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c125
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c220
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp535
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp236
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c366
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c311
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp112
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp123
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp237
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp163
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp275
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c126
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp180
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c481
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp140
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp129
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp148
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c138
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c123
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp197
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c448
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c381
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c87
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c252
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c350
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c357
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp96
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c93
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c2535
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c3797
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp133
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c228
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c211
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp388
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c334
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c162
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c358
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c172
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c297
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c197
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c230
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp122
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp466
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp1218
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c189
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp195
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp446
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c535
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c1297
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c617
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c3926
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c300
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c259
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp218
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp183
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp127
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c98
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp3170
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp176
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp179
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp214
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c324
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c121
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp148
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c183
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c118
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c226
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c98
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c69
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c78
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c213
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp125
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c78
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c127
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c81
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c171
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c113
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c92
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp143
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c181
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c608
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c189
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c164
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp60
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt154
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py256
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/Makefile182
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/README.md505
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh33
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb30
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py297
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh17
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh20
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py183
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh56
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore11
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile8
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md13
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12bin0 -> 4345 bytes
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem109
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key34
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh93
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile12
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md31
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c74
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h90
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh165
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py363
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt16
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile22
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c314
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h54
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore1
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile12
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java46
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md14
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java162
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh11
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions483
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh59
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh50
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh43
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py115
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile19
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c58
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt2
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh16
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh16
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh140
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c249
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py328
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md6
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json6
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c801
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h85
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c145
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h61
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/test.c6960
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example27
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/test.h936
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp126
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h360
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h402
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md4
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md21
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq42
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py150
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt3
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py124
-rwxr-xr-xfluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh87
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c122
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp159
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/vcpkg.json23
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/.gitignore109
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/README.md5
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/build-package.bat3
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/build.bat19
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/common.vcxproj84
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/install-openssl.ps133
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/interceptor_test/interceptor_test.vcxproj87
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.autopkg.template55
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.master.testing.targets13
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.sln226
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.vcxproj258
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/librdkafkacpp/librdkafkacpp.vcxproj104
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/msbuild.ps115
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/openssl_engine_example/openssl_engine_example.vcxproj132
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/package-zip.ps146
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/packages/repositories.config4
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/push-package.bat4
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj67
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_example/rdkafka_example.vcxproj97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_performance/rdkafka_performance.vcxproj97
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/setup-msys2.ps131
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/setup-vcpkg.ps113
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/tests/.gitignore3
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/tests/test.conf.example25
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/tests/tests.vcxproj237
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj132
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.c564
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.h100
-rw-r--r--fluent-bit/lib/librdkafka-2.1.0/win32/wintime.h33
635 files changed, 237653 insertions, 0 deletions
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp b/fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp
new file mode 100644
index 000000000..1e102adfe
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.clang-format-cpp
@@ -0,0 +1,103 @@
+---
+BasedOnStyle: Google
+Language: Cpp
+AccessModifierOffset: -1
+AlignAfterOpenBracket: Align
+AlignConsecutiveMacros: true
+AlignConsecutiveAssignments: true
+AlignConsecutiveDeclarations: false
+AlignEscapedNewlines: Right
+AlignOperands: true
+AlignTrailingComments: true
+AllowAllArgumentsOnNextLine: true
+AllowAllConstructorInitializersOnNextLine: true
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: Never
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortLambdasOnASingleLine: All
+AllowShortIfStatementsOnASingleLine: Never
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: true
+AlwaysBreakTemplateDeclarations: Yes
+BinPackArguments: true
+BinPackParameters: false
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+BreakBeforeInheritanceComma: false
+BreakInheritanceList: BeforeColon
+BreakBeforeTernaryOperators: true
+BreakConstructorInitializersBeforeComma: false
+BreakConstructorInitializers: AfterColon
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: true
+ColumnLimit: 80
+CommentPragmas: '^ IWYU pragma:'
+CompactNamespaces: false
+ConstructorInitializerAllOnOneLineOrOnePerLine: true
+ConstructorInitializerIndentWidth: 4
+ContinuationIndentWidth: 4
+Cpp11BracedListStyle: true
+DeriveLineEnding: true
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+FixNamespaceComments: true
+IncludeBlocks: Preserve
+IncludeIsMainRegex: '([-_](test|unittest))?$'
+IncludeIsMainSourceRegex: ''
+IndentCaseLabels: false
+IndentGotoLabels: true
+IndentPPDirectives: None
+IndentWidth: 2
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 3
+NamespaceIndentation: None
+ObjCBinPackProtocolList: Never
+ObjCBlockIndentWidth: 2
+ObjCSpaceAfterProperty: false
+ObjCSpaceBeforeProtocolList: true
+PenaltyBreakAssignment: 2
+PenaltyBreakBeforeFirstCallParameter: 1
+PenaltyBreakComment: 300
+PenaltyBreakFirstLessLess: 120
+PenaltyBreakString: 1000
+PenaltyBreakTemplateDeclaration: 10
+PenaltyExcessCharacter: 1000000
+PenaltyReturnTypeOnItsOwnLine: 200
+PointerAlignment: Right
+ReflowComments: true
+SortIncludes: false
+SortUsingDeclarations: true
+SpaceAfterCStyleCast: false
+SpaceAfterLogicalNot: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+SpaceBeforeCpp11BracedList: true
+SpaceBeforeCtorInitializerColon: true
+SpaceBeforeInheritanceColon: true
+SpaceBeforeParens: ControlStatements
+SpaceBeforeRangeBasedForLoopColon: true
+SpaceInEmptyBlock: false
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 2
+SpacesInAngles: false
+SpacesInConditionalStatement: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+SpaceBeforeSquareBrackets: false
+Standard: Auto
+TabWidth: 8
+UseCRLF: false
+UseTab: Never
+...
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el b/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el
new file mode 100644
index 000000000..b8c8f1e74
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.dir-locals.el
@@ -0,0 +1,10 @@
+((nil
+ (compile-command . "LC_ALL=C make -C $(git rev-parse --show-toplevel) -kw -j"))
+ (c-mode
+ (c-file-style . "linux")
+ (tab-width . 8)
+ (indent-tabs-mode . nil))
+)
+
+(if (file-exists-p (concat (dir-locals-find-file "./") "TAGS"))
+ (visit-tags-table (concat (dir-locals-find-file "./") "TAGS")))
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.formatignore b/fluent-bit/lib/librdkafka-2.1.0/.formatignore
new file mode 100644
index 000000000..7d4a45c7b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.formatignore
@@ -0,0 +1,18 @@
+# Files to not check/fix coding style for.
+# These files are imported from other sources and we want to maintain
+# them in the original form to make future updates easier.
+src/lz4.c
+src/lz4.h
+src/lz4frame.c
+src/lz4frame.h
+src/lz4hc.c
+src/lz4hc.h
+src/queue.h
+src/crc32c.c
+src/crc32c.h
+src/snappy.c
+src/snappy.h
+src/snappy_compat.h
+src/tinycthread.c
+src/tinycthread.h
+src/regexp.h
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros b/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros
new file mode 100644
index 000000000..a04366fd1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.gdbmacros
@@ -0,0 +1,19 @@
+
+# Print rd_kafka_msgq_t
+define dump_msgq
+ set $rkmq = $arg0
+ set $rkm = $rkmq.rkmq_msgs.tqh_first
+ set $exp_msgid = 0
+ set $cnt = 0
+ while $rkm != 0
+ set $msgid = $rkm.rkm_u.producer.msgid
+ printf "#%d ((rd_kafka_msgq_t *)%p) msgid %llu\n", $cnt, $rkm, $msgid
+ if $exp_msgid != 0 && $exp_msgid != $msgid
+ printf " ^ WARNING: expected msgid %llu, not %llu\n", $exp_msgid, $msgid
+ end
+ set $exp_msgid = $msgid + 1
+ set $rkm = $rkm.rkm_link.tqe_next
+ set $cnt++
+ end
+end
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE b/fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE
new file mode 100644
index 000000000..ed7b6165f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.github/ISSUE_TEMPLATE
@@ -0,0 +1,34 @@
+Read the FAQ first: https://github.com/edenhill/librdkafka/wiki/FAQ
+
+Do NOT create issues for questions, use the discussion forum: https://github.com/edenhill/librdkafka/discussions
+
+
+
+Description
+===========
+<your issue description goes here>
+
+
+How to reproduce
+================
+<your steps how to reproduce goes here, or remove section if not relevant>
+
+
+**IMPORTANT**: Always try to reproduce the issue on the latest released version (see https://github.com/edenhill/librdkafka/releases), if it can't be reproduced on the latest version the issue has been fixed.
+
+
+Checklist
+=========
+
+**IMPORTANT**: We will close issues where the checklist has not been completed.
+
+Please provide the following information:
+
+ - [x] librdkafka version (release number or git tag): `<REPLACE with e.g., v0.10.5 or a git sha. NOT "latest" or "current">`
+ - [ ] Apache Kafka version: `<REPLACE with e.g., 0.10.2.3>`
+ - [ ] librdkafka client configuration: `<REPLACE with e.g., message.timeout.ms=123, auto.reset.offset=earliest, ..>`
+ - [ ] Operating system: `<REPLACE with e.g., Centos 5 (x64)>`
+ - [ ] Provide logs (with `debug=..` as necessary) from librdkafka
+ - [ ] Provide broker log excerpts
+ - [ ] Critical issue
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/.gitignore
new file mode 100644
index 000000000..31c5061e3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.gitignore
@@ -0,0 +1,33 @@
+config.h
+config.log*
+config.cache
+Makefile.config
+rdkafka*.pc
+*~
+\#*
+*.o
+*.so
+*.so.?
+*.dylib
+*.a
+*.d
+librdkafka*.lds
+core
+vgcore.*
+*dSYM/
+*.offset
+SOURCES
+gmon.out
+*.gz
+*.tgz
+*.bz2
+*.deb
+*.rpm
+staging-docs
+tmp
+stats*.json
+test_report*.json
+cov-int
+gdbrun*.gdb
+TAGS
+vcpkg_installed
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml b/fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml
new file mode 100644
index 000000000..4ba05ab89
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.semaphore/project.yml
@@ -0,0 +1,43 @@
+# This file is managed by ServiceBot plugin - Semaphore. The content in this file is created using a common
+# template and configurations in service.yml.
+# Modifications in this file will be overwritten by generated content in the nightly run.
+# For more information, please refer to the page:
+# https://confluentinc.atlassian.net/wiki/spaces/Foundations/pages/2871296194/Add+SemaphoreCI
+apiVersion: v1alpha
+kind: Project
+metadata:
+ name: librdkafka
+ description: ""
+spec:
+ visibility: private
+ repository:
+ url: git@github.com:confluentinc/librdkafka.git
+ run_on:
+ - tags
+ - branches
+ pipeline_file: .semaphore/semaphore.yml
+ integration_type: github_app
+ status:
+ pipeline_files:
+ - path: .semaphore/semaphore.yml
+ level: pipeline
+ whitelist:
+ branches:
+ - master
+ - /semaphore.*/
+ - /dev_.*/
+ - /feature\/.*/
+ custom_permissions: true
+ debug_permissions:
+ - empty
+ - default_branch
+ - non_default_branch
+ - pull_request
+ - forked_pull_request
+ - tag
+ attach_permissions:
+ - default_branch
+ - non_default_branch
+ - pull_request
+ - forked_pull_request
+ - tag
diff --git a/fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml b/fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml
new file mode 100644
index 000000000..275bb76aa
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/.semaphore/semaphore.yml
@@ -0,0 +1,364 @@
+version: v1.0
+name: 'librdkafka build and release artifact pipeline'
+agent:
+ machine:
+ type: s1-prod-macos-arm64
+global_job_config:
+ prologue:
+ commands:
+ - checkout
+ - mkdir artifacts
+ - mkdir dest
+blocks:
+ - name: 'OSX arm64/m1'
+ dependencies: []
+ task:
+ agent:
+ machine:
+ type: s1-prod-macos-arm64
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-osx__arch-arm64__lnk-all
+ epilogue:
+ commands:
+ - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+ jobs:
+ - name: 'Build'
+ commands:
+ - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip
+ - make -j all examples check
+ - examples/rdkafka_example -X builtin.features
+ - otool -L src/librdkafka.dylib
+ - otool -L src-cpp/librdkafka++.dylib
+ - make -j -C tests build
+ - make -C tests run_local_quick
+ - DESTDIR="$PWD/dest" make install
+ - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .)
+
+
+ - name: 'OSX x64'
+ dependencies: []
+ task:
+ agent:
+ machine:
+ type: s1-prod-macos
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-osx__arch-x64__lnk-all
+ epilogue:
+ commands:
+ - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+ jobs:
+ - name: 'Build'
+ commands:
+ - ./configure --install-deps --source-deps-only --enable-static --disable-lz4-ext --enable-strip
+ - make -j all examples check
+ - examples/rdkafka_example -X builtin.features
+ - otool -L src/librdkafka.dylib
+ - otool -L src-cpp/librdkafka++.dylib
+ - make -j -C tests build
+ - make -C tests run_local_quick
+ - DESTDIR="$PWD/dest" make install
+ - (cd dest && tar cvzf ../artifacts/librdkafka.tgz .)
+
+
+ - name: 'Style check'
+ dependencies: []
+ skip:
+ # Skip for release tags, we don't want style checks
+ # to fail the release build.
+ when: "tag =~ '^v[0-9]\\.'"
+ task:
+ agent:
+ machine:
+ type: s1-prod-ubuntu20-04-amd64-2
+ jobs:
+ - name: 'Style check'
+ commands:
+ - sudo apt install -y clang-format-10 python3 python3-pip python3-setuptools
+ - python3 -m pip install -r packaging/tools/requirements.txt
+ - CLANG_FORMAT=clang-format-10 make style-check
+
+
+ - name: 'Build documentation'
+ dependencies: []
+ task:
+ agent:
+ machine:
+ type: s1-prod-ubuntu20-04-amd64-2
+ jobs:
+ - name: 'Generate documentation'
+ commands:
+ - sudo apt install -y doxygen graphviz
+ - make docs
+ - (cd staging-docs && tar cvzf ../artifacts/librdkafka-docs.tgz .)
+ - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/librdkafka-docs.tgz --destination artifacts/librdkafka-docs.tgz'
+
+
+ - name: 'Linux Ubuntu x64: source build'
+ dependencies: []
+ skip:
+ # Skip for release tags, we don't want flaky CI tests
+ # to fail the release build.
+ when: "tag =~ '^v[0-9]\\.'"
+ task:
+ agent:
+ machine:
+ type: s1-prod-ubuntu20-04-amd64-2
+ jobs:
+ - name: 'Build and integration tests'
+ commands:
+ - wget -O rapidjson-dev.deb https://launchpad.net/ubuntu/+archive/primary/+files/rapidjson-dev_1.1.0+dfsg2-3_all.deb
+ - sudo dpkg -i rapidjson-dev.deb
+ - python3 -m pip install -U pip
+ - python3 -m pip -V
+ - python3 -m pip install -r tests/requirements.txt
+ - ./configure --install-deps
+ # split these up
+ - ./packaging/tools/rdutcoverage.sh
+ - make copyright-check
+ - make -j all examples check
+ - echo "Verifying that CONFIGURATION.md does not have manual changes"
+ - git diff --exit-code CONFIGURATION.md
+ - examples/rdkafka_example -X builtin.features
+ - ldd src/librdkafka.so.1
+ - ldd src-cpp/librdkafka++.so.1
+ - make -j -C tests build
+ - make -C tests run_local_quick
+ - DESTDIR="$PWD/dest" make install
+ - (cd tests && python3 -m trivup.clusters.KafkaCluster --version 3.1.0 --cmd 'make quick')
+
+
+ - name: 'Linux x64: release artifact docker builds'
+ dependencies: []
+ run:
+ when: "tag =~ '^v[0-9]\\.'"
+ task:
+ agent:
+ machine:
+ type: s1-prod-ubuntu20-04-amd64-2
+ epilogue:
+ commands:
+ - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+ jobs:
+ - name: 'Build: centos6 glibc +gssapi'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-centos6__arch-x64__lnk-std__extra-gssapi
+ commands:
+ - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2010_x86_64 artifacts/librdkafka.tgz
+
+ - name: 'Build: centos6 glibc'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-centos6__arch-x64__lnk-all
+ commands:
+ - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2010_x86_64 artifacts/librdkafka.tgz
+
+ - name: 'Build: centos7 glibc +gssapi'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-centos7__arch-x64__lnk-std__extra-gssapi
+ commands:
+ - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2014_x86_64 artifacts/librdkafka.tgz
+
+ - name: 'Build: centos7 glibc'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-centos7__arch-x64__lnk-all
+ commands:
+ - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2014_x86_64 artifacts/librdkafka.tgz
+
+ - name: 'Build: alpine musl +gssapi'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-std__extra-gssapi
+ commands:
+ - packaging/tools/build-release-artifacts.sh alpine:3.16 artifacts/librdkafka.tgz
+
+ - name: 'Build: alpine musl'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-alpine__arch-x64__lnk-all
+ commands:
+ - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16 artifacts/librdkafka.tgz
+
+
+ - name: 'Linux arm64: release artifact docker builds'
+ dependencies: []
+ task:
+ agent:
+ machine:
+ type: s1-prod-ubuntu20-04-arm64-1
+ epilogue:
+ commands:
+ - '[[ -z $SEMAPHORE_GIT_TAG_NAME ]] || artifact push workflow artifacts/ --destination artifacts/${ARTIFACT_KEY}/'
+ jobs:
+ - name: 'Build: centos7 glibc +gssapi'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-std__extra-gssapi
+ commands:
+ - packaging/tools/build-release-artifacts.sh quay.io/pypa/manylinux2014_aarch64 artifacts/librdkafka.tgz
+
+ - name: 'Build: centos7 glibc'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-centos7__arch-arm64__lnk-all
+ commands:
+ - packaging/tools/build-release-artifacts.sh --disable-gssapi quay.io/pypa/manylinux2014_aarch64 artifacts/librdkafka.tgz
+
+ - name: 'Build: alpine musl +gssapi'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all__extra-gssapi
+ commands:
+ - packaging/tools/build-release-artifacts.sh alpine:3.16 artifacts/librdkafka.tgz
+
+ - name: 'Build: alpine musl'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-linux__dist-alpine__arch-arm64__lnk-all
+ commands:
+ - packaging/tools/build-release-artifacts.sh --disable-gssapi alpine:3.16 artifacts/librdkafka.tgz
+
+
+ - name: 'Windows x64: MinGW-w64'
+ dependencies: []
+ task:
+ agent:
+ machine:
+ type: s1-prod-windows
+ env_vars:
+ - name: CHERE_INVOKING
+ value: 'yes'
+ - name: MSYSTEM
+ value: UCRT64
+ prologue:
+ commands:
+ - cache restore msys2-x64-${Env:ARTIFACT_KEY}
+ # Set up msys2
+ - "& .\\win32\\setup-msys2.ps1"
+ - cache delete msys2-x64-${Env:ARTIFACT_KEY}
+ - cache store msys2-x64-${Env:ARTIFACT_KEY} c:/msys64
+ epilogue:
+ commands:
+ - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ }
+ jobs:
+ - name: 'Build: MinGW-w64 Dynamic'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-std
+ commands:
+ - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh ./artifacts/librdkafka.tgz'
+
+ - name: 'Build: MinGW-w64 Static'
+ env_vars:
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-windows__dist-mingw__arch-x64__lnk-static
+ commands:
+ - C:\msys64\usr\bin\bash -lc './packaging/mingw-w64/semaphoreci-build.sh --static ./artifacts/librdkafka.tgz'
+
+ - name: 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019'
+ dependencies: []
+ task:
+ agent:
+ machine:
+ type: s1-prod-windows
+ env_vars:
+ # Disable vcpkg telemetry
+ - name: VCPKG_DISABLE_METRICS
+ value: 'yes'
+ prologue:
+ commands:
+ # install vcpkg in the parent directory.
+ - pwd
+ - cd ..
+ # Restore vcpkg caches, if any.
+ - cache restore vcpkg-archives-$Env:ARTIFACT_KEY
+ # Setup vcpkg
+ - "& .\\librdkafka\\win32\\setup-vcpkg.ps1"
+ - cd librdkafka
+ - ..\vcpkg\vcpkg integrate install
+ # Install required packages.
+ - ..\vcpkg\vcpkg --feature-flags=versions install --triplet $Env:triplet
+ - cd ..
+ - pwd
+ # Store vcpkg caches
+ - ls vcpkg/
+ - echo $Env:VCPKG_ROOT
+ - cache delete vcpkg-archives-$Env:ARTIFACT_KEY
+ - cache store vcpkg-archives-$Env:ARTIFACT_KEY C:/Users/semaphore/AppData/Local/vcpkg/archives
+ - pwd
+ - cd librdkafka
+ epilogue:
+ commands:
+ - Get-ChildItem . -include *.dll -recurse
+ - Get-ChildItem . -include *.lib -recurse
+ - if ($env:SEMAPHORE_GIT_TAG_NAME -ne "") { artifact push workflow artifacts/ --destination artifacts/$Env:ARTIFACT_KEY/ }
+ jobs:
+ - name: 'Build: MSVC x64'
+ env_vars:
+ - name: triplet
+ value: x64-windows
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-windows__dist-msvc__arch-x64__lnk-std
+ commands:
+ - "& .\\win32\\msbuild.ps1 -platform x64"
+ - "& .\\win32\\package-zip.ps1 -platform x64"
+ - name: 'Build: MSVC x86'
+ env_vars:
+ - name: triplet
+ value: x86-windows
+ - name: ARTIFACT_KEY
+ value: p-librdkafka__plat-windows__dist-msvc__arch-x86__lnk-std
+ commands:
+ - "& .\\win32\\msbuild.ps1 -platform Win32"
+ - "& .\\win32\\package-zip.ps1 -platform Win32"
+
+ - name: 'Packaging'
+ dependencies:
+ - 'Build documentation'
+ - 'OSX arm64/m1'
+ - 'OSX x64'
+ - 'Linux x64: release artifact docker builds'
+ - 'Linux arm64: release artifact docker builds'
+ - 'Windows x64: MinGW-w64'
+ - 'Windows x64: Windows SDK 10.0 / MSVC v142 / VS 2019'
+ run:
+ when: "tag =~ '^v[0-9]\\.'"
+ task:
+ agent:
+ machine:
+ type: s1-prod-ubuntu20-04-amd64-2
+ jobs:
+ - name: 'Build NuGet and static packages'
+ commands:
+ # Get all artifacts from previous jobs in this workflow/pipeline.
+ - artifact pull workflow artifacts
+ - mkdir -p packages
+ # Prepare packaging tools
+ - cd packaging/nuget
+ - python3 -m pip install -U -r requirements.txt
+ # Create NuGet package
+ # We need --ignore-tag since the jobs don't add the tag to
+ # the artifact path, and they don't need to since these artifacts
+ # are part of the same workflow.
+ - ./release.py --directory ../../artifacts --ignore-tag --class NugetPackage ${SEMAPHORE_GIT_TAG_NAME}
+ - cp -v librdkafka.redist.*.nupkg ../../packages
+ # Create static package
+ - ./release.py --directory ../../artifacts --ignore-tag --class StaticPackage ${SEMAPHORE_GIT_TAG_NAME}
+ - cp -v librdkafka-static-bundle*.tgz ../../packages
+ - cd ../../
+ # Copy generated docs to packages for inclusion in the tar ball
+ - cp -v artifacts/librdkafka-docs.tgz packages/
+ # Maker super tar ball of all packages
+ - cd packages
+ - tar cvf librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}.tar .
+ # Provide some extra details
+ - ls -la
+ - sha256sum *
+ - cd ..
+ # Upload all packages to project artifact store
+ - artifact push project packages --destination librdkafka-packages-${SEMAPHORE_GIT_TAG_NAME}-${SEMAPHORE_WORKFLOW_ID}
+ - echo Thank you
diff --git a/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md b/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md
new file mode 100644
index 000000000..857526c6e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/CHANGELOG.md
@@ -0,0 +1,1218 @@
+# librdkafka v2.1.0
+
+librdkafka v2.1.0 is a feature release:
+
+* [KIP-320](https://cwiki.apache.org/confluence/display/KAFKA/KIP-320%3A+Allow+fetchers+to+detect+and+handle+log+truncation)
+ Allow fetchers to detect and handle log truncation (#4122).
+* Fix a reference count issue blocking the consumer from closing (#4187).
+* Fix a protocol issue with ListGroups API, where an extra
+ field was appended for API Versions greater than or equal to 3 (#4207).
+* Fix an issue with `max.poll.interval.ms`, where polling any queue would cause
+ the timeout to be reset (#4176).
+* Fix seek partition timeout, was one thousand times lower than the passed
+ value (#4230).
+* Fix multiple inconsistent behaviour in batch APIs during **pause** or **resume** operations (#4208).
+ See **Consumer fixes** section below for more information.
+* Update lz4.c from upstream. Fixes [CVE-2021-3520](https://github.com/advisories/GHSA-gmc7-pqv9-966m)
+ (by @filimonov, #4232).
+* Upgrade OpenSSL to v3.0.8 with various security fixes,
+ check the [release notes](https://www.openssl.org/news/cl30.txt) (#4215).
+
+## Enhancements
+
+ * Added `rd_kafka_topic_partition_get_leader_epoch()` (and `set..()`).
+ * Added partition leader epoch APIs:
+ - `rd_kafka_topic_partition_get_leader_epoch()` (and `set..()`)
+ - `rd_kafka_message_leader_epoch()`
+ - `rd_kafka_*assign()` and `rd_kafka_seek_partitions()` now supports
+ partitions with a leader epoch set.
+ - `rd_kafka_offsets_for_times()` will return per-partition leader-epochs.
+ - `leader_epoch`, `stored_leader_epoch`, and `committed_leader_epoch`
+ added to per-partition statistics.
+
+
+## Fixes
+
+### OpenSSL fixes
+
+ * Fixed OpenSSL static build not able to use external modules like FIPS
+ provider module.
+
+### Consumer fixes
+
+ * A reference count issue was blocking the consumer from closing.
+ The problem would happen when a partition is lost, because forcibly
+ unassigned from the consumer or if the corresponding topic is deleted.
+ * When using `rd_kafka_seek_partitions`, the remaining timeout was
+ converted from microseconds to milliseconds but the expected unit
+ for that parameter is microseconds.
+ * Fixed known issues related to Batch Consume APIs mentioned in v2.0.0
+ release notes.
+ * Fixed `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()`
+ intermittently updating `app_offset` and `store_offset` incorrectly when
+ **pause** or **resume** was being used for a partition.
+ * Fixed `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()`
+ intermittently skipping offsets when **pause** or **resume** was being
+ used for a partition.
+
+
+## Known Issues
+
+### Consume Batch API
+
+ * When `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` APIs are used with
+ any of the **seek**, **pause**, **resume** or **rebalancing** operation, `on_consume`
+ interceptors might be called incorrectly (maybe multiple times) for not consumed messages.
+
+
+
+# librdkafka v2.0.2
+
+librdkafka v2.0.2 is a bugfix release:
+
+* Fix OpenSSL version in Win32 nuget package (#4152).
+
+
+
+# librdkafka v2.0.1
+
+librdkafka v2.0.1 is a bugfix release:
+
+* Fixed nuget package for Linux ARM64 release (#4150).
+
+
+
+# librdkafka v2.0.0
+
+librdkafka v2.0.0 is a feature release:
+
+ * [KIP-88](https://cwiki.apache.org/confluence/display/KAFKA/KIP-88%3A+OffsetFetch+Protocol+Update)
+ OffsetFetch Protocol Update (#3995).
+ * [KIP-222](https://cwiki.apache.org/confluence/display/KAFKA/KIP-222+-+Add+Consumer+Group+operations+to+Admin+API)
+ Add Consumer Group operations to Admin API (started by @lesterfan, #3995).
+ * [KIP-518](https://cwiki.apache.org/confluence/display/KAFKA/KIP-518%3A+Allow+listing+consumer+groups+per+state)
+ Allow listing consumer groups per state (#3995).
+ * [KIP-396](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=97551484)
+ Partially implemented: support for AlterConsumerGroupOffsets
+ (started by @lesterfan, #3995).
+ * OpenSSL 3.0.x support - the maximum bundled OpenSSL version is now 3.0.7 (previously 1.1.1q).
+ * Fixes to the transactional and idempotent producer.
+
+
+## Upgrade considerations
+
+### OpenSSL 3.0.x
+
+#### OpenSSL default ciphers
+
+The introduction of OpenSSL 3.0.x in the self-contained librdkafka bundles
+changes the default set of available ciphers, in particular all obsolete
+or insecure ciphers and algorithms as listed in the
+OpenSSL [legacy](https://www.openssl.org/docs/man3.0/man7/OSSL_PROVIDER-legacy.html)
+manual page are now disabled by default.
+
+**WARNING**: These ciphers are disabled for security reasons and it is
+highly recommended NOT to use them.
+
+Should you need to use any of these old ciphers you'll need to explicitly
+enable the `legacy` provider by configuring `ssl.providers=default,legacy`
+on the librdkafka client.
+
+#### OpenSSL engines and providers
+
+OpenSSL 3.0.x deprecates the use of engines, which is being replaced by
+providers. As such librdkafka will emit a deprecation warning if
+`ssl.engine.location` is configured.
+
+OpenSSL providers may be configured with the new `ssl.providers`
+configuration property.
+
+### Broker TLS certificate hostname verification
+
+The default value for `ssl.endpoint.identification.algorithm` has been
+changed from `none` (no hostname verification) to `https`, which enables
+broker hostname verification (to counter man-in-the-middle
+impersonation attacks) by default.
+
+To restore the previous behaviour, set `ssl.endpoint.identification.algorithm` to `none`.
+
+## Known Issues
+
+### Poor Consumer batch API messaging guarantees
+
+The Consumer Batch APIs `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()`
+are not thread safe if `rkmessages_size` is greater than 1 and any of the **seek**,
+**pause**, **resume** or **rebalancing** operation is performed in parallel with any of
+the above APIs. Some of the messages might be lost, or erroneously returned to the
+application, in the above scenario.
+
+It is strongly recommended to use the Consumer Batch APIs and the mentioned
+operations in sequential order in order to get consistent result.
+
+For **rebalancing** operation to work in sequencial manner, please set `rebalance_cb`
+configuration property (refer [examples/rdkafka_complex_consumer_example.c]
+(examples/rdkafka_complex_consumer_example.c) for the help with the usage) for the consumer.
+
+## Enhancements
+
+ * Self-contained static libraries can now be built on Linux arm64 (#4005).
+ * Updated to zlib 1.2.13, zstd 1.5.2, and curl 7.86.0 in self-contained
+ librdkafka bundles.
+ * Added `on_broker_state_change()` interceptor
+ * The C++ API no longer returns strings by const value, which enables better move optimization in callers.
+ * Added `rd_kafka_sasl_set_credentials()` API to update SASL credentials.
+ * Setting `allow.auto.create.topics` will no longer give a warning if used by a producer, since that is an expected use case.
+ Improvement in documentation for this property.
+ * Added a `resolve_cb` configuration setting that permits using custom DNS resolution logic.
+ * Added `rd_kafka_mock_broker_error_stack_cnt()`.
+ * The librdkafka.redist NuGet package has been updated to have fewer external
+ dependencies for its bundled librdkafka builds, as everything but cyrus-sasl
+ is now built-in. There are bundled builds with and without linking to
+ cyrus-sasl for maximum compatibility.
+ * Admin API DescribeGroups() now provides the group instance id
+ for static members [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances) (#3995).
+
+
+## Fixes
+
+### General fixes
+
+ * Windows: couldn't read a PKCS#12 keystore correctly because binary mode
+ wasn't explicitly set and Windows defaults to text mode.
+ * Fixed memory leak when loading SSL certificates (@Mekk, #3930)
+ * Load all CA certificates from `ssl.ca.pem`, not just the first one.
+ * Each HTTP request made when using OAUTHBEARER OIDC would leak a small
+ amount of memory.
+
+### Transactional producer fixes
+
+ * When a PID epoch bump is requested and the producer is waiting
+ to reconnect to the transaction coordinator, a failure in a find coordinator
+ request could cause an assert to fail. This is fixed by retrying when the
+ coordinator is known (#4020).
+ * Transactional APIs (except `send_offsets_for_transaction()`) that
+ timeout due to low timeout_ms may now be resumed by calling the same API
+ again, as the operation continues in the background.
+ * For fatal idempotent producer errors that may be recovered by bumping the
+ epoch the current transaction must first be aborted prior to the epoch bump.
+ This is now handled correctly, which fixes issues seen with fenced
+ transactional producers on fatal idempotency errors.
+ * Timeouts for EndTxn requests (transaction commits and aborts) are now
+ automatically retried and the error raised to the application is also
+ a retriable error.
+ * TxnOffsetCommitRequests were retried immediately upon temporary errors in
+ `send_offsets_to_transactions()`, causing excessive network requests.
+ These retries are now delayed 500ms.
+ * If `init_transactions()` is called with an infinite timeout (-1),
+ the timeout will be limited to 2 * `transaction.timeout.ms`.
+ The application may retry and resume the call if a retriable error is
+ returned.
+
+
+### Consumer fixes
+
+ * Back-off and retry JoinGroup request if coordinator load is in progress.
+ * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()` skipping
+ other partitions' offsets intermittently when **seek**, **pause**, **resume**
+ or **rebalancing** is used for a partition.
+ * Fix `rd_kafka_consume_batch()` and `rd_kafka_consume_batch_queue()`
+ intermittently returing incorrect partitions' messages if **rebalancing**
+ happens during these operations.
+
+# librdkafka v1.9.2
+
+librdkafka v1.9.2 is a maintenance release:
+
+ * The SASL OAUTHBEAR OIDC POST field was sometimes truncated by one byte (#3192).
+ * The bundled version of OpenSSL has been upgraded to version 1.1.1q for non-Windows builds. Windows builds remain on OpenSSL 1.1.1n for the time being.
+ * The bundled version of Curl has been upgraded to version 7.84.0.
+
+
+
+# librdkafka v1.9.1
+
+librdkafka v1.9.1 is a maintenance release:
+
+ * The librdkafka.redist NuGet package now contains OSX M1/arm64 builds.
+ * Self-contained static libraries can now be built on OSX M1 too, thanks to
+ disabling curl's configure runtime check.
+
+
+
+# librdkafka v1.9.0
+
+librdkafka v1.9.0 is a feature release:
+
+ * Added KIP-768 OUATHBEARER OIDC support (by @jliunyu, #3560)
+ * Added KIP-140 Admin API ACL support (by @emasab, #2676)
+
+
+## Upgrade considerations
+
+ * Consumer:
+ `rd_kafka_offsets_store()` (et.al) will now return an error for any
+ partition that is not currently assigned (through `rd_kafka_*assign()`).
+ This prevents a race condition where an application would store offsets
+ after the assigned partitions had been revoked (which resets the stored
+ offset), that could cause these old stored offsets to be committed later
+ when the same partitions were assigned to this consumer again - effectively
+ overwriting any committed offsets by any consumers that were assigned the
+ same partitions previously. This would typically result in the offsets
+ rewinding and messages to be reprocessed.
+ As an extra effort to avoid this situation the stored offset is now
+ also reset when partitions are assigned (through `rd_kafka_*assign()`).
+ Applications that explicitly call `..offset*_store()` will now need
+ to handle the case where `RD_KAFKA_RESP_ERR__STATE` is returned
+ in the per-partition `.err` field - meaning the partition is no longer
+ assigned to this consumer and the offset could not be stored for commit.
+
+
+## Enhancements
+
+ * Improved producer queue scheduling. Fixes the performance regression
+ introduced in v1.7.0 for some produce patterns. (#3538, #2912)
+ * Windows: Added native Win32 IO/Queue scheduling. This removes the
+ internal TCP loopback connections that were previously used for timely
+ queue wakeups.
+ * Added `socket.connection.setup.timeout.ms` (default 30s).
+ The maximum time allowed for broker connection setups (TCP connection as
+ well as SSL and SASL handshakes) is now limited to this value.
+ This fixes the issue with stalled broker connections in the case of network
+ or load balancer problems.
+ The Java clients has an exponential backoff to this timeout which is
+ limited by `socket.connection.setup.timeout.max.ms` - this was not
+ implemented in librdkafka due to differences in connection handling and
+ `ERR__ALL_BROKERS_DOWN` error reporting. Having a lower initial connection
+ setup timeout and then increase the timeout for the next attempt would
+ yield possibly false-positive `ERR__ALL_BROKERS_DOWN` too early.
+ * SASL OAUTHBEARER refresh callbacks can now be scheduled for execution
+ on librdkafka's background thread. This solves the problem where an
+ application has a custom SASL OAUTHBEARER refresh callback and thus needs to
+ call `rd_kafka_poll()` (et.al.) at least once to trigger the
+ refresh callback before being able to connect to brokers.
+ With the new `rd_kafka_conf_enable_sasl_queue()` configuration API and
+ `rd_kafka_sasl_background_callbacks_enable()` the refresh callbacks
+ can now be triggered automatically on the librdkafka background thread.
+ * `rd_kafka_queue_get_background()` now creates the background thread
+ if not already created.
+ * Added `rd_kafka_consumer_close_queue()` and `rd_kafka_consumer_closed()`.
+ This allow applications and language bindings to implement asynchronous
+ consumer close.
+ * Bundled zlib upgraded to version 1.2.12.
+ * Bundled OpenSSL upgraded to 1.1.1n.
+ * Added `test.mock.broker.rtt` to simulate RTT/latency for mock brokers.
+
+
+## Fixes
+
+### General fixes
+
+ * Fix various 1 second delays due to internal broker threads blocking on IO
+ even though there are events to handle.
+ These delays could be seen randomly in any of the non produce/consume
+ request APIs, such as `commit_transaction()`, `list_groups()`, etc.
+ * Windows: some applications would crash with an error message like
+ `no OPENSSL_Applink()` written to the console if `ssl.keystore.location`
+ was configured.
+ This regression was introduced in v1.8.0 due to use of vcpkgs and how
+ keystore file was read. #3554.
+ * Windows 32-bit only: 64-bit atomic reads were in fact not atomic and could
+ in rare circumstances yield incorrect values.
+ One manifestation of this issue was the `max.poll.interval.ms` consumer
+ timer expiring even though the application was polling according to profile.
+ Fixed by @WhiteWind (#3815).
+ * `rd_kafka_clusterid()` would previously fail with timeout if
+ called on cluster with no visible topics (#3620).
+ The clusterid is now returned as soon as metadata has been retrieved.
+ * Fix hang in `rd_kafka_list_groups()` if there are no available brokers
+ to connect to (#3705).
+ * Millisecond timeouts (`timeout_ms`) in various APIs, such as `rd_kafka_poll()`,
+ was limited to roughly 36 hours before wrapping. (#3034)
+ * If a metadata request triggered by `rd_kafka_metadata()` or consumer group rebalancing
+ encountered a non-retriable error it would not be propagated to the caller and thus
+ cause a stall or timeout, this has now been fixed. (@aiquestion, #3625)
+ * AdminAPI `DeleteGroups()` and `DeleteConsumerGroupOffsets()`:
+ if the given coordinator connection was not up by the time these calls were
+ initiated and the first connection attempt failed then no further connection
+ attempts were performed, ulimately leading to the calls timing out.
+ This is now fixed by keep retrying to connect to the group coordinator
+ until the connection is successful or the call times out.
+ Additionally, the coordinator will be now re-queried once per second until
+ the coordinator comes up or the call times out, to detect change in
+ coordinators.
+ * Mock cluster `rd_kafka_mock_broker_set_down()` would previously
+ accept and then disconnect new connections, it now refuses new connections.
+
+
+### Consumer fixes
+
+ * `rd_kafka_offsets_store()` (et.al) will now return an error for any
+ partition that is not currently assigned (through `rd_kafka_*assign()`).
+ See **Upgrade considerations** above for more information.
+ * `rd_kafka_*assign()` will now reset/clear the stored offset.
+ See **Upgrade considerations** above for more information.
+ * `seek()` followed by `pause()` would overwrite the seeked offset when
+ later calling `resume()`. This is now fixed. (#3471).
+ **Note**: Avoid storing offsets (`offsets_store()`) after calling
+ `seek()` as this may later interfere with resuming a paused partition,
+ instead store offsets prior to calling seek.
+ * A `ERR_MSG_SIZE_TOO_LARGE` consumer error would previously be raised
+ if the consumer received a maximum sized FetchResponse only containing
+ (transaction) aborted messages with no control messages. The fetching did
+ not stop, but some applications would terminate upon receiving this error.
+ No error is now raised in this case. (#2993)
+ Thanks to @jacobmikesell for providing an application to reproduce the
+ issue.
+ * The consumer no longer backs off the next fetch request (default 500ms) when
+ the parsed fetch response is truncated (which is a valid case).
+ This should speed up the message fetch rate in case of maximum sized
+ fetch responses.
+ * Fix consumer crash (`assert: rkbuf->rkbuf_rkb`) when parsing
+ malformed JoinGroupResponse consumer group metadata state.
+ * Fix crash (`cant handle op type`) when using `consume_batch_queue()` (et.al)
+ and an OAUTHBEARER refresh callback was set.
+ The callback is now triggered by the consume call. (#3263)
+ * Fix `partition.assignment.strategy` ordering when multiple strategies are configured.
+ If there is more than one eligible strategy, preference is determined by the
+ configured order of strategies. The partitions are assigned to group members according
+ to the strategy order preference now. (#3818)
+ * Any form of unassign*() (absolute or incremental) is now allowed during
+ consumer close rebalancing and they're all treated as absolute unassigns.
+ (@kevinconaway)
+
+
+### Transactional producer fixes
+
+ * Fix message loss in idempotent/transactional producer.
+ A corner case has been identified that may cause idempotent/transactional
+ messages to be lost despite being reported as successfully delivered:
+ During cluster instability a restarting broker may report existing topics
+ as non-existent for some time before it is able to acquire up to date
+ cluster and topic metadata.
+ If an idempotent/transactional producer updates its topic metadata cache
+ from such a broker the producer will consider the topic to be removed from
+ the cluster and thus remove its local partition objects for the given topic.
+ This also removes the internal message sequence number counter for the given
+ partitions.
+ If the producer later receives proper topic metadata for the cluster the
+ previously "removed" topics will be rediscovered and new partition objects
+ will be created in the producer. These new partition objects, with no
+ knowledge of previous incarnations, would start counting partition messages
+ at zero again.
+ If new messages were produced for these partitions by the same producer
+ instance, the same message sequence numbers would be sent to the broker.
+ If the broker still maintains state for the producer's PID and Epoch it could
+ deem that these messages with reused sequence numbers had already been
+ written to the log and treat them as legit duplicates.
+ This would seem to the producer that these new messages were successfully
+ written to the partition log by the broker when they were in fact discarded
+ as duplicates, leading to silent message loss.
+ The fix included in this release is to save the per-partition idempotency
+ state when a partition is removed, and then recover and use that saved
+ state if the partition comes back at a later time.
+ * The transactional producer would retry (re)initializing its PID if a
+ `PRODUCER_FENCED` error was returned from the
+ broker (added in Apache Kafka 2.8), which could cause the producer to
+ seemingly hang.
+ This error code is now correctly handled by raising a fatal error.
+ * If the given group coordinator connection was not up by the time
+ `send_offsets_to_transactions()` was called, and the first connection
+ attempt failed then no further connection attempts were performed, ulimately
+ leading to `send_offsets_to_transactions()` timing out, and possibly
+ also the transaction timing out on the transaction coordinator.
+ This is now fixed by keep retrying to connect to the group coordinator
+ until the connection is successful or the call times out.
+ Additionally, the coordinator will be now re-queried once per second until
+ the coordinator comes up or the call times out, to detect change in
+ coordinators.
+
+
+### Producer fixes
+
+ * Improved producer queue wakeup scheduling. This should significantly
+ decrease the number of wakeups and thus syscalls for high message rate
+ producers. (#3538, #2912)
+ * The logic for enforcing that `message.timeout.ms` is greather than
+ an explicitly configured `linger.ms` was incorrect and instead of
+ erroring out early the lingering time was automatically adjusted to the
+ message timeout, ignoring the configured `linger.ms`.
+ This has now been fixed so that an error is returned when instantiating the
+ producer. Thanks to @larry-cdn77 for analysis and test-cases. (#3709)
+
+
+# librdkafka v1.8.2
+
+librdkafka v1.8.2 is a maintenance release.
+
+## Enhancements
+
+ * Added `ssl.ca.pem` to add CA certificate by PEM string. (#2380)
+ * Prebuilt binaries for Mac OSX now contain statically linked OpenSSL v1.1.1l.
+ Previously the OpenSSL version was either v1.1.1 or v1.0.2 depending on
+ build type.
+
+## Fixes
+
+ * The `librdkafka.redist` 1.8.0 package had two flaws:
+ - the linux-arm64 .so build was a linux-x64 build.
+ - the included Windows MSVC 140 runtimes for x64 were infact x86.
+ The release script has been updated to verify the architectures of
+ provided artifacts to avoid this happening in the future.
+ * Prebuilt binaries for Mac OSX Sierra (10.12) and older are no longer provided.
+ This affects [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go).
+ * Some of the prebuilt binaries for Linux were built on Ubuntu 14.04,
+ these builds are now performed on Ubuntu 16.04 instead.
+ This may affect users on ancient Linux distributions.
+ * It was not possible to configure `ssl.ca.location` on OSX, the property
+ would automatically revert back to `probe` (default value).
+ This regression was introduced in v1.8.0. (#3566)
+ * librdkafka's internal timers would not start if the timeout was set to 0,
+ which would result in some timeout operations not being enforced correctly,
+ e.g., the transactional producer API timeouts.
+ These timers are now started with a timeout of 1 microsecond.
+
+### Transactional producer fixes
+
+ * Upon quick repeated leader changes the transactional producer could receive
+ an `OUT_OF_ORDER_SEQUENCE` error from the broker, which triggered an
+ Epoch bump on the producer resulting in an InitProducerIdRequest being sent
+ to the transaction coordinator in the middle of a transaction.
+ This request would start a new transaction on the coordinator, but the
+ producer would still think (erroneously) it was in current transaction.
+ Any messages produced in the current transaction prior to this event would
+ be silently lost when the application committed the transaction, leading
+ to message loss.
+ This has been fixed by setting the Abortable transaction error state
+ in the producer. #3575.
+ * The transactional producer could stall during a transaction if the transaction
+ coordinator changed while adding offsets to the transaction (send_offsets_to_transaction()).
+ This stall lasted until the coordinator connection went down, the
+ transaction timed out, transaction was aborted, or messages were produced
+ to a new partition, whichever came first. #3571.
+
+
+
+*Note: there was no v1.8.1 librdkafka release*
+
+
+# librdkafka v1.8.0
+
+librdkafka v1.8.0 is a security release:
+
+ * Upgrade bundled zlib version from 1.2.8 to 1.2.11 in the `librdkafka.redist`
+ NuGet package. The updated zlib version fixes CVEs:
+ CVE-2016-9840, CVE-2016-9841, CVE-2016-9842, CVE-2016-9843
+ See https://github.com/edenhill/librdkafka/issues/2934 for more information.
+ * librdkafka now uses [vcpkg](https://vcpkg.io/) for up-to-date Windows
+ dependencies in the `librdkafka.redist` NuGet package:
+ OpenSSL 1.1.1l, zlib 1.2.11, zstd 1.5.0.
+ * The upstream dependency (OpenSSL, zstd, zlib) source archive checksums are
+ now verified when building with `./configure --install-deps`.
+ These builds are used by the librdkafka builds bundled with
+ confluent-kafka-go, confluent-kafka-python and confluent-kafka-dotnet.
+
+
+## Enhancements
+
+ * Producer `flush()` now overrides the `linger.ms` setting for the duration
+ of the `flush()` call, effectively triggering immediate transmission of
+ queued messages. (#3489)
+
+## Fixes
+
+### General fixes
+
+ * Correctly detect presence of zlib via compilation check. (Chris Novakovic)
+ * `ERR__ALL_BROKERS_DOWN` is no longer emitted when the coordinator
+ connection goes down, only when all standard named brokers have been tried.
+ This fixes the issue with `ERR__ALL_BROKERS_DOWN` being triggered on
+ `consumer_close()`. It is also now only emitted if the connection was fully
+ up (past handshake), and not just connected.
+ * `rd_kafka_query_watermark_offsets()`, `rd_kafka_offsets_for_times()`,
+ `consumer_lag` metric, and `auto.offset.reset` now honour
+ `isolation.level` and will return the Last Stable Offset (LSO)
+ when `isolation.level` is set to `read_committed` (default), rather than
+ the uncommitted high-watermark when it is set to `read_uncommitted`. (#3423)
+ * SASL GSSAPI is now usable when `sasl.kerberos.min.time.before.relogin`
+ is set to 0 - which disables ticket refreshes (by @mpekalski, #3431).
+ * Rename internal crc32c() symbol to rd_crc32c() to avoid conflict with
+ other static libraries (#3421).
+ * `txidle` and `rxidle` in the statistics object was emitted as 18446744073709551615 when no idle was known. -1 is now emitted instead. (#3519)
+
+
+### Consumer fixes
+
+ * Automatically retry offset commits on `ERR_REQUEST_TIMED_OUT`,
+ `ERR_COORDINATOR_NOT_AVAILABLE`, and `ERR_NOT_COORDINATOR` (#3398).
+ Offset commits will be retried twice.
+ * Timed auto commits did not work when only using assign() and not subscribe().
+ This regression was introduced in v1.7.0.
+ * If the topics matching the current subscription changed (or the application
+ updated the subscription) while there was an outstanding JoinGroup or
+ SyncGroup request, an additional request would sometimes be sent before
+ handling the response of the first. This in turn lead to internal state
+ issues that could cause a crash or malbehaviour.
+ The consumer will now wait for any outstanding JoinGroup or SyncGroup
+ responses before re-joining the group.
+ * `auto.offset.reset` could previously be triggered by temporary errors,
+ such as disconnects and timeouts (after the two retries are exhausted).
+ This is now fixed so that the auto offset reset policy is only triggered
+ for permanent errors.
+ * The error that triggers `auto.offset.reset` is now logged to help the
+ application owner identify the reason of the reset.
+ * If a rebalance takes longer than a consumer's `session.timeout.ms`, the
+ consumer will remain in the group as long as it receives heartbeat responses
+ from the broker.
+
+
+### Admin fixes
+
+ * `DeleteRecords()` could crash if one of the underlying requests
+ (for a given partition leader) failed at the transport level (e.g., timeout).
+ (#3476).
+
+
+
+# librdkafka v1.7.0
+
+librdkafka v1.7.0 is feature release:
+
+ * [KIP-360](https://cwiki.apache.org/confluence/pages/viewpage.action?pageId=89068820) - Improve reliability of transactional producer.
+ Requires Apache Kafka 2.5 or later.
+ * OpenSSL Engine support (`ssl.engine.location`) by @adinigam and @ajbarb.
+
+
+## Enhancements
+
+ * Added `connections.max.idle.ms` to automatically close idle broker
+ connections.
+ This feature is disabled by default unless `bootstrap.servers` contains
+ the string `azure` in which case the default is set to <4 minutes to improve
+ connection reliability and circumvent limitations with the Azure load
+ balancers (see #3109 for more information).
+ * Bumped to OpenSSL 1.1.1k in binary librdkafka artifacts.
+ * The binary librdkafka artifacts for Alpine are now using Alpine 3.12.
+ OpenSSL 1.1.1k.
+ * Improved static librdkafka Windows builds using MinGW (@neptoess, #3130).
+ * The `librdkafka.redist` NuGet package now has updated zlib, zstd and
+ OpenSSL versions (from vcpkg).
+
+
+## Security considerations
+
+ * The zlib version bundled with the `librdkafka.redist` NuGet package has now been upgraded
+ from zlib 1.2.8 to 1.2.11, fixing the following CVEs:
+ * CVE-2016-9840: undefined behaviour (compiler dependent) in inflate (decompression) code: this is used by the librdkafka consumer. Risk of successfully exploitation through consumed messages is eastimated very low.
+ * CVE-2016-9841: undefined behaviour (compiler dependent) in inflate code: this is used by the librdkafka consumer. Risk of successfully exploitation through consumed messages is eastimated very low.
+ * CVE-2016-9842: undefined behaviour in inflateMark(): this API is not used by librdkafka.
+ * CVE-2016-9843: issue in crc32_big() which is called from crc32_z(): this API is not used by librdkafka.
+
+## Upgrade considerations
+
+ * The C++ `oauthbearer_token_refresh_cb()` was missing a `Handle *`
+ argument that has now been added. This is a breaking change but the original
+ function signature is considered a bug.
+ This change only affects C++ OAuth developers.
+ * [KIP-735](https://cwiki.apache.org/confluence/display/KAFKA/KIP-735%3A+Increase+default+consumer+session+timeout) The consumer `session.timeout.ms`
+ default was changed from 10 to 45 seconds to make consumer groups more
+ robust and less sensitive to temporary network and cluster issues.
+ * Statistics: `consumer_lag` is now using the `committed_offset`,
+ while the new `consumer_lag_stored` is using `stored_offset`
+ (offset to be committed).
+ This is more correct than the previous `consumer_lag` which was using
+ either `committed_offset` or `app_offset` (last message passed
+ to application).
+ * The `librdkafka.redist` NuGet package is now built with MSVC runtime v140
+ (VS 2015). Previous versions were built with MSVC runtime v120 (VS 2013).
+
+
+## Fixes
+
+### General fixes
+
+ * Fix accesses to freed metadata cache mutexes on client termination (#3279)
+ * There was a race condition on receiving updated metadata where a broker id
+ update (such as bootstrap to proper broker transformation) could finish after
+ the topic metadata cache was updated, leading to existing brokers seemingly
+ being not available.
+ One occurrence of this issue was query_watermark_offsets() that could return
+ `ERR__UNKNOWN_PARTITION` for existing partitions shortly after the
+ client instance was created.
+ * The OpenSSL context is now initialized with `TLS_client_method()`
+ (on OpenSSL >= 1.1.0) instead of the deprecated and outdated
+ `SSLv23_client_method()`.
+ * The initial cluster connection on client instance creation could sometimes
+ be delayed up to 1 second if a `group.id` or `transactional.id`
+ was configured (#3305).
+ * Speed up triggering of new broker connections in certain cases by exiting
+ the broker thread io/op poll loop when a wakeup op is received.
+ * SASL GSSAPI: The Kerberos kinit refresh command was triggered from
+ `rd_kafka_new()` which made this call blocking if the refresh command
+ was taking long. The refresh is now performed by the background rdkafka
+ main thread.
+ * Fix busy-loop (100% CPU on the broker threads) during the handshake phase
+ of an SSL connection.
+ * Disconnects during SSL handshake are now propagated as transport errors
+ rather than SSL errors, since these disconnects are at the transport level
+ (e.g., incorrect listener, flaky load balancer, etc) and not due to SSL
+ issues.
+ * Increment metadata fast refresh interval backoff exponentially (@ajbarb, #3237).
+ * Unthrottled requests are no longer counted in the `brokers[].throttle`
+ statistics object.
+ * Log CONFWARN warning when global topic configuration properties
+ are overwritten by explicitly setting a `default_topic_conf`.
+
+### Consumer fixes
+
+ * If a rebalance happened during a `consume_batch..()` call the already
+ accumulated messages for revoked partitions were not purged, which would
+ pass messages to the application for partitions that were no longer owned
+ by the consumer. Fixed by @jliunyu. #3340.
+ * Fix balancing and reassignment issues with the cooperative-sticky assignor.
+ #3306.
+ * Fix incorrect detection of first rebalance in sticky assignor (@hallfox).
+ * Aborted transactions with no messages produced to a partition could
+ cause further successfully committed messages in the same Fetch response to
+ be ignored, resulting in consumer-side message loss.
+ A log message along the lines `Abort txn ctrl msg bad order at offset
+ 7501: expected before or at 7702: messages in aborted transactions may be delivered to the application`
+ would be seen.
+ This is a rare occurrence where a transactional producer would register with
+ the partition but not produce any messages before aborting the transaction.
+ * The consumer group deemed cached metadata up to date by checking
+ `topic.metadata.refresh.interval.ms`: if this property was set too low
+ it would cause cached metadata to be unusable and new metadata to be fetched,
+ which could delay the time it took for a rebalance to settle.
+ It now correctly uses `metadata.max.age.ms` instead.
+ * The consumer group timed auto commit would attempt commits during rebalances,
+ which could result in "Illegal generation" errors. This is now fixed, the
+ timed auto committer is only employed in the steady state when no rebalances
+ are taking places. Offsets are still auto committed when partitions are
+ revoked.
+ * Retriable FindCoordinatorRequest errors are no longer propagated to
+ the application as they are retried automatically.
+ * Fix rare crash (assert `rktp_started`) on consumer termination
+ (introduced in v1.6.0).
+ * Fix unaligned access and possibly corrupted snappy decompression when
+ building with MSVC (@azat)
+ * A consumer configured with the `cooperative-sticky` assignor did
+ not actively Leave the group on unsubscribe(). This delayed the
+ rebalance for the remaining group members by up to `session.timeout.ms`.
+ * The current subscription list was sometimes leaked when unsubscribing.
+
+### Producer fixes
+
+ * The timeout value of `flush()` was not respected when delivery reports
+ were scheduled as events (such as for confluent-kafka-go) rather than
+ callbacks.
+ * There was a race conditition in `purge()` which could cause newly
+ created partition objects, or partitions that were changing leaders, to
+ not have their message queues purged. This could cause
+ `abort_transaction()` to time out. This issue is now fixed.
+ * In certain high-thruput produce rate patterns producing could stall for
+ 1 second, regardless of `linger.ms`, due to rate-limiting of internal
+ queue wakeups. This is now fixed by not rate-limiting queue wakeups but
+ instead limiting them to one wakeup per queue reader poll. #2912.
+
+### Transactional Producer fixes
+
+ * KIP-360: Fatal Idempotent producer errors are now recoverable by the
+ transactional producer and will raise a `txn_requires_abort()` error.
+ * If the cluster went down between `produce()` and `commit_transaction()`
+ and before any partitions had been registered with the coordinator, the
+ messages would time out but the commit would succeed because nothing
+ had been sent to the coordinator. This is now fixed.
+ * If the current transaction failed while `commit_transaction()` was
+ checking the current transaction state an invalid state transaction could
+ occur which in turn would trigger a assertion crash.
+ This issue showed up as "Invalid txn state transition: .." crashes, and is
+ now fixed by properly synchronizing both checking and transition of state.
+
+
+
+# librdkafka v1.6.1
+
+librdkafka v1.6.1 is a maintenance release.
+
+## Upgrade considerations
+
+ * Fatal idempotent producer errors are now also fatal to the transactional
+ producer. This is a necessary step to maintain data integrity prior to
+ librdkafka supporting KIP-360. Applications should check any transactional
+ API errors for the is_fatal flag and decommission the transactional producer
+ if the flag is set.
+ * The consumer error raised by `auto.offset.reset=error` now has error-code
+ set to `ERR__AUTO_OFFSET_RESET` to allow an application to differentiate
+ between auto offset resets and other consumer errors.
+
+
+## Fixes
+
+### General fixes
+
+ * Admin API and transactional `send_offsets_to_transaction()` coordinator
+ requests, such as TxnOffsetCommitRequest, could in rare cases be sent
+ multiple times which could cause a crash.
+ * `ssl.ca.location=probe` is now enabled by default on Mac OSX since the
+ librdkafka-bundled OpenSSL might not have the same default CA search paths
+ as the system or brew installed OpenSSL. Probing scans all known locations.
+
+### Transactional Producer fixes
+
+ * Fatal idempotent producer errors are now also fatal to the transactional
+ producer.
+ * The transactional producer could crash if the transaction failed while
+ `send_offsets_to_transaction()` was called.
+ * Group coordinator requests for transactional
+ `send_offsets_to_transaction()` calls would leak memory if the
+ underlying request was attempted to be sent after the transaction had
+ failed.
+ * When gradually producing to multiple partitions (resulting in multiple
+ underlying AddPartitionsToTxnRequests) subsequent partitions could get
+ stuck in pending state under certain conditions. These pending partitions
+ would not send queued messages to the broker and eventually trigger
+ message timeouts, failing the current transaction. This is now fixed.
+ * Committing an empty transaction (no messages were produced and no
+ offsets were sent) would previously raise a fatal error due to invalid state
+ on the transaction coordinator. We now allow empty/no-op transactions to
+ be committed.
+
+### Consumer fixes
+
+ * The consumer will now retry indefinitely (or until the assignment is changed)
+ to retrieve committed offsets. This fixes the issue where only two retries
+ were attempted when outstanding transactions were blocking OffsetFetch
+ requests with `ERR_UNSTABLE_OFFSET_COMMIT`. #3265
+
+
+
+
+
+# librdkafka v1.6.0
+
+librdkafka v1.6.0 is feature release:
+
+ * [KIP-429 Incremental rebalancing](https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafka+Consumer+Incremental+Rebalance+Protocol) with sticky
+ consumer group partition assignor (KIP-54) (by @mhowlett).
+ * [KIP-480 Sticky producer partitioning](https://cwiki.apache.org/confluence/display/KAFKA/KIP-480%3A+Sticky+Partitioner) (`sticky.partitioning.linger.ms`) -
+ achieves higher throughput and lower latency through sticky selection
+ of random partition (by @abbycriswell).
+ * AdminAPI: Add support for `DeleteRecords()`, `DeleteGroups()` and
+ `DeleteConsumerGroupOffsets()` (by @gridaphobe)
+ * [KIP-447 Producer scalability for exactly once semantics](https://cwiki.apache.org/confluence/display/KAFKA/KIP-447%3A+Producer+scalability+for+exactly+once+semantics) -
+ allows a single transactional producer to be used for multiple input
+ partitions. Requires Apache Kafka 2.5 or later.
+ * Transactional producer fixes and improvements, see **Transactional Producer fixes** below.
+ * The [librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/)
+ NuGet package now supports Linux ARM64/Aarch64.
+
+
+## Upgrade considerations
+
+ * Sticky producer partitioning (`sticky.partitioning.linger.ms`) is
+ enabled by default (10 milliseconds) which affects the distribution of
+ randomly partitioned messages, where previously these messages would be
+ evenly distributed over the available partitions they are now partitioned
+ to a single partition for the duration of the sticky time
+ (10 milliseconds by default) before a new random sticky partition
+ is selected.
+ * The new KIP-447 transactional producer scalability guarantees are only
+ supported on Apache Kafka 2.5 or later, on earlier releases you will
+ need to use one producer per input partition for EOS. This limitation
+ is not enforced by the producer or broker.
+ * Error handling for the transactional producer has been improved, see
+ the **Transactional Producer fixes** below for more information.
+
+
+## Known issues
+
+ * The Transactional Producer's API timeout handling is inconsistent with the
+ underlying protocol requests, it is therefore strongly recommended that
+ applications call `rd_kafka_commit_transaction()` and
+ `rd_kafka_abort_transaction()` with the `timeout_ms` parameter
+ set to `-1`, which will use the remaining transaction timeout.
+
+
+## Enhancements
+
+ * KIP-107, KIP-204: AdminAPI: Added `DeleteRecords()` (by @gridaphobe).
+ * KIP-229: AdminAPI: Added `DeleteGroups()` (by @gridaphobe).
+ * KIP-496: AdminAPI: Added `DeleteConsumerGroupOffsets()`.
+ * KIP-464: AdminAPI: Added support for broker-side default partition count
+ and replication factor for `CreateTopics()`.
+ * Windows: Added `ssl.ca.certificate.stores` to specify a list of
+ Windows Certificate Stores to read CA certificates from, e.g.,
+ `CA,Root`. `Root` remains the default store.
+ * Use reentrant `rand_r()` on supporting platforms which decreases lock
+ contention (@azat).
+ * Added `assignor` debug context for troubleshooting consumer partition
+ assignments.
+ * Updated to OpenSSL v1.1.1i when building dependencies.
+ * Update bundled lz4 (used when `./configure --disable-lz4-ext`) to v1.9.3
+ which has vast performance improvements.
+ * Added `rd_kafka_conf_get_default_topic_conf()` to retrieve the
+ default topic configuration object from a global configuration object.
+ * Added `conf` debugging context to `debug` - shows set configuration
+ properties on client and topic instantiation. Sensitive properties
+ are redacted.
+ * Added `rd_kafka_queue_yield()` to cancel a blocking queue call.
+ * Will now log a warning when multiple ClusterIds are seen, which is an
+ indication that the client might be erroneously configured to connect to
+ multiple clusters which is not supported.
+ * Added `rd_kafka_seek_partitions()` to seek multiple partitions to
+ per-partition specific offsets.
+
+
+## Fixes
+
+### General fixes
+
+ * Fix a use-after-free crash when certain coordinator requests were retried.
+ * The C++ `oauthbearer_set_token()` function would call `free()` on
+ a `new`-created pointer, possibly leading to crashes or heap corruption (#3194)
+
+### Consumer fixes
+
+ * The consumer assignment and consumer group implementations have been
+ decoupled, simplified and made more strict and robust. This will sort out
+ a number of edge cases for the consumer where the behaviour was previously
+ undefined.
+ * Partition fetch state was not set to STOPPED if OffsetCommit failed.
+ * The session timeout is now enforced locally also when the coordinator
+ connection is down, which was not previously the case.
+
+
+### Transactional Producer fixes
+
+ * Transaction commit or abort failures on the broker, such as when the
+ producer was fenced by a newer instance, were not propagated to the
+ application resulting in failed commits seeming successful.
+ This was a critical race condition for applications that had a delay after
+ producing messages (or sendings offsets) before committing or
+ aborting the transaction. This issue has now been fixed and test coverage
+ improved.
+ * The transactional producer API would return `RD_KAFKA_RESP_ERR__STATE`
+ when API calls were attempted after the transaction had failed, we now
+ try to return the error that caused the transaction to fail in the first
+ place, such as `RD_KAFKA_RESP_ERR__FENCED` when the producer has
+ been fenced, or `RD_KAFKA_RESP_ERR__TIMED_OUT` when the transaction
+ has timed out.
+ * Transactional producer retry count for transactional control protocol
+ requests has been increased from 3 to infinite, retriable errors
+ are now automatically retried by the producer until success or the
+ transaction timeout is exceeded. This fixes the case where
+ `rd_kafka_send_offsets_to_transaction()` would fail the current
+ transaction into an abortable state when `CONCURRENT_TRANSACTIONS` was
+ returned by the broker (which is a transient error) and the 3 retries
+ were exhausted.
+
+
+### Producer fixes
+
+ * Calling `rd_kafka_topic_new()` with a topic config object with
+ `message.timeout.ms` set could sometimes adjust the global `linger.ms`
+ property (if not explicitly configured) which was not desired, this is now
+ fixed and the auto adjustment is only done based on the
+ `default_topic_conf` at producer creation.
+ * `rd_kafka_flush()` could previously return `RD_KAFKA_RESP_ERR__TIMED_OUT`
+ just as the timeout was reached if the messages had been flushed but
+ there were now no more messages. This has been fixed.
+
+
+
+
+# librdkafka v1.5.3
+
+librdkafka v1.5.3 is a maintenance release.
+
+## Upgrade considerations
+
+ * CentOS 6 is now EOL and is no longer included in binary librdkafka packages,
+ such as NuGet.
+
+## Fixes
+
+### General fixes
+
+ * Fix a use-after-free crash when certain coordinator requests were retried.
+ * Coordinator requests could be left uncollected on instance destroy which
+ could lead to hang.
+ * Fix rare 1 second stalls by forcing rdkafka main thread wakeup when a new
+ next-timer-to-be-fired is scheduled.
+ * Fix additional cases where broker-side automatic topic creation might be
+ triggered unexpectedly.
+ * AdminAPI: The operation_timeout (on-broker timeout) previously defaulted to 0,
+ but now defaults to `socket.timeout.ms` (60s).
+ * Fix possible crash for Admin API protocol requests that fail at the
+ transport layer or prior to sending.
+
+
+### Consumer fixes
+
+ * Consumer would not filter out messages for aborted transactions
+ if the messages were compressed (#3020).
+ * Consumer destroy without prior `close()` could hang in certain
+ cgrp states (@gridaphobe, #3127).
+ * Fix possible null dereference in `Message::errstr()` (#3140).
+ * The `roundrobin` partition assignment strategy could get stuck in an
+ endless loop or generate uneven assignments in case the group members
+ had asymmetric subscriptions (e.g., c1 subscribes to t1,t2 while c2
+ subscribes to t2,t3). (#3159)
+ * Mixing committed and logical or absolute offsets in the partitions
+ passed to `rd_kafka_assign()` would in previous released ignore the
+ logical or absolute offsets and use the committed offsets for all partitions.
+ This is now fixed. (#2938)
+
+
+
+
+# librdkafka v1.5.2
+
+librdkafka v1.5.2 is a maintenance release.
+
+
+## Upgrade considerations
+
+ * The default value for the producer configuration property `retries` has
+ been increased from 2 to infinity, effectively limiting Produce retries to
+ only `message.timeout.ms`.
+ As the reasons for the automatic internal retries vary (various broker error
+ codes as well as transport layer issues), it doesn't make much sense to limit
+ the number of retries for retriable errors, but instead only limit the
+ retries based on the allowed time to produce a message.
+ * The default value for the producer configuration property
+ `request.timeout.ms` has been increased from 5 to 30 seconds to match
+ the Apache Kafka Java producer default.
+ This change yields increased robustness for broker-side congestion.
+
+
+## Enhancements
+
+ * The generated `CONFIGURATION.md` (through `rd_kafka_conf_properties_show())`)
+ now include all properties and values, regardless if they were included in
+ the build, and setting a disabled property or value through
+ `rd_kafka_conf_set()` now returns `RD_KAFKA_CONF_INVALID` and provides
+ a more useful error string saying why the property can't be set.
+ * Consumer configs on producers and vice versa will now be logged with
+ warning messages on client instantiation.
+
+## Fixes
+
+### Security fixes
+
+ * There was an incorrect call to zlib's `inflateGetHeader()` with
+ unitialized memory pointers that could lead to the GZIP header of a fetched
+ message batch to be copied to arbitrary memory.
+ This function call has now been completely removed since the result was
+ not used.
+ Reported by Ilja van Sprundel.
+
+
+### General fixes
+
+ * `rd_kafka_topic_opaque()` (used by the C++ API) would cause object
+ refcounting issues when used on light-weight (error-only) topic objects
+ such as consumer errors (#2693).
+ * Handle name resolution failures when formatting IP addresses in error logs,
+ and increase printed hostname limit to ~256 bytes (was ~60).
+ * Broker sockets would be closed twice (thus leading to potential race
+ condition with fd-reuse in other threads) if a custom `socket_cb` would
+ return error.
+
+### Consumer fixes
+
+ * The `roundrobin` `partition.assignment.strategy` could crash (assert)
+ for certain combinations of members and partitions.
+ This is a regression in v1.5.0. (#3024)
+ * The C++ `KafkaConsumer` destructor did not destroy the underlying
+ C `rd_kafka_t` instance, causing a leak if `close()` was not used.
+ * Expose rich error strings for C++ Consumer `Message->errstr()`.
+ * The consumer could get stuck if an outstanding commit failed during
+ rebalancing (#2933).
+ * Topic authorization errors during fetching are now reported only once (#3072).
+
+### Producer fixes
+
+ * Topic authorization errors are now properly propagated for produced messages,
+ both through delivery reports and as `ERR_TOPIC_AUTHORIZATION_FAILED`
+ return value from `produce*()` (#2215)
+ * Treat cluster authentication failures as fatal in the transactional
+ producer (#2994).
+ * The transactional producer code did not properly reference-count partition
+ objects which could in very rare circumstances lead to a use-after-free bug
+ if a topic was deleted from the cluster when a transaction was using it.
+ * `ERR_KAFKA_STORAGE_ERROR` is now correctly treated as a retriable
+ produce error (#3026).
+ * Messages that timed out locally would not fail the ongoing transaction.
+ If the application did not take action on failed messages in its delivery
+ report callback and went on to commit the transaction, the transaction would
+ be successfully committed, simply omitting the failed messages.
+ * EndTxnRequests (sent on commit/abort) are only retried in allowed
+ states (#3041).
+ Previously the transaction could hang on commit_transaction() if an abortable
+ error was hit and the EndTxnRequest was to be retried.
+
+
+*Note: there was no v1.5.1 librdkafka release*
+
+
+
+
+# librdkafka v1.5.0
+
+The v1.5.0 release brings usability improvements, enhancements and fixes to
+librdkafka.
+
+## Enhancements
+
+ * Improved broker connection error reporting with more useful information and
+ hints on the cause of the problem.
+ * Consumer: Propagate errors when subscribing to unavailable topics (#1540)
+ * Producer: Add `batch.size` producer configuration property (#638)
+ * Add `topic.metadata.propagation.max.ms` to allow newly manually created
+ topics to be propagated throughout the cluster before reporting them
+ as non-existent. This fixes race issues where CreateTopics() is
+ quickly followed by produce().
+ * Prefer least idle connection for periodic metadata refreshes, et.al.,
+ to allow truly idle connections to time out and to avoid load-balancer-killed
+ idle connection errors (#2845)
+ * Added `rd_kafka_event_debug_contexts()` to get the debug contexts for
+ a debug log line (by @wolfchimneyrock).
+ * Added Test scenarios which define the cluster configuration.
+ * Added MinGW-w64 builds (@ed-alertedh, #2553)
+ * `./configure --enable-XYZ` now requires the XYZ check to pass,
+ and `--disable-XYZ` disables the feature altogether (@benesch)
+ * Added `rd_kafka_produceva()` which takes an array of produce arguments
+ for situations where the existing `rd_kafka_producev()` va-arg approach
+ can't be used.
+ * Added `rd_kafka_message_broker_id()` to see the broker that a message
+ was produced or fetched from, or an error was associated with.
+ * Added RTT/delay simulation to mock brokers.
+
+
+## Upgrade considerations
+
+ * Subscribing to non-existent and unauthorized topics will now propagate
+ errors `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` and
+ `RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED` to the application through
+ the standard consumer error (the err field in the message object).
+ * Consumer will no longer trigger auto creation of topics,
+ `allow.auto.create.topics=true` may be used to re-enable the old deprecated
+ functionality.
+ * The default consumer pre-fetch queue threshold `queued.max.messages.kbytes`
+ has been decreased from 1GB to 64MB to avoid excessive network usage for low
+ and medium throughput consumer applications. High throughput consumer
+ applications may need to manually set this property to a higher value.
+ * The default consumer Fetch wait time has been increased from 100ms to 500ms
+ to avoid excessive network usage for low throughput topics.
+ * If OpenSSL is linked statically, or `ssl.ca.location=probe` is configured,
+ librdkafka will probe known CA certificate paths and automatically use the
+ first one found. This should alleviate the need to configure
+ `ssl.ca.location` when the statically linked OpenSSL's OPENSSLDIR differs
+ from the system's CA certificate path.
+ * The heuristics for handling Apache Kafka < 0.10 brokers has been removed to
+ improve connection error handling for modern Kafka versions.
+ Users on Brokers 0.9.x or older should already be configuring
+ `api.version.request=false` and `broker.version.fallback=...` so there
+ should be no functional change.
+ * The default producer batch accumulation time, `linger.ms`, has been changed
+ from 0.5ms to 5ms to improve batch sizes and throughput while reducing
+ the per-message protocol overhead.
+ Applications that require lower produce latency than 5ms will need to
+ manually set `linger.ms` to a lower value.
+ * librdkafka's build tooling now requires Python 3.x (python3 interpreter).
+
+
+## Fixes
+
+### General fixes
+
+ * The client could crash in rare circumstances on ApiVersion or
+ SaslHandshake request timeouts (#2326)
+ * `./configure --LDFLAGS='a=b, c=d'` with arguments containing = are now
+ supported (by @sky92zwq).
+ * `./configure` arguments now take precedence over cached `configure` variables
+ from previous invocation.
+ * Fix theoretical crash on coord request failure.
+ * Unknown partition error could be triggered for existing partitions when
+ additional partitions were added to a topic (@benesch, #2915)
+ * Quickly refresh topic metadata for desired but non-existent partitions.
+ This will speed up the initial discovery delay when new partitions are added
+ to an existing topic (#2917).
+
+
+### Consumer fixes
+
+ * The roundrobin partition assignor could crash if subscriptions
+ where asymmetrical (different sets from different members of the group).
+ Thanks to @ankon and @wilmai for identifying the root cause (#2121).
+ * The consumer assignors could ignore some topics if there were more subscribed
+ topics than consumers in taking part in the assignment.
+ * The consumer would connect to all partition leaders of a topic even
+ for partitions that were not being consumed (#2826).
+ * Initial consumer group joins should now be a couple of seconds quicker
+ thanks expedited query intervals (@benesch).
+ * Fix crash and/or inconsistent subscriptions when using multiple consumers
+ (in the same process) with wildcard topics on Windows.
+ * Don't propagate temporary offset lookup errors to application.
+ * Immediately refresh topic metadata when partitions are reassigned to other
+ brokers, avoiding a fetch stall of up to `topic.metadata.refresh.interval.ms`. (#2955)
+ * Memory for batches containing control messages would not be freed when
+ using the batch consume APIs (@pf-qiu, #2990).
+
+
+### Producer fixes
+
+ * Proper locking for transaction state in EndTxn handler.
+
+
+
+# librdkafka v1.4.4
+
+v1.4.4 is a maintenance release with the following fixes and enhancements:
+
+ * Transactional producer could crash on request timeout due to dereferencing
+ NULL pointer of non-existent response object.
+ * Mark `rd_kafka_send_offsets_to_transaction()` CONCURRENT_TRANSACTION (et.al)
+ errors as retriable.
+ * Fix crash on transactional coordinator FindCoordinator request failure.
+ * Minimize broker re-connect delay when broker's connection is needed to
+ send requests.
+ * Proper locking for transaction state in EndTxn handler.
+ * `socket.timeout.ms` was ignored when `transactional.id` was set.
+ * Added RTT/delay simulation to mock brokers.
+
+*Note: there was no v1.4.3 librdkafka release*
+
+
+
+# librdkafka v1.4.2
+
+v1.4.2 is a maintenance release with the following fixes and enhancements:
+
+ * Fix produce/consume hang after partition goes away and comes back,
+ such as when a topic is deleted and re-created.
+ * Consumer: Reset the stored offset when partitions are un-assign()ed (fixes #2782).
+ This fixes the case where a manual offset-less commit() or the auto-committer
+ would commit a stored offset from a previous assignment before
+ a new message was consumed by the application.
+ * Probe known CA cert paths and set default `ssl.ca.location` accordingly
+ if OpenSSL is statically linked or `ssl.ca.location` is set to `probe`.
+ * Per-partition OffsetCommit errors were unhandled (fixes #2791)
+ * Seed the PRNG (random number generator) by default, allow application to
+ override with `enable.random.seed=false` (#2795)
+ * Fix stack overwrite (of 1 byte) when SaslHandshake MechCnt is zero
+ * Align bundled c11 threads (tinycthreads) constants to glibc and musl (#2681)
+ * Fix return value of rd_kafka_test_fatal_error() (by @ckb42)
+ * Ensure CMake sets disabled defines to zero on Windows (@benesch)
+
+
+*Note: there was no v1.4.1 librdkafka release*
+
+
+
+
+
+# Older releases
+
+See https://github.com/edenhill/librdkafka/releases
diff --git a/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt
new file mode 100644
index 000000000..7f3dd0fc6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/CMakeLists.txt
@@ -0,0 +1,291 @@
+cmake_minimum_required(VERSION 3.2)
+
+include("packaging/cmake/parseversion.cmake")
+parseversion("src/rdkafka.h")
+
+project(RdKafka VERSION ${RDKAFKA_VERSION})
+
+set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_CURRENT_SOURCE_DIR}/packaging/cmake/Modules/")
+
+# Options. No 'RDKAFKA_' prefix to match old C++ code. {
+
+# This option doesn't affect build in fact, only C code
+# (see 'rd_kafka_version_str'). In CMake the build type feature usually used
+# (like Debug, Release, etc.).
+option(WITHOUT_OPTIMIZATION "Disable optimization" OFF)
+
+option(ENABLE_DEVEL "Enable development asserts, checks, etc" OFF)
+option(ENABLE_REFCNT_DEBUG "Enable refcnt debugging" OFF)
+set(TRYCOMPILE_SRC_DIR "${CMAKE_CURRENT_LIST_DIR}/packaging/cmake/try_compile")
+set(BUILT_WITH "CMAKE")
+
+# Toolchain {
+list(APPEND BUILT_WITH "${CMAKE_C_COMPILER_ID}")
+list(APPEND BUILT_WITH "${CMAKE_CXX_COMPILER_ID}")
+# }
+
+# PkgConfig {
+find_package(PkgConfig QUIET)
+if(PkgConfig_FOUND)
+ set(WITH_PKGCONFIG ON)
+ list(APPEND BUILT_WITH "PKGCONFIG")
+endif()
+# }
+
+# LIBM {
+include(CheckLibraryExists)
+check_library_exists(m pow "" WITH_HDRHISTOGRAM)
+if(WITH_HDRHISTOGRAM)
+ list(APPEND BUILT_WITH "HDRHISTOGRAM")
+endif()
+# }
+
+# ZLIB {
+find_package(ZLIB QUIET)
+if(ZLIB_FOUND)
+ set(with_zlib_default ON)
+else()
+ set(with_zlib_default OFF)
+endif()
+option(WITH_ZLIB "With ZLIB" ${with_zlib_default})
+if(WITH_ZLIB)
+ list(APPEND BUILT_WITH "ZLIB")
+endif()
+# }
+
+# CURL {
+find_package(CURL QUIET)
+if(CURL_FOUND)
+ set(with_curl_default ON)
+else()
+ set(with_curl_default OFF)
+endif()
+option(WITH_CURL "With CURL" ${with_curl_default})
+if(WITH_CURL)
+ list(APPEND BUILT_WITH "CURL")
+endif()
+# }
+
+# ZSTD {
+find_package(ZSTD QUIET)
+if(ZSTD_FOUND)
+ set(with_zstd_default ON)
+else()
+ set(with_zstd_default OFF)
+endif()
+option(WITH_ZSTD "With ZSTD" ${with_zstd_default})
+if(WITH_ZSTD)
+ list(APPEND BUILT_WITH "ZSTD")
+endif()
+# }
+
+# LibDL {
+try_compile(
+ WITH_LIBDL
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/dlopen_test.c"
+ LINK_LIBRARIES "${CMAKE_DL_LIBS}"
+)
+if(WITH_LIBDL)
+ list(APPEND BUILT_WITH "LIBDL")
+endif()
+# }
+
+# WITH_PLUGINS {
+if(WITH_LIBDL OR WIN32)
+ set(with_plugins_default ON)
+else()
+ set(with_plugins_default OFF)
+endif()
+option(WITH_PLUGINS "With plugin support" ${with_plugins_default})
+if(WITH_PLUGINS)
+ list(APPEND BUILT_WITH "PLUGINS")
+endif()
+# }
+
+# OpenSSL {
+if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
+ set(with_ssl_default ON)
+else()
+ find_package(OpenSSL QUIET)
+ if(OpenSSL_FOUND)
+ set(with_ssl_default ON)
+ else()
+ set(with_ssl_default OFF)
+ endif()
+endif()
+option(WITH_SSL "With SSL" ${with_ssl_default})
+if(WITH_SSL)
+ list(APPEND BUILT_WITH "SSL")
+endif()
+# }
+
+# SASL {
+if(WIN32)
+ set(with_sasl_default ON)
+else()
+ if(PkgConfig_FOUND)
+ pkg_check_modules(SASL libsasl2)
+ if(SASL_FOUND)
+ set(with_sasl_default ON)
+ else()
+ try_compile(
+ WITH_SASL_CYRUS_BOOL
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/libsasl2_test.c"
+ LINK_LIBRARIES "-lsasl2"
+ )
+ if(WITH_SASL_CYRUS_BOOL)
+ set(with_sasl_default ON)
+ set(SASL_LIBRARIES "-lsasl2")
+ else()
+ set(with_sasl_default OFF)
+ endif()
+ endif()
+ endif()
+endif()
+option(WITH_SASL "With SASL" ${with_sasl_default})
+if(WITH_SASL)
+ if(SASL_FOUND)
+ link_directories(${SASL_LIBRARY_DIRS})
+ endif()
+ if(WITH_SSL)
+ set(WITH_SASL_SCRAM ON)
+ set(WITH_SASL_OAUTHBEARER ON)
+ list(APPEND BUILT_WITH "SASL_SCRAM SASL_OAUTHBEARER")
+ endif()
+ if(NOT WIN32)
+ set(WITH_SASL_CYRUS ON)
+ list(APPEND BUILT_WITH "SASL_CYRUS")
+ endif()
+endif()
+# }
+
+if(WITH_SSL AND WITH_CURL)
+ set(WITH_OAUTHBEARER_OIDC ON)
+endif()
+
+# LZ4 {
+option(ENABLE_LZ4_EXT "Enable external LZ4 library support" ON)
+set(WITH_LZ4_EXT OFF)
+if(ENABLE_LZ4_EXT)
+ find_package(LZ4)
+ if(LZ4_FOUND)
+ set(WITH_LZ4_EXT ON)
+ list(APPEND BUILT_WITH "LZ4_EXT")
+ else()
+ message(STATUS "Using bundled LZ4 implementation.")
+ endif()
+endif()
+# }
+
+option(RDKAFKA_BUILD_STATIC "Build static rdkafka library" OFF)
+option(RDKAFKA_BUILD_EXAMPLES "Build examples" ON)
+option(RDKAFKA_BUILD_TESTS "Build tests" ON)
+if(WIN32)
+ option(WITHOUT_WIN32_CONFIG "Avoid including win32_config.h on cmake builds" ON)
+endif(WIN32)
+
+# In:
+# * TRYCOMPILE_SRC_DIR
+# Out:
+# * HAVE_ATOMICS_32
+# * HAVE_ATOMICS_32_SYNC
+# * HAVE_ATOMICS_64
+# * HAVE_ATOMICS_64_SYNC
+# * HAVE_REGEX
+# * HAVE_STRNDUP
+# * HAVE_PTHREAD_SETNAME_GNU
+# * HAVE_PTHREAD_SETNAME_DARWIN
+# * HAVE_PTHREAD_SETNAME_FREEBSD
+# * WITH_C11THREADS
+# * WITH_CRC32C_HW
+# * LINK_ATOMIC
+include("packaging/cmake/try_compile/rdkafka_setup.cmake")
+if(WITH_C11THREADS)
+ list(APPEND BUILT_WITH "C11THREADS")
+endif()
+if(WITH_CRC32C_HW)
+ list(APPEND BUILT_WITH "CRC32C_HW")
+endif()
+
+set(GENERATED_DIR "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+# In:
+# * WITHOUT_OPTIMIZATION
+# * ENABLE_DEVEL
+# * ENABLE_REFCNT_DEBUG
+# * HAVE_ATOMICS_32
+# * HAVE_ATOMICS_32_SYNC
+# * HAVE_ATOMICS_64
+# * HAVE_ATOMICS_64_SYNC
+# * WITH_ZLIB
+# * WITH_SSL
+# * WITH_SASL
+# * HAVE_REGEX
+# * HAVE_STRNDUP
+# * HAVE_PTHREAD_SETNAME_GNU
+# * HAVE_PTHREAD_SETNAME_DARWIN
+# * HAVE_PTHREAD_SETNAME_FREEBSD
+list(APPEND BUILT_WITH "SNAPPY")
+list(APPEND BUILT_WITH "SOCKEM")
+string(REPLACE ";" " " BUILT_WITH "${BUILT_WITH}")
+configure_file("packaging/cmake/config.h.in" "${GENERATED_DIR}/config.h")
+
+# Installation (https://github.com/forexample/package-example) {
+
+include(GNUInstallDirs)
+
+set(config_install_dir "${CMAKE_INSTALL_LIBDIR}/cmake/${PROJECT_NAME}")
+
+set(generated_dir "${CMAKE_CURRENT_BINARY_DIR}/generated")
+
+set(project_config "${generated_dir}/${PROJECT_NAME}Config.cmake")
+set(project_version "${generated_dir}/${PROJECT_NAME}ConfigVersion.cmake")
+set(targets_export_name "${PROJECT_NAME}Targets")
+set(namespace "${PROJECT_NAME}::")
+
+include(CMakePackageConfigHelpers)
+
+# In:
+# * targets_export_name
+# * PROJECT_NAME
+configure_package_config_file(
+ "packaging/cmake/Config.cmake.in"
+ "${project_config}"
+ INSTALL_DESTINATION "${config_install_dir}"
+)
+
+write_basic_package_version_file(
+ "${project_version}"
+ VERSION ${PROJECT_VERSION}
+ COMPATIBILITY AnyNewerVersion
+)
+
+install(
+ FILES "${project_config}" "${project_version}" "packaging/cmake/Modules/FindLZ4.cmake"
+ DESTINATION "${config_install_dir}"
+)
+
+install(
+ EXPORT "${targets_export_name}"
+ NAMESPACE "${namespace}"
+ DESTINATION "${config_install_dir}"
+)
+
+install(
+ FILES LICENSES.txt
+ DESTINATION "share/licenses/librdkafka"
+)
+
+add_subdirectory(src)
+add_subdirectory(src-cpp)
+
+if(RDKAFKA_BUILD_EXAMPLES)
+ add_subdirectory(examples)
+endif()
+
+if(RDKAFKA_BUILD_TESTS)
+ enable_testing()
+ add_subdirectory(tests)
+endif()
diff --git a/fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md b/fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md
new file mode 100644
index 000000000..dbbde19c9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/CODE_OF_CONDUCT.md
@@ -0,0 +1,46 @@
+# Contributor Covenant Code of Conduct
+
+## Our Pledge
+
+In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation.
+
+## Our Standards
+
+Examples of behavior that contributes to creating a positive environment include:
+
+* Using welcoming and inclusive language
+* Being respectful of differing viewpoints and experiences
+* Gracefully accepting constructive criticism
+* Focusing on what is best for the community
+* Showing empathy towards other community members
+
+Examples of unacceptable behavior by participants include:
+
+* The use of sexualized language or imagery and unwelcome sexual attention or advances
+* Trolling, insulting/derogatory comments, and personal or political attacks
+* Public or private harassment
+* Publishing others' private information, such as a physical or electronic address, without explicit permission
+* Other conduct which could reasonably be considered inappropriate in a professional setting
+
+## Our Responsibilities
+
+Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior.
+
+Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful.
+
+## Scope
+
+This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers.
+
+## Enforcement
+
+Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at rdkafka@edenhill.se. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately.
+
+Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership.
+
+## Attribution
+
+This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version]
+
+[homepage]: http://contributor-covenant.org
+[version]: http://contributor-covenant.org/version/1/4/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md b/fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md
new file mode 100644
index 000000000..0ebec417c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/CONFIGURATION.md
@@ -0,0 +1,183 @@
+# Configuration properties
+## Global configuration properties
+
+Property | C/P | Range | Default | Importance | Description
+-----------------------------------------|-----|-----------------|--------------:|------------| --------------------------
+builtin.features | * | | gzip, snappy, ssl, sasl, regex, lz4, sasl_gssapi, sasl_plain, sasl_scram, plugins, zstd, sasl_oauthbearer, http, oidc | low | Indicates the builtin features for this build of librdkafka. An application can either query this value or attempt to set it with its list of required features to check for library support. <br>*Type: CSV flags*
+client.id | * | | rdkafka | low | Client identifier. <br>*Type: string*
+metadata.broker.list | * | | | high | Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. <br>*Type: string*
+bootstrap.servers | * | | | high | Alias for `metadata.broker.list`: Initial list of brokers as a CSV list of broker host or host:port. The application may also use `rd_kafka_brokers_add()` to add brokers during runtime. <br>*Type: string*
+message.max.bytes | * | 1000 .. 1000000000 | 1000000 | medium | Maximum Kafka protocol request message size. Due to differing framing overhead between protocol versions the producer is unable to reliably enforce a strict max message limit at produce time and may exceed the maximum size by one message in protocol ProduceRequests, the broker will enforce the the topic's `max.message.bytes` limit (see Apache Kafka documentation). <br>*Type: integer*
+message.copy.max.bytes | * | 0 .. 1000000000 | 65535 | low | Maximum size for message to be copied to buffer. Messages larger than this will be passed by reference (zero-copy) at the expense of larger iovecs. <br>*Type: integer*
+receive.message.max.bytes | * | 1000 .. 2147483647 | 100000000 | medium | Maximum Kafka protocol response message size. This serves as a safety precaution to avoid memory exhaustion in case of protocol hickups. This value must be at least `fetch.max.bytes` + 512 to allow for protocol overhead; the value is adjusted automatically unless the configuration property is explicitly set. <br>*Type: integer*
+max.in.flight.requests.per.connection | * | 1 .. 1000000 | 1000000 | low | Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. <br>*Type: integer*
+max.in.flight | * | 1 .. 1000000 | 1000000 | low | Alias for `max.in.flight.requests.per.connection`: Maximum number of in-flight requests per broker connection. This is a generic property applied to all broker communication, however it is primarily relevant to produce requests. In particular, note that other mechanisms limit the number of outstanding consumer fetch request per broker to one. <br>*Type: integer*
+topic.metadata.refresh.interval.ms | * | -1 .. 3600000 | 300000 | low | Period of time in milliseconds at which topic and broker metadata is refreshed in order to proactively discover any new brokers, topics, partitions or partition leader changes. Use -1 to disable the intervalled refresh (not recommended). If there are no locally referenced topics (no topic objects created, no messages produced, no subscription or no assignment) then only the broker list will be refreshed every interval but no more often than every 10s. <br>*Type: integer*
+metadata.max.age.ms | * | 1 .. 86400000 | 900000 | low | Metadata cache max age. Defaults to topic.metadata.refresh.interval.ms * 3 <br>*Type: integer*
+topic.metadata.refresh.fast.interval.ms | * | 1 .. 60000 | 250 | low | When a topic loses its leader a new metadata request will be enqueued with this initial interval, exponentially increasing until the topic metadata has been refreshed. This is used to recover quickly from transitioning leader brokers. <br>*Type: integer*
+topic.metadata.refresh.fast.cnt | * | 0 .. 1000 | 10 | low | **DEPRECATED** No longer used. <br>*Type: integer*
+topic.metadata.refresh.sparse | * | true, false | true | low | Sparse metadata requests (consumes less network bandwidth) <br>*Type: boolean*
+topic.metadata.propagation.max.ms | * | 0 .. 3600000 | 30000 | low | Apache Kafka topic creation is asynchronous and it takes some time for a new topic to propagate throughout the cluster to all brokers. If a client requests topic metadata after manual topic creation but before the topic has been fully propagated to the broker the client is requesting metadata from, the topic will seem to be non-existent and the client will mark the topic as such, failing queued produced messages with `ERR__UNKNOWN_TOPIC`. This setting delays marking a topic as non-existent until the configured propagation max time has passed. The maximum propagation time is calculated from the time the topic is first referenced in the client, e.g., on produce(). <br>*Type: integer*
+topic.blacklist | * | | | low | Topic blacklist, a comma-separated list of regular expressions for matching topic names that should be ignored in broker metadata information as if the topics did not exist. <br>*Type: pattern list*
+debug | * | generic, broker, topic, metadata, feature, queue, msg, protocol, cgrp, security, fetch, interceptor, plugin, consumer, admin, eos, mock, assignor, conf, all | | medium | A comma-separated list of debug contexts to enable. Detailed Producer debugging: broker,topic,msg. Consumer: consumer,cgrp,topic,fetch <br>*Type: CSV flags*
+socket.timeout.ms | * | 10 .. 300000 | 60000 | low | Default timeout for network requests. Producer: ProduceRequests will use the lesser value of `socket.timeout.ms` and remaining `message.timeout.ms` for the first message in the batch. Consumer: FetchRequests will use `fetch.wait.max.ms` + `socket.timeout.ms`. Admin: Admin requests will use `socket.timeout.ms` or explicitly set `rd_kafka_AdminOptions_set_operation_timeout()` value. <br>*Type: integer*
+socket.blocking.max.ms | * | 1 .. 60000 | 1000 | low | **DEPRECATED** No longer used. <br>*Type: integer*
+socket.send.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket send buffer size. System default is used if 0. <br>*Type: integer*
+socket.receive.buffer.bytes | * | 0 .. 100000000 | 0 | low | Broker socket receive buffer size. System default is used if 0. <br>*Type: integer*
+socket.keepalive.enable | * | true, false | false | low | Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets <br>*Type: boolean*
+socket.nagle.disable | * | true, false | false | low | Disable the Nagle algorithm (TCP_NODELAY) on broker sockets. <br>*Type: boolean*
+socket.max.fails | * | 0 .. 1000000 | 1 | low | Disconnect from broker when this number of send failures (e.g., timed out requests) is reached. Disable with 0. WARNING: It is highly recommended to leave this setting at its default value of 1 to avoid the client and broker to become desynchronized in case of request timeouts. NOTE: The connection is automatically re-established. <br>*Type: integer*
+broker.address.ttl | * | 0 .. 86400000 | 1000 | low | How long to cache the broker address resolving results (milliseconds). <br>*Type: integer*
+broker.address.family | * | any, v4, v6 | any | low | Allowed broker IP address families: any, v4, v6 <br>*Type: enum value*
+socket.connection.setup.timeout.ms | * | 1000 .. 2147483647 | 30000 | medium | Maximum time allowed for broker connection setup (TCP connection setup as well SSL and SASL handshake). If the connection to the broker is not fully functional after this the connection will be closed and retried. <br>*Type: integer*
+connections.max.idle.ms | * | 0 .. 2147483647 | 0 | medium | Close broker connections after the specified time of inactivity. Disable with 0. If this property is left at its default value some heuristics are performed to determine a suitable default value, this is currently limited to identifying brokers on Azure (see librdkafka issue #3109 for more info). <br>*Type: integer*
+reconnect.backoff.jitter.ms | * | 0 .. 3600000 | 0 | low | **DEPRECATED** No longer used. See `reconnect.backoff.ms` and `reconnect.backoff.max.ms`. <br>*Type: integer*
+reconnect.backoff.ms | * | 0 .. 3600000 | 100 | medium | The initial time to wait before reconnecting to a broker after the connection has been closed. The time is increased exponentially until `reconnect.backoff.max.ms` is reached. -25% to +50% jitter is applied to each reconnect backoff. A value of 0 disables the backoff and reconnects immediately. <br>*Type: integer*
+reconnect.backoff.max.ms | * | 0 .. 3600000 | 10000 | medium | The maximum time to wait before reconnecting to a broker after the connection has been closed. <br>*Type: integer*
+statistics.interval.ms | * | 0 .. 86400000 | 0 | high | librdkafka statistics emit interval. The application also needs to register a stats callback using `rd_kafka_conf_set_stats_cb()`. The granularity is 1000ms. A value of 0 disables statistics. <br>*Type: integer*
+enabled_events | * | 0 .. 2147483647 | 0 | low | See `rd_kafka_conf_set_events()` <br>*Type: integer*
+error_cb | * | | | low | Error callback (set with rd_kafka_conf_set_error_cb()) <br>*Type: see dedicated API*
+throttle_cb | * | | | low | Throttle callback (set with rd_kafka_conf_set_throttle_cb()) <br>*Type: see dedicated API*
+stats_cb | * | | | low | Statistics callback (set with rd_kafka_conf_set_stats_cb()) <br>*Type: see dedicated API*
+log_cb | * | | | low | Log callback (set with rd_kafka_conf_set_log_cb()) <br>*Type: see dedicated API*
+log_level | * | 0 .. 7 | 6 | low | Logging level (syslog(3) levels) <br>*Type: integer*
+log.queue | * | true, false | false | low | Disable spontaneous log_cb from internal librdkafka threads, instead enqueue log messages on queue set with `rd_kafka_set_log_queue()` and serve log callbacks or events through the standard poll APIs. **NOTE**: Log messages will linger in a temporary queue until the log queue has been set. <br>*Type: boolean*
+log.thread.name | * | true, false | true | low | Print internal thread name in log messages (useful for debugging librdkafka internals) <br>*Type: boolean*
+enable.random.seed | * | true, false | true | low | If enabled librdkafka will initialize the PRNG with srand(current_time.milliseconds) on the first invocation of rd_kafka_new() (required only if rand_r() is not available on your platform). If disabled the application must call srand() prior to calling rd_kafka_new(). <br>*Type: boolean*
+log.connection.close | * | true, false | true | low | Log broker disconnects. It might be useful to turn this off when interacting with 0.9 brokers with an aggressive `connections.max.idle.ms` value. <br>*Type: boolean*
+background_event_cb | * | | | low | Background queue event callback (set with rd_kafka_conf_set_background_event_cb()) <br>*Type: see dedicated API*
+socket_cb | * | | | low | Socket creation callback to provide race-free CLOEXEC <br>*Type: see dedicated API*
+connect_cb | * | | | low | Socket connect callback <br>*Type: see dedicated API*
+closesocket_cb | * | | | low | Socket close callback <br>*Type: see dedicated API*
+open_cb | * | | | low | File open callback to provide race-free CLOEXEC <br>*Type: see dedicated API*
+resolve_cb | * | | | low | Address resolution callback (set with rd_kafka_conf_set_resolve_cb()). <br>*Type: see dedicated API*
+opaque | * | | | low | Application opaque (set with rd_kafka_conf_set_opaque()) <br>*Type: see dedicated API*
+default_topic_conf | * | | | low | Default topic configuration for automatically subscribed topics <br>*Type: see dedicated API*
+internal.termination.signal | * | 0 .. 128 | 0 | low | Signal that librdkafka will use to quickly terminate on rd_kafka_destroy(). If this signal is not set then there will be a delay before rd_kafka_wait_destroyed() returns true as internal threads are timing out their system calls. If this signal is set however the delay will be minimal. The application should mask this signal as an internal signal handler is installed. <br>*Type: integer*
+api.version.request | * | true, false | true | high | Request broker's supported API versions to adjust functionality to available protocol features. If set to false, or the ApiVersionRequest fails, the fallback version `broker.version.fallback` will be used. **NOTE**: Depends on broker version >=0.10.0. If the request is not supported by (an older) broker the `broker.version.fallback` fallback is used. <br>*Type: boolean*
+api.version.request.timeout.ms | * | 1 .. 300000 | 10000 | low | Timeout for broker API version requests. <br>*Type: integer*
+api.version.fallback.ms | * | 0 .. 604800000 | 0 | medium | Dictates how long the `broker.version.fallback` fallback is used in the case the ApiVersionRequest fails. **NOTE**: The ApiVersionRequest is only issued when a new connection to the broker is made (such as after an upgrade). <br>*Type: integer*
+broker.version.fallback | * | | 0.10.0 | medium | Older broker versions (before 0.10.0) provide no way for a client to query for supported protocol features (ApiVersionRequest, see `api.version.request`) making it impossible for the client to know what features it may use. As a workaround a user may set this property to the expected broker version and the client will automatically adjust its feature set accordingly if the ApiVersionRequest fails (or is disabled). The fallback broker version will be used for `api.version.fallback.ms`. Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. Any other value >= 0.10, such as 0.10.2.1, enables ApiVersionRequests. <br>*Type: string*
+allow.auto.create.topics | * | true, false | false | low | Allow automatic topic creation on the broker when subscribing to or assigning non-existent topics. The broker must also be configured with `auto.create.topics.enable=true` for this configuration to take effect. Note: the default value (true) for the producer is different from the default value (false) for the consumer. Further, the consumer default value is different from the Java consumer (true), and this property is not supported by the Java producer. Requires broker version >= 0.11.0.0, for older broker versions only the broker configuration applies. <br>*Type: boolean*
+security.protocol | * | plaintext, ssl, sasl_plaintext, sasl_ssl | plaintext | high | Protocol used to communicate with brokers. <br>*Type: enum value*
+ssl.cipher.suites | * | | | low | A cipher suite is a named combination of authentication, encryption, MAC and key exchange algorithm used to negotiate the security settings for a network connection using TLS or SSL network protocol. See manual page for `ciphers(1)` and `SSL_CTX_set_cipher_list(3). <br>*Type: string*
+ssl.curves.list | * | | | low | The supported-curves extension in the TLS ClientHello message specifies the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client is willing to have the server use. See manual page for `SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required. <br>*Type: string*
+ssl.sigalgs.list | * | | | low | The client uses the TLS ClientHello signature_algorithms extension to indicate to the server which signature/hash algorithm pairs may be used in digital signatures. See manual page for `SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required. <br>*Type: string*
+ssl.key.location | * | | | low | Path to client's private key (PEM) used for authentication. <br>*Type: string*
+ssl.key.password | * | | | low | Private key passphrase (for use with `ssl.key.location` and `set_ssl_cert()`) <br>*Type: string*
+ssl.key.pem | * | | | low | Client's private key string (PEM format) used for authentication. <br>*Type: string*
+ssl_key | * | | | low | Client's private key as set by rd_kafka_conf_set_ssl_cert() <br>*Type: see dedicated API*
+ssl.certificate.location | * | | | low | Path to client's public key (PEM) used for authentication. <br>*Type: string*
+ssl.certificate.pem | * | | | low | Client's public key string (PEM format) used for authentication. <br>*Type: string*
+ssl_certificate | * | | | low | Client's public key as set by rd_kafka_conf_set_ssl_cert() <br>*Type: see dedicated API*
+ssl.ca.location | * | | | low | File or directory path to CA certificate(s) for verifying the broker's key. Defaults: On Windows the system's CA certificates are automatically looked up in the Windows Root certificate store. On Mac OSX this configuration defaults to `probe`. It is recommended to install openssl using Homebrew, to provide CA certificates. On Linux install the distribution's ca-certificates package. If OpenSSL is statically linked or `ssl.ca.location` is set to `probe` a list of standard paths will be probed and the first one found will be used as the default CA certificate location path. If OpenSSL is dynamically linked the OpenSSL library's default path will be used (see `OPENSSLDIR` in `openssl version -a`). <br>*Type: string*
+ssl.ca.pem | * | | | low | CA certificate string (PEM format) for verifying the broker's key. <br>*Type: string*
+ssl_ca | * | | | low | CA certificate as set by rd_kafka_conf_set_ssl_cert() <br>*Type: see dedicated API*
+ssl.ca.certificate.stores | * | | Root | low | Comma-separated list of Windows Certificate stores to load CA certificates from. Certificates will be loaded in the same order as stores are specified. If no certificates can be loaded from any of the specified stores an error is logged and the OpenSSL library's default CA location is used instead. Store names are typically one or more of: MY, Root, Trust, CA. <br>*Type: string*
+ssl.crl.location | * | | | low | Path to CRL for verifying broker's certificate validity. <br>*Type: string*
+ssl.keystore.location | * | | | low | Path to client's keystore (PKCS#12) used for authentication. <br>*Type: string*
+ssl.keystore.password | * | | | low | Client's keystore (PKCS#12) password. <br>*Type: string*
+ssl.providers | * | | | low | Comma-separated list of OpenSSL 3.0.x implementation providers. E.g., "default,legacy". <br>*Type: string*
+ssl.engine.location | * | | | low | **DEPRECATED** Path to OpenSSL engine library. OpenSSL >= 1.1.x required. DEPRECATED: OpenSSL engine support is deprecated and should be replaced by OpenSSL 3 providers. <br>*Type: string*
+ssl.engine.id | * | | dynamic | low | OpenSSL engine id is the name used for loading engine. <br>*Type: string*
+ssl_engine_callback_data | * | | | low | OpenSSL engine callback data (set with rd_kafka_conf_set_engine_callback_data()). <br>*Type: see dedicated API*
+enable.ssl.certificate.verification | * | true, false | true | low | Enable OpenSSL's builtin broker (server) certificate verification. This verification can be extended by the application by implementing a certificate_verify_cb. <br>*Type: boolean*
+ssl.endpoint.identification.algorithm | * | none, https | https | low | Endpoint identification algorithm to validate broker hostname using broker certificate. https - Server (broker) hostname verification as specified in RFC2818. none - No endpoint verification. OpenSSL >= 1.0.2 required. <br>*Type: enum value*
+ssl.certificate.verify_cb | * | | | low | Callback to verify the broker certificate chain. <br>*Type: see dedicated API*
+sasl.mechanisms | * | | GSSAPI | high | SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured. <br>*Type: string*
+sasl.mechanism | * | | GSSAPI | high | Alias for `sasl.mechanisms`: SASL mechanism to use for authentication. Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. **NOTE**: Despite the name only one mechanism must be configured. <br>*Type: string*
+sasl.kerberos.service.name | * | | kafka | low | Kerberos principal name that Kafka runs as, not including /hostname@REALM <br>*Type: string*
+sasl.kerberos.principal | * | | kafkaclient | low | This client's Kerberos principal name. (Not supported on Windows, will use the logon user's principal). <br>*Type: string*
+sasl.kerberos.kinit.cmd | * | | kinit -R -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} \|\| kinit -t "%{sasl.kerberos.keytab}" -k %{sasl.kerberos.principal} | low | Shell command to refresh or acquire the client's Kerberos ticket. This command is executed on client creation and every sasl.kerberos.min.time.before.relogin (0=disable). %{config.prop.name} is replaced by corresponding config object value. <br>*Type: string*
+sasl.kerberos.keytab | * | | | low | Path to Kerberos keytab file. This configuration property is only used as a variable in `sasl.kerberos.kinit.cmd` as ` ... -t "%{sasl.kerberos.keytab}"`. <br>*Type: string*
+sasl.kerberos.min.time.before.relogin | * | 0 .. 86400000 | 60000 | low | Minimum time in milliseconds between key refresh attempts. Disable automatic key refresh by setting this property to 0. <br>*Type: integer*
+sasl.username | * | | | high | SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms <br>*Type: string*
+sasl.password | * | | | high | SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism <br>*Type: string*
+sasl.oauthbearer.config | * | | | low | SASL/OAUTHBEARER configuration. The format is implementation-dependent and must be parsed accordingly. The default unsecured token implementation (see https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes space-separated name=value pairs with valid names including principalClaimName, principal, scopeClaimName, scope, and lifeSeconds. The default value for principalClaimName is "sub", the default value for scopeClaimName is "scope", and the default value for lifeSeconds is 3600. The scope value is CSV format with the default value being no/empty scope. For example: `principalClaimName=azp principal=admin scopeClaimName=roles scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions can be communicated to the broker via `extension_NAME=value`. For example: `principal=admin extension_traceId=123` <br>*Type: string*
+enable.sasl.oauthbearer.unsecure.jwt | * | true, false | false | low | Enable the builtin unsecure JWT OAUTHBEARER token handler if no oauthbearer_refresh_cb has been set. This builtin handler should only be used for development or testing, and not in production. <br>*Type: boolean*
+oauthbearer_token_refresh_cb | * | | | low | SASL/OAUTHBEARER token refresh callback (set with rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by rd_kafka_poll(), et.al. This callback will be triggered when it is time to refresh the client's OAUTHBEARER token. Also see `rd_kafka_conf_enable_sasl_queue()`. <br>*Type: see dedicated API*
+sasl.oauthbearer.method | * | default, oidc | default | low | Set to "default" or "oidc" to control which login method to be used. If set to "oidc", the following properties must also be be specified: `sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, and `sasl.oauthbearer.token.endpoint.url`. <br>*Type: enum value*
+sasl.oauthbearer.client.id | * | | | low | Public identifier for the application. Must be unique across all clients that the authorization server handles. Only used when `sasl.oauthbearer.method` is set to "oidc". <br>*Type: string*
+sasl.oauthbearer.client.secret | * | | | low | Client secret only known to the application and the authorization server. This should be a sufficiently random string that is not guessable. Only used when `sasl.oauthbearer.method` is set to "oidc". <br>*Type: string*
+sasl.oauthbearer.scope | * | | | low | Client use this to specify the scope of the access request to the broker. Only used when `sasl.oauthbearer.method` is set to "oidc". <br>*Type: string*
+sasl.oauthbearer.extensions | * | | | low | Allow additional information to be provided to the broker. Comma-separated list of key=value pairs. E.g., "supportFeatureX=true,organizationId=sales-emea".Only used when `sasl.oauthbearer.method` is set to "oidc". <br>*Type: string*
+sasl.oauthbearer.token.endpoint.url | * | | | low | OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. Only used when `sasl.oauthbearer.method` is set to "oidc". <br>*Type: string*
+plugin.library.paths | * | | | low | List of plugin libraries to load (; separated). The library search path is platform dependent (see dlopen(3) for Unix and LoadLibrary() for Windows). If no filename extension is specified the platform-specific extension (such as .dll or .so) will be appended automatically. <br>*Type: string*
+interceptors | * | | | low | Interceptors added through rd_kafka_conf_interceptor_add_..() and any configuration handled by interceptors. <br>*Type: see dedicated API*
+group.id | C | | | high | Client group id string. All clients sharing the same group.id belong to the same group. <br>*Type: string*
+group.instance.id | C | | | medium | Enable static group membership. Static group members are able to leave and rejoin a group within the configured `session.timeout.ms` without prompting a group rebalance. This should be used in combination with a larger `session.timeout.ms` to avoid group rebalances caused by transient unavailability (e.g. process restarts). Requires broker version >= 2.3.0. <br>*Type: string*
+partition.assignment.strategy | C | | range,roundrobin | medium | The name of one or more partition assignment strategies. The elected group leader will use a strategy supported by all members of the group to assign partitions to group members. If there is more than one eligible strategy, preference is determined by the order of this list (strategies earlier in the list have higher priority). Cooperative and non-cooperative (eager) strategies must not be mixed. Available strategies: range, roundrobin, cooperative-sticky. <br>*Type: string*
+session.timeout.ms | C | 1 .. 3600000 | 45000 | high | Client group session and failure detection timeout. The consumer sends periodic heartbeats (heartbeat.interval.ms) to indicate its liveness to the broker. If no hearts are received by the broker for a group member within the session timeout, the broker will remove the consumer from the group and trigger a rebalance. The allowed range is configured with the **broker** configuration properties `group.min.session.timeout.ms` and `group.max.session.timeout.ms`. Also see `max.poll.interval.ms`. <br>*Type: integer*
+heartbeat.interval.ms | C | 1 .. 3600000 | 3000 | low | Group session keepalive heartbeat interval. <br>*Type: integer*
+group.protocol.type | C | | consumer | low | Group protocol type. NOTE: Currently, the only supported group protocol type is `consumer`. <br>*Type: string*
+coordinator.query.interval.ms | C | 1 .. 3600000 | 600000 | low | How often to query for the current client group coordinator. If the currently assigned coordinator is down the configured query interval will be divided by ten to more quickly recover in case of coordinator reassignment. <br>*Type: integer*
+max.poll.interval.ms | C | 1 .. 86400000 | 300000 | high | Maximum allowed time between calls to consume messages (e.g., rd_kafka_consumer_poll()) for high-level consumers. If this interval is exceeded the consumer is considered failed and the group will rebalance in order to reassign the partitions to another consumer group member. Warning: Offset commits may be not possible at this point. Note: It is recommended to set `enable.auto.offset.store=false` for long-time processing applications and then explicitly store offsets (using offsets_store()) *after* message processing, to make sure offsets are not auto-committed prior to processing has finished. The interval is checked two times per second. See KIP-62 for more information. <br>*Type: integer*
+enable.auto.commit | C | true, false | true | high | Automatically and periodically commit offsets in the background. Note: setting this to false does not prevent the consumer from fetching previously committed start offsets. To circumvent this behaviour set specific start offsets per partition in the call to assign(). <br>*Type: boolean*
+auto.commit.interval.ms | C | 0 .. 86400000 | 5000 | medium | The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. (0 = disable). This setting is used by the high-level consumer. <br>*Type: integer*
+enable.auto.offset.store | C | true, false | true | high | Automatically store offset of last message provided to application. The offset store is an in-memory store of the next offset to (auto-)commit for each partition. <br>*Type: boolean*
+queued.min.messages | C | 1 .. 10000000 | 100000 | medium | Minimum number of messages per topic+partition librdkafka tries to maintain in the local consumer queue. <br>*Type: integer*
+queued.max.messages.kbytes | C | 1 .. 2097151 | 65536 | medium | Maximum number of kilobytes of queued pre-fetched messages in the local consumer queue. If using the high-level consumer this setting applies to the single consumer queue, regardless of the number of partitions. When using the legacy simple consumer or when separate partition queues are used this setting applies per partition. This value may be overshot by fetch.message.max.bytes. This property has higher priority than queued.min.messages. <br>*Type: integer*
+fetch.wait.max.ms | C | 0 .. 300000 | 500 | low | Maximum time the broker may wait to fill the Fetch response with fetch.min.bytes of messages. <br>*Type: integer*
+fetch.message.max.bytes | C | 1 .. 1000000000 | 1048576 | medium | Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. <br>*Type: integer*
+max.partition.fetch.bytes | C | 1 .. 1000000000 | 1048576 | medium | Alias for `fetch.message.max.bytes`: Initial maximum number of bytes per topic+partition to request when fetching messages from the broker. If the client encounters a message larger than this value it will gradually try to increase it until the entire message can be fetched. <br>*Type: integer*
+fetch.max.bytes | C | 0 .. 2147483135 | 52428800 | medium | Maximum amount of data the broker shall return for a Fetch request. Messages are fetched in batches by the consumer and if the first message batch in the first non-empty partition of the Fetch request is larger than this value, then the message batch will still be returned to ensure the consumer can make progress. The maximum message batch size accepted by the broker is defined via `message.max.bytes` (broker config) or `max.message.bytes` (broker topic config). `fetch.max.bytes` is automatically adjusted upwards to be at least `message.max.bytes` (consumer config). <br>*Type: integer*
+fetch.min.bytes | C | 1 .. 100000000 | 1 | low | Minimum number of bytes the broker responds with. If fetch.wait.max.ms expires the accumulated data will be sent to the client regardless of this setting. <br>*Type: integer*
+fetch.error.backoff.ms | C | 0 .. 300000 | 500 | medium | How long to postpone the next fetch request for a topic+partition in case of a fetch error. <br>*Type: integer*
+offset.store.method | C | none, file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires Apache Kafka 0.8.2 or later on the broker). <br>*Type: enum value*
+isolation.level | C | read_uncommitted, read_committed | read_committed | high | Controls how to read messages written transactionally: `read_committed` - only return transactional messages which have been committed. `read_uncommitted` - return all messages, even transactional messages which have been aborted. <br>*Type: enum value*
+consume_cb | C | | | low | Message consume callback (set with rd_kafka_conf_set_consume_cb()) <br>*Type: see dedicated API*
+rebalance_cb | C | | | low | Called after consumer group has been rebalanced (set with rd_kafka_conf_set_rebalance_cb()) <br>*Type: see dedicated API*
+offset_commit_cb | C | | | low | Offset commit result propagation callback. (set with rd_kafka_conf_set_offset_commit_cb()) <br>*Type: see dedicated API*
+enable.partition.eof | C | true, false | false | low | Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the consumer reaches the end of a partition. <br>*Type: boolean*
+check.crcs | C | true, false | false | medium | Verify CRC32 of consumed messages, ensuring no on-the-wire or on-disk corruption to the messages occurred. This check comes at slightly increased CPU usage. <br>*Type: boolean*
+client.rack | * | | | low | A rack identifier for this client. This can be any string value which indicates where this client is physically located. It corresponds with the broker config `broker.rack`. <br>*Type: string*
+transactional.id | P | | | high | Enables the transactional producer. The transactional.id is used to identify the same transactional producer instance across process restarts. It allows the producer to guarantee that transactions corresponding to earlier instances of the same producer have been finalized prior to starting any new transactions, and that any zombie instances are fenced off. If no transactional.id is provided, then the producer is limited to idempotent delivery (if enable.idempotence is set). Requires broker version >= 0.11.0. <br>*Type: string*
+transaction.timeout.ms | P | 1000 .. 2147483647 | 60000 | medium | The maximum amount of time in milliseconds that the transaction coordinator will wait for a transaction status update from the producer before proactively aborting the ongoing transaction. If this value is larger than the `transaction.max.timeout.ms` setting in the broker, the init_transactions() call will fail with ERR_INVALID_TRANSACTION_TIMEOUT. The transaction timeout automatically adjusts `message.timeout.ms` and `socket.timeout.ms`, unless explicitly configured in which case they must not exceed the transaction timeout (`socket.timeout.ms` must be at least 100ms lower than `transaction.timeout.ms`). This is also the default timeout value if no timeout (-1) is supplied to the transactional API methods. <br>*Type: integer*
+enable.idempotence | P | true, false | false | high | When set to `true`, the producer will ensure that messages are successfully produced exactly once and in the original produce order. The following configuration properties are adjusted automatically (if not modified by the user) when idempotence is enabled: `max.in.flight.requests.per.connection=5` (must be less than or equal to 5), `retries=INT32_MAX` (must be greater than 0), `acks=all`, `queuing.strategy=fifo`. Producer instantation will fail if user-supplied configuration is incompatible. <br>*Type: boolean*
+enable.gapless.guarantee | P | true, false | false | low | **EXPERIMENTAL**: subject to change or removal. When set to `true`, any error that could result in a gap in the produced message series when a batch of messages fails, will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop the producer. Messages failing due to `message.timeout.ms` are not covered by this guarantee. Requires `enable.idempotence=true`. <br>*Type: boolean*
+queue.buffering.max.messages | P | 0 .. 2147483647 | 100000 | high | Maximum number of messages allowed on the producer queue. This queue is shared by all topics and partitions. A value of 0 disables this limit. <br>*Type: integer*
+queue.buffering.max.kbytes | P | 1 .. 2147483647 | 1048576 | high | Maximum total message size sum allowed on the producer queue. This queue is shared by all topics and partitions. This property has higher priority than queue.buffering.max.messages. <br>*Type: integer*
+queue.buffering.max.ms | P | 0 .. 900000 | 5 | high | Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. <br>*Type: float*
+linger.ms | P | 0 .. 900000 | 5 | high | Alias for `queue.buffering.max.ms`: Delay in milliseconds to wait for messages in the producer queue to accumulate before constructing message batches (MessageSets) to transmit to brokers. A higher value allows larger and more effective (less overhead, improved compression) batches of messages to accumulate at the expense of increased message delivery latency. <br>*Type: float*
+message.send.max.retries | P | 0 .. 2147483647 | 2147483647 | high | How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true. <br>*Type: integer*
+retries | P | 0 .. 2147483647 | 2147483647 | high | Alias for `message.send.max.retries`: How many times to retry sending a failing Message. **Note:** retrying may cause reordering unless `enable.idempotence` is set to true. <br>*Type: integer*
+retry.backoff.ms | P | 1 .. 300000 | 100 | medium | The backoff time in milliseconds before retrying a protocol request. <br>*Type: integer*
+queue.buffering.backpressure.threshold | P | 1 .. 1000000 | 1 | low | The threshold of outstanding not yet transmitted broker requests needed to backpressure the producer's message accumulator. If the number of not yet transmitted requests equals or exceeds this number, produce request creation that would have otherwise been triggered (for example, in accordance with linger.ms) will be delayed. A lower number yields larger and more effective batches. A higher value can improve latency when using compression on slow machines. <br>*Type: integer*
+compression.codec | P | none, gzip, snappy, lz4, zstd | none | medium | compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`. <br>*Type: enum value*
+compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`. <br>*Type: enum value*
+batch.num.messages | P | 1 .. 1000000 | 10000 | medium | Maximum number of messages batched in one MessageSet. The total MessageSet size is also limited by batch.size and message.max.bytes. <br>*Type: integer*
+batch.size | P | 1 .. 2147483647 | 1000000 | medium | Maximum size (in bytes) of all messages batched in one MessageSet, including protocol framing overhead. This limit is applied after the first message has been added to the batch, regardless of the first message's size, this is to ensure that messages that exceed batch.size are produced. The total MessageSet size is also limited by batch.num.messages and message.max.bytes. <br>*Type: integer*
+delivery.report.only.error | P | true, false | false | low | Only provide delivery reports for failed messages. <br>*Type: boolean*
+dr_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_cb()) <br>*Type: see dedicated API*
+dr_msg_cb | P | | | low | Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb()) <br>*Type: see dedicated API*
+sticky.partitioning.linger.ms | P | 0 .. 900000 | 10 | low | Delay in milliseconds to wait to assign new sticky partitions for each topic. By default, set to double the time of linger.ms. To disable sticky behavior, set to 0. This behavior affects messages with the key NULL in all cases, and messages with key lengths of zero when the consistent_random partitioner is in use. These messages would otherwise be assigned randomly. A higher value allows for more effective batching of these messages. <br>*Type: integer*
+
+
+## Topic configuration properties
+
+Property | C/P | Range | Default | Importance | Description
+-----------------------------------------|-----|-----------------|--------------:|------------| --------------------------
+request.required.acks | P | -1 .. 1000 | -1 | high | This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail. <br>*Type: integer*
+acks | P | -1 .. 1000 | -1 | high | Alias for `request.required.acks`: This field indicates the number of acknowledgements the leader broker must receive from ISR brokers before responding to the request: *0*=Broker does not send any response/ack to client, *-1* or *all*=Broker will block until message is committed by all in sync replicas (ISRs). If there are less than `min.insync.replicas` (broker configuration) in the ISR set the produce request will fail. <br>*Type: integer*
+request.timeout.ms | P | 1 .. 900000 | 30000 | medium | The ack timeout of the producer request in milliseconds. This value is only enforced by the broker and relies on `request.required.acks` being != 0. <br>*Type: integer*
+message.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured. <br>*Type: integer*
+delivery.timeout.ms | P | 0 .. 2147483647 | 300000 | high | Alias for `message.timeout.ms`: Local message timeout. This value is only enforced locally and limits the time a produced message waits for successful delivery. A time of 0 is infinite. This is the maximum time librdkafka may use to deliver a message (including retries). Delivery error occurs when either the retry count or the message timeout are exceeded. The message timeout is automatically adjusted to `transaction.timeout.ms` if `transactional.id` is configured. <br>*Type: integer*
+queuing.strategy | P | fifo, lifo | fifo | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Producer queuing strategy. FIFO preserves produce ordering, while LIFO prioritizes new messages. <br>*Type: enum value*
+produce.offset.report | P | true, false | false | low | **DEPRECATED** No longer used. <br>*Type: boolean*
+partitioner | P | | consistent_random | high | Partitioner: `random` - random distribution, `consistent` - CRC32 hash of key (Empty and NULL keys are mapped to single partition), `consistent_random` - CRC32 hash of key (Empty and NULL keys are randomly partitioned), `murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are mapped to single partition), `murmur2_random` - Java Producer compatible Murmur2 hash of key (NULL keys are randomly partitioned. This is functionally equivalent to the default partitioner in the Java Producer.), `fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), `fnv1a_random` - FNV-1a hash of key (NULL keys are randomly partitioned). <br>*Type: string*
+partitioner_cb | P | | | low | Custom partitioner callback (set with rd_kafka_topic_conf_set_partitioner_cb()) <br>*Type: see dedicated API*
+msg_order_cmp | P | | | low | **EXPERIMENTAL**: subject to change or removal. **DEPRECATED** Message queue ordering comparator (set with rd_kafka_topic_conf_set_msg_order_cmp()). Also see `queuing.strategy`. <br>*Type: see dedicated API*
+opaque | * | | | low | Application opaque (set with rd_kafka_topic_conf_set_opaque()) <br>*Type: see dedicated API*
+compression.codec | P | none, gzip, snappy, lz4, zstd, inherit | inherit | high | Compression codec to use for compressing message sets. inherit = inherit global compression.codec configuration. <br>*Type: enum value*
+compression.type | P | none, gzip, snappy, lz4, zstd | none | medium | Alias for `compression.codec`: compression codec to use for compressing message sets. This is the default value for all topics, may be overridden by the topic configuration property `compression.codec`. <br>*Type: enum value*
+compression.level | P | -1 .. 12 | -1 | medium | Compression level parameter for algorithm selected by configuration property `compression.codec`. Higher values will result in better compression at the cost of more CPU usage. Usable range is algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; -1 = codec-dependent default compression level. <br>*Type: integer*
+auto.commit.enable | C | true, false | true | low | **DEPRECATED** [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method. <br>*Type: boolean*
+enable.auto.commit | C | true, false | true | low | **DEPRECATED** Alias for `auto.commit.enable`: [**LEGACY PROPERTY:** This property is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `enable.auto.commit` property must be used instead]. If true, periodically commit offset of the last message handed to the application. This committed offset will be used when the process restarts to pick up where it left off. If false, the application will have to call `rd_kafka_offset_store()` to store an offset (optional). Offsets will be written to broker or local file according to offset.store.method. <br>*Type: boolean*
+auto.commit.interval.ms | C | 10 .. 86400000 | 60000 | high | [**LEGACY PROPERTY:** This setting is used by the simple legacy consumer only. When using the high-level KafkaConsumer, the global `auto.commit.interval.ms` property must be used instead]. The frequency in milliseconds that the consumer offsets are committed (written) to offset storage. <br>*Type: integer*
+auto.offset.reset | C | smallest, earliest, beginning, largest, latest, end, error | largest | high | Action to take when there is no initial offset in offset store or the desired offset is out of range: 'smallest','earliest' - automatically reset the offset to the smallest offset, 'largest','latest' - automatically reset the offset to the largest offset, 'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is retrieved by consuming messages and checking 'message->err'. <br>*Type: enum value*
+offset.store.path | C | | . | low | **DEPRECATED** Path to local file for storing offsets. If the path is a directory a filename will be automatically generated in that directory based on the topic and partition. File-based offset storage will be removed in a future version. <br>*Type: string*
+offset.store.sync.interval.ms | C | -1 .. 86400000 | -1 | low | **DEPRECATED** fsync() interval for the offset file, in milliseconds. Use -1 to disable syncing, and 0 for immediate sync after each write. File-based offset storage will be removed in a future version. <br>*Type: integer*
+offset.store.method | C | file, broker | broker | low | **DEPRECATED** Offset commit store method: 'file' - DEPRECATED: local file store (offset.store.path, et.al), 'broker' - broker commit store (requires "group.id" to be configured and Apache Kafka 0.8.2 or later on the broker.). <br>*Type: enum value*
+consume.callback.max.messages | C | 0 .. 1000000 | 0 | low | Maximum number of messages to dispatch in one `rd_kafka_consume_callback*()` call (0 = unlimited) <br>*Type: integer*
+
+### C/P legend: C = Consumer, P = Producer, * = both
diff --git a/fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md b/fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md
new file mode 100644
index 000000000..45ab45f9b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/CONTRIBUTING.md
@@ -0,0 +1,425 @@
+# Contributing to librdkafka
+
+(This document is based on [curl's CONTRIBUTE.md](https://github.com/curl/curl/blob/master/docs/CONTRIBUTE.md) - thank you!)
+
+This document is intended to offer guidelines on how to best contribute to the
+librdkafka project. This concerns new features as well as bug fixes and
+general improvements.
+
+### License and copyright
+
+When contributing with code, you agree to put your changes and new code under
+the same license librdkafka is already using unless stated and agreed
+otherwise.
+
+When changing existing source code, you do not alter the copyright of the
+original file(s). The copyright will still be owned by the original creator(s)
+or those who have been assigned copyright by the original author(s).
+
+By submitting a patch to the librdkafka, you are assumed to have the right
+to the code and to be allowed by your employer or whatever to hand over that
+patch/code to us. We will credit you for your changes as far as possible, to
+give credit but also to keep a trace back to who made what changes. Please
+always provide us with your full real name when contributing!
+
+Official librdkafka project maintainer(s) assume ownership and copyright
+ownership of all accepted submissions.
+
+
+## Write a good patch
+
+### API and ABI compatibility guarantees
+
+librdkafka maintains a strict API and ABI compatibility guarantee, we guarantee
+not to break existing applications and we honour the SONAME version.
+
+**Note:** ABI compatibility is guaranteed only for the C library, not C++.
+
+**Note to librdkafka maintainers:**
+
+Don't think we can or should bump the SONAME version, it will break all
+existing applications relying on librdkafka, and there's no change important
+enough to warrant that.
+Instead deprecate (but keep) old APIs and add new better APIs as required.
+Deprecate APIs through documentation (`@deprecate ..`) rather than
+compiler hints (`RD_DEPRECATED`) - since the latter will cause compilation
+warnings/errors for users.
+
+
+#### Changes to existing APIs
+
+Existing public APIs MUST NEVER be changed, as this would be a breaking API
+and ABI change. This line must never be crossed.
+
+This means that no changes are allowed to:
+ * public function or method signatures - arguments, types, return values.
+ * public structs - existing fields may not be modified and new fields must
+ not be added.
+
+
+As for semantic changes (i.e., a function changes its behaviour), these are
+allowed under the following conditions:
+
+ * the existing behaviour that is changed is not documented and not widely
+ relied upon. Typically this revolves around what error codes a function
+ returns.
+ * the existing behaviour is well known but is clearly wrong and consistently
+ trips people up.
+
+All such changes must be clearly stated in the "Upgrade considerations" section
+of the release in CHANGELOG.md.
+
+
+#### New public APIs
+
+Since changes to existing APIs are strictly limited to the above rules, it is
+also clear that new APIs must be delicately designed to be complete and future
+proof, since once they've been introduced they can never be changed.
+
+ * Never add public structs - there are some public structs in librdkafka
+ and they were all mistakes, they've all been headaches.
+ Instead add private types and provide accessor methods to set/get values.
+ This allows future extension without breaking existing applications.
+ * Avoid adding synchronous APIs, try to make them asynch by the use of
+ `rd_kafka_queue_t` result queues, if possible.
+ This may complicate the APIs a bit, but they're most of the time abstracted
+ in higher-level language clients and it allows both synchronous and
+ asynchronous usage.
+
+
+
+### Portability
+
+librdkafka is highly portable and needs to stay that way; this means we're
+limited to almost-but-not-quite C99, and standard library (libc, et.al)
+functions that are generally available across platforms.
+
+Also avoid adding new dependencies since dependency availability across
+platforms and package managers are a common problem.
+
+If an external dependency is required, make sure that it is available as a
+vcpkg, and also add it as a source build dependency to mklove
+(see mklove/modules/configure.libcurl for an example) so that it can be built
+and linked statically into librdkafka as part of the packaging process.
+
+Less is more. Don't try to be fancy, be boring.
+
+
+### Follow code style
+
+When writing C code, follow the code style already established in
+the project. Consistent style makes code easier to read and mistakes less
+likely to happen.
+
+clang-format is used to check, and fix, the style for C/C++ files,
+while flake8 and autopep8 is used for the Python scripts.
+
+You must check the style before committing by running `make style-check-changed`
+from the top-level directory, and if any style errors are reported you can
+automatically fix them using `make style-fix-changed` (or just run
+that command directly).
+
+The Python code may need some manual fixing since autopep8 is unable to fix
+all warnings reported by flake8, in particular it will not split long lines,
+in which case a ` # noqa: E501` may be needed to turn off the warning.
+
+See the end of this document for the C style guide to use in librdkafka.
+
+
+### Write Separate Changes
+
+It is annoying when you get a huge patch from someone that is said to fix 511
+odd problems, but discussions and opinions don't agree with 510 of them - or
+509 of them were already fixed in a different way. Then the person merging
+this change needs to extract the single interesting patch from somewhere
+within the huge pile of source, and that gives a lot of extra work.
+
+Preferably, each fix that correct a problem should be in its own patch/commit
+with its own description/commit message stating exactly what they correct so
+that all changes can be selectively applied by the maintainer or other
+interested parties.
+
+Also, separate changes enable bisecting much better when we track problems
+and regression in the future.
+
+### Patch Against Recent Sources
+
+Please try to make your patches against latest master branch.
+
+### Test Cases
+
+Bugfixes should also include a new test case in the regression test suite
+that verifies the bug is fixed.
+Create a new tests/00<freenumber>-<short_bug_description>.c file and
+try to reproduce the issue in its most simple form.
+Verify that the test case fails for earlier versions and passes with your
+bugfix in-place.
+
+New features and APIs should also result in an added test case.
+
+Submitted patches must pass all existing tests.
+For more information on the test suite see [tests/README.md].
+
+
+
+## How to get your changes into the main sources
+
+File a [pull request on github](https://github.com/edenhill/librdkafka/pulls)
+
+Your change will be reviewed and discussed there and you will be
+expected to correct flaws pointed out and update accordingly, or the change
+risk stalling and eventually just get deleted without action. As a submitter
+of a change, you are the owner of that change until it has been merged.
+
+Make sure to monitor your PR on github and answer questions and/or
+fix nits/flaws. This is very important. We will take lack of replies as a
+sign that you're not very anxious to get your patch accepted and we tend to
+simply drop such changes.
+
+When you adjust your pull requests after review, please squash the
+commits so that we can review the full updated version more easily
+and keep history cleaner.
+
+For example:
+
+ # Interactive rebase to let you squash/fixup commits
+ $ git rebase -i master
+
+ # Mark fixes-on-fixes commits as 'fixup' (or just 'f') in the
+ # first column. These will be silently integrated into the
+ # previous commit, so make sure to move the fixup-commit to
+ # the line beneath the parent commit.
+
+ # Since this probably rewrote the history of previously pushed
+ # commits you will need to make a force push, which is usually
+ # a bad idea but works good for pull requests.
+ $ git push --force origin your_feature_branch
+
+
+### Write good commit messages
+
+A short guide to how to write good commit messages.
+
+ ---- start ----
+ [area]: [short line describing the main effect] [(#issuenumber)]
+ -- empty line --
+ [full description, no wider than 72 columns that describe as much as
+ possible as to why this change is made, and possibly what things
+ it fixes and everything else that is related]
+ ---- stop ----
+
+Example:
+
+ cgrp: Restart query timer on all heartbeat failures (#10023)
+
+ If unhandled errors were received in HeartbeatResponse
+ the cgrp could get stuck in a state where it would not
+ refresh its coordinator.
+
+
+**Important**: Rebase your PR branch on top of master (`git rebase -i master`)
+ and squash interim commits (to make a clean and readable git history)
+ before pushing. Use force push to keep your history clean even after
+ the initial PR push.
+
+**Note**: Good PRs with bad commit messages or messy commit history
+ such as "fixed review comment", will be squashed up in
+ to a single commit with a proper commit message.
+
+
+### Add changelog
+
+If the changes in the PR affects the end user in any way, such as for a user
+visible bug fix, new feature, API or doc change, etc, a release changelog item
+needs to be added to [CHANGELOG.md](CHANGELOG.md) for the next release.
+
+Add a single line to the appropriate section (Enhancements, Fixes, ..)
+outlining the change, an issue number (if any), and your name or GitHub
+user id for attribution.
+
+E.g.:
+```
+## Enhancements
+ * Improve commit() async parameter documentation (Paul Nit, #123)
+```
+
+
+
+# librdkafka C style and naming guide
+
+*Note: The code format style is enforced by our clang-format and pep8 rules,
+so that is not covered here.*
+
+## C standard "C98"
+
+This is a mix of C89 and C99, to be compatible with old MSVC versions.
+
+Notable, it is C99 with the following limitations:
+
+ * No variable declarations after statements.
+ * No in-line variable declarations.
+
+
+## Function and globals naming
+
+Use self-explanatory hierarchical snake-case naming.
+Pretty much all symbols should start with `rd_kafka_`, followed by
+their subsystem (e.g., `cgrp`, `broker`, `buf`, etc..), followed by an
+action (e.g, `find`, `get`, `clear`, ..).
+
+The exceptions are:
+ - Protocol requests and fields, use their Apache Kafka CamelCase names, .e.g:
+ `rd_kafka_ProduceRequest()` and `int16_t ErrorCode`.
+ - Public APIs that closely mimic the Apache Kafka Java counterpart, e.g.,
+ the Admin API: `rd_kafka_DescribeConsumerGroups()`.
+
+
+## Variable naming
+
+For existing types use the type prefix as variable name.
+The type prefix is typically the first part of struct member fields.
+Example:
+
+ * `rd_kafka_broker_t` has field names starting with `rkb_..`, thus broker
+ variable names should be named `rkb`
+
+Be consistent with using the same variable name for the same type throughout
+the code, it makes reading the code much easier as the type can be easily
+inferred from the variable.
+
+For other types use reasonably concise but descriptive names.
+`i` and `j` are typical int iterators.
+
+## Variable declaration
+
+Variables must be declared at the head of a scope, no in-line variable
+declarations after statements are allowed.
+
+## Function parameters/arguments
+
+For internal functions assume that all function parameters are properly
+specified, there is no need to check arguments for non-NULL, etc.
+Any maluse internally is a bug, and not something we need to preemptively
+protect against - the test suites should cover most of the code anyway - so
+put your efforts there instead.
+
+For arguments that may be NULL, i.e., optional arguments, we explicitlly
+document in the function docstring that the argument is optional (NULL),
+but there is no need to do this for non-optional arguments.
+
+## Indenting
+
+Use 8 spaces indent, no tabs, same as the Linux kernel.
+In emacs, use `c-set-style "linux`.
+For C++, use Google's C++ style.
+
+Fix formatting issues by running `make style-fix-changed` prior to committing.
+
+
+## Comments
+
+Use `/* .. */` comments, not `// ..`
+
+For functions, use doxygen syntax, e.g.:
+
+ /**
+ * @brief <short description>
+ * ..
+ * @returns <something..>
+ */
+
+
+Make sure to comment non-obvious code and situations where the full
+context of an operation is not easily graspable.
+
+Also make sure to update existing comments when the code changes.
+
+
+## Line length
+
+Try hard to keep line length below 80 characters, when this is not possible
+exceed it with reason.
+
+
+## Braces
+
+Braces go on the same line as their enveloping statement:
+
+ int some_func (..) {
+ while (1) {
+ if (1) {
+ do something;
+ ..
+ } else {
+ do something else;
+ ..
+ }
+ }
+
+ /* Single line scopes should not have braces */
+ if (1)
+ hi();
+ else if (2)
+ /* Say hello */
+ hello();
+ else
+ bye();
+
+
+## Spaces
+
+All expression parentheses should be prefixed and suffixed with a single space:
+
+ int some_func (int a) {
+
+ if (1)
+ ....;
+
+ for (i = 0 ; i < 19 ; i++) {
+
+
+ }
+ }
+
+
+Use space around operators:
+
+ int a = 2;
+
+ if (b >= 3)
+ c += 2;
+
+Except for these:
+
+ d++;
+ --e;
+
+
+## New block on new line
+
+New blocks should be on a new line:
+
+ if (1)
+ new();
+ else
+ old();
+
+
+## Parentheses
+
+Don't assume the reader knows C operator precedence by heart for complex
+statements, add parentheses to ease readability and make the intent clear.
+
+
+## ifdef hell
+
+Avoid ifdef's as much as possible.
+Platform support checking should be performed in configure.librdkafka.
+
+
+
+
+
+# librdkafka C++ style guide
+
+Follow [Google's C++ style guide](https://google.github.io/styleguide/cppguide.html)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/Doxyfile b/fluent-bit/lib/librdkafka-2.1.0/Doxyfile
new file mode 100644
index 000000000..33fc31a4e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/Doxyfile
@@ -0,0 +1,2375 @@
+# Doxyfile 1.8.9.1
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a double hash (##) is considered a comment and is placed in
+# front of the TAG it is preceding.
+#
+# All text after a single hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists, items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (\" \").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all text
+# before the first occurrence of this tag. Doxygen uses libiconv (or the iconv
+# built into libc) for the transcoding. See http://www.gnu.org/software/libiconv
+# for the list of possible encodings.
+# The default value is: UTF-8.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded by
+# double-quotes, unless you are using Doxywizard) that should identify the
+# project for which the documentation is generated. This name is used in the
+# title of most generated pages and in a few other places.
+# The default value is: My Project.
+
+PROJECT_NAME = "librdkafka"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. This
+# could be handy for archiving the generated documentation or if some version
+# control system is used.
+
+PROJECT_NUMBER =
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description
+# for a project that appears at the top of each page and should give viewer a
+# quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF = "The Apache Kafka C/C++ client library"
+
+# With the PROJECT_LOGO tag one can specify a logo or an icon that is included
+# in the documentation. The maximum height of the logo should not exceed 55
+# pixels and the maximum width should not exceed 200 pixels. Doxygen will copy
+# the logo to the output directory.
+
+#PROJECT_LOGO = kafka_logo.png
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) path
+# into which the generated documentation will be written. If a relative path is
+# entered, it will be relative to the location where doxygen was started. If
+# left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = staging-docs
+
+# If the CREATE_SUBDIRS tag is set to YES then doxygen will create 4096 sub-
+# directories (in 2 levels) under the output directory of each output format and
+# will distribute the generated files over these directories. Enabling this
+# option can be useful when feeding doxygen a huge amount of source files, where
+# putting all generated files in the same directory would otherwise causes
+# performance problems for the file system.
+# The default value is: NO.
+
+CREATE_SUBDIRS = NO
+
+# If the ALLOW_UNICODE_NAMES tag is set to YES, doxygen will allow non-ASCII
+# characters to appear in the names of generated files. If set to NO, non-ASCII
+# characters will be escaped, for example _xE3_x81_x84 will be used for Unicode
+# U+3044.
+# The default value is: NO.
+
+ALLOW_UNICODE_NAMES = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# Possible values are: Afrikaans, Arabic, Armenian, Brazilian, Catalan, Chinese,
+# Chinese-Traditional, Croatian, Czech, Danish, Dutch, English (United States),
+# Esperanto, Farsi (Persian), Finnish, French, German, Greek, Hungarian,
+# Indonesian, Italian, Japanese, Japanese-en (Japanese with English messages),
+# Korean, Korean-en (Korean with English messages), Latvian, Lithuanian,
+# Macedonian, Norwegian, Persian (Farsi), Polish, Portuguese, Romanian, Russian,
+# Serbian, Serbian-Cyrillic, Slovak, Slovene, Spanish, Swedish, Turkish,
+# Ukrainian and Vietnamese.
+# The default value is: English.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES, doxygen will include brief member
+# descriptions after the members that are listed in the file and class
+# documentation (similar to Javadoc). Set to NO to disable this.
+# The default value is: YES.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES, doxygen will prepend the brief
+# description of a member or function before the detailed description
+#
+# Note: If both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+# The default value is: YES.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator that is
+# used to form the text in various listings. Each string in this list, if found
+# as the leading text of the brief description, will be stripped from the text
+# and the result, after processing the whole list, is used as the annotated
+# text. Otherwise, the brief description is used as-is. If left blank, the
+# following values are used ($name is automatically replaced with the name of
+# the entity):The $name class, The $name widget, The $name file, is, provides,
+# specifies, contains, represents, a, an and the.
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# doxygen will generate a detailed section even if there is only a brief
+# description.
+# The default value is: NO.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+# The default value is: NO.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES, doxygen will prepend the full path
+# before files name in the file list and in the header files. If set to NO the
+# shortest path that makes the file name unique will be used
+# The default value is: YES.
+
+FULL_PATH_NAMES = YES
+
+# The STRIP_FROM_PATH tag can be used to strip a user-defined part of the path.
+# Stripping is only done if one of the specified strings matches the left-hand
+# part of the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the path to
+# strip.
+#
+# Note that you can specify absolute paths here, but also relative paths, which
+# will be relative from the directory where doxygen is started.
+# This tag requires that the tag FULL_PATH_NAMES is set to YES.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of the
+# path mentioned in the documentation of a class, which tells the reader which
+# header file to include in order to use a class. If left blank only the name of
+# the header file containing the class definition is used. Otherwise one should
+# specify the list of include paths that are normally passed to the compiler
+# using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter (but
+# less readable) file names. This can be useful is your file systems doesn't
+# support long names like on DOS, Mac, or CD-ROM.
+# The default value is: NO.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then doxygen will interpret the
+# first line (until the first dot) of a Javadoc-style comment as the brief
+# description. If set to NO, the Javadoc-style will behave just like regular Qt-
+# style comments (thus requiring an explicit @brief command for a brief
+# description.)
+# The default value is: NO.
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then doxygen will interpret the first
+# line (until the first dot) of a Qt-style comment as the brief description. If
+# set to NO, the Qt-style will behave just like regular Qt-style comments (thus
+# requiring an explicit \brief command for a brief description.)
+# The default value is: NO.
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make doxygen treat a
+# multi-line C++ special comment block (i.e. a block of //! or /// comments) as
+# a brief description. This used to be the default behavior. The new default is
+# to treat a multi-line C++ comment block as a detailed description. Set this
+# tag to YES if you prefer the old behavior instead.
+#
+# Note that setting this tag to YES also means that rational rose comments are
+# not recognized any more.
+# The default value is: NO.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES then an undocumented member inherits the
+# documentation from any documented member that it re-implements.
+# The default value is: YES.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES then doxygen will produce a new
+# page for each member. If set to NO, the documentation of a member will be part
+# of the file/class/namespace that contains it.
+# The default value is: NO.
+
+SEPARATE_MEMBER_PAGES = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. Doxygen
+# uses this value to replace tabs by spaces in code fragments.
+# Minimum value: 1, maximum value: 16, default value: 4.
+
+TAB_SIZE = 4
+
+# This tag can be used to specify a number of aliases that act as commands in
+# the documentation. An alias has the form:
+# name=value
+# For example adding
+# "sideeffect=@par Side Effects:\n"
+# will allow you to put the command \sideeffect (or @sideeffect) in the
+# documentation, which will result in a user-defined paragraph with heading
+# "Side Effects:". You can put \n's in the value part of an alias to insert
+# newlines.
+
+ALIASES = "locality=@par Thread restriction:"
+ALIASES += "locks=@par Lock restriction:"
+# Automatically escape @REALM in CONFIGURATION.md
+ALIASES += "REALM=\@REALM"
+
+# This tag can be used to specify a number of word-keyword mappings (TCL only).
+# A mapping has the form "name=value". For example adding "class=itcl::class"
+# will allow you to use the command class in the itcl::class meaning.
+
+TCL_SUBST =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources
+# only. Doxygen will then generate output that is more tailored for C. For
+# instance, some of the names that are used will be different. The list of all
+# members will be omitted, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java or
+# Python sources only. Doxygen will then generate output that is more tailored
+# for that language. For instance, namespaces will be presented as packages,
+# qualified scopes will look different, etc.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources. Doxygen will then generate output that is tailored for Fortran.
+# The default value is: NO.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for VHDL.
+# The default value is: NO.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given
+# extension. Doxygen has a built-in mapping, but you can override or extend it
+# using this tag. The format is ext=language, where ext is a file extension, and
+# language is one of the parsers supported by doxygen: IDL, Java, Javascript,
+# C#, C, C++, D, PHP, Objective-C, Python, Fortran (fixed format Fortran:
+# FortranFixed, free formatted Fortran: FortranFree, unknown formatted Fortran:
+# Fortran. In the later case the parser tries to guess whether the code is fixed
+# or free formatted code, this is the default for Fortran type files), VHDL. For
+# instance to make doxygen treat .inc files as Fortran files (default is PHP),
+# and .f files as C (default is Fortran), use: inc=Fortran f=C.
+#
+# Note: For files without extension you can use no_extension as a placeholder.
+#
+# Note that for custom extensions you also need to set FILE_PATTERNS otherwise
+# the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If the MARKDOWN_SUPPORT tag is enabled then doxygen pre-processes all comments
+# according to the Markdown format, which allows for more readable
+# documentation. See http://daringfireball.net/projects/markdown/ for details.
+# The output of markdown processing is further processed by doxygen, so you can
+# mix doxygen, HTML, and XML commands with Markdown formatting. Disable only in
+# case of backward compatibilities issues.
+# The default value is: YES.
+
+MARKDOWN_SUPPORT = YES
+
+# When enabled doxygen tries to link words that correspond to documented
+# classes, or namespaces to their corresponding documentation. Such a link can
+# be prevented in individual cases by putting a % sign in front of the word or
+# globally by setting AUTOLINK_SUPPORT to NO.
+# The default value is: YES.
+
+AUTOLINK_SUPPORT = YES
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should set this
+# tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string);
+# versus func(std::string) {}). This also make the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+# The default value is: NO.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+# The default value is: NO.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip (see:
+# http://www.riverbankcomputing.co.uk/software/sip/intro) sources only. Doxygen
+# will parse them like normal C++ but will assume all classes use public instead
+# of private inheritance when no explicit protection keyword is present.
+# The default value is: NO.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate
+# getter and setter methods for a property. Setting this option to YES will make
+# doxygen to replace the get and set methods by a property in the documentation.
+# This will only work if the methods are indeed getting or setting a simple
+# type. If this is not the case, or you want to show the methods anyway, you
+# should set this option to NO.
+# The default value is: YES.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+# The default value is: NO.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES to allow class member groups of the same type
+# (for instance a group of public functions) to be put as a subgroup of that
+# type (e.g. under the Public Functions section). Set it to NO to prevent
+# subgrouping. Alternatively, this can be done per class using the
+# \nosubgrouping command.
+# The default value is: YES.
+
+SUBGROUPING = YES
+
+# When the INLINE_GROUPED_CLASSES tag is set to YES, classes, structs and unions
+# are shown inside the group in which they are included (e.g. using \ingroup)
+# instead of on a separate page (for HTML and Man pages) or section (for LaTeX
+# and RTF).
+#
+# Note that this feature does not work in combination with
+# SEPARATE_MEMBER_PAGES.
+# The default value is: NO.
+
+INLINE_GROUPED_CLASSES = NO
+
+# When the INLINE_SIMPLE_STRUCTS tag is set to YES, structs, classes, and unions
+# with only public data fields or simple typedef fields will be shown inline in
+# the documentation of the scope in which they are defined (i.e. file,
+# namespace, or group documentation), provided this scope is documented. If set
+# to NO, structs, classes, and unions are shown on a separate page (for HTML and
+# Man pages) or section (for LaTeX and RTF).
+# The default value is: NO.
+
+INLINE_SIMPLE_STRUCTS = NO
+
+# When TYPEDEF_HIDES_STRUCT tag is enabled, a typedef of a struct, union, or
+# enum is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically be
+# useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+# The default value is: NO.
+
+TYPEDEF_HIDES_STRUCT = YES
+
+# The size of the symbol lookup cache can be set using LOOKUP_CACHE_SIZE. This
+# cache is used to resolve symbols given their name and scope. Since this can be
+# an expensive process and often the same symbol appears multiple times in the
+# code, doxygen keeps a cache of pre-resolved symbols. If the cache is too small
+# doxygen will become slower. If the cache is too large, memory is wasted. The
+# cache size is given by this formula: 2^(16+LOOKUP_CACHE_SIZE). The valid range
+# is 0..9, the default is 0, corresponding to a cache size of 2^16=65536
+# symbols. At the end of a run doxygen will report the cache usage and suggest
+# the optimal cache size from a speed point of view.
+# Minimum value: 0, maximum value: 9, default value: 0.
+
+LOOKUP_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES, doxygen will assume all entities in
+# documentation are documented, even if no documentation was available. Private
+# class members and static file members will be hidden unless the
+# EXTRACT_PRIVATE respectively EXTRACT_STATIC tags are set to YES.
+# Note: This will also disable the warnings about undocumented members that are
+# normally produced when WARNINGS is set to YES.
+# The default value is: NO.
+
+EXTRACT_ALL = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES, all private members of a class will
+# be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PRIVATE = NO
+
+# If the EXTRACT_PACKAGE tag is set to YES, all members with package or internal
+# scope will be included in the documentation.
+# The default value is: NO.
+
+EXTRACT_PACKAGE = NO
+
+# If the EXTRACT_STATIC tag is set to YES, all static members of a file will be
+# included in the documentation.
+# The default value is: NO.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES, classes (and structs) defined
+# locally in source files will be included in the documentation. If set to NO,
+# only classes defined in header files are included. Does not have any effect
+# for Java sources.
+# The default value is: YES.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. If set to YES, local methods,
+# which are defined in the implementation section but not in the interface are
+# included in the documentation. If set to NO, only methods in the interface are
+# included.
+# The default value is: NO.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base name of
+# the file that contains the anonymous namespace. By default anonymous namespace
+# are hidden.
+# The default value is: NO.
+
+EXTRACT_ANON_NSPACES = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, doxygen will hide all
+# undocumented members inside documented classes or files. If set to NO these
+# members will be included in the various overviews, but no documentation
+# section is generated. This option has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy. If set
+# to NO, these classes will be included in the various overviews. This option
+# has no effect if EXTRACT_ALL is enabled.
+# The default value is: NO.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, doxygen will hide all friend
+# (class|struct|union) declarations. If set to NO, these declarations will be
+# included in the documentation.
+# The default value is: NO.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, doxygen will hide any
+# documentation blocks found inside the body of a function. If set to NO, these
+# blocks will be appended to the function's detailed documentation block.
+# The default value is: NO.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation that is typed after a
+# \internal command is included. If the tag is set to NO then the documentation
+# will be excluded. Set it to YES to include the internal documentation.
+# The default value is: NO.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then doxygen will only generate file
+# names in lower-case letters. If set to YES, upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+# The default value is: system dependent.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO then doxygen will show members with
+# their full class and namespace scopes in the documentation. If set to YES, the
+# scope will be hidden.
+# The default value is: NO.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the HIDE_COMPOUND_REFERENCE tag is set to NO (default) then doxygen will
+# append additional text to a page's title, such as Class Reference. If set to
+# YES the compound reference will be hidden.
+# The default value is: NO.
+
+HIDE_COMPOUND_REFERENCE= NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES then doxygen will put a list of
+# the files that are included by a file in the documentation of that file.
+# The default value is: YES.
+
+SHOW_INCLUDE_FILES = YES
+
+# If the SHOW_GROUPED_MEMB_INC tag is set to YES then Doxygen will add for each
+# grouped member an include statement to the documentation, telling the reader
+# which file to include in order to use the member.
+# The default value is: NO.
+
+SHOW_GROUPED_MEMB_INC = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then doxygen will list include
+# files with double quotes in the documentation rather than with sharp brackets.
+# The default value is: NO.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES then a tag [inline] is inserted in the
+# documentation for inline members.
+# The default value is: YES.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES then doxygen will sort the
+# (detailed) documentation of file and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order.
+# The default value is: YES.
+
+SORT_MEMBER_DOCS = NO
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the brief
+# descriptions of file, namespace and class members alphabetically by member
+# name. If set to NO, the members will appear in declaration order. Note that
+# this will also influence the order of the classes in the class list.
+# The default value is: NO.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen will sort the
+# (brief and detailed) documentation of class members so that constructors and
+# destructors are listed first. If set to NO the constructors will appear in the
+# respective orders defined by SORT_BRIEF_DOCS and SORT_MEMBER_DOCS.
+# Note: If SORT_BRIEF_DOCS is set to NO this option is ignored for sorting brief
+# member documentation.
+# Note: If SORT_MEMBER_DOCS is set to NO this option is ignored for sorting
+# detailed member documentation.
+# The default value is: NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the hierarchy
+# of group names into alphabetical order. If set to NO the group names will
+# appear in their defined order.
+# The default value is: NO.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be sorted by
+# fully-qualified names, including namespaces. If set to NO, the class list will
+# be sorted only by class name, not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the alphabetical
+# list.
+# The default value is: NO.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper
+# type resolution of all parameters of a function it will reject a match between
+# the prototype and the implementation of a member function even if there is
+# only one candidate or it is obvious which candidate to choose by doing a
+# simple string match. By disabling STRICT_PROTO_MATCHING doxygen will still
+# accept a match between prototype and implementation in such cases.
+# The default value is: NO.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or disable (NO) the todo
+# list. This list is created by putting \todo commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or disable (NO) the test
+# list. This list is created by putting \test commands in the documentation.
+# The default value is: YES.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or disable (NO) the bug
+# list. This list is created by putting \bug commands in the documentation.
+# The default value is: YES.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or disable (NO)
+# the deprecated list. This list is created by putting \deprecated commands in
+# the documentation.
+# The default value is: YES.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional documentation
+# sections, marked by \if <section_label> ... \endif and \cond <section_label>
+# ... \endcond blocks.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines that the
+# initial value of a variable or macro / define can have for it to appear in the
+# documentation. If the initializer consists of more lines than specified here
+# it will be hidden. Use a value of 0 to hide initializers completely. The
+# appearance of the value of individual variables and macros / defines can be
+# controlled using \showinitializer or \hideinitializer command in the
+# documentation regardless of this setting.
+# Minimum value: 0, maximum value: 10000, default value: 30.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated at
+# the bottom of the documentation of classes and structs. If set to YES, the
+# list will mention the files that were used to generate the documentation.
+# The default value is: YES.
+
+SHOW_USED_FILES = YES
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page. This
+# will remove the Files entry from the Quick Index and from the Folder Tree View
+# (if specified).
+# The default value is: YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the Namespaces
+# page. This will remove the Namespaces entry from the Quick Index and from the
+# Folder Tree View (if specified).
+# The default value is: YES.
+
+SHOW_NAMESPACES = NO
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command command input-file, where command is the value of the
+# FILE_VERSION_FILTER tag, and input-file is the name of an input file provided
+# by doxygen. Whatever the program writes to standard output is used as the file
+# version. For an example see the documentation.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. To create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option. You can
+# optionally specify a file name after the option, if omitted DoxygenLayout.xml
+# will be used as the name of the layout file.
+#
+# Note that if you run doxygen from a directory containing a file called
+# DoxygenLayout.xml, doxygen will parse it automatically even if the LAYOUT_FILE
+# tag is left empty.
+
+LAYOUT_FILE =
+
+# The CITE_BIB_FILES tag can be used to specify one or more bib files containing
+# the reference definitions. This must be a list of .bib files. The .bib
+# extension is automatically appended if omitted. This requires the bibtex tool
+# to be installed. See also http://en.wikipedia.org/wiki/BibTeX for more info.
+# For LaTeX the style of the bibliography can be controlled using
+# LATEX_BIB_STYLE. To use this feature you need bibtex and perl available in the
+# search path. See also \cite for info how to create references.
+
+CITE_BIB_FILES =
+
+#---------------------------------------------------------------------------
+# Configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated to
+# standard output by doxygen. If QUIET is set to YES this implies that the
+# messages are off.
+# The default value is: NO.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated to standard error (stderr) by doxygen. If WARNINGS is set to YES
+# this implies that the warnings are on.
+#
+# Tip: Turn warnings on while writing the documentation.
+# The default value is: YES.
+
+WARNINGS = YES
+
+# Treat all warnings as errors.
+WARN_AS_ERROR = YES
+
+# If the WARN_IF_UNDOCUMENTED tag is set to YES then doxygen will generate
+# warnings for undocumented members. If EXTRACT_ALL is set to YES then this flag
+# will automatically be disabled.
+# The default value is: YES.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If the WARN_IF_DOC_ERROR tag is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some parameters
+# in a documented function, or documenting parameters that don't exist or using
+# markup commands wrongly.
+# The default value is: YES.
+
+WARN_IF_DOC_ERROR = YES
+
+# This WARN_NO_PARAMDOC option can be enabled to get warnings for functions that
+# are documented, but have no documentation for their parameters or return
+# value. If set to NO, doxygen will only warn about wrong or incomplete
+# parameter documentation, but not about the absence of documentation.
+# The default value is: NO.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that doxygen
+# can produce. The string should contain the $file, $line, and $text tags, which
+# will be replaced by the file and line number from which the warning originated
+# and the warning text. Optionally the format may contain $version, which will
+# be replaced by the version of the file (if it could be obtained via
+# FILE_VERSION_FILTER)
+# The default value is: $file:$line: $text.
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning and error
+# messages should be written. If left blank the output is written to standard
+# error (stderr).
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag is used to specify the files and/or directories that contain
+# documented source files. You may enter file names like myfile.cpp or
+# directories like /usr/src/myproject. Separate the files or directories with
+# spaces.
+# Note: If this tag is empty the current directory is searched.
+
+INPUT = mainpage.doxy INTRODUCTION.md CONFIGURATION.md STATISTICS.md src/rdkafka.h src-cpp/rdkafkacpp.h
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding. Doxygen uses
+# libiconv (or the iconv built into libc) for the transcoding. See the libiconv
+# documentation (see: http://www.gnu.org/software/libiconv) for the list of
+# possible encodings.
+# The default value is: UTF-8.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank the
+# following patterns are tested:*.c, *.cc, *.cxx, *.cpp, *.c++, *.java, *.ii,
+# *.ixx, *.ipp, *.i++, *.inl, *.idl, *.ddl, *.odl, *.h, *.hh, *.hxx, *.hpp,
+# *.h++, *.cs, *.d, *.php, *.php4, *.php5, *.phtml, *.inc, *.m, *.markdown,
+# *.md, *.mm, *.dox, *.py, *.f90, *.f, *.for, *.tcl, *.vhd, *.vhdl, *.ucf,
+# *.qsf, *.as and *.js.
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories should
+# be searched for input files as well.
+# The default value is: NO.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should be
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+#
+# Note that relative paths are relative to the directory from which doxygen is
+# run.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+# The default value is: NO.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories.
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+#
+# Note that the wildcards are matched against the file with absolute path, so to
+# exclude all test directories use the pattern */test/*
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or directories
+# that contain example code fragments that are included (see the \include
+# command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp and
+# *.h) to filter out the source-files in the directories. If left blank all
+# files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude commands
+# irrespective of the value of the RECURSIVE tag.
+# The default value is: NO.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or directories
+# that contain images that are to be included in the documentation (see the
+# \image command).
+
+IMAGE_PATH = src
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command:
+#
+# <filter> <input-file>
+#
+# where <filter> is the value of the INPUT_FILTER tag, and <input-file> is the
+# name of an input file. Doxygen will then use the output that the filter
+# program writes to standard output. If FILTER_PATTERNS is specified, this tag
+# will be ignored.
+#
+# Note that the filter must not add or remove lines; it is applied before the
+# code is scanned, but not when the output code is generated. If lines are added
+# or removed, the anchors will not be placed correctly.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis. Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match. The filters are a list of the form: pattern=filter
+# (like *.cpp=my_cpp_filter). See INPUT_FILTER for further information on how
+# filters are used. If the FILTER_PATTERNS tag is empty or if none of the
+# patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will also be used to filter the input files that are used for
+# producing the source files to browse (i.e. when SOURCE_BROWSER is set to YES).
+# The default value is: NO.
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any) and
+# it is also possible to disable source filtering for a specific pattern using
+# *.ext= (so without naming a filter).
+# This tag requires that the tag FILTER_SOURCE_FILES is set to YES.
+
+FILTER_SOURCE_PATTERNS =
+
+# If the USE_MDFILE_AS_MAINPAGE tag refers to the name of a markdown file that
+# is part of the input, its contents will be placed on the main page
+# (index.html). This can be useful if you have a project on for instance GitHub
+# and want to reuse the introduction page also for the doxygen output.
+
+USE_MDFILE_AS_MAINPAGE =
+
+#---------------------------------------------------------------------------
+# Configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will be
+# generated. Documented entities will be cross-referenced with these sources.
+#
+# Note: To get rid of all source code in the generated output, make sure that
+# also VERBATIM_HEADERS is set to NO.
+# The default value is: NO.
+
+SOURCE_BROWSER = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body of functions,
+# classes and enums directly into the documentation.
+# The default value is: NO.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES will instruct doxygen to hide any
+# special comment blocks from generated source code fragments. Normal C, C++ and
+# Fortran comments will always remain visible.
+# The default value is: YES.
+
+STRIP_CODE_COMMENTS = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES then for each documented
+# function all documented functions referencing it will be listed.
+# The default value is: NO.
+
+REFERENCED_BY_RELATION = NO
+
+# If the REFERENCES_RELATION tag is set to YES then for each documented function
+# all documented entities called/used by that function will be listed.
+# The default value is: NO.
+
+REFERENCES_RELATION = NO
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES and SOURCE_BROWSER tag is set
+# to YES then the hyperlinks from functions in REFERENCES_RELATION and
+# REFERENCED_BY_RELATION lists will link to the source code. Otherwise they will
+# link to the documentation.
+# The default value is: YES.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If SOURCE_TOOLTIPS is enabled (the default) then hovering a hyperlink in the
+# source code will show a tooltip with additional information such as prototype,
+# brief description and links to the definition and documentation. Since this
+# will make the HTML file larger and loading of large files a bit slower, you
+# can opt to disable this feature.
+# The default value is: YES.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+SOURCE_TOOLTIPS = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code will
+# point to the HTML generated by the htags(1) tool instead of doxygen built-in
+# source browser. The htags tool is part of GNU's global source tagging system
+# (see http://www.gnu.org/software/global/global.html). You will need version
+# 4.8.6 or higher.
+#
+# To use it do the following:
+# - Install the latest version of global
+# - Enable SOURCE_BROWSER and USE_HTAGS in the config file
+# - Make sure the INPUT points to the root of the source tree
+# - Run doxygen as normal
+#
+# Doxygen will invoke htags (and that will in turn invoke gtags), so these
+# tools must be available from the command line (i.e. in the search path).
+#
+# The result: instead of the source browser generated by doxygen, the links to
+# source code will now point to the output of htags.
+# The default value is: NO.
+# This tag requires that the tag SOURCE_BROWSER is set to YES.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set the YES then doxygen will generate a
+# verbatim copy of the header file for each class for which an include is
+# specified. Set to NO to disable this.
+# See also: Section \class.
+# The default value is: YES.
+
+VERBATIM_HEADERS = YES
+
+# If the CLANG_ASSISTED_PARSING tag is set to YES then doxygen will use the
+# clang parser (see: http://clang.llvm.org/) for more accurate parsing at the
+# cost of reduced performance. This can be particularly helpful with template
+# rich C++ code for which doxygen's built-in parser lacks the necessary type
+# information.
+# Note: The availability of this option depends on whether or not doxygen was
+# compiled with the --with-libclang option.
+# The default value is: NO.
+
+#CLANG_ASSISTED_PARSING = NO
+
+# If clang assisted parsing is enabled you can provide the compiler with command
+# line options that you would normally use when invoking the compiler. Note that
+# the include paths will already be set by doxygen for the files and directories
+# specified with INPUT and INCLUDE_PATH.
+# This tag requires that the tag CLANG_ASSISTED_PARSING is set to YES.
+
+#CLANG_OPTIONS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index of all
+# compounds will be generated. Enable this if the project contains a lot of
+# classes, structs, unions or interfaces.
+# The default value is: YES.
+
+ALPHABETICAL_INDEX = YES
+
+# The COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns in
+# which the alphabetical index list will be split.
+# Minimum value: 1, maximum value: 20, default value: 5.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all classes will
+# be put under the same header in the alphabetical index. The IGNORE_PREFIX tag
+# can be used to specify a prefix (or a list of prefixes) that should be ignored
+# while generating the index headers.
+# This tag requires that the tag ALPHABETICAL_INDEX is set to YES.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES, doxygen will generate HTML output
+# The default value is: YES.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for each
+# generated HTML page (for example: .htm, .php, .asp).
+# The default value is: .html.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a user-defined HTML header file for
+# each generated HTML page. If the tag is left blank doxygen will generate a
+# standard header.
+#
+# To get valid HTML the header file that includes any scripts and style sheets
+# that doxygen needs, which is dependent on the configuration options used (e.g.
+# the setting GENERATE_TREEVIEW). It is highly recommended to start with a
+# default header using
+# doxygen -w html new_header.html new_footer.html new_stylesheet.css
+# YourConfigFile
+# and then modify the file new_header.html. See also section "Doxygen usage"
+# for information on how to generate the default header that doxygen normally
+# uses.
+# Note: The header is subject to change so you typically have to regenerate the
+# default header when upgrading to a newer version of doxygen. For a description
+# of the possible markers and block names see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a user-defined HTML footer for each
+# generated HTML page. If the tag is left blank doxygen will generate a standard
+# footer. See HTML_HEADER for more information on how to generate a default
+# footer and what special commands can be used inside the footer. See also
+# section "Doxygen usage" for information on how to generate the default footer
+# that doxygen normally uses.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading style
+# sheet that is used by each HTML page. It can be used to fine-tune the look of
+# the HTML output. If left blank doxygen will generate a default style sheet.
+# See also section "Doxygen usage" for information on how to generate the style
+# sheet that doxygen normally uses.
+# Note: It is recommended to use HTML_EXTRA_STYLESHEET instead of this tag, as
+# it is more robust and this tag (HTML_STYLESHEET) will in the future become
+# obsolete.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_STYLESHEET =
+
+# The HTML_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# cascading style sheets that are included after the standard style sheets
+# created by doxygen. Using this option one can overrule certain style aspects.
+# This is preferred over using HTML_STYLESHEET since it does not replace the
+# standard style sheet and is therefore more robust against future updates.
+# Doxygen will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list). For an example see the documentation.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_STYLESHEET =
+
+# The HTML_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the HTML output directory. Note
+# that these files will be copied to the base HTML output directory. Use the
+# $relpath^ marker in the HTML_HEADER and/or HTML_FOOTER files to load these
+# files. In the HTML_STYLESHEET file, use the file name only. Also note that the
+# files will be copied as-is; there are no commands or markers available.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_EXTRA_FILES =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output. Doxygen
+# will adjust the colors in the style sheet and background images according to
+# this color. Hue is specified as an angle on a colorwheel, see
+# http://en.wikipedia.org/wiki/Hue for more information. For instance the value
+# 0 represents red, 60 is yellow, 120 is green, 180 is cyan, 240 is blue, 300
+# purple, and 360 is red again.
+# Minimum value: 0, maximum value: 359, default value: 220.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of the colors
+# in the HTML output. For a value of 0 the output will use grayscales only. A
+# value of 255 will produce the most vivid colors.
+# Minimum value: 0, maximum value: 255, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to the
+# luminance component of the colors in the HTML output. Values below 100
+# gradually make the output lighter, whereas values above 100 make the output
+# darker. The value divided by 100 is the actual gamma applied, so 80 represents
+# a gamma of 0.8, The value 220 represents a gamma of 2.2, and 100 does not
+# change the gamma.
+# Minimum value: 40, maximum value: 240, default value: 80.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting this
+# to YES can help to show when doxygen was last run and thus if the
+# documentation is up to date.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# With HTML_INDEX_NUM_ENTRIES one can control the preferred number of entries
+# shown in the various tree structured indices initially; the user can expand
+# and collapse entries dynamically later on. Doxygen will expand the tree to
+# such a level that at most the specified number of entries are visible (unless
+# a fully collapsed tree already exceeds this amount). So setting the number of
+# entries 1 will produce a full collapsed tree by default. 0 is a special value
+# representing an infinite number of entries and will result in a full expanded
+# tree by default.
+# Minimum value: 0, maximum value: 9999, default value: 100.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+HTML_INDEX_NUM_ENTRIES = 100
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files will be
+# generated that can be used as input for Apple's Xcode 3 integrated development
+# environment (see: http://developer.apple.com/tools/xcode/), introduced with
+# OSX 10.5 (Leopard). To create a documentation set, doxygen will generate a
+# Makefile in the HTML output directory. Running make will produce the docset in
+# that directory and running make install will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find it at
+# startup. See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_DOCSET = NO
+
+# This tag determines the name of the docset feed. A documentation feed provides
+# an umbrella under which multiple documentation sets from a single provider
+# (such as a company or product suite) can be grouped.
+# The default value is: Doxygen generated docs.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_FEEDNAME = "librdkafka documentation"
+
+# This tag specifies a string that should uniquely identify the documentation
+# set bundle. This should be a reverse domain-name style string, e.g.
+# com.mycompany.MyDocSet. Doxygen will append .docset to the name.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_BUNDLE_ID = se.edenhill.librdkafka
+
+# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+# The default value is: org.doxygen.Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_ID = se.edenhill
+
+# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
+# The default value is: Publisher.
+# This tag requires that the tag GENERATE_DOCSET is set to YES.
+
+DOCSET_PUBLISHER_NAME = Magnus Edenhill
+
+# If the GENERATE_HTMLHELP tag is set to YES then doxygen generates three
+# additional HTML index files: index.hhp, index.hhc, and index.hhk. The
+# index.hhp is a project file that can be read by Microsoft's HTML Help Workshop
+# (see: http://www.microsoft.com/en-us/download/details.aspx?id=21138) on
+# Windows.
+#
+# The HTML Help Workshop contains a compiler that can convert all HTML output
+# generated by doxygen into a single compiled HTML file (.chm). Compiled HTML
+# files are now used as the Windows 98 help format, and will replace the old
+# Windows help format (.hlp) on all Windows platforms in the future. Compressed
+# HTML files also contain an index, a table of contents, and you can search for
+# words in the documentation. The HTML workshop also contains a viewer for
+# compressed HTML files.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_HTMLHELP = NO
+
+# The CHM_FILE tag can be used to specify the file name of the resulting .chm
+# file. You can add a path in front of the file if the result should not be
+# written to the html output directory.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_FILE =
+
+# The HHC_LOCATION tag can be used to specify the location (absolute path
+# including file name) of the HTML help compiler (hhc.exe). If non-empty,
+# doxygen will try to run the HTML help compiler on the generated index.hhp.
+# The file has to be specified with full path.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+HHC_LOCATION =
+
+# The GENERATE_CHI flag controls if a separate .chi index file is generated
+# (YES) or that it should be included in the primary .chm file (NO).
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+GENERATE_CHI = NO
+
+# The CHM_INDEX_ENCODING is used to encode HtmlHelp index (hhk), content (hhc)
+# and project file content.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+CHM_INDEX_ENCODING =
+
+# The BINARY_TOC flag controls whether a binary table of contents is generated
+# (YES) or a normal table of contents (NO) in the .chm file. Furthermore it
+# enables the Previous and Next buttons.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members to
+# the table of contents of the HTML help documentation and to the tree view.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTMLHELP is set to YES.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated that
+# can be used as input for Qt's qhelpgenerator to generate a Qt Compressed Help
+# (.qch) of the generated HTML documentation.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can be used to specify
+# the file name of the resulting .qch file. The path specified is relative to
+# the HTML output folder.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating Qt Help
+# Project output. For more information please see Qt Help Project / Namespace
+# (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#namespace).
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_NAMESPACE = se.edenhill.librdkafka
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating Qt
+# Help Project output. For more information please see Qt Help Project / Virtual
+# Folders (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#virtual-
+# folders).
+# The default value is: doc.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If the QHP_CUST_FILTER_NAME tag is set, it specifies the name of a custom
+# filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILTER_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see Qt Help Project / Custom
+# Filters (see: http://qt-project.org/doc/qt-4.8/qthelpproject.html#custom-
+# filters).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's filter section matches. Qt Help Project / Filter Attributes (see:
+# http://qt-project.org/doc/qt-4.8/qthelpproject.html#filter-attributes).
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHP_SECT_FILTER_ATTRS =
+
+# The QHG_LOCATION tag can be used to specify the location of Qt's
+# qhelpgenerator. If non-empty doxygen will try to run qhelpgenerator on the
+# generated .qhp file.
+# This tag requires that the tag GENERATE_QHP is set to YES.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files will be
+# generated, together with the HTML files, they form an Eclipse help plugin. To
+# install this plugin and make it available under the help contents menu in
+# Eclipse, the contents of the directory containing the HTML and XML files needs
+# to be copied into the plugins directory of eclipse. The name of the directory
+# within the plugins directory should be the same as the ECLIPSE_DOC_ID value.
+# After copying Eclipse needs to be restarted before the help appears.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the Eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have this
+# name. Each documentation set should have its own identifier.
+# The default value is: org.doxygen.Project.
+# This tag requires that the tag GENERATE_ECLIPSEHELP is set to YES.
+
+ECLIPSE_DOC_ID = se.edenhill.librdkafka
+
+# If you want full control over the layout of the generated HTML pages it might
+# be necessary to disable the index and replace it with your own. The
+# DISABLE_INDEX tag can be used to turn on/off the condensed index (tabs) at top
+# of each HTML page. A value of NO enables the index and the value YES disables
+# it. Since the tabs in the index contain the same information as the navigation
+# tree, you can set this option to YES if you also set GENERATE_TREEVIEW to YES.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+DISABLE_INDEX = NO
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information. If the tag
+# value is set to YES, a side panel will be generated containing a tree-like
+# index structure (just like the one that is generated for HTML Help). For this
+# to work a browser that supports JavaScript, DHTML, CSS and frames is required
+# (i.e. any modern browser). Windows users are probably better off using the
+# HTML help feature. Via custom style sheets (see HTML_EXTRA_STYLESHEET) one can
+# further fine-tune the look of the index. As an example, the default style
+# sheet generated by doxygen has an example that shows how to put an image at
+# the root of the tree instead of the PROJECT_NAME. Since the tree basically has
+# the same information as the tab index, you could consider setting
+# DISABLE_INDEX to YES when enabling this option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+GENERATE_TREEVIEW = YES
+
+# The ENUM_VALUES_PER_LINE tag can be used to set the number of enum values that
+# doxygen will group on one line in the generated HTML documentation.
+#
+# Note that a value of 0 will completely suppress the enum values from appearing
+# in the overview section.
+# Minimum value: 0, maximum value: 20, default value: 4.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+ENUM_VALUES_PER_LINE = 1
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be used
+# to set the initial width (in pixels) of the frame in which the tree is shown.
+# Minimum value: 0, maximum value: 1500, default value: 250.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+TREEVIEW_WIDTH = 250
+
+# If the EXT_LINKS_IN_WINDOW option is set to YES, doxygen will open links to
+# external symbols imported via tag files in a separate window.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of LaTeX formulas included as images in
+# the HTML documentation. When you change the font size after a successful
+# doxygen run you need to manually remove any form_*.png images from the HTML
+# output directory to force them to be regenerated.
+# Minimum value: 8, maximum value: 50, default value: 10.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are not
+# supported properly for IE 6.0, but are supported on all modern browsers.
+#
+# Note that when changing this option you need to delete any form_*.png files in
+# the HTML output directory before the changes have effect.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax (see
+# http://www.mathjax.org) which uses client side Javascript for the rendering
+# instead of using pre-rendered bitmaps. Use this if you do not have LaTeX
+# installed or if you want to formulas look prettier in the HTML output. When
+# enabled you may also need to install MathJax separately and configure the path
+# to it using the MATHJAX_RELPATH option.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you can set the default output format to be used for
+# the MathJax output. See the MathJax site (see:
+# http://docs.mathjax.org/en/latest/output.html) for more details.
+# Possible values are: HTML-CSS (which is slower, but has the best
+# compatibility), NativeMML (i.e. MathML) and SVG.
+# The default value is: HTML-CSS.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_FORMAT = HTML-CSS
+
+# When MathJax is enabled you need to specify the location relative to the HTML
+# output directory using the MATHJAX_RELPATH option. The destination directory
+# should contain the MathJax.js script. For instance, if the mathjax directory
+# is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the MathJax
+# Content Delivery Network so you can quickly see the result without installing
+# MathJax. However, it is strongly recommended to install a local copy of
+# MathJax from http://www.mathjax.org before deployment.
+# The default value is: http://cdn.mathjax.org/mathjax/latest.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_RELPATH = http://cdn.mathjax.org/mathjax/latest
+
+# The MATHJAX_EXTENSIONS tag can be used to specify one or more MathJax
+# extension names that should be enabled during MathJax rendering. For example
+# MATHJAX_EXTENSIONS = TeX/AMSmath TeX/AMSsymbols
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_EXTENSIONS =
+
+# The MATHJAX_CODEFILE tag can be used to specify a file with javascript pieces
+# of code that will be used on startup of the MathJax code. See the MathJax site
+# (see: http://docs.mathjax.org/en/latest/output.html) for more details. For an
+# example see the documentation.
+# This tag requires that the tag USE_MATHJAX is set to YES.
+
+MATHJAX_CODEFILE =
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box for
+# the HTML output. The underlying search engine uses javascript and DHTML and
+# should work on any modern browser. Note that when using HTML help
+# (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets (GENERATE_DOCSET)
+# there is already a search function so this one should typically be disabled.
+# For large projects the javascript based search engine can be slow, then
+# enabling SERVER_BASED_SEARCH may provide a better solution. It is possible to
+# search using the keyboard; to jump to the search box use <access key> + S
+# (what the <access key> is depends on the OS and browser, but it is typically
+# <CTRL>, <ALT>/<option>, or both). Inside the search box use the <cursor down
+# key> to jump into the search results window, the results can be navigated
+# using the <cursor keys>. Press <Enter> to select an item or <escape> to cancel
+# the search. The filter options can be selected when the cursor is inside the
+# search box by pressing <Shift>+<cursor down>. Also here use the <cursor keys>
+# to select a filter and <Enter> or <escape> to activate or cancel the filter
+# option.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_HTML is set to YES.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a web server instead of a web client using Javascript. There
+# are two flavors of web server based searching depending on the EXTERNAL_SEARCH
+# setting. When disabled, doxygen will generate a PHP script for searching and
+# an index file used by the script. When EXTERNAL_SEARCH is enabled the indexing
+# and searching needs to be provided by external tools. See the section
+# "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SERVER_BASED_SEARCH = NO
+
+# When EXTERNAL_SEARCH tag is enabled doxygen will no longer generate the PHP
+# script for searching. Instead the search results are written to an XML file
+# which needs to be processed by an external indexer. Doxygen will invoke an
+# external search engine pointed to by the SEARCHENGINE_URL option to obtain the
+# search results.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/).
+#
+# See the section "External Indexing and Searching" for details.
+# The default value is: NO.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH = NO
+
+# The SEARCHENGINE_URL should point to a search engine hosted by a web server
+# which will return the search results when EXTERNAL_SEARCH is enabled.
+#
+# Doxygen ships with an example indexer (doxyindexer) and search engine
+# (doxysearch.cgi) which are based on the open source search engine library
+# Xapian (see: http://xapian.org/). See the section "External Indexing and
+# Searching" for details.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHENGINE_URL =
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the unindexed
+# search data is written to a file for indexing by an external tool. With the
+# SEARCHDATA_FILE tag the name of this file can be specified.
+# The default file is: searchdata.xml.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+SEARCHDATA_FILE = searchdata.xml
+
+# When SERVER_BASED_SEARCH and EXTERNAL_SEARCH are both enabled the
+# EXTERNAL_SEARCH_ID tag can be used as an identifier for the project. This is
+# useful in combination with EXTRA_SEARCH_MAPPINGS to search through multiple
+# projects and redirect the results back to the right project.
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTERNAL_SEARCH_ID =
+
+# The EXTRA_SEARCH_MAPPINGS tag can be used to enable searching through doxygen
+# projects other than the one defined by this configuration file, but that are
+# all added to the same external search index. Each project needs to have a
+# unique id set via EXTERNAL_SEARCH_ID. The search mapping then maps the id of
+# to a relative location where the documentation can be found. The format is:
+# EXTRA_SEARCH_MAPPINGS = tagname1=loc1 tagname2=loc2 ...
+# This tag requires that the tag SEARCHENGINE is set to YES.
+
+EXTRA_SEARCH_MAPPINGS =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES, doxygen will generate LaTeX output.
+# The default value is: YES.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked.
+#
+# Note that when enabling USE_PDFLATEX this option is only used for generating
+# bitmaps for formulas in the HTML output, but not in the Makefile that is
+# written to the output directory.
+# The default file is: latex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to generate
+# index for LaTeX.
+# The default file is: makeindex.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES, doxygen generates more compact LaTeX
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+COMPACT_LATEX = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used by the
+# printer.
+# Possible values are: a4 (210 x 297 mm), letter (8.5 x 11 inches), legal (8.5 x
+# 14 inches) and executive (7.25 x 10.5 inches).
+# The default value is: a4.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PAPER_TYPE = a4
+
+# The EXTRA_PACKAGES tag can be used to specify one or more LaTeX package names
+# that should be included in the LaTeX output. To get the times font for
+# instance you can specify
+# EXTRA_PACKAGES=times
+# If left blank no extra packages will be included.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for the
+# generated LaTeX document. The header should contain everything until the first
+# chapter. If it is left blank doxygen will generate a standard header. See
+# section "Doxygen usage" for information on how to let doxygen write the
+# default header to a separate file.
+#
+# Note: Only use a user-defined header if you know what you are doing! The
+# following commands have a special meaning inside the header: $title,
+# $datetime, $date, $doxygenversion, $projectname, $projectnumber,
+# $projectbrief, $projectlogo. Doxygen will replace $title with the empty
+# string, for the replacement values of the other commands the user is referred
+# to HTML_HEADER.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HEADER =
+
+# The LATEX_FOOTER tag can be used to specify a personal LaTeX footer for the
+# generated LaTeX document. The footer should contain everything after the last
+# chapter. If it is left blank doxygen will generate a standard footer. See
+# LATEX_HEADER for more information on how to generate a default footer and what
+# special commands can be used inside the footer.
+#
+# Note: Only use a user-defined footer if you know what you are doing!
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_FOOTER =
+
+# The LATEX_EXTRA_STYLESHEET tag can be used to specify additional user-defined
+# LaTeX style sheets that are included after the standard style sheets created
+# by doxygen. Using this option one can overrule certain style aspects. Doxygen
+# will copy the style sheet files to the output directory.
+# Note: The order of the extra style sheet files is of importance (e.g. the last
+# style sheet in the list overrules the setting of the previous ones in the
+# list).
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_STYLESHEET =
+
+# The LATEX_EXTRA_FILES tag can be used to specify one or more extra images or
+# other source files which should be copied to the LATEX_OUTPUT output
+# directory. Note that the files will be copied as-is; there are no commands or
+# markers available.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_EXTRA_FILES =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated is
+# prepared for conversion to PDF (using ps2pdf or pdflatex). The PDF file will
+# contain links (just like the HTML output) instead of page references. This
+# makes the output suitable for online browsing using a PDF viewer.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, doxygen will use pdflatex to generate
+# the PDF file directly from the LaTeX files. Set this option to YES, to get a
+# higher quality PDF documentation.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep running
+# if errors occur, instead of asking the user for help. This option is also used
+# when generating formulas in HTML.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BATCHMODE = NO
+
+# If the LATEX_HIDE_INDICES tag is set to YES then doxygen will not include the
+# index chapters (such as File Index, Compound Index, etc.) in the output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_HIDE_INDICES = NO
+
+# If the LATEX_SOURCE_CODE tag is set to YES then doxygen will include source
+# code with syntax highlighting in the LaTeX output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_SOURCE_CODE = NO
+
+# The LATEX_BIB_STYLE tag can be used to specify the style to use for the
+# bibliography, e.g. plainnat, or ieeetr. See
+# http://en.wikipedia.org/wiki/BibTeX and \cite for more info.
+# The default value is: plain.
+# This tag requires that the tag GENERATE_LATEX is set to YES.
+
+LATEX_BIB_STYLE = plain
+
+#---------------------------------------------------------------------------
+# Configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES, doxygen will generate RTF output. The
+# RTF output is optimized for Word 97 and may not look too pretty with other RTF
+# readers/editors.
+# The default value is: NO.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: rtf.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES, doxygen generates more compact RTF
+# documents. This may be useful for small projects and may help to save some
+# trees in general.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated will
+# contain hyperlink fields. The RTF file will contain links (just like the HTML
+# output) instead of page references. This makes the output suitable for online
+# browsing using Word or some other Word compatible readers that support those
+# fields.
+#
+# Note: WordPad (write) and others do not support links.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's config
+# file, i.e. a series of assignments. You only have to provide replacements,
+# missing definitions are set to their default value.
+#
+# See also section "Doxygen usage" for information on how to generate the
+# default style sheet that doxygen normally uses.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an RTF document. Syntax is
+# similar to doxygen's config file. A template extensions file can be generated
+# using doxygen -e rtf extensionFile.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_EXTENSIONS_FILE =
+
+# If the RTF_SOURCE_CODE tag is set to YES then doxygen will include source code
+# with syntax highlighting in the RTF output.
+#
+# Note that which sources are shown also depends on other settings such as
+# SOURCE_BROWSER.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_RTF is set to YES.
+
+RTF_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES, doxygen will generate man pages for
+# classes and files.
+# The default value is: NO.
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it. A directory man3 will be created inside the directory specified by
+# MAN_OUTPUT.
+# The default directory is: man.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to the generated
+# man pages. In case the manual section does not start with a number, the number
+# 3 is prepended. The dot (.) at the beginning of the MAN_EXTENSION tag is
+# optional.
+# The default value is: .3.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_EXTENSION = .3
+
+# The MAN_SUBDIR tag determines the name of the directory created within
+# MAN_OUTPUT in which the man pages are placed. If defaults to man followed by
+# MAN_EXTENSION with the initial . removed.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_SUBDIR =
+
+# If the MAN_LINKS tag is set to YES and doxygen generates man output, then it
+# will generate one additional man file for each entity documented in the real
+# man page(s). These additional files only source the real man page, but without
+# them the man command would be unable to find the correct page.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_MAN is set to YES.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES, doxygen will generate an XML file that
+# captures the structure of the code including all documentation.
+# The default value is: NO.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. If a
+# relative path is entered the value of OUTPUT_DIRECTORY will be put in front of
+# it.
+# The default directory is: xml.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_OUTPUT = xml
+
+# If the XML_PROGRAMLISTING tag is set to YES, doxygen will dump the program
+# listings (including syntax highlighting and cross-referencing information) to
+# the XML output. Note that enabling this will significantly increase the size
+# of the XML output.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_XML is set to YES.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the DOCBOOK output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_DOCBOOK tag is set to YES, doxygen will generate Docbook files
+# that can be used to generate PDF.
+# The default value is: NO.
+
+GENERATE_DOCBOOK = NO
+
+# The DOCBOOK_OUTPUT tag is used to specify where the Docbook pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be put in
+# front of it.
+# The default directory is: docbook.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_OUTPUT = docbook
+
+# If the DOCBOOK_PROGRAMLISTING tag is set to YES, doxygen will include the
+# program listings (including syntax highlighting and cross-referencing
+# information) to the DOCBOOK output. Note that enabling this will significantly
+# increase the size of the DOCBOOK output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_DOCBOOK is set to YES.
+
+DOCBOOK_PROGRAMLISTING = NO
+
+#---------------------------------------------------------------------------
+# Configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES, doxygen will generate an
+# AutoGen Definitions (see http://autogen.sf.net) file that captures the
+# structure of the code including all documentation. Note that this feature is
+# still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES, doxygen will generate a Perl module
+# file that captures the structure of the code including all documentation.
+#
+# Note that this feature is still experimental and incomplete at the moment.
+# The default value is: NO.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES, doxygen will generate the necessary
+# Makefile rules, Perl scripts and LaTeX code to be able to generate PDF and DVI
+# output from the Perl module output.
+# The default value is: NO.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES, the Perl module output will be nicely
+# formatted so it can be parsed by a human reader. This is useful if you want to
+# understand what is going on. On the other hand, if this tag is set to NO, the
+# size of the Perl module output will be much smaller and Perl will parse it
+# just the same.
+# The default value is: YES.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file are
+# prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. This is useful
+# so different doxyrules.make files included by the same Makefile don't
+# overwrite each other's variables.
+# This tag requires that the tag GENERATE_PERLMOD is set to YES.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES, doxygen will evaluate all
+# C-preprocessor directives found in the sources and include files.
+# The default value is: YES.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES, doxygen will expand all macro names
+# in the source code. If set to NO, only conditional compilation will be
+# performed. Macro expansion can be done in a controlled way by setting
+# EXPAND_ONLY_PREDEF to YES.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES then
+# the macro expansion is limited to the macros specified with the PREDEFINED and
+# EXPAND_AS_DEFINED tags.
+# The default value is: NO.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES, the include files in the
+# INCLUDE_PATH will be searched if a #include is found.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by the
+# preprocessor.
+# This tag requires that the tag SEARCH_INCLUDES is set to YES.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will be
+# used.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that are
+# defined before the preprocessor is started (similar to the -D option of e.g.
+# gcc). The argument of the tag is a list of macros of the form: name or
+# name=definition (no spaces). If the definition and the "=" are omitted, "=1"
+# is assumed. To prevent a macro definition from being undefined via #undef or
+# recursively expanded use the := operator instead of the = operator.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+PREDEFINED =
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then this
+# tag can be used to specify a list of macro names that should be expanded. The
+# macro definition that is found in the sources will be used. Use the PREDEFINED
+# tag if you want to use a different macro definition that overrules the
+# definition found in the source code.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES then doxygen's preprocessor will
+# remove all references to function-like macros that are alone on a line, have
+# an all uppercase name, and do not end with a semicolon. Such function macros
+# are typically used for boiler-plate code, and will confuse the parser if not
+# removed.
+# The default value is: YES.
+# This tag requires that the tag ENABLE_PREPROCESSING is set to YES.
+
+SKIP_FUNCTION_MACROS = NO
+
+#---------------------------------------------------------------------------
+# Configuration options related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES tag can be used to specify one or more tag files. For each tag
+# file the location of the external documentation should be added. The format of
+# a tag file without this location is as follows:
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where loc1 and loc2 can be relative or absolute paths or URLs. See the
+# section "Linking to external documentation" for more information about the use
+# of tag files.
+# Note: Each tag file must have a unique name (where the name does NOT include
+# the path). If a tag file is not located in the directory in which doxygen is
+# run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create a
+# tag file that is based on the input files it reads. See section "Linking to
+# external documentation" for more information about the usage of tag files.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES, all external class will be listed in
+# the class index. If set to NO, only the inherited external classes will be
+# listed.
+# The default value is: NO.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES, all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will be
+# listed.
+# The default value is: YES.
+
+EXTERNAL_GROUPS = YES
+
+# If the EXTERNAL_PAGES tag is set to YES, all external pages will be listed in
+# the related pages index. If set to NO, only the current project's pages will
+# be listed.
+# The default value is: YES.
+
+EXTERNAL_PAGES = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES, doxygen will generate a class diagram
+# (in HTML and LaTeX) for classes with base or super classes. Setting the tag to
+# NO turns the diagrams off. Note that this option also works with HAVE_DOT
+# disabled, but it is recommended to install and use dot, since it yields more
+# powerful graphs.
+# The default value is: YES.
+
+CLASS_DIAGRAMS = YES
+
+# You can include diagrams made with dia in doxygen documentation. Doxygen will
+# then run dia to produce the diagram and insert it in the documentation. The
+# DIA_PATH tag allows you to specify the directory where the dia binary resides.
+# If left empty dia is assumed to be found in the default search path.
+
+DIA_PATH =
+
+# If set to YES the inheritance and collaboration graphs will hide inheritance
+# and usage relations if the target is undocumented or is not a class.
+# The default value is: YES.
+
+HIDE_UNDOC_RELATIONS = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz (see:
+# http://www.graphviz.org/), a graph visualization toolkit from AT&T and Lucent
+# Bell Labs. The other options in this section have no effect if this option is
+# set to NO
+# The default value is: YES.
+
+HAVE_DOT = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is allowed
+# to run in parallel. When set to 0 doxygen will base this on the number of
+# processors available in the system. You can set it explicitly to a value
+# larger than 0 to get control over the balance between CPU load and processing
+# speed.
+# Minimum value: 0, maximum value: 32, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_NUM_THREADS = 0
+
+# When you want a differently looking font in the dot files that doxygen
+# generates you can specify the font name using DOT_FONTNAME. You need to make
+# sure dot is able to find the font, which can be done by putting it in a
+# standard location or by setting the DOTFONTPATH environment variable or by
+# setting DOT_FONTPATH to the directory containing the font.
+# The default value is: Helvetica.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTNAME = Helvetica
+
+# The DOT_FONTSIZE tag can be used to set the size (in points) of the font of
+# dot graphs.
+# Minimum value: 4, maximum value: 24, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the default font as specified with
+# DOT_FONTNAME. If you specify a different font using DOT_FONTNAME you can set
+# the path where dot can find it using this tag.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH tag is set to YES then doxygen will generate a graph for
+# each documented class showing the direct and indirect inheritance relations.
+# Setting this tag to YES will force the CLASS_DIAGRAMS tag to NO.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH tag is set to YES then doxygen will generate a
+# graph for each documented class showing the direct and indirect implementation
+# dependencies (inheritance, containment, and class references variables) of the
+# class with other documented classes.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS tag is set to YES then doxygen will generate a graph for
+# groups, showing the direct groups dependencies.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES, doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LOOK = NO
+
+# If the UML_LOOK tag is enabled, the fields and methods are shown inside the
+# class node. If there are many fields or methods and many nodes the graph may
+# become too big to be useful. The UML_LIMIT_NUM_FIELDS threshold limits the
+# number of items for each type to make the size more manageable. Set this to 0
+# for no limit. Note that the threshold may be exceeded by 50% before the limit
+# is enforced. So when you set the threshold to 10, up to 15 fields may appear,
+# but if the number exceeds 15, the total amount of fields shown is limited to
+# 10.
+# Minimum value: 0, maximum value: 100, default value: 10.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+UML_LIMIT_NUM_FIELDS = 10
+
+# If the TEMPLATE_RELATIONS tag is set to YES then the inheritance and
+# collaboration graphs will show the relations between templates and their
+# instances.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+TEMPLATE_RELATIONS = NO
+
+# If the INCLUDE_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are set to
+# YES then doxygen will generate a graph for each documented file showing the
+# direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDE_GRAPH = YES
+
+# If the INCLUDED_BY_GRAPH, ENABLE_PREPROCESSING and SEARCH_INCLUDES tags are
+# set to YES then doxygen will generate a graph for each documented file showing
+# the direct and indirect include dependencies of the file with other documented
+# files.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH tag is set to YES then doxygen will generate a call
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable call graphs for selected
+# functions only using the \callgraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALL_GRAPH = NO
+
+# If the CALLER_GRAPH tag is set to YES then doxygen will generate a caller
+# dependency graph for every global function or class method.
+#
+# Note that enabling this option will significantly increase the time of a run.
+# So in most cases it will be better to enable caller graphs for selected
+# functions only using the \callergraph command.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+CALLER_GRAPH = NO
+
+# If the GRAPHICAL_HIERARCHY tag is set to YES then doxygen will graphical
+# hierarchy of all classes instead of a textual one.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH tag is set to YES then doxygen will show the
+# dependencies a directory has on other directories in a graphical way. The
+# dependency relations are determined by the #include relations between the
+# files in the directories.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot.
+# Note: If you choose svg you need to set HTML_FILE_EXTENSION to xhtml in order
+# to make the SVG files visible in IE 9+ (other browsers do not have this
+# requirement).
+# Possible values are: png, png:cairo, png:cairo:cairo, png:cairo:gd, png:gd,
+# png:gd:gd, jpg, jpg:cairo, jpg:cairo:gd, jpg:gd, jpg:gd:gd, gif, gif:cairo,
+# gif:cairo:gd, gif:gd, gif:gd:gd and svg.
+# The default value is: png.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_IMAGE_FORMAT = png
+
+# If DOT_IMAGE_FORMAT is set to svg, then this option can be set to YES to
+# enable generation of interactive SVG images that allow zooming and panning.
+#
+# Note that this requires a modern browser other than Internet Explorer. Tested
+# and working are Firefox, Chrome, Safari, and Opera.
+# Note: For IE 9+ you need to set HTML_FILE_EXTENSION to xhtml in order to make
+# the SVG files visible. Older versions of IE do not have SVG support.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+INTERACTIVE_SVG = NO
+
+# The DOT_PATH tag can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the \dotfile
+# command).
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the \mscfile
+# command).
+
+MSCFILE_DIRS =
+
+# The DIAFILE_DIRS tag can be used to specify one or more directories that
+# contain dia files that are included in the documentation (see the \diafile
+# command).
+
+DIAFILE_DIRS =
+
+# When using plantuml, the PLANTUML_JAR_PATH tag should be used to specify the
+# path where java can find the plantuml.jar file. If left blank, it is assumed
+# PlantUML is not used or called during a preprocessing step. Doxygen will
+# generate a warning when it encounters a \startuml command in this case and
+# will not generate output for the diagram.
+
+PLANTUML_JAR_PATH =
+
+# When using plantuml, the specified paths are searched for files specified by
+# the !include statement in a plantuml block.
+
+PLANTUML_INCLUDE_PATH =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of nodes
+# that will be shown in the graph. If the number of nodes in a graph becomes
+# larger than this value, doxygen will truncate the graph, which is visualized
+# by representing a node as a red box. Note that doxygen if the number of direct
+# children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note that
+# the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+# Minimum value: 0, maximum value: 10000, default value: 50.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the graphs
+# generated by dot. A depth value of 3 means that only nodes reachable from the
+# root by following a path via at most 3 edges will be shown. Nodes that lay
+# further from the root node will be omitted. Note that setting this option to 1
+# or 2 may greatly reduce the computation time needed for large code bases. Also
+# note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+# Minimum value: 0, maximum value: 1000, default value: 0.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not seem
+# to support this out of the box.
+#
+# Warning: Depending on the platform used, enabling this option may lead to
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to
+# read).
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10) support
+# this, this feature is disabled by default.
+# The default value is: NO.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES doxygen will generate a legend page
+# explaining the meaning of the various boxes and arrows in the dot generated
+# graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES, doxygen will remove the intermediate dot
+# files that are used to generate the various graphs.
+# The default value is: YES.
+# This tag requires that the tag HAVE_DOT is set to YES.
+
+DOT_CLEANUP = YES
diff --git a/fluent-bit/lib/librdkafka-2.1.0/INTRODUCTION.md b/fluent-bit/lib/librdkafka-2.1.0/INTRODUCTION.md
new file mode 100644
index 000000000..66f796bca
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/INTRODUCTION.md
@@ -0,0 +1,2069 @@
+# Introduction to librdkafka - the Apache Kafka C/C++ client library
+
+
+librdkafka is a high performance C implementation of the Apache
+Kafka client, providing a reliable and performant client for production use.
+librdkafka also provides a native C++ interface.
+
+<!-- markdown-toc start - Don't edit this section. Run M-x markdown-toc-refresh-toc -->
+**Table of Contents**
+
+- [Introduction to librdkafka - the Apache Kafka C/C++ client library](#introduction-to-librdkafka---the-apache-kafka-cc-client-library)
+ - [Performance](#performance)
+ - [High throughput](#high-throughput)
+ - [Low latency](#low-latency)
+ - [Latency measurement](#latency-measurement)
+ - [Compression](#compression)
+ - [Message reliability](#message-reliability)
+ - [Producer message delivery success](#producer-message-delivery-success)
+ - [Producer message delivery failure](#producer-message-delivery-failure)
+ - [Error: Timed out in transmission queue](#error-timed-out-in-transmission-queue)
+ - [Error: Timed out in flight to/from broker](#error-timed-out-in-flight-tofrom-broker)
+ - [Error: Temporary broker-side error](#error-temporary-broker-side-error)
+ - [Error: Temporary errors due to stale metadata](#error-temporary-errors-due-to-stale-metadata)
+ - [Error: Local time out](#error-local-time-out)
+ - [Error: Permanent errors](#error-permanent-errors)
+ - [Producer retries](#producer-retries)
+ - [Reordering](#reordering)
+ - [Idempotent Producer](#idempotent-producer)
+ - [Guarantees](#guarantees)
+ - [Ordering and message sequence numbers](#ordering-and-message-sequence-numbers)
+ - [Partitioner considerations](#partitioner-considerations)
+ - [Message timeout considerations](#message-timeout-considerations)
+ - [Leader change](#leader-change)
+ - [Error handling](#error-handling)
+ - [RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER](#rdkafkaresperroutofordersequencenumber)
+ - [RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER](#rdkafkaresperrduplicatesequencenumber)
+ - [RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID](#rdkafkaresperrunknownproducerid)
+ - [Standard errors](#standard-errors)
+ - [Message persistence status](#message-persistence-status)
+ - [Transactional Producer](#transactional-producer)
+ - [Error handling](#error-handling-1)
+ - [Old producer fencing](#old-producer-fencing)
+ - [Configuration considerations](#configuration-considerations)
+ - [Exactly Once Semantics (EOS) and transactions](#exactly-once-semantics-eos-and-transactions)
+ - [Usage](#usage)
+ - [Documentation](#documentation)
+ - [Initialization](#initialization)
+ - [Configuration](#configuration)
+ - [Example](#example)
+ - [Termination](#termination)
+ - [High-level KafkaConsumer](#high-level-kafkaconsumer)
+ - [Producer](#producer)
+ - [Admin API client](#admin-api-client)
+ - [Speeding up termination](#speeding-up-termination)
+ - [Threads and callbacks](#threads-and-callbacks)
+ - [Brokers](#brokers)
+ - [SSL](#ssl)
+ - [OAUTHBEARER with support for OIDC](#oauthbearer-with-support-for-oidc)
+ - [Sparse connections](#sparse-connections)
+ - [Random broker selection](#random-broker-selection)
+ - [Persistent broker connections](#persistent-broker-connections)
+ - [Connection close](#connection-close)
+ - [Fetch From Follower](#fetch-from-follower)
+ - [Logging](#logging)
+ - [Debug contexts](#debug-contexts)
+ - [Feature discovery](#feature-discovery)
+ - [Producer API](#producer-api)
+ - [Simple Consumer API (legacy)](#simple-consumer-api-legacy)
+ - [Offset management](#offset-management)
+ - [Auto offset commit](#auto-offset-commit)
+ - [At-least-once processing](#at-least-once-processing)
+ - [Auto offset reset](#auto-offset-reset)
+ - [Consumer groups](#consumer-groups)
+ - [Static consumer groups](#static-consumer-groups)
+ - [Topics](#topics)
+ - [Unknown or unauthorized topics](#unknown-or-unauthorized-topics)
+ - [Topic metadata propagation for newly created topics](#topic-metadata-propagation-for-newly-created-topics)
+ - [Topic auto creation](#topic-auto-creation)
+ - [Metadata](#metadata)
+ - [< 0.9.3](#-093)
+ - [> 0.9.3](#-093)
+ - [Query reasons](#query-reasons)
+ - [Caching strategy](#caching-strategy)
+ - [Fatal errors](#fatal-errors)
+ - [Fatal producer errors](#fatal-producer-errors)
+ - [Fatal consumer errors](#fatal-consumer-errors)
+ - [Compatibility](#compatibility)
+ - [Broker version compatibility](#broker-version-compatibility)
+ - [Broker version >= 0.10.0.0 (or trunk)](#broker-version--01000-or-trunk)
+ - [Broker versions 0.9.0.x](#broker-versions-090x)
+ - [Broker versions 0.8.x.y](#broker-versions-08xy)
+ - [Detailed description](#detailed-description)
+ - [Supported KIPs](#supported-kips)
+ - [Supported protocol versions](#supported-protocol-versions)
+- [Recommendations for language binding developers](#recommendations-for-language-binding-developers)
+ - [Expose the configuration interface pass-thru](#expose-the-configuration-interface-pass-thru)
+ - [Error constants](#error-constants)
+ - [Reporting client software name and version to broker](#reporting-client-software-name-and-version-to-broker)
+ - [Documentation reuse](#documentation-reuse)
+ - [Community support](#community-support)
+
+<!-- markdown-toc end -->
+
+
+## Performance
+
+librdkafka is a multi-threaded library designed for use on modern hardware and
+it attempts to keep memory copying to a minimum. The payload of produced or
+consumed messages may pass through without any copying
+(if so desired by the application) putting no limit on message sizes.
+
+librdkafka allows you to decide if high throughput is the name of the game,
+or if a low latency service is required, or a balance between the two, all
+through the configuration property interface.
+
+The single most important configuration properties for performance tuning is
+`linger.ms` - how long to wait for `batch.num.messages` or `batch.size` to
+fill up in the local per-partition queue before sending the batch of messages
+to the broker.
+
+In low throughput scenarios, a lower value improves latency.
+As throughput increases, the cost of each broker request becomes significant
+impacting both maximum throughput and latency. For higher throughput
+applications, latency will typically be lower using a higher `linger.ms` due
+to larger batches resulting in a lesser number of requests, yielding decreased
+per-message load on the broker. A good general purpose setting is 5ms.
+For applications seeking maximum throughput, the recommended value is >= 50ms.
+
+
+### High throughput
+
+The key to high throughput is message batching - waiting for a certain amount
+of messages to accumulate in the local queue before sending them off in
+one large message set or batch to the peer. This amortizes the messaging
+overhead and eliminates the adverse effect of the round trip time (rtt).
+
+`linger.ms` (also called `queue.buffering.max.ms`) allows librdkafka to
+wait up to the specified amount of time to accumulate up to
+`batch.num.messages` or `batch.size` in a single batch (MessageSet) before
+sending to the broker. The larger the batch the higher the throughput.
+Enabling `msg` debugging (set `debug` property to `msg`) will emit log
+messages for the accumulation process which lets you see what batch sizes
+are being produced.
+
+Example using `linger.ms=1`:
+
+```
+... test [0]: MessageSet with 1514 message(s) delivered
+... test [3]: MessageSet with 1690 message(s) delivered
+... test [0]: MessageSet with 1720 message(s) delivered
+... test [3]: MessageSet with 2 message(s) delivered
+... test [3]: MessageSet with 4 message(s) delivered
+... test [0]: MessageSet with 4 message(s) delivered
+... test [3]: MessageSet with 11 message(s) delivered
+```
+
+Example using `linger.ms=1000`:
+```
+... test [0]: MessageSet with 10000 message(s) delivered
+... test [0]: MessageSet with 10000 message(s) delivered
+... test [0]: MessageSet with 4667 message(s) delivered
+... test [3]: MessageSet with 10000 message(s) delivered
+... test [3]: MessageSet with 10000 message(s) delivered
+... test [3]: MessageSet with 4476 message(s) delivered
+
+```
+
+
+The default setting of `linger.ms=5` is not suitable for
+high throughput, it is recommended to set this value to >50ms, with
+throughput leveling out somewhere around 100-1000ms depending on
+message produce pattern and sizes.
+
+These setting are set globally (`rd_kafka_conf_t`) but applies on a
+per topic+partition basis.
+
+
+### Low latency
+
+When low latency messaging is required the `linger.ms` should be
+tuned to the maximum permitted producer-side latency.
+Setting `linger.ms` to 0 or 0.1 will make sure messages are sent as
+soon as possible.
+Lower buffering time leads to smaller batches and larger per-message overheads,
+increasing network, memory and CPU usage for producers, brokers and consumers.
+
+See [How to decrease message latency](https://github.com/edenhill/librdkafka/wiki/How-to-decrease-message-latency) for more info.
+
+
+#### Latency measurement
+
+End-to-end latency is preferably measured by synchronizing clocks on producers
+and consumers and using the message timestamp on the consumer to calculate
+the full latency. Make sure the topic's `log.message.timestamp.type` is set to
+the default `CreateTime` (Kafka topic configuration, not librdkafka topic).
+
+Latencies are typically incurred by the producer, network and broker, the
+consumer effect on end-to-end latency is minimal.
+
+To break down the end-to-end latencies and find where latencies are adding up
+there are a number of metrics available through librdkafka statistics
+on the producer:
+
+ * `brokers[].int_latency` is the time, per message, between produce()
+ and the message being written to a MessageSet and ProduceRequest.
+ High `int_latency` indicates CPU core contention: check CPU load and,
+ involuntary context switches (`/proc/<..>/status`).
+ Consider using a machine/instance with more CPU cores.
+ This metric is only relevant on the producer.
+
+ * `brokers[].outbuf_latency` is the time, per protocol request
+ (such as ProduceRequest), between the request being enqueued (which happens
+ right after int_latency) and the time the request is written to the
+ TCP socket connected to the broker.
+ High `outbuf_latency` indicates CPU core contention or network congestion:
+ check CPU load and socket SendQ (`netstat -anp | grep :9092`).
+
+ * `brokers[].rtt` is the time, per protocol request, between the request being
+ written to the TCP socket and the time the response is received from
+ the broker.
+ High `rtt` indicates broker load or network congestion:
+ check broker metrics, local socket SendQ, network performance, etc.
+
+ * `brokers[].throttle` is the time, per throttled protocol request, the
+ broker throttled/delayed handling of a request due to usage quotas.
+ The throttle time will also be reflected in `rtt`.
+
+ * `topics[].batchsize` is the size of individual Producer MessageSet batches.
+ See below.
+
+ * `topics[].batchcnt` is the number of messages in individual Producer
+ MessageSet batches. Due to Kafka protocol overhead a batch with few messages
+ will have a higher relative processing and size overhead than a batch
+ with many messages.
+ Use the `linger.ms` client configuration property to set the maximum
+ amount of time allowed for accumulating a single batch, the larger the
+ value the larger the batches will grow, thus increasing efficiency.
+ When producing messages at a high rate it is recommended to increase
+ linger.ms, which will improve throughput and in some cases also latency.
+
+
+See [STATISTICS.md](STATISTICS.md) for the full definition of metrics.
+A JSON schema for the statistics is available in
+[statistics-schema.json](src/statistics-schema.json).
+
+
+### Compression
+
+Producer message compression is enabled through the `compression.codec`
+configuration property.
+
+Compression is performed on the batch of messages in the local queue, the
+larger the batch the higher likelyhood of a higher compression ratio.
+The local batch queue size is controlled through the `batch.num.messages`,
+`batch.size`, and `linger.ms` configuration properties as described in the
+**High throughput** chapter above.
+
+
+
+## Message reliability
+
+Message reliability is an important factor of librdkafka - an application
+can rely fully on librdkafka to deliver a message according to the specified
+configuration (`request.required.acks` and `message.send.max.retries`, etc).
+
+If the topic configuration property `request.required.acks` is set to wait
+for message commit acknowledgements from brokers (any value but 0, see
+[`CONFIGURATION.md`](CONFIGURATION.md)
+for specifics) then librdkafka will hold on to the message until
+all expected acks have been received, gracefully handling the following events:
+
+ * Broker connection failure
+ * Topic leader change
+ * Produce errors signaled by the broker
+ * Network problems
+
+We recommend `request.required.acks` to be set to `all` to make sure
+produced messages are acknowledged by all in-sync replica brokers.
+
+This is handled automatically by librdkafka and the application does not need
+to take any action at any of the above events.
+The message will be resent up to `message.send.max.retries` times before
+reporting a failure back to the application.
+
+The delivery report callback is used by librdkafka to signal the status of
+a message back to the application, it will be called once for each message
+to report the status of message delivery:
+
+ * If `error_code` is non-zero the message delivery failed and the error_code
+ indicates the nature of the failure (`rd_kafka_resp_err_t` enum).
+ * If `error_code` is zero the message has been successfully delivered.
+
+See Producer API chapter for more details on delivery report callback usage.
+
+The delivery report callback is optional but highly recommended.
+
+
+### Producer message delivery success
+
+When a ProduceRequest is successfully handled by the broker and a
+ProduceResponse is received (also called the ack) without an error code
+the messages from the ProduceRequest are enqueued on the delivery report
+queue (if a delivery report callback has been set) and will be passed to
+the application on the next invocation rd_kafka_poll().
+
+
+### Producer message delivery failure
+
+The following sub-chapters explains how different produce errors
+are handled.
+
+If the error is retryable and there are remaining retry attempts for
+the given message(s), an automatic retry will be scheduled by librdkafka,
+these retries are not visible to the application.
+
+Only permanent errors and temporary errors that have reached their maximum
+retry count will generate a delivery report event to the application with an
+error code set.
+
+The application should typically not attempt to retry producing the message
+on failure, but instead configure librdkafka to perform these retries
+using the `retries` and `retry.backoff.ms` configuration properties.
+
+
+#### Error: Timed out in transmission queue
+
+Internal error ERR__TIMED_OUT_QUEUE.
+
+The connectivity to the broker may be stalled due to networking contention,
+local or remote system issues, etc, and the request has not yet been sent.
+
+The producer can be certain that the message has not been sent to the broker.
+
+This is a retryable error, but is not counted as a retry attempt
+since the message was never actually transmitted.
+
+A retry by librdkafka at this point will not cause duplicate messages.
+
+
+#### Error: Timed out in flight to/from broker
+
+Internal error ERR__TIMED_OUT, ERR__TRANSPORT.
+
+Same reasons as for `Timed out in transmission queue` above, with the
+difference that the message may have been sent to the broker and might
+be stalling waiting for broker replicas to ack the message, or the response
+could be stalled due to networking issues.
+At this point the producer can't know if the message reached the broker,
+nor if the broker wrote the message to disk and replicas.
+
+This is a retryable error.
+
+A retry by librdkafka at this point may cause duplicate messages.
+
+
+#### Error: Temporary broker-side error
+
+Broker errors ERR_REQUEST_TIMED_OUT, ERR_NOT_ENOUGH_REPLICAS,
+ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND.
+
+These errors are considered temporary and librdkafka is will retry them
+if permitted by configuration.
+
+
+#### Error: Temporary errors due to stale metadata
+
+Broker errors ERR_LEADER_NOT_AVAILABLE, ERR_NOT_LEADER_FOR_PARTITION.
+
+These errors are considered temporary and a retry is warranted, a metadata
+request is automatically sent to find a new leader for the partition.
+
+A retry by librdkafka at this point will not cause duplicate messages.
+
+
+#### Error: Local time out
+
+Internal error ERR__MSG_TIMED_OUT.
+
+The message could not be successfully transmitted before `message.timeout.ms`
+expired, typically due to no leader being available or no broker connection.
+The message may have been retried due to other errors but
+those error messages are abstracted by the ERR__MSG_TIMED_OUT error code.
+
+Since the `message.timeout.ms` has passed there will be no more retries
+by librdkafka.
+
+
+#### Error: Permanent errors
+
+Any other error is considered a permanent error and the message
+will fail immediately, generating a delivery report event with the
+distinctive error code.
+
+The full list of permanent errors depend on the broker version and
+will likely grow in the future.
+
+Typical permanent broker errors are:
+ * ERR_CORRUPT_MESSAGE
+ * ERR_MSG_SIZE_TOO_LARGE - adjust client's or broker's `message.max.bytes`.
+ * ERR_UNKNOWN_TOPIC_OR_PART - topic or partition does not exist,
+ automatic topic creation is disabled on the
+ broker or the application is specifying a
+ partition that does not exist.
+ * ERR_RECORD_LIST_TOO_LARGE
+ * ERR_INVALID_REQUIRED_ACKS
+ * ERR_TOPIC_AUTHORIZATION_FAILED
+ * ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT
+ * ERR_CLUSTER_AUTHORIZATION_FAILED
+
+
+### Producer retries
+
+The ProduceRequest itself is not retried, instead the messages
+are put back on the internal partition queue by an insert sort
+that maintains their original position (the message order is defined
+at the time a message is initially appended to a partition queue, i.e., after
+partitioning).
+A backoff time (`retry.backoff.ms`) is set on the retried messages which
+effectively blocks retry attempts until the backoff time has expired.
+
+
+### Reordering
+
+As for all retries, if `max.in.flight` > 1 and `retries` > 0, retried messages
+may be produced out of order, since a sub-sequent message in a sub-sequent
+ProduceRequest may already be in-flight (and accepted by the broker)
+by the time the retry for the failing message is sent.
+
+Using the Idempotent Producer prevents reordering even with `max.in.flight` > 1,
+see [Idempotent Producer](#idempotent-producer) below for more information.
+
+
+### Idempotent Producer
+
+librdkafka supports the idempotent producer which provides strict ordering and
+and exactly-once producer guarantees.
+The idempotent producer is enabled by setting the `enable.idempotence`
+configuration property to `true`, this will automatically adjust a number of
+other configuration properties to adhere to the idempotency requirements,
+see the documentation of `enable.idempotence` in [CONFIGURATION.md](CONFIGURATION.md) for
+more information.
+Producer instantiation will fail if the user supplied an incompatible value
+for any of the automatically adjusted properties, e.g., it is an error to
+explicitly set `acks=1` when `enable.idempotence=true` is set.
+
+
+#### Guarantees
+
+There are three types of guarantees that the idempotent producer can satisfy:
+
+ * Exactly-once - a message is only written to the log once.
+ Does NOT cover the exactly-once consumer case.
+ * Ordering - a series of messages are written to the log in the
+ order they were produced.
+ * Gap-less - **EXPERIMENTAL** a series of messages are written once and
+ in order without risk of skipping messages. The sequence
+ of messages may be cut short and fail before all
+ messages are written, but may not fail individual
+ messages in the series.
+ This guarantee is disabled by default, but may be enabled
+ by setting `enable.gapless.guarantee` if individual message
+ failure is a concern.
+ Messages that fail due to exceeded timeout (`message.timeout.ms`),
+ are permitted by the gap-less guarantee and may cause
+ gaps in the message series without raising a fatal error.
+ See **Message timeout considerations** below for more info.
+ **WARNING**: This is an experimental property subject to
+ change or removal.
+
+All three guarantees are in effect when idempotence is enabled, only
+gap-less may be disabled individually.
+
+
+#### Ordering and message sequence numbers
+
+librdkafka maintains the original produce() ordering per-partition for all
+messages produced, using an internal per-partition 64-bit counter
+called the msgid which starts at 1. This msgid allows messages to be
+re-inserted in the partition message queue in the original order in the
+case of retries.
+
+The Idempotent Producer functionality in the Kafka protocol also has
+a per-message sequence number, which is a signed 32-bit wrapping counter that is
+reset each time the Producer's ID (PID) or Epoch changes.
+
+The librdkafka msgid is used, along with a base msgid value stored
+at the time the PID/Epoch was bumped, to calculate the Kafka protocol's
+message sequence number.
+
+With Idempotent Producer enabled there is no risk of reordering despite
+`max.in.flight` > 1 (capped at 5).
+
+**Note**: "MsgId" in log messages refer to the librdkafka msgid, while "seq"
+ refers to the protocol message sequence, "baseseq" is the seq of
+ the first message in a batch.
+ MsgId starts at 1, while message seqs start at 0.
+
+
+The producer statistics also maintain two metrics for tracking the next
+expected response sequence:
+
+ * `next_ack_seq` - the next sequence to expect an acknowledgement for, which
+ is the last successfully produced MessageSet's last
+ sequence + 1.
+ * `next_err_seq` - the next sequence to expect an error for, which is typically
+ the same as `next_ack_seq` until an error occurs, in which
+ case the `next_ack_seq` can't be incremented (since no
+ messages were acked on error). `next_err_seq` is used to
+ properly handle sub-sequent errors due to a failing
+ first request.
+
+**Note**: Both are exposed in partition statistics.
+
+
+
+#### Partitioner considerations
+
+Strict ordering is guaranteed on a **per partition** basis.
+
+An application utilizing the idempotent producer should not mix
+producing to explicit partitions with partitioner-based partitions
+since messages produced for the latter are queued separately until
+a topic's partition count is known, which would insert these messages
+after the partition-explicit messages regardless of produce order.
+
+
+#### Message timeout considerations
+
+If messages time out (due to `message.timeout.ms`) while in the producer queue
+there will be gaps in the series of produced messages.
+
+E.g., Messages 1,2,3,4,5 are produced by the application.
+ While messages 2,3,4 are transmitted to the broker the connection to
+ the broker goes down.
+ While the broker is down the message timeout expires for message 2 and 3.
+ As the connection comes back up messages 4, 5 are transmitted to the
+ broker, resulting in a final written message sequence of 1, 4, 5.
+
+The producer gracefully handles this case by draining the in-flight requests
+for a given partition when one or more of its queued (not transmitted)
+messages are timed out. When all requests are drained the Epoch is bumped and
+the base sequence number is reset to the first message in the queue, effectively
+skipping the timed out messages as if they had never existed from the
+broker's point of view.
+The message status for timed out queued messages will be
+`RD_KAFKA_MSG_STATUS_NOT_PERSISTED`.
+
+If messages time out while in-flight to the broker (also due to
+`message.timeout.ms`), the protocol request will fail, the broker
+connection will be closed by the client, and the timed out messages will be
+removed from the producer queue. In this case the in-flight messages may be
+written to the topic log by the broker, even though
+a delivery report with error `ERR__MSG_TIMED_OUT` will be raised, since
+the producer timed out the request before getting an acknowledgement back
+from the broker.
+The message status for timed out in-flight messages will be
+`RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED`, indicating that the producer
+does not know if the messages were written and acked by the broker,
+or dropped in-flight.
+
+An application may inspect the message status by calling
+`rd_kafka_message_status()` on the message in the delivery report callback,
+to see if the message was (possibly) persisted (written to the topic log) by
+the broker or not.
+
+Despite the graceful handling of timeouts, we recommend to use a
+large `message.timeout.ms` to minimize the risk of timeouts.
+
+**Warning**: `enable.gapless.guarantee` does not apply to timed-out messages.
+
+**Note**: `delivery.timeout.ms` is an alias for `message.timeout.ms`.
+
+
+#### Leader change
+
+There are corner cases where an Idempotent Producer has outstanding
+ProduceRequests in-flight to the previous leader while a new leader is elected.
+
+A leader change is typically triggered by the original leader
+failing or terminating, which has the risk of also failing (some of) the
+in-flight ProduceRequests to that broker. To recover the producer to a
+consistent state it will not send any ProduceRequests for these partitions to
+the new leader broker until all responses for any outstanding ProduceRequests
+to the previous partition leader has been received, or these requests have
+timed out.
+This drain may take up to `min(socket.timeout.ms, message.timeout.ms)`.
+If the connection to the previous broker goes down the outstanding requests
+are failed immediately.
+
+
+#### Error handling
+
+Background:
+The error handling for the Idempotent Producer, as initially proposed
+in the [EOS design document](https://docs.google.com/document/d/11Jqy_GjUGtdXJK94XGsEIK7CP1SnQGdp2eF0wSw9ra8),
+missed some corner cases which are now being addressed in [KIP-360](https://cwiki.apache.org/confluence/display/KAFKA/KIP-360%3A+Improve+handling+of+unknown+producer).
+There were some intermediate fixes and workarounds prior to KIP-360 that proved
+to be incomplete and made the error handling in the client overly complex.
+With the benefit of hindsight the librdkafka implementation will attempt
+to provide correctness from the lessons learned in the Java client and
+provide stricter and less complex error handling.
+
+The follow sections describe librdkafka's handling of the
+Idempotent Producer specific errors that may be returned by the broker.
+
+
+##### RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER
+
+This error is returned by the broker when the sequence number in the
+ProduceRequest is larger than the expected next sequence
+for the given PID+Epoch+Partition (last BaseSeq + msgcount + 1).
+Note: sequence 0 is always accepted.
+
+If the failed request is the head-of-line (next expected sequence to be acked)
+it indicates desynchronization between the client and broker:
+the client thinks the sequence number is correct but the broker disagrees.
+There is no way for the client to recover from this scenario without
+risking message loss or duplication, and it is not safe for the
+application to manually retry messages.
+A fatal error (`RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER`) is raised.
+
+When the request is not head-of-line the previous request failed
+(for any reason), which means the messages in the current request
+can be retried after waiting for all outstanding requests for this
+partition to drain and then reset the Producer ID and start over.
+
+
+**Java Producer behaviour**:
+Fail the batch, reset the pid, and then continue producing
+(and retrying sub-sequent) messages. This will lead to gaps
+in the message series.
+
+
+
+##### RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER
+
+Returned by broker when the request's base sequence number is
+less than the expected sequence number (which is the last written
+sequence + msgcount).
+Note: sequence 0 is always accepted.
+
+This error is typically benign and occurs upon retrying a previously successful
+send that was not acknowledged.
+
+The messages will be considered successfully produced but will have neither
+timestamp or offset set.
+
+
+**Java Producer behaviour:**
+Treats the message as successfully delivered.
+
+
+##### RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID
+
+Returned by broker when the PID+Epoch is unknown, which may occur when
+the PID's state has expired (due to topic retention, DeleteRecords,
+or compaction).
+
+The Java producer added quite a bit of error handling for this case,
+extending the ProduceRequest protocol to return the logStartOffset
+to give the producer a chance to differentiate between an actual
+UNKNOWN_PRODUCER_ID or topic retention having deleted the last
+message for this producer (effectively voiding the Producer ID cache).
+This workaround proved to be error prone (see explanation in KIP-360)
+when the partition leader changed.
+
+KIP-360 suggests removing this error checking in favour of failing fast,
+librdkafka follows suite.
+
+
+If the response is for the first ProduceRequest in-flight
+and there are no messages waiting to be retried nor any ProduceRequests
+unaccounted for, then the error is ignored and the epoch is incremented,
+this is likely to happen for an idle producer who's last written
+message has been deleted from the log, and thus its PID state.
+Otherwise the producer raises a fatal error
+(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID) since the delivery guarantees can't
+be satisfied.
+
+
+**Java Producer behaviour:**
+Retries the send in some cases (but KIP-360 will change this).
+Not a fatal error in any case.
+
+
+##### Standard errors
+
+All the standard Produce errors are handled in the usual way,
+permanent errors will fail the messages in the batch, while
+temporary errors will be retried (if retry count permits).
+
+If a permanent error is returned for a batch in a series of in-flight batches,
+the sub-sequent batches will fail with
+RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER since the sequence number of the
+failed batched was never written to the topic log and next expected sequence
+thus not incremented on the broker.
+
+A fatal error (RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE) is raised to satisfy
+the gap-less guarantee (if `enable.gapless.guarantee` is set) by failing all
+queued messages.
+
+
+##### Message persistence status
+
+To help the application decide what to do in these error cases, a new
+per-message API is introduced, `rd_kafka_message_status()`,
+which returns one of the following values:
+
+ * `RD_KAFKA_MSG_STATUS_NOT_PERSISTED` - the message has never
+ been transmitted to the broker, or failed with an error indicating
+ it was not written to the log.
+ Application retry will risk ordering, but not duplication.
+ * `RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED` - the message was transmitted
+ to the broker, but no acknowledgement was received.
+ Application retry will risk ordering and duplication.
+ * `RD_KAFKA_MSG_STATUS_PERSISTED` - the message was written to the log by
+ the broker and fully acknowledged.
+ No reason for application to retry.
+
+This method should be called by the application on delivery report error.
+
+
+### Transactional Producer
+
+
+#### Error handling
+
+Using the transactional producer simplifies error handling compared to the
+standard or idempotent producer, a transactional application will only need
+to care about these different types of errors:
+
+ * Retriable errors - the operation failed due to temporary problems,
+ such as network timeouts, the operation may be safely retried.
+ Use `rd_kafka_error_is_retriable()` to distinguish this case.
+ * Abortable errors - if any of the transactional APIs return a non-fatal
+ error code the current transaction has failed and the application
+ must call `rd_kafka_abort_transaction()`, rewind its input to the
+ point before the current transaction started, and attempt a new transaction
+ by calling `rd_kafka_begin_transaction()`, etc.
+ Use `rd_kafka_error_txn_requires_abort()` to distinguish this case.
+ * Fatal errors - the application must cease operations and destroy the
+ producer instance.
+ Use `rd_kafka_error_is_fatal()` to distinguish this case.
+ * For all other errors returned from the transactional API: the current
+ recommendation is to treat any error that has neither retriable, abortable,
+ or fatal set, as a fatal error.
+
+While the application should log the actual fatal or abortable errors, there
+is no need for the application to handle the underlying errors specifically.
+
+
+
+#### Old producer fencing
+
+If a new transactional producer instance is started with the same
+`transactional.id`, any previous still running producer
+instance will be fenced off at the next produce, commit or abort attempt, by
+raising a fatal error with the error code set to
+`RD_KAFKA_RESP_ERR__FENCED`.
+
+
+#### Configuration considerations
+
+To make sure messages time out (in case of connectivity problems, etc) within
+the transaction, the `message.timeout.ms` configuration property must be
+set lower than the `transaction.timeout.ms`, this is enforced when
+creating the producer instance.
+If `message.timeout.ms` is not explicitly configured it will be adjusted
+automatically.
+
+
+
+
+### Exactly Once Semantics (EOS) and transactions
+
+librdkafka supports Exactly One Semantics (EOS) as defined in [KIP-98](https://cwiki.apache.org/confluence/display/KAFKA/KIP-98+-+Exactly+Once+Delivery+and+Transactional+Messaging).
+For more on the use of transactions, see [Transactions in Apache Kafka](https://www.confluent.io/blog/transactions-apache-kafka/).
+
+See [examples/transactions.c](examples/transactions.c) for an example
+transactional EOS application.
+
+**Warning**
+If the broker version is older than Apache Kafka 2.5.0 then one transactional
+producer instance per consumed input partition is required.
+For 2.5.0 and later a single producer instance may be used regardless of
+the number of input partitions.
+See KIP-447 for more information.
+
+
+## Usage
+
+### Documentation
+
+The librdkafka API is documented in the [`rdkafka.h`](src/rdkafka.h)
+header file, the configuration properties are documented in
+[`CONFIGURATION.md`](CONFIGURATION.md)
+
+### Initialization
+
+The application needs to instantiate a top-level object `rd_kafka_t` which is
+the base container, providing global configuration and shared state.
+It is created by calling `rd_kafka_new()`.
+
+It also needs to instantiate one or more topics (`rd_kafka_topic_t`) to be used
+for producing to or consuming from. The topic object holds topic-specific
+configuration and will be internally populated with a mapping of all available
+partitions and their leader brokers.
+It is created by calling `rd_kafka_topic_new()`.
+
+Both `rd_kafka_t` and `rd_kafka_topic_t` comes with a configuration API which
+is optional.
+Not using the API will cause librdkafka to use its default values which are
+documented in [`CONFIGURATION.md`](CONFIGURATION.md).
+
+**Note**: An application may create multiple `rd_kafka_t` objects and
+ they share no state.
+
+**Note**: An `rd_kafka_topic_t` object may only be used with the `rd_kafka_t`
+ object it was created from.
+
+
+
+### Configuration
+
+To ease integration with the official Apache Kafka software and lower
+the learning curve, librdkafka implements identical configuration
+properties as found in the official clients of Apache Kafka.
+
+Configuration is applied prior to object creation using the
+`rd_kafka_conf_set()` and `rd_kafka_topic_conf_set()` APIs.
+
+**Note**: The `rd_kafka.._conf_t` objects are not reusable after they have been
+ passed to `rd_kafka.._new()`.
+ The application does not need to free any config resources after a
+ `rd_kafka.._new()` call.
+
+#### Example
+
+```c
+ rd_kafka_conf_t *conf;
+ rd_kafka_conf_res_t res;
+ rd_kafka_t *rk;
+ char errstr[512];
+
+ conf = rd_kafka_conf_new();
+
+ res = rd_kafka_conf_set(conf, "compression.codec", "snappy",
+ errstr, sizeof(errstr));
+ if (res != RD_KAFKA_CONF_OK)
+ fail("%s\n", errstr);
+
+ res = rd_kafka_conf_set(conf, "batch.num.messages", "100",
+ errstr, sizeof(errstr));
+ if (res != RD_KAFKA_CONF_OK)
+ fail("%s\n", errstr);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(rk);
+ fail("Failed to create producer: %s\n", errstr);
+ }
+
+ /* Note: librdkafka takes ownership of the conf object on success */
+```
+
+Configuration properties may be set in any order (except for interceptors) and
+may be overwritten before being passed to `rd_kafka_new()`.
+`rd_kafka_new()` will verify that the passed configuration is consistent
+and will fail and return an error if incompatible configuration properties
+are detected. It will also emit log warnings for deprecated and problematic
+configuration properties.
+
+
+### Termination
+
+librdkafka is asynchronous in its nature and performs most operation in its
+background threads.
+
+Calling the librdkafka handle destructor tells the librdkafka background
+threads to finalize their work, close network connections, clean up, etc, and
+may thus take some time. The destructor (`rd_kafka_destroy()`) will block
+until all background threads have terminated.
+
+If the destructor blocks indefinitely it typically means there is an outstanding
+object reference, such as a message or topic object, that was not destroyed
+prior to destroying the client handle.
+
+All objects except for the handle (C: `rd_kafka_t`,
+C++: `Consumer,KafkaConsumer,Producer`), such as topic objects, messages,
+`topic_partition_t`, `TopicPartition`, events, etc, **MUST** be
+destroyed/deleted prior to destroying or closing the handle.
+
+For C, make sure the following objects are destroyed prior to calling
+`rd_kafka_consumer_close()` and `rd_kafka_destroy()`:
+ * `rd_kafka_message_t`
+ * `rd_kafka_topic_t`
+ * `rd_kafka_topic_partition_t`
+ * `rd_kafka_topic_partition_list_t`
+ * `rd_kafka_event_t`
+ * `rd_kafka_queue_t`
+
+For C++ make sure the following objects are deleted prior to
+calling `KafkaConsumer::close()` and delete on the Consumer, KafkaConsumer or
+Producer handle:
+ * `Message`
+ * `Topic`
+ * `TopicPartition`
+ * `Event`
+ * `Queue`
+
+
+#### High-level KafkaConsumer
+
+Proper termination sequence for the high-level KafkaConsumer is:
+```c
+ /* 1) Leave the consumer group, commit final offsets, etc. */
+ rd_kafka_consumer_close(rk);
+
+ /* 2) Destroy handle object */
+ rd_kafka_destroy(rk);
+```
+
+**NOTE**: There is no need to unsubscribe prior to calling `rd_kafka_consumer_close()`.
+
+**NOTE**: Any topic objects created must be destroyed prior to rd_kafka_destroy()
+
+Effects of not doing the above, for:
+ 1. Final offsets are not committed and the consumer will not actively leave
+ the group, it will be kicked out of the group after the `session.timeout.ms`
+ expires. It is okay to omit the `rd_kafka_consumer_close()` call in case
+ the application does not want to wait for the blocking close call.
+ 2. librdkafka will continue to operate on the handle. Actual memory leaks.
+
+
+#### Producer
+
+The proper termination sequence for Producers is:
+
+```c
+ /* 1) Make sure all outstanding requests are transmitted and handled. */
+ rd_kafka_flush(rk, 60*1000); /* One minute timeout */
+
+ /* 2) Destroy the topic and handle objects */
+ rd_kafka_topic_destroy(rkt); /* Repeat for all topic objects held */
+ rd_kafka_destroy(rk);
+```
+
+Effects of not doing the above, for:
+ 1. Messages in-queue or in-flight will be dropped.
+ 2. librdkafka will continue to operate on the handle. Actual memory leaks.
+
+
+#### Admin API client
+
+Unlike the Java Admin client, the Admin APIs in librdkafka are available
+on any type of client instance and can be used in combination with the
+client type's main functionality, e.g., it is perfectly fine to call
+`CreateTopics()` in your running producer, or `DeleteRecords()` in your
+consumer.
+
+If you need a client instance to only perform Admin API operations the
+recommendation is to create a producer instance since it requires less
+configuration (no `group.id`) than the consumer and is generally more cost
+efficient.
+We do recommend that you set `allow.auto.create.topics=false` to avoid
+topic metadata lookups to unexpectedly have the broker create topics.
+
+
+
+#### Speeding up termination
+To speed up the termination of librdkafka an application can set a
+termination signal that will be used internally by librdkafka to quickly
+cancel any outstanding I/O waits.
+Make sure you block this signal in your application.
+
+```c
+ char tmp[16];
+ snprintf(tmp, sizeof(tmp), "%i", SIGIO); /* Or whatever signal you decide */
+ rd_kafka_conf_set(rk_conf, "internal.termination.signal", tmp, errstr, sizeof(errstr));
+```
+
+
+### Threads and callbacks
+
+librdkafka uses multiple threads internally to fully utilize modern hardware.
+The API is completely thread-safe and the calling application may call any
+of the API functions from any of its own threads at any time.
+
+A poll-based API is used to provide signaling back to the application,
+the application should call rd_kafka_poll() at regular intervals.
+The poll API will call the following configured callbacks (optional):
+
+ * `dr_msg_cb` - Message delivery report callback - signals that a message has
+ been delivered or failed delivery, allowing the application to take action
+ and to release any application resources used in the message.
+ * `error_cb` - Error callback - signals an error. These errors are usually of
+ an informational nature, i.e., failure to connect to a broker, and the
+ application usually does not need to take any action.
+ The type of error is passed as a rd_kafka_resp_err_t enum value,
+ including both remote broker errors as well as local failures.
+ An application typically does not have to perform any action when
+ an error is raised through the error callback, the client will
+ automatically try to recover from all errors, given that the
+ client and cluster is correctly configured.
+ In some specific cases a fatal error may occur which will render
+ the client more or less inoperable for further use:
+ if the error code in the error callback is set to
+ `RD_KAFKA_RESP_ERR__FATAL` the application should retrieve the
+ underlying fatal error and reason using the `rd_kafka_fatal_error()` call,
+ and then begin terminating the instance.
+ The Event API's EVENT_ERROR has a `rd_kafka_event_error_is_fatal()`
+ function, and the C++ EventCb has a `fatal()` method, to help the
+ application determine if an error is fatal or not.
+ * `stats_cb` - Statistics callback - triggered if `statistics.interval.ms`
+ is configured to a non-zero value, emitting metrics and internal state
+ in JSON format, see [STATISTICS.md].
+ * `throttle_cb` - Throttle callback - triggered whenever a broker has
+ throttled (delayed) a request.
+
+These callbacks will also be triggered by `rd_kafka_flush()`,
+`rd_kafka_consumer_poll()`, and any other functions that serve queues.
+
+
+Optional callbacks not triggered by poll, these may be called spontaneously
+from any thread at any time:
+
+ * `log_cb` - Logging callback - allows the application to output log messages
+ generated by librdkafka.
+ * `partitioner_cb` - Partitioner callback - application provided message partitioner.
+ The partitioner may be called in any thread at any time, it may be
+ called multiple times for the same key.
+ Partitioner function contraints:
+ - MUST NOT call any rd_kafka_*() functions
+ - MUST NOT block or execute for prolonged periods of time.
+ - MUST return a value between 0 and partition_cnt-1, or the
+ special RD_KAFKA_PARTITION_UA value if partitioning
+ could not be performed.
+
+
+
+### Brokers
+
+On initialization, librdkafka only needs a partial list of
+brokers (at least one), called the bootstrap brokers.
+The client will connect to the bootstrap brokers specified by the
+`bootstrap.servers` configuration property and query cluster Metadata
+information which contains the full list of brokers, topic, partitions and their
+leaders in the Kafka cluster.
+
+Broker names are specified as `host[:port]` where the port is optional
+(default 9092) and the host is either a resolvable hostname or an IPv4 or IPv6
+address.
+If host resolves to multiple addresses librdkafka will round-robin the
+addresses for each connection attempt.
+A DNS record containing all broker address can thus be used to provide a
+reliable bootstrap broker.
+
+
+#### SSL
+
+If the client is to connect to a broker's SSL endpoints/listeners the client
+needs to be configured with `security.protocol=SSL` for just SSL transport or
+`security.protocol=SASL_SSL` for SASL authentication and SSL transport.
+The client will try to verify the broker's certificate by checking the
+CA root certificates, if the broker's certificate can't be verified
+the connection is closed (and retried). This is to protect the client
+from connecting to rogue brokers.
+
+The CA root certificate defaults are system specific:
+ * On Linux, Mac OSX, and other Unix-like system the OpenSSL default
+ CA path will be used, also called the OPENSSLDIR, which is typically
+ `/etc/ssl/certs` (on Linux, typcially in the `ca-certificates` package) and
+ `/usr/local/etc/openssl` on Mac OSX (Homebrew).
+ * On Windows the Root certificate store is used, unless
+ `ssl.ca.certificate.stores` is configured in which case certificates are
+ read from the specified stores.
+ * If OpenSSL is linked statically, librdkafka will set the default CA
+ location to the first of a series of probed paths (see below).
+
+If the system-provided default CA root certificates are not sufficient to
+verify the broker's certificate, such as when a self-signed certificate
+or a local CA authority is used, the CA certificate must be specified
+explicitly so that the client can find it.
+This can be done either by providing a PEM file (e.g., `cacert.pem`)
+as the `ssl.ca.location` configuration property, or by passing an in-memory
+PEM, X.509/DER or PKCS#12 certificate to `rd_kafka_conf_set_ssl_cert()`.
+
+It is also possible to disable broker certificate verification completely
+by setting `enable.ssl.certificate.verification=false`, but this is not
+recommended since it allows for rogue brokers and man-in-the-middle attacks,
+and should only be used for testing and troubleshooting purposes.
+
+CA location probe paths (see [rdkafka_ssl.c](src/rdkafka_ssl.c) for full list)
+used when OpenSSL is statically linked:
+
+ "/etc/pki/tls/certs/ca-bundle.crt",
+ "/etc/ssl/certs/ca-bundle.crt",
+ "/etc/pki/tls/certs/ca-bundle.trust.crt",
+ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
+ "/etc/ssl/ca-bundle.pem",
+ "/etc/pki/tls/cacert.pem",
+ "/etc/ssl/cert.pem",
+ "/etc/ssl/cacert.pem",
+ "/etc/certs/ca-certificates.crt",
+ "/etc/ssl/certs/ca-certificates.crt",
+ "/etc/ssl/certs",
+ "/usr/local/etc/ssl/cert.pem",
+ "/usr/local/etc/ssl/cacert.pem",
+ "/usr/local/etc/ssl/certs/cert.pem",
+ "/usr/local/etc/ssl/certs/cacert.pem",
+ etc..
+
+
+On **Windows** the Root certificate store is read by default, but any number
+of certificate stores can be read by setting the `ssl.ca.certificate.stores`
+configuration property to a comma-separated list of certificate store names.
+The predefined system store names are:
+
+ * `MY` - User certificates
+ * `Root` - System CA certificates (default)
+ * `CA` - Intermediate CA certificates
+ * `Trust` - Trusted publishers
+
+For example, to read both intermediate and root CAs, set
+`ssl.ca.certificate.stores=CA,Root`.
+
+
+#### OAUTHBEARER with support for OIDC
+
+OAUTHBEARER with OIDC provides a method for the client to authenticate to the
+Kafka cluster by requesting an authentication token from an issuing server
+and passing the retrieved token to brokers during connection setup.
+
+To use this authentication method the client needs to be configured as follows:
+
+ * `security.protocol` - set to `SASL_SSL` or `SASL_PLAINTEXT`.
+ * `sasl.mechanism` - set to `OAUTHBEARER`.
+ * `sasl.oauthbearer.method` - set to `OIDC`.
+ * `sasl.oauthbearer.token.endpoint.url` - OAUTH issuer token
+ endpoint HTTP(S) URI used to retrieve the token.
+ * `sasl.oauthbearer.client.id` - public identifier for the application.
+ It must be unique across all clients that the authorization server handles.
+ * `sasl.oauthbearer.client.secret` - secret known only to the
+ application and the authorization server.
+ This should be a sufficiently random string that is not guessable.
+ * `sasl.oauthbearer.scope` - clients use this to specify the scope of the
+ access request to the broker.
+ * `sasl.oauthbearer.extensions` - (optional) additional information to be
+ provided to the broker. A comma-separated list of key=value pairs.
+ For example:
+ `supportFeatureX=true,organizationId=sales-emea`
+
+
+#### Sparse connections
+
+The client will only connect to brokers it needs to communicate with, and
+only when necessary.
+
+Examples of needed broker connections are:
+
+ * leaders for partitions being consumed from
+ * leaders for partitions being produced to
+ * consumer group coordinator broker
+ * cluster controller for Admin API operations
+
+
+##### Random broker selection
+
+When there is no broker connection and a connection to any broker
+is needed, such as on startup to retrieve metadata, the client randomly selects
+a broker from its list of brokers, which includes both the configured bootstrap
+brokers (including brokers manually added with `rd_kafka_brokers_add()`), as
+well as the brokers discovered from cluster metadata.
+Brokers with no prior connection attempt are tried first.
+
+If there is already an available broker connection to any broker it is used,
+rather than connecting to a new one.
+
+The random broker selection and connection scheduling is triggered when:
+ * bootstrap servers are configured (`rd_kafka_new()`)
+ * brokers are manually added (`rd_kafka_brokers_add()`).
+ * a consumer group coordinator needs to be found.
+ * acquiring a ProducerID for the Idempotent Producer.
+ * cluster or topic metadata is being refreshed.
+
+A single connection attempt will be performed, and the broker will
+return to an idle INIT state on failure to connect.
+
+The random broker selection is rate-limited to:
+10 < `reconnect.backoff.ms`/2 < 1000 milliseconds.
+
+**Note**: The broker connection will be maintained until it is closed
+ by the broker (idle connection reaper).
+
+##### Persistent broker connections
+
+While the random broker selection is useful for one-off queries, there
+is need for the client to maintain persistent connections to certain brokers:
+ * Consumer: the group coordinator.
+ * Consumer: partition leader for topics being fetched from.
+ * Producer: partition leader for topics being produced to.
+
+These dependencies are discovered and maintained automatically, marking
+matching brokers as persistent, which will make the client maintain connections
+to these brokers at all times, reconnecting as necessary.
+
+
+#### Connection close
+
+A broker connection may be closed by the broker, intermediary network gear,
+due to network errors, timeouts, etc.
+When a broker connection is closed, librdkafka will back off the next reconnect
+attempt (to the given broker) for `reconnect.backoff.ms` -25% to +50% jitter,
+this value is increased exponentially for each connect attempt until
+`reconnect.backoff.max.ms` is reached, at which time the value is reset
+to `reconnect.backoff.ms`.
+
+The broker will disconnect clients that have not sent any protocol requests
+within `connections.max.idle.ms` (broker configuration propertion, defaults
+to 10 minutes), but there is no fool proof way for the client to know that it
+was a deliberate close by the broker and not an error. To avoid logging these
+deliberate idle disconnects as errors the client employs some logic to try to
+classify a disconnect as an idle disconnect if no requests have been sent in
+the last `socket.timeout.ms` or there are no outstanding, or
+queued, requests waiting to be sent. In this case the standard "Disconnect"
+error log is silenced (will only be seen with debug enabled).
+
+Otherwise, if a connection is closed while there are requests in-flight
+the logging level will be LOG_WARNING (4), else LOG_INFO (6).
+
+`log.connection.close=false` may be used to silence all disconnect logs,
+but it is recommended to instead rely on the above heuristics.
+
+
+#### Fetch From Follower
+
+librdkafka supports consuming messages from follower replicas
+([KIP-392](https://cwiki.apache.org/confluence/display/KAFKA/KIP-392%3A+Allow+consumers+to+fetch+from+closest+replica)).
+This is enabled by setting the `client.rack` configuration property which
+corresponds to `broker.rack` on the broker. The actual assignment of
+consumers to replicas is determined by the configured `replica.selector.class`
+on the broker.
+
+
+### Logging
+
+#### Debug contexts
+
+Extensive debugging of librdkafka can be enabled by setting the
+`debug` configuration property to a CSV string of debug contexts:
+
+Debug context | Type | Description
+--------------|----------|----------------------
+generic | * | General client instance level debugging. Includes initialization and termination debugging.
+broker | * | Broker and connection state debugging.
+topic | * | Topic and partition state debugging. Includes leader changes.
+metadata | * | Cluster and topic metadata retrieval debugging.
+feature | * | Kafka protocol feature support as negotiated with the broker.
+queue | producer | Message queue debugging.
+msg | * | Message debugging. Includes information about batching, compression, sizes, etc.
+protocol | * | Kafka protocol request/response debugging. Includes latency (rtt) printouts.
+cgrp | consumer | Low-level consumer group state debugging.
+security | * | Security and authentication debugging.
+fetch | consumer | Consumer message fetch debugging. Includes decision when and why messages are fetched.
+interceptor | * | Interceptor interface debugging.
+plugin | * | Plugin loading debugging.
+consumer | consumer | High-level consumer debugging.
+admin | admin | Admin API debugging.
+eos | producer | Idempotent Producer debugging.
+mock | * | Mock cluster functionality debugging.
+assignor | consumer | Detailed consumer group partition assignor debugging.
+conf | * | Display set configuration properties on startup.
+all | * | All of the above.
+
+
+Suggested debugging settings for troubleshooting:
+
+Problem space | Type | Debug setting
+-----------------------|----------|-------------------
+Producer not delivering messages to broker | producer | `broker,topic,msg`
+Consumer not fetching messages | consumer | Start with `consumer`, or use `cgrp,fetch` for detailed information.
+Consumer starts reading at unexpected offset | consumer | `consumer` or `cgrp,fetch`
+Authentication or connectivity issues | * | `broker,auth`
+Protocol handling or latency | * | `broker,protocol`
+Topic leader and state | * | `topic,metadata`
+
+
+
+
+### Feature discovery
+
+Apache Kafka broker version 0.10.0 added support for the ApiVersionRequest API
+which allows a client to query a broker for its range of supported API versions.
+
+librdkafka supports this functionality and will query each broker on connect
+for this information (if `api.version.request=true`) and use it to enable or disable
+various protocol features, such as MessageVersion 1 (timestamps), KafkaConsumer, etc.
+
+If the broker fails to respond to the ApiVersionRequest librdkafka will
+assume the broker is too old to support the API and fall back to an older
+broker version's API. These fallback versions are hardcoded in librdkafka
+and is controlled by the `broker.version.fallback` configuration property.
+
+
+
+### Producer API
+
+After setting up the `rd_kafka_t` object with type `RD_KAFKA_PRODUCER` and one
+or more `rd_kafka_topic_t` objects librdkafka is ready for accepting messages
+to be produced and sent to brokers.
+
+The `rd_kafka_produce()` function takes the following arguments:
+
+ * `rkt` - the topic to produce to, previously created with
+ `rd_kafka_topic_new()`
+ * `partition` - partition to produce to. If this is set to
+ `RD_KAFKA_PARTITION_UA` (UnAssigned) then the configured partitioner
+ function will be used to select a target partition.
+ * `msgflags` - 0, or one of:
+ * `RD_KAFKA_MSG_F_COPY` - librdkafka will immediately make a copy of
+ the payload. Use this when the payload is in non-persistent
+ memory, such as the stack.
+ * `RD_KAFKA_MSG_F_FREE` - let librdkafka free the payload using
+ `free(3)` when it is done with it.
+
+ These two flags are mutually exclusive and neither need to be set in
+ which case the payload is neither copied nor freed by librdkafka.
+
+ If `RD_KAFKA_MSG_F_COPY` flag is not set no data copying will be
+ performed and librdkafka will hold on the payload pointer until
+ the message has been delivered or fails.
+ The delivery report callback will be called when librdkafka is done
+ with the message to let the application regain ownership of the
+ payload memory.
+ The application must not free the payload in the delivery report
+ callback if `RD_KAFKA_MSG_F_FREE is set`.
+ * `payload`,`len` - the message payload
+ * `key`,`keylen` - an optional message key which can be used for partitioning.
+ It will be passed to the topic partitioner callback, if any, and
+ will be attached to the message when sending to the broker.
+ * `msg_opaque` - an optional application-provided per-message opaque pointer
+ that will be provided in the message delivery callback to let
+ the application reference a specific message.
+
+
+`rd_kafka_produce()` is a non-blocking API, it will enqueue the message
+on an internal queue and return immediately.
+If the new message would cause the internal queue to exceed
+`queue.buffering.max.messages` or `queue.buffering.max.kbytes`
+configuration properties, `rd_kafka_produce()` returns -1 and sets errno
+to `ENOBUFS` and last_error to `RD_KAFKA_RESP_ERR__QUEUE_FULL`, thus
+providing a backpressure mechanism.
+
+
+`rd_kafka_producev()` provides an alternative produce API that does not
+require a topic `rkt` object and also provides support for extended
+message fields, such as timestamp and headers.
+
+
+**Note**: See `examples/rdkafka_performance.c` for a producer implementation.
+
+
+### Simple Consumer API (legacy)
+
+NOTE: For the high-level KafkaConsumer interface see rd_kafka_subscribe (rdkafka.h) or KafkaConsumer (rdkafkacpp.h)
+
+The consumer API is a bit more stateful than the producer API.
+After creating `rd_kafka_t` with type `RD_KAFKA_CONSUMER` and
+`rd_kafka_topic_t` instances the application must also start the consumer
+for a given partition by calling `rd_kafka_consume_start()`.
+
+`rd_kafka_consume_start()` arguments:
+
+ * `rkt` - the topic to start consuming from, previously created with
+ `rd_kafka_topic_new()`.
+ * `partition` - partition to consume from.
+ * `offset` - message offset to start consuming from. This may either be an
+ absolute message offset or one of the three special offsets:
+ `RD_KAFKA_OFFSET_BEGINNING` to start consuming from the beginning
+ of the partition's queue (oldest message), or
+ `RD_KAFKA_OFFSET_END` to start consuming at the next message to be
+ produced to the partition, or
+ `RD_KAFKA_OFFSET_STORED` to use the offset store.
+
+After a topic+partition consumer has been started librdkafka will attempt
+to keep `queued.min.messages` messages in the local queue by repeatedly
+fetching batches of messages from the broker. librdkafka will fetch all
+consumed partitions for which that broker is a leader, through a single
+request.
+
+This local message queue is then served to the application through three
+different consume APIs:
+
+ * `rd_kafka_consume()` - consumes a single message
+ * `rd_kafka_consume_batch()` - consumes one or more messages
+ * `rd_kafka_consume_callback()` - consumes all messages in the local
+ queue and calls a callback function for each one.
+
+These three APIs are listed above the ascending order of performance,
+`rd_kafka_consume()` being the slowest and `rd_kafka_consume_callback()` being
+the fastest. The different consume variants are provided to cater for different
+application needs.
+
+A consumed message, as provided or returned by each of the consume functions,
+is represented by the `rd_kafka_message_t` type.
+
+`rd_kafka_message_t` members:
+
+ * `err` - Error signaling back to the application. If this field is non-zero
+ the `payload` field should be considered an error message and
+ `err` is an error code (`rd_kafka_resp_err_t`).
+ If `err` is zero then the message is a proper fetched message
+ and `payload` et.al contains message payload data.
+ * `rkt`,`partition` - Topic and partition for this message or error.
+ * `payload`,`len` - Message payload data or error message (err!=0).
+ * `key`,`key_len` - Optional message key as specified by the producer
+ * `offset` - Message offset
+
+Both the `payload` and `key` memory, as well as the message as a whole, is
+owned by librdkafka and must not be used after an `rd_kafka_message_destroy()`
+call. librdkafka will share the same messageset receive buffer memory for all
+message payloads of that messageset to avoid excessive copying which means
+that if the application decides to hang on to a single `rd_kafka_message_t`
+it will hinder the backing memory to be released for all other messages
+from the same messageset.
+
+When the application is done consuming messages from a topic+partition it
+should call `rd_kafka_consume_stop()` to stop the consumer. This will also
+purge any messages currently in the local queue.
+
+
+**Note**: See `examples/rdkafka_performance.c` for a consumer implementation.
+
+
+#### Offset management
+
+Broker based offset management is available for broker version >= 0.9.0
+in conjunction with using the high-level KafkaConsumer interface (see
+rdkafka.h or rdkafkacpp.h)
+
+Offset management is also available through a deprecated local offset file,
+where the offset is periodically written to a local file for each
+topic+partition according to the following topic configuration properties:
+
+ * `enable.auto.commit`
+ * `auto.commit.interval.ms`
+ * `offset.store.path`
+ * `offset.store.sync.interval.ms`
+
+The legacy `auto.commit.enable` topic configuration property is only to be used
+with the legacy low-level consumer.
+Use `enable.auto.commit` with the modern KafkaConsumer.
+
+
+##### Auto offset commit
+
+The consumer will automatically commit offsets every `auto.commit.interval.ms`
+when `enable.auto.commit` is enabled (default).
+
+Offsets to be committed are kept in a local in-memory offset store,
+this offset store is updated by `consumer_poll()` (et.al) to
+store the offset of the last message passed to the application
+(per topic+partition).
+
+##### At-least-once processing
+Since auto commits are performed in a background thread this may result in
+the offset for the latest message being committed before the application has
+finished processing the message. If the application was to crash or exit
+prior to finishing processing, and the offset had been auto committed,
+the next incarnation of the consumer application would start at the next
+message, effectively missing the message that was processed when the
+application crashed.
+To avoid this scenario the application can disable the automatic
+offset **store** by setting `enable.auto.offset.store` to false
+and manually **storing** offsets after processing by calling
+`rd_kafka_offsets_store()`.
+This gives an application fine-grained control on when a message
+is eligible for committing without having to perform the commit itself.
+`enable.auto.commit` should be set to true when using manual offset storing.
+The latest stored offset will be automatically committed every
+`auto.commit.interval.ms`.
+
+**Note**: Only greater offsets are committed, e.g., if the latest committed
+ offset was 10 and the application performs an offsets_store()
+ with offset 9, that offset will not be committed.
+
+
+##### Auto offset reset
+
+The consumer will by default try to acquire the last committed offsets for
+each topic+partition it is assigned using its configured `group.id`.
+If there is no committed offset available, or the consumer is unable to
+fetch the committed offsets, the policy of `auto.offset.reset` will kick in.
+This configuration property may be set to one the following values:
+
+ * `earliest` - start consuming the earliest message of the partition.
+ * `latest` - start consuming the next message to be produced to the partition.
+ * `error` - don't start consuming but isntead raise a consumer error
+ with error-code `RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET` for
+ the topic+partition. This allows the application to decide what
+ to do in case there is no committed start offset.
+
+
+### Consumer groups
+
+Broker based consumer groups (requires Apache Kafka broker >=0.9) are supported,
+see KafkaConsumer in rdkafka.h or rdkafkacpp.h
+
+The following diagram visualizes the high-level balanced consumer group state
+flow and synchronization between the application, librdkafka consumer,
+group coordinator, and partition leader(s).
+
+![Consumer group state diagram](src/librdkafka_cgrp_synch.png)
+
+
+#### Static consumer groups
+
+By default Kafka consumers are rebalanced each time a new consumer joins
+the group or an existing member leaves. This is what is known as a dynamic
+membership. Apache Kafka >= 2.3.0 introduces static membership.
+Unlike dynamic membership, static members can leave and rejoin a group
+within the `session.timeout.ms` without triggering a rebalance, retaining
+their existing partitions assignment.
+
+To enable static group membership configure each consumer instance
+in the group with a unique `group.instance.id`.
+
+Consumers with `group.instance.id` set will not send a leave group request on
+close - session timeout, change of subscription, or a new group member joining
+the group, are the only mechanisms that will trigger a group rebalance for
+static consumer groups.
+
+If a new consumer joins the group with same `group.instance.id` as an
+existing consumer, the existing consumer will be fenced and raise a fatal error.
+The fatal error is propagated as a consumer error with error code
+`RD_KAFKA_RESP_ERR__FATAL`, use `rd_kafka_fatal_error()` to retrieve
+the original fatal error code and reason.
+
+To read more about static group membership, see [KIP-345](https://cwiki.apache.org/confluence/display/KAFKA/KIP-345%3A+Introduce+static+membership+protocol+to+reduce+consumer+rebalances).
+
+
+### Note on Batch consume APIs
+
+Using multiple instances of `rd_kafka_consume_batch()` and/or `rd_kafka_consume_batch_queue()`
+APIs concurrently is not thread safe and will result in undefined behaviour. We strongly recommend a
+single instance of these APIs to be used at a given time. This usecase is not supported and will not
+be supported in future as well. There are different ways to achieve similar result:
+
+* Create multiple consumers reading from different partitions. In this way, different partitions
+ are read by different consumers and each consumer can run its own batch call.
+* Create multiple consumers in same consumer group. In this way, partitions are assigned to
+ different consumers and each consumer can run its own batch call.
+* Create single consumer and read data from single batch call and process this data in parallel.
+
+Even after this if you feel the need to use multiple instances of these APIs for the same consumer
+concurrently, then don't use any of the **seek**, **pause**, **resume** or **rebalancing** operation
+in conjunction with these API calls. For **rebalancing** operation to work in sequencial manner, please
+set `rebalance_cb` configuration property (refer [examples/rdkafka_complex_consumer_example.c](examples/rdkafka_complex_consumer_example.c)
+for the help with the usage) for the consumer.
+
+
+### Topics
+
+#### Unknown or unauthorized topics
+
+If a consumer application subscribes to non-existent or unauthorized topics
+a consumer error will be propagated for each unavailable topic with the
+error code set to either `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART` or a
+broker-specific error code, such as
+`RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED`.
+
+As the topic metadata is refreshed every `topic.metadata.refresh.interval.ms`
+the unavailable topics are re-checked for availability, but the same error
+will not be raised again for the same topic.
+
+If a consumer has Describe (ACL) permissions for a topic but not Read it will
+be able to join a consumer group and start consuming the topic, but the Fetch
+requests to retrieve messages from the broker will fail with
+`RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED`.
+This error will be raised to the application once per partition and
+assign()/seek() and the fetcher will back off the next fetch 10 times longer than
+the `fetch.error.backoff.ms` (but at least 1 second).
+It is recommended that the application takes appropriate action when this
+occurs, for instance adjusting its subscription or assignment to exclude the
+unauthorized topic.
+
+
+#### Topic metadata propagation for newly created topics
+
+Due to the asynchronous nature of topic creation in Apache Kafka it may
+take some time for a newly created topic to be known by all brokers in the
+cluster.
+If a client tries to use a topic after topic creation but before the topic
+has been fully propagated in the cluster it will seem as if the topic does not
+exist which would raise `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC` (et.al)
+errors to the application.
+To avoid these temporary errors being raised, the client will not flag
+a topic as non-existent until a propagation time has elapsed, this propagation
+defaults to 30 seconds and can be configured with
+`topic.metadata.propagation.max.ms`.
+The per-topic max propagation time starts ticking as soon as the topic is
+referenced (e.g., by produce()).
+
+If messages are produced to unknown topics during the propagation time, the
+messages will be queued for later delivery to the broker when the topic
+metadata has propagated.
+Should the topic propagation time expire without the topic being seen the
+produced messages will fail with `RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC`.
+
+**Note**: The propagation time will not take affect if a topic is known to
+ the client and then deleted, in this case the topic will immediately
+ be marked as non-existent and remain non-existent until a topic
+ metadata refresh sees the topic again (after the topic has been
+ re-created).
+
+
+#### Topic auto creation
+
+Topic auto creation is supported by librdkafka, if a non-existent topic is
+referenced by the client (by produce to, or consuming from, the topic, etc)
+the broker will automatically create the topic (with default partition counts
+and replication factor) if the broker configuration property
+`auto.create.topics.enable=true` is set.
+
+*Note*: A topic that is undergoing automatic creation may be reported as
+unavailable, with e.g., `RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART`, during the
+time the topic is being created and partition leaders are elected.
+
+While topic auto creation may be useful for producer applications, it is not
+particularily valuable for consumer applications since even if the topic
+to consume is auto created there is nothing writing messages to the topic.
+To avoid consumers automatically creating topics the
+`allow.auto.create.topics` consumer configuration property is set to
+`false` by default, preventing the consumer to trigger automatic topic
+creation on the broker. This requires broker version v0.11.0.0 or later.
+The `allow.auto.create.topics` property may be set to `true` to allow
+auto topic creation, which also requires `auto.create.topics.enable=true` to
+be configured on the broker.
+
+
+
+### Metadata
+
+#### < 0.9.3
+Previous to the 0.9.3 release librdkafka's metadata handling
+was chatty and excessive, which usually isn't a problem in small
+to medium-sized clusters, but in large clusters with a large amount
+of librdkafka clients the metadata requests could hog broker CPU and bandwidth.
+
+#### > 0.9.3
+
+The remaining Metadata sections describe the current behaviour.
+
+**Note:** "Known topics" in the following section means topics for
+ locally created `rd_kafka_topic_t` objects.
+
+
+#### Query reasons
+
+There are four reasons to query metadata:
+
+ * brokers - update/populate cluster broker list, so the client can
+ find and connect to any new brokers added.
+
+ * specific topic - find leader or partition count for specific topic
+
+ * known topics - same, but for all locally known topics.
+
+ * all topics - get topic names for consumer group wildcard subscription
+ matching
+
+The above list is sorted so that the sub-sequent entries contain the
+information above, e.g., 'known topics' contains enough information to
+also satisfy 'specific topic' and 'brokers'.
+
+
+#### Caching strategy
+
+The prevalent cache timeout is `metadata.max.age.ms`, any cached entry
+will remain authoritative for this long or until a relevant broker error
+is returned.
+
+
+ * brokers - eternally cached, the broker list is additative.
+
+ * topics - cached for `metadata.max.age.ms`
+
+
+
+### Fatal errors
+
+If an unrecoverable error occurs, a fatal error is triggered in one
+or more of the follow ways depending on what APIs the application is utilizing:
+
+ * C: the `error_cb` is triggered with error code `RD_KAFKA_RESP_ERR__FATAL`,
+ the application should call `rd_kafka_fatal_error()` to retrieve the
+ underlying fatal error code and error string.
+ * C: an `RD_KAFKA_EVENT_ERROR` event is triggered and
+ `rd_kafka_event_error_is_fatal()` returns true: the fatal error code
+ and string are available through `rd_kafka_event_error()`, and `.._string()`.
+ * C and C++: any API call may return `RD_KAFKA_RESP_ERR__FATAL`, use
+ `rd_kafka_fatal_error()` to retrieve the underlying fatal error code
+ and error string.
+ * C++: an `EVENT_ERROR` event is triggered and `event.fatal()` returns true:
+ the fatal error code and string are available through `event.err()` and
+ `event.str()`.
+
+
+An application may call `rd_kafka_fatal_error()` at any time to check if
+a fatal error has been raised.
+
+
+#### Fatal producer errors
+
+The idempotent producer guarantees of ordering and no duplicates also
+requires a way for the client to fail gracefully when these guarantees
+can't be satisfied.
+
+If a fatal error has been raised, sub-sequent use of the following API calls
+will fail:
+
+ * `rd_kafka_produce()`
+ * `rd_kafka_producev()`
+ * `rd_kafka_produce_batch()`
+
+The underlying fatal error code will be returned, depending on the error
+reporting scheme for each of those APIs.
+
+
+When a fatal error has occurred the application should call `rd_kafka_flush()`
+to wait for all outstanding and queued messages to drain before terminating
+the application.
+`rd_kafka_purge(RD_KAFKA_PURGE_F_QUEUE)` is automatically called by the client
+when a producer fatal error has occurred, messages in-flight are not purged
+automatically to allow waiting for the proper acknowledgement from the broker.
+The purged messages in queue will fail with error code set to
+`RD_KAFKA_RESP_ERR__PURGE_QUEUE`.
+
+
+#### Fatal consumer errors
+
+A consumer configured for static group membership (`group.instance.id`) may
+raise a fatal error if a new consumer instance is started with the same
+instance id, causing the existing consumer to be fenced by the new consumer.
+
+This fatal error is propagated on the fenced existing consumer in multiple ways:
+ * `error_cb` (if configured) is triggered.
+ * `rd_kafka_consumer_poll()` (et.al) will return a message object
+ with the `err` field set to `RD_KAFKA_ERR__FATAL`.
+ * any sub-sequent calls to state-changing consumer calls will
+ return `RD_KAFKA_ERR___FATAL`.
+ This includes `rd_kafka_subscribe()`, `rd_kafka_assign()`,
+ `rd_kafka_consumer_close()`, `rd_kafka_commit*()`, etc.
+
+The consumer will automatically stop consuming when a fatal error has occurred
+and no further subscription, assignment, consumption or offset committing
+will be possible. At this point the application should simply destroy the
+consumer instance and terminate the application since it has been replaced
+by a newer instance.
+
+
+## Compatibility
+
+### Broker version compatibility
+
+librdkafka supports all released Apache Kafka broker versions since 0.8.0.0.0,
+but not all features may be available on all broker versions since some
+features rely on newer broker functionality.
+
+**Current defaults:**
+ * `api.version.request=true`
+ * `broker.version.fallback=0.10.0`
+ * `api.version.fallback.ms=0` (never revert to `broker.version.fallback`)
+
+Depending on what broker version you are using, please configure your
+librdkafka based client as follows:
+
+#### Broker version >= 0.10.0.0 (or trunk)
+
+For librdkafka >= v1.0.0 there is no need to set any api.version-related
+configuration parameters, the defaults are tailored for broker version 0.10.0.0
+or later.
+
+For librdkafka < v1.0.0, please specify:
+```
+api.version.request=true
+api.version.fallback.ms=0
+```
+
+
+#### Broker versions 0.9.0.x
+
+```
+api.version.request=false
+broker.version.fallback=0.9.0.x (the exact 0.9.0.. version you are using)
+```
+
+#### Broker versions 0.8.x.y
+
+```
+api.version.request=false
+broker.version.fallback=0.8.x.y (your exact 0.8... broker version)
+```
+
+#### Detailed description
+
+Apache Kafka version 0.10.0.0 added support for
+[KIP-35](https://cwiki.apache.org/confluence/display/KAFKA/KIP-35+-+Retrieving+protocol+version) -
+querying the broker for supported API request types and versions -
+allowing the client to figure out what features it can use.
+But for older broker versions there is no way for the client to reliably know
+what protocol features the broker supports.
+
+To alleviate this situation librdkafka has three configuration properties:
+ * `api.version.request=true|false` - enables the API version request,
+ this requires a >= 0.10.0.0 broker and will cause a disconnect on
+ brokers 0.8.x - this disconnect is recognized by librdkafka and on the next
+ connection attempt (which is immediate) it will disable the API version
+ request and use `broker.version.fallback` as a basis of available features.
+ **NOTE**: Due to a bug in broker version 0.9.0.0 & 0.9.0.1 the broker will
+ not close the connection when receiving the API version request, instead
+ the request will time out in librdkafka after 10 seconds and it will fall
+ back to `broker.version.fallback` on the next immediate connection attempt.
+ * `broker.version.fallback=X.Y.Z.N` - if the API version request fails
+ (if `api.version.request=true`) or API version requests are disabled
+ (`api.version.request=false`) then this tells librdkafka what version the
+ broker is running and adapts its feature set accordingly.
+ * `api.version.fallback.ms=MS` - In the case where `api.version.request=true`
+ and the API version request fails, this property dictates for how long
+ librdkafka will use `broker.version.fallback` instead of
+ `api.version.request=true`. After `MS` has passed the API version request
+ will be sent on any new connections made for the broker in question.
+ This allows upgrading the Kafka broker to a new version with extended
+ feature set without needing to restart or reconfigure the client
+ (given that `api.version.request=true`).
+
+*Note: These properties applies per broker.*
+
+The API version query was disabled by default (`api.version.request=false`) in
+librdkafka up to and including v0.9.5 due to the afforementioned bug in
+broker version 0.9.0.0 & 0.9.0.1, but was changed to `true` in
+librdkafka v0.11.0.
+
+
+### Supported KIPs
+
+The [Apache Kafka Implementation Proposals (KIPs)](https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Improvement+Proposals) supported by librdkafka.
+
+
+| KIP | Kafka release | Status |
+|--------------------------------------------------------------------------|-----------------------------|-----------------------------------------------------------------------------------------------|
+| KIP-1 - Stop accepting request.required.acks > 1 | 0.9.0.0 | Not enforced on client (due to backwards compat with brokers <0.8.3) |
+| KIP-4 - Metadata protocol changes | 0.9.0.0, 0.10.0.0, 0.10.1.0 | Supported |
+| KIP-8 - Producer flush() | 0.9.0.0 | Supported |
+| KIP-12 - SASL Kerberos | 0.9.0.0 | Supported (uses SSPI/logged-on-user on Windows, full KRB5 keytabs on Unix) |
+| KIP-13 - Protocol request throttling (enforced on broker) | 0.9.0.0 | Supported |
+| KIP-15 - Producer close with timeout | 0.9.0.0 | Supported (through flush() + destroy()) |
+| KIP-19 - Request timeouts | 0.9.0.0 | Supported |
+| KIP-22 - Producer pluggable partitioner | 0.9.0.0 | Supported (not supported by Go, .NET and Python) |
+| KIP-31 - Relative offsets in messagesets | 0.10.0.0 | Supported |
+| KIP-35 - ApiVersionRequest | 0.10.0.0 | Supported |
+| KIP-40 - ListGroups and DescribeGroups | 0.9.0.0 | Supported |
+| KIP-41 - max.poll.records | 0.10.0.0 | Supported through batch consumption interface (not supported by .NET and Go) |
+| KIP-42 - Producer and Consumer interceptors | 0.10.0.0 | Supported (not supported by Go, .NET and Python) |
+| KIP-43 - SASL PLAIN and handshake | 0.10.0.0 | Supported |
+| KIP-48 - Delegation tokens | 1.1.0 | Not supported |
+| KIP-54 - Sticky partition assignment strategy | 0.11.0.0 | Supported but not available, use KIP-429 instead. |
+| KIP-57 - Interoperable LZ4 framing | 0.10.0.0 | Supported |
+| KIP-62 - max.poll.interval and background heartbeats | 0.10.1.0 | Supported |
+| KIP-70 - Proper client rebalance event on unsubscribe/subscribe | 0.10.1.0 | Supported |
+| KIP-74 - max.partition.fetch.bytes | 0.10.1.0 | Supported |
+| KIP-78 - Retrieve Cluster Id | 0.10.1.0 | Supported (not supported by .NET) |
+| KIP-79 - OffsetsForTimes | 0.10.1.0 | Supported |
+| KIP-81 - Consumer pre-fetch buffer size | 2.4.0 (WIP) | Supported |
+| KIP-82 - Record Headers | 0.11.0.0 | Supported |
+| KIP-84 - SASL SCRAM | 0.10.2.0 | Supported |
+| KIP-85 - SASL config properties | 0.10.2.0 | Supported |
+| KIP-86 - Configurable SASL callbacks | 2.0.0 | Not supported |
+| KIP-88 - AdminAPI: ListGroupOffsets | 0.10.2.0 | Supported |
+| KIP-91 - Intuitive timeouts in Producer | 2.1.0 | Supported |
+| KIP-92 - Per-partition lag metrics in Consumer | 0.10.2.0 | Supported |
+| KIP-97 - Backwards compatibility with older brokers | 0.10.2.0 | Supported |
+| KIP-98 - EOS | 0.11.0.0 | Supported |
+| KIP-102 - Close with timeout in consumer | 0.10.2.0 | Not supported |
+| KIP-107 - AdminAPI: DeleteRecordsBefore | 0.11.0.0 | Supported |
+| KIP-110 - ZStd compression | 2.1.0 | Supported |
+| KIP-117 - AdminClient | 0.11.0.0 | Supported |
+| KIP-124 - Request rate quotas | 0.11.0.0 | Partially supported (depending on protocol request) |
+| KIP-126 - Producer ensure proper batch size after compression | 0.11.0.0 | Supported |
+| KIP-133 - AdminAPI: DescribeConfigs and AlterConfigs | 0.11.0.0 | Supported |
+| KIP-140 - AdminAPI: ACLs | 0.11.0.0 | Supported |
+| KIP-144 - Broker reconnect backoff | 0.11.0.0 | Supported |
+| KIP-152 - Improved SASL auth error messages | 1.0.0 | Supported |
+| KIP-192 - Cleaner idempotence semantics | 1.0.0 | Not supported (superceeded by KIP-360) |
+| KIP-195 - AdminAPI: CreatePartitions | 1.0.0 | Supported |
+| KIP-204 - AdminAPI: DeleteRecords | 1.1.0 | Supported |
+| KIP-219 - Client-side throttling | 2.0.0 | Not supported |
+| KIP-222 - AdminAPI: Consumer group operations | 2.0.0 | Supported |
+| KIP-223 - Consumer partition lead metric | 2.0.0 | Not supported |
+| KIP-226 - AdminAPI: Dynamic broker config | 1.1.0 | Supported |
+| KIP-227 - Consumer Incremental Fetch | 1.1.0 | Not supported |
+| KIP-229 - AdminAPI: DeleteGroups | 1.1.0 | Supported |
+| KIP-235 - DNS alias for secure connections | 2.1.0 | Not supported |
+| KIP-249 - AdminAPI: Deletegation Tokens | 2.0.0 | Not supported |
+| KIP-255 - SASL OAUTHBEARER | 2.0.0 | Supported |
+| KIP-266 - Fix indefinite consumer timeouts | 2.0.0 | Supported (bound by session.timeout.ms and max.poll.interval.ms) |
+| KIP-289 - Consumer group.id default to NULL | 2.2.0 | Supported |
+| KIP-294 - SSL endpoint verification | 2.0.0 | Supported |
+| KIP-302 - Use all addresses for resolved broker hostname | 2.1.0 | Supported |
+| KIP-320 - Consumer: handle log truncation | 2.1.0, 2.2.0 | Supported |
+| KIP-322 - DeleteTopics disabled error code | 2.1.0 | Supported |
+| KIP-339 - AdminAPI: incrementalAlterConfigs | 2.3.0 | Not supported |
+| KIP-341 - Update Sticky partition assignment data | 2.3.0 | Not supported (superceeded by KIP-429) |
+| KIP-342 - Custom SASL OAUTHBEARER extensions | 2.1.0 | Supported |
+| KIP-345 - Consumer: Static membership | 2.4.0 | Supported |
+| KIP-357 - AdminAPI: list ACLs per principal | 2.1.0 | Not supported |
+| KIP-359 - Producer: use EpochLeaderId | 2.4.0 | Not supported |
+| KIP-360 - Improve handling of unknown Idempotent Producer | 2.5.0 | Supported |
+| KIP-361 - Consumer: add config to disable auto topic creation | 2.3.0 | Supported |
+| KIP-368 - SASL periodic reauth | 2.2.0 | Not supported |
+| KIP-369 - Always roundRobin partitioner | 2.4.0 | Not supported |
+| KIP-389 - Consumer group max size | 2.2.0 | Supported (error is propagated to application, but the consumer does not raise a fatal error) |
+| KIP-392 - Allow consumers to fetch from closest replica | 2.4.0 | Supported |
+| KIP-394 - Consumer: require member.id in JoinGroupRequest | 2.2.0 | Supported |
+| KIP-396 - AdminAPI: commit/list offsets | 2.4.0 | Partially supported (remaining APIs available outside Admin client) |
+| KIP-412 - AdminAPI: adjust log levels | 2.4.0 | Not supported |
+| KIP-421 - Variables in client config files | 2.3.0 | Not applicable (librdkafka, et.al, does not provide a config file interface, and shouldn't) |
+| KIP-429 - Consumer: incremental rebalance protocol | 2.4.0 | Supported |
+| KIP-430 - AdminAPI: return authorized operations in Describe.. responses | 2.3.0 | Not supported |
+| KIP-436 - Start time in stats | 2.3.0 | Supported |
+| KIP-447 - Producer scalability for EOS | 2.5.0 | Supported |
+| KIP-455 - AdminAPI: Replica assignment | 2.4.0 (WIP) | Not supported |
+| KIP-460 - AdminAPI: electPreferredLeader | 2.4.0 | Not supported |
+| KIP-464 - AdminAPI: defaults for createTopics | 2.4.0 | Supported |
+| KIP-467 - Per-message (sort of) error codes in ProduceResponse | 2.4.0 (WIP) | Not supported |
+| KIP-480 - Sticky partitioner | 2.4.0 | Supported |
+| KIP-482 - Optional fields in Kafka protocol | 2.4.0 | Partially supported (ApiVersionRequest) |
+| KIP-496 - AdminAPI: delete offsets | 2.4.0 | Supported |
+| KIP-511 - Collect Client's Name and Version | 2.4.0 | Supported |
+| KIP-514 - Bounded flush() | 2.4.0 | Supported |
+| KIP-517 - Consumer poll() metrics | 2.4.0 | Not supported |
+| KIP-518 - Allow listing consumer groups per state | 2.6.0 | Supported |
+| KIP-519 - Make SSL engine configurable | 2.6.0 | Supported |
+| KIP-525 - Return topic metadata and configs in CreateTopics response | 2.4.0 | Not supported |
+| KIP-526 - Reduce Producer Metadata Lookups for Large Number of Topics | 2.5.0 | Not supported |
+| KIP-533 - Add default API timeout to AdminClient | 2.5.0 | Not supported |
+| KIP-546 - Add Client Quota APIs to AdminClient | 2.6.0 | Not supported |
+| KIP-559 - Make the Kafka Protocol Friendlier with L7 Proxies | 2.5.0 | Not supported |
+| KIP-568 - Explicit rebalance triggering on the Consumer | 2.6.0 | Not supported |
+| KIP-659 - Add metadata to DescribeConfigsResponse | 2.6.0 | Not supported |
+| KIP-580 - Exponential backoff for Kafka clients | WIP | Partially supported |
+| KIP-584 - Versioning scheme for features | WIP | Not supported |
+| KIP-588 - Allow producers to recover gracefully from txn timeouts | 2.8.0 (WIP) | Not supported |
+| KIP-601 - Configurable socket connection timeout | 2.7.0 | Supported |
+| KIP-602 - Use all resolved addresses by default | 2.6.0 | Supported |
+| KIP-651 - Support PEM format for SSL certs and keys | 2.7.0 | Supported |
+| KIP-654 - Aborted txns with non-flushed msgs should not be fatal | 2.7.0 | Supported |
+| KIP-735 - Increase default consumer session timeout | 3.0.0 | Supported |
+| KIP-768 - SASL/OAUTHBEARER OIDC support | 3.0 | Supported |
+
+
+
+
+### Supported protocol versions
+
+"Kafka max" is the maximum ApiVersion supported in Apache Kafka 3.3.1, while
+"librdkafka max" is the maximum ApiVersion supported in the latest
+release of librdkafka.
+
+
+| ApiKey | Request name | Kafka max | librdkafka max |
+| ------- | ------------------- | ----------- | ----------------------- |
+| 0 | Produce | 9 | 7 |
+| 1 | Fetch | 13 | 11 |
+| 2 | ListOffsets | 7 | 2 |
+| 3 | Metadata | 12 | 9 |
+| 8 | OffsetCommit | 8 | 7 |
+| 9 | OffsetFetch | 8 | 7 |
+| 10 | FindCoordinator | 4 | 2 |
+| 11 | JoinGroup | 9 | 5 |
+| 12 | Heartbeat | 4 | 3 |
+| 13 | LeaveGroup | 5 | 1 |
+| 14 | SyncGroup | 5 | 3 |
+| 15 | DescribeGroups | 5 | 4 |
+| 16 | ListGroups | 4 | 4 |
+| 17 | SaslHandshake | 1 | 1 |
+| 18 | ApiVersions | 3 | 3 |
+| 19 | CreateTopics | 7 | 4 |
+| 20 | DeleteTopics | 6 | 1 |
+| 21 | DeleteRecords | 2 | 1 |
+| 22 | InitProducerId | 4 | 4 |
+| 24 | AddPartitionsToTxn | 3 | 0 |
+| 25 | AddOffsetsToTxn | 3 | 0 |
+| 26 | EndTxn | 3 | 1 |
+| 28 | TxnOffsetCommit | 3 | 3 |
+| 32 | DescribeConfigs | 4 | 1 |
+| 33 | AlterConfigs | 2 | 1 |
+| 36 | SaslAuthenticate | 2 | 0 |
+| 37 | CreatePartitions | 3 | 0 |
+| 42 | DeleteGroups | 2 | 1 |
+| 47 | OffsetDelete | 0 | 0 |
+
+
+
+# Recommendations for language binding developers
+
+These recommendations are targeted for developers that wrap librdkafka
+with their high-level languages, such as confluent-kafka-go or node-rdkafka.
+
+## Expose the configuration interface pass-thru
+
+librdkafka's string-based key=value configuration property interface controls
+most runtime behaviour and evolves over time.
+Most features are also only configuration-based, meaning they do not require a
+new API (SSL and SASL are two good examples which are purely enabled through
+configuration properties) and thus no changes needed to the binding/application
+code.
+
+If your language binding/applications allows configuration properties to be set
+in a pass-through fashion without any pre-checking done by your binding code it
+means that a simple upgrade of the underlying librdkafka library (but not your
+bindings) will provide new features to the user.
+
+## Error constants
+
+The error constants, both the official (value >= 0) errors as well as the
+internal (value < 0) errors, evolve constantly.
+To avoid hard-coding them to expose to your users, librdkafka provides an API
+to extract the full list programmatically during runtime or for
+code generation, see `rd_kafka_get_err_descs()`.
+
+## Reporting client software name and version to broker
+
+[KIP-511](https://cwiki.apache.org/confluence/display/KAFKA/KIP-511%3A+Collect+and+Expose+Client%27s+Name+and+Version+in+the+Brokers) introduces a means for a
+Kafka client to report its implementation name and version to the broker, the
+broker then exposes this as metrics (e.g., through JMX) to help Kafka operators
+troubleshoot problematic clients, understand the impact of broker and client
+upgrades, etc.
+This requires broker version 2.4.0 or later (metrics added in 2.5.0).
+
+librdkafka will send its name (`librdkafka`) and version (e.g., `v1.3.0`)
+upon connect to a supporting broker.
+To help distinguish high-level client bindings on top of librdkafka, a client
+binding should configure the following two properties:
+ * `client.software.name` - set to the binding name, e.g,
+ `confluent-kafka-go` or `node-rdkafka`.
+ * `client.software.version` - the version of the binding and the version
+ of librdkafka, e.g., `v1.3.0-librdkafka-v1.3.0` or
+ `1.2.0-librdkafka-v1.3.0`.
+ It is **highly recommended** to include the librdkafka version in this
+ version string.
+
+These configuration properties are hidden (from CONFIGURATION.md et.al.) as
+they should typically not be modified by the user.
+
+## Documentation reuse
+
+You are free to reuse the librdkafka API and CONFIGURATION documentation in
+your project, but please do return any documentation improvements back to
+librdkafka (file a github pull request).
+
+## Community support
+
+You are welcome to direct your users to
+[librdkafka's Gitter chat room](http://gitter.im/edenhill/librdkafka) as long as
+you monitor the conversions in there to pick up questions specific to your
+bindings.
+But for the most part user questions are usually generic enough to apply to all
+librdkafka bindings.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE b/fluent-bit/lib/librdkafka-2.1.0/LICENSE
new file mode 100644
index 000000000..193ffaae2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE
@@ -0,0 +1,25 @@
+librdkafka - Apache Kafka C driver library
+
+Copyright (c) 2012-2020, Magnus Edenhill
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.cjson b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.cjson
new file mode 100644
index 000000000..72cd1e107
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.cjson
@@ -0,0 +1,22 @@
+For cJSON.c and cJSON.h:
+
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.crc32c b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.crc32c
new file mode 100644
index 000000000..482a3456b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.crc32c
@@ -0,0 +1,28 @@
+# For src/crc32c.c copied (with modifications) from
+# http://stackoverflow.com/a/17646775/1821055
+
+/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
+ * Copyright (C) 2013 Mark Adler
+ * Version 1.1 1 Aug 2013 Mark Adler
+ */
+
+/*
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the author be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Mark Adler
+ madler@alumni.caltech.edu
+ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.fnv1a b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.fnv1a
new file mode 100644
index 000000000..a8c4f8751
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.fnv1a
@@ -0,0 +1,18 @@
+parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c
+
+
+Please do not copyright this code. This code is in the public domain.
+
+LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
+EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+By:
+ chongo <Landon Curt Noll> /\oo/\
+ http://www.isthe.com/chongo/
+
+Share and Enjoy! :-)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.hdrhistogram b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.hdrhistogram
new file mode 100644
index 000000000..7f20d7dc4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.hdrhistogram
@@ -0,0 +1,27 @@
+This license covers src/rdhdrhistogram.c which is a C port of
+Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
+at revision 3a0bb77429bd3a61596f5e8a3172445844342120
+
+-----------------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Coda Hale
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.lz4 b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.lz4
new file mode 100644
index 000000000..f57dbc6ba
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.lz4
@@ -0,0 +1,26 @@
+src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
+
+LZ4 Library
+Copyright (c) 2011-2016, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.murmur2 b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.murmur2
new file mode 100644
index 000000000..296fffab4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.murmur2
@@ -0,0 +1,25 @@
+parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
+
+
+MurMurHash2 Library
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.pycrc b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.pycrc
new file mode 100644
index 000000000..71baded4a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.pycrc
@@ -0,0 +1,23 @@
+The following license applies to the files rdcrc32.c and rdcrc32.h which
+have been generated by the pycrc tool.
+============================================================================
+
+Copyright (c) 2006-2012, Thomas Pircher <tehpeh@gmx.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.queue b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.queue
new file mode 100644
index 000000000..14bbf9380
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.queue
@@ -0,0 +1,31 @@
+For sys/queue.h:
+
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD$ \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.regexp b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.regexp
new file mode 100644
index 000000000..5fa0b1048
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.regexp
@@ -0,0 +1,5 @@
+regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
+
+"
+These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
+"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.snappy b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.snappy
new file mode 100644
index 000000000..baa6cfe10
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.snappy
@@ -0,0 +1,36 @@
+######################################################################
+# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h #
+# originally retrieved from http://github.com/andikleen/snappy-c #
+# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219 #
+######################################################################
+
+The snappy-c code is under the same license as the original snappy source
+
+Copyright 2011 Intel Corporation All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Intel Corporation nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.tinycthread b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.tinycthread
new file mode 100644
index 000000000..0ceadef9c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.tinycthread
@@ -0,0 +1,26 @@
+From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
+
+License
+-------
+
+Copyright (c) 2012 Marcus Geelnard
+ 2013-2014 Evan Nemerson
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSE.wingetopt b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.wingetopt
new file mode 100644
index 000000000..4c28701b2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSE.wingetopt
@@ -0,0 +1,49 @@
+For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
+
+/*
+ * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F39502-99-1-0512.
+ */
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dieter Baron and Thomas Klausner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/LICENSES.txt b/fluent-bit/lib/librdkafka-2.1.0/LICENSES.txt
new file mode 100644
index 000000000..1ab8a1dd4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/LICENSES.txt
@@ -0,0 +1,392 @@
+LICENSE
+--------------------------------------------------------------
+librdkafka - Apache Kafka C driver library
+
+Copyright (c) 2012-2020, Magnus Edenhill
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are met:
+
+1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+
+
+LICENSE.cjson
+--------------------------------------------------------------
+For cJSON.c and cJSON.h:
+
+Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+
+LICENSE.crc32c
+--------------------------------------------------------------
+# For src/crc32c.c copied (with modifications) from
+# http://stackoverflow.com/a/17646775/1821055
+
+/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
+ * Copyright (C) 2013 Mark Adler
+ * Version 1.1 1 Aug 2013 Mark Adler
+ */
+
+/*
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the author be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Mark Adler
+ madler@alumni.caltech.edu
+ */
+
+
+LICENSE.fnv1a
+--------------------------------------------------------------
+parts of src/rdfnv1a.c: http://www.isthe.com/chongo/src/fnv/hash_32a.c
+
+
+Please do not copyright this code. This code is in the public domain.
+
+LANDON CURT NOLL DISCLAIMS ALL WARRANTIES WITH REGARD TO THIS SOFTWARE,
+INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS. IN NO
+EVENT SHALL LANDON CURT NOLL BE LIABLE FOR ANY SPECIAL, INDIRECT OR
+CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF
+USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR
+OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+PERFORMANCE OF THIS SOFTWARE.
+
+By:
+ chongo <Landon Curt Noll> /\oo/\
+ http://www.isthe.com/chongo/
+
+Share and Enjoy! :-)
+
+
+LICENSE.hdrhistogram
+--------------------------------------------------------------
+This license covers src/rdhdrhistogram.c which is a C port of
+Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
+at revision 3a0bb77429bd3a61596f5e8a3172445844342120
+
+-----------------------------------------------------------------------------
+
+The MIT License (MIT)
+
+Copyright (c) 2014 Coda Hale
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE
+
+
+LICENSE.lz4
+--------------------------------------------------------------
+src/rdxxhash.[ch] src/lz4*.[ch]: git@github.com:lz4/lz4.git e2827775ee80d2ef985858727575df31fc60f1f3
+
+LZ4 Library
+Copyright (c) 2011-2016, Yann Collet
+All rights reserved.
+
+Redistribution and use in source and binary forms, with or without modification,
+are permitted provided that the following conditions are met:
+
+* Redistributions of source code must retain the above copyright notice, this
+ list of conditions and the following disclaimer.
+
+* Redistributions in binary form must reproduce the above copyright notice, this
+ list of conditions and the following disclaimer in the documentation and/or
+ other materials provided with the distribution.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
+ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
+DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
+ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
+LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
+ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
+SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+LICENSE.murmur2
+--------------------------------------------------------------
+parts of src/rdmurmur2.c: git@github.com:abrandoned/murmur2.git
+
+
+MurMurHash2 Library
+//-----------------------------------------------------------------------------
+// MurmurHash2 was written by Austin Appleby, and is placed in the public
+// domain. The author hereby disclaims copyright to this source code.
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+
+
+LICENSE.pycrc
+--------------------------------------------------------------
+The following license applies to the files rdcrc32.c and rdcrc32.h which
+have been generated by the pycrc tool.
+============================================================================
+
+Copyright (c) 2006-2012, Thomas Pircher <tehpeh@gmx.net>
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
+
+
+LICENSE.queue
+--------------------------------------------------------------
+For sys/queue.h:
+
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 4. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ * $FreeBSD$
+
+LICENSE.regexp
+--------------------------------------------------------------
+regexp.c and regexp.h from https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684
+
+"
+These libraries are in the public domain (or the equivalent where that is not possible). You can do anything you want with them. You have no legal obligation to do anything else, although I appreciate attribution.
+"
+
+
+LICENSE.snappy
+--------------------------------------------------------------
+######################################################################
+# LICENSE.snappy covers files: snappy.c, snappy.h, snappy_compat.h #
+# originally retrieved from http://github.com/andikleen/snappy-c #
+# git revision 8015f2d28739b9a6076ebaa6c53fe27bc238d219 #
+######################################################################
+
+The snappy-c code is under the same license as the original snappy source
+
+Copyright 2011 Intel Corporation All Rights Reserved.
+
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions are
+met:
+
+ * Redistributions of source code must retain the above copyright
+notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+copyright notice, this list of conditions and the following disclaimer
+in the documentation and/or other materials provided with the
+distribution.
+ * Neither the name of Intel Corporation nor the names of its
+contributors may be used to endorse or promote products derived from
+this software without specific prior written permission.
+
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+
+
+LICENSE.tinycthread
+--------------------------------------------------------------
+From https://github.com/tinycthread/tinycthread/README.txt c57166cd510ffb5022dd5f127489b131b61441b9
+
+License
+-------
+
+Copyright (c) 2012 Marcus Geelnard
+ 2013-2014 Evan Nemerson
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
+
+
+LICENSE.wingetopt
+--------------------------------------------------------------
+For the files wingetopt.c wingetopt.h downloaded from https://github.com/alex85k/wingetopt
+
+/*
+ * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F39502-99-1-0512.
+ */
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dieter Baron and Thomas Klausner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/Makefile b/fluent-bit/lib/librdkafka-2.1.0/Makefile
new file mode 100755
index 000000000..2d931f09a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/Makefile
@@ -0,0 +1,124 @@
+LIBSUBDIRS= src src-cpp
+
+CHECK_FILES+= CONFIGURATION.md \
+ examples/rdkafka_example examples/rdkafka_performance \
+ examples/rdkafka_example_cpp
+
+DOC_FILES+= LICENSE LICENSES.txt INTRODUCTION.md README.md \
+ CONFIGURATION.md STATISTICS.md CHANGELOG.md
+
+PKGNAME?= librdkafka
+VERSION?= $(shell python3 packaging/get_version.py src/rdkafka.h)
+
+# Jenkins CI integration
+BUILD_NUMBER ?= 1
+
+# Skip copyright check in the following paths
+MKL_COPYRIGHT_SKIP?=^(tests|packaging)
+
+
+.PHONY:
+
+all: mklove-check libs CONFIGURATION.md check TAGS
+
+include mklove/Makefile.base
+
+libs:
+ @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d || exit $?; done)
+
+CONFIGURATION.md: src/rdkafka.h examples
+ @printf "$(MKL_YELLOW)Updating $@$(MKL_CLR_RESET)\n"
+ @echo "# Configuration properties" > CONFIGURATION.md.tmp
+ @(examples/rdkafka_performance -X list | \
+ sed 's/||/\\|\\|/g' >> \
+ CONFIGURATION.md.tmp; \
+ cmp CONFIGURATION.md CONFIGURATION.md.tmp || \
+ mv -f CONFIGURATION.md.tmp CONFIGURATION.md; \
+ rm -f CONFIGURATION.md.tmp)
+
+file-check: CONFIGURATION.md LICENSES.txt examples
+check: file-check
+ @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ || exit $?; done)
+
+install-subdirs:
+ @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d install || exit $?; done)
+
+install: install-subdirs doc-install
+
+uninstall-subdirs:
+ @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d uninstall || exit $?; done)
+
+uninstall: uninstall-subdirs doc-uninstall
+
+examples tests: .PHONY libs
+ $(MAKE) -C $@
+
+docs:
+ doxygen Doxyfile
+ @echo "Documentation generated in staging-docs"
+
+clean-docs:
+ rm -rf staging-docs
+
+clean:
+ @$(MAKE) -C tests $@
+ @$(MAKE) -C examples $@
+ @(for d in $(LIBSUBDIRS); do $(MAKE) -C $$d $@ ; done)
+
+distclean: clean deps-clean
+ ./configure --clean
+ rm -f config.log config.log.old
+
+archive:
+ git archive --prefix=$(PKGNAME)-$(VERSION)/ \
+ -o $(PKGNAME)-$(VERSION).tar.gz HEAD
+ git archive --prefix=$(PKGNAME)-$(VERSION)/ \
+ -o $(PKGNAME)-$(VERSION).zip HEAD
+
+rpm: distclean
+ $(MAKE) -C packaging/rpm
+
+LICENSES.txt: .PHONY
+ @(for i in LICENSE LICENSE.*[^~] ; do (echo "$$i" ; echo "--------------------------------------------------------------" ; cat $$i ; echo "" ; echo "") ; done) > $@.tmp
+ @cmp $@ $@.tmp || mv -f $@.tmp $@ ; rm -f $@.tmp
+
+
+TAGS: .PHONY
+ @(if which etags >/dev/null 2>&1 ; then \
+ echo "Using etags to generate $@" ; \
+ git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$' | \
+ etags -f $@.tmp - ; \
+ cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp ; \
+ elif which ctags >/dev/null 2>&1 ; then \
+ echo "Using ctags to generate $@" ; \
+ git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h)$$' | \
+ ctags -e -f $@.tmp -L- ; \
+ cmp $@ $@.tmp || mv $@.tmp $@ ; rm -f $@.tmp ; \
+ fi)
+
+coverity: Makefile.config
+ @(which cov-build >/dev/null 2>&1 || echo "Make sure coverity../bin is in your PATH")
+ @(cd src && \
+ make clean && \
+ (rm -rf cov-int cov-librdkafka.tgz cov-build || true) && \
+ cov-build --dir cov-int make -j && \
+ tar cvzf ../cov-librdkafka.tgz cov-int && \
+ printf "$(MKL_GREEN)Now upload cov-librdkafka.tgz to Coverity for analysis$(MKL_CLR_RESET)\n")
+
+
+style-check:
+ @(packaging/tools/style-format.sh \
+ $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$') )
+
+style-check-changed:
+ @(packaging/tools/style-format.sh \
+ $$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|cpp|h|py)$$'))
+
+style-fix:
+ @(packaging/tools/style-format.sh --fix \
+ $$(git ls-tree -r --name-only HEAD | egrep '\.(c|cpp|h|py)$$'))
+
+style-fix-changed:
+ @(packaging/tools/style-format.sh --fix \
+ $$( (git diff --name-only ; git diff --name-only --staged) | egrep '\.(c|cpp|h|py)$$'))
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/README.md b/fluent-bit/lib/librdkafka-2.1.0/README.md
new file mode 100644
index 000000000..640b8791c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/README.md
@@ -0,0 +1,198 @@
+librdkafka - the Apache Kafka C/C++ client library
+==================================================
+
+Copyright (c) 2012-2022, [Magnus Edenhill](http://www.edenhill.se/).
+
+[https://github.com/edenhill/librdkafka](https://github.com/edenhill/librdkafka)
+
+**librdkafka** is a C library implementation of the
+[Apache Kafka](https://kafka.apache.org/) protocol, providing Producer, Consumer
+and Admin clients. It was designed with message delivery reliability
+and high performance in mind, current figures exceed 1 million msgs/second for
+the producer and 3 million msgs/second for the consumer.
+
+**librdkafka** is licensed under the 2-clause BSD license.
+
+KAFKA is a registered trademark of The Apache Software Foundation and
+has been licensed for use by librdkafka. librdkafka has no
+affiliation with and is not endorsed by The Apache Software Foundation.
+
+
+# Features #
+ * Full Exactly-Once-Semantics (EOS) support
+ * High-level producer, including Idempotent and Transactional producers
+ * High-level balanced KafkaConsumer (requires broker >= 0.9)
+ * Simple (legacy) consumer
+ * Admin client
+ * Compression: snappy, gzip, lz4, zstd
+ * [SSL](https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka) support
+ * [SASL](https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka) (GSSAPI/Kerberos/SSPI, PLAIN, SCRAM, OAUTHBEARER) support
+ * Full list of [supported KIPs](INTRODUCTION.md#supported-kips)
+ * Broker version support: >=0.8 (see [Broker version compatibility](INTRODUCTION.md#broker-version-compatibility))
+ * Guaranteed API stability for C & C++ APIs (ABI safety guaranteed for C)
+ * [Statistics](STATISTICS.md) metrics
+ * Debian package: librdkafka1 and librdkafka-dev in Debian and Ubuntu
+ * RPM package: librdkafka and librdkafka-devel
+ * Gentoo package: dev-libs/librdkafka
+ * Portable: runs on Linux, MacOS X, Windows, Solaris, FreeBSD, AIX, ...
+
+# Documentation
+
+ * Public API in [C header](src/rdkafka.h) and [C++ header](src-cpp/rdkafkacpp.h).
+ * Introduction and manual in [INTRODUCTION.md](https://github.com/edenhill/librdkafka/blob/master/INTRODUCTION.md).
+ * Configuration properties in
+[CONFIGURATION.md](https://github.com/edenhill/librdkafka/blob/master/CONFIGURATION.md).
+ * Statistics metrics in [STATISTICS.md](https://github.com/edenhill/librdkafka/blob/master/STATISTICS.md).
+ * [Frequently asked questions](https://github.com/edenhill/librdkafka/wiki).
+ * Step-by-step tutorial [Getting Started with Apache Kafka and C/C++](https://developer.confluent.io/get-started/c/).
+
+**NOTE**: The `master` branch is actively developed, use latest [release](https://github.com/edenhill/librdkafka/releases) for production use.
+
+
+# Installation
+
+## Installing prebuilt packages
+
+On Mac OSX, install librdkafka with homebrew:
+
+```bash
+$ brew install librdkafka
+```
+
+On Debian and Ubuntu, install librdkafka from the Confluent APT repositories,
+see instructions [here](https://docs.confluent.io/current/installation/installing_cp/deb-ubuntu.html#get-the-software) and then install librdkafka:
+
+ ```bash
+ $ apt install librdkafka-dev
+ ```
+
+On RedHat, CentOS, Fedora, install librdkafka from the Confluent YUM repositories,
+instructions [here](https://docs.confluent.io/current/installation/installing_cp/rhel-centos.html#get-the-software) and then install librdkafka:
+
+```bash
+$ yum install librdkafka-devel
+```
+
+On Windows, reference [librdkafka.redist](https://www.nuget.org/packages/librdkafka.redist/) NuGet package in your Visual Studio project.
+
+
+For other platforms, follow the source building instructions below.
+
+
+## Installing librdkafka using vcpkg
+
+You can download and install librdkafka using the [vcpkg](https://github.com/Microsoft/vcpkg) dependency manager:
+
+```bash
+# Install vcpkg if not already installed
+$ git clone https://github.com/Microsoft/vcpkg.git
+$ cd vcpkg
+$ ./bootstrap-vcpkg.sh
+$ ./vcpkg integrate install
+
+# Install librdkafka
+$ vcpkg install librdkafka
+```
+
+The librdkafka package in vcpkg is kept up to date by Microsoft team members and community contributors.
+If the version is out of date, please [create an issue or pull request](https://github.com/Microsoft/vcpkg) on the vcpkg repository.
+
+
+## Build from source
+
+### Requirements
+ The GNU toolchain
+ GNU make
+ pthreads
+ zlib-dev (optional, for gzip compression support)
+ libssl-dev (optional, for SSL and SASL SCRAM support)
+ libsasl2-dev (optional, for SASL GSSAPI support)
+ libzstd-dev (optional, for ZStd compression support)
+ libcurl-dev (optional, for SASL OAUTHBEARER OIDC support)
+
+**NOTE**: Static linking of ZStd (requires zstd >= 1.2.1) in the producer
+ enables encoding the original size in the compression frame header,
+ which will speed up the consumer.
+ Use `STATIC_LIB_libzstd=/path/to/libzstd.a ./configure --enable-static`
+ to enable static ZStd linking.
+ MacOSX example:
+ `STATIC_LIB_libzstd=$(brew ls -v zstd | grep libzstd.a$) ./configure --enable-static`
+
+
+### Building
+
+ ./configure
+ # Or, to automatically install dependencies using the system's package manager:
+ # ./configure --install-deps
+ # Or, build dependencies from source:
+ # ./configure --install-deps --source-deps-only
+
+ make
+ sudo make install
+
+
+**NOTE**: See [README.win32](README.win32) for instructions how to build
+ on Windows with Microsoft Visual Studio.
+
+**NOTE**: See [CMake instructions](packaging/cmake/README.md) for experimental
+ CMake build (unsupported).
+
+
+## Usage in code
+
+See [getting Started with Apache Kafka and C/C++](https://developer.confluent.io/get-started/c/) for a basic tutorial.
+
+1. Refer to the [examples directory](examples/) for code using:
+
+ * Producers: basic producers, idempotent producers, transactional producers.
+ * Consumers: basic consumers, reading batches of messages.
+ * Performance and latency testing tools.
+
+2. Refer to the [examples GitHub repo](https://github.com/confluentinc/examples/tree/master/clients/cloud/c) for code connecting to a cloud streaming data service based on Apache Kafka
+
+3. Link your program with `-lrdkafka` (C) or `-lrdkafka++` (C++).
+
+
+## Commercial support
+
+Commercial support is available from [Confluent Inc](https://www.confluent.io/)
+
+
+## Community support
+
+**Only the [latest official release](https://github.com/edenhill/librdkafka/releases) is supported for community members.**
+
+File bug reports and feature requests using [GitHub Issues](https://github.com/edenhill/librdkafka/issues).
+
+Questions and discussions are welcome on the [Discussions](https://github.com/edenhill/librdkafka/discussions) forum, and on the [Confluent Community slack](https://launchpass.com/confluentcommunity) #clients channel.
+
+
+# Language bindings #
+
+ * C#/.NET: [confluent-kafka-dotnet](https://github.com/confluentinc/confluent-kafka-dotnet) (based on [rdkafka-dotnet](https://github.com/ah-/rdkafka-dotnet))
+ * C++: [cppkafka](https://github.com/mfontanini/cppkafka)
+ * C++: [modern-cpp-kafka](https://github.com/Morgan-Stanley/modern-cpp-kafka)
+ * Common Lisp: [cl-rdkafka](https://github.com/SahilKang/cl-rdkafka)
+ * D (C-like): [librdkafka](https://github.com/DlangApache/librdkafka/)
+ * D (C++-like): [librdkafkad](https://github.com/tamediadigital/librdkafka-d)
+ * Erlang: [erlkaf](https://github.com/silviucpp/erlkaf)
+ * Go: [confluent-kafka-go](https://github.com/confluentinc/confluent-kafka-go)
+ * Haskell (kafka, conduit, avro, schema registry): [hw-kafka](https://github.com/haskell-works/hw-kafka)
+ * Kotlin Native: [Kafka-Kotlin-Native](https://github.com/icemachined/kafka-kotlin-native)
+ * Lua: [luardkafka](https://github.com/mistsv/luardkafka)
+ * Node.js: [node-rdkafka](https://github.com/Blizzard/node-rdkafka)
+ * OCaml: [ocaml-kafka](https://github.com/didier-wenzek/ocaml-kafka)
+ * Perl: [Net::Kafka](https://github.com/bookingcom/perl-Net-Kafka)
+ * PHP: [php-rdkafka](https://github.com/arnaud-lb/php-rdkafka)
+ * PHP: [php-simple-kafka-client](https://github.com/php-kafka/php-simple-kafka-client)
+ * Python: [confluent-kafka-python](https://github.com/confluentinc/confluent-kafka-python)
+ * Python: [PyKafka](https://github.com/Parsely/pykafka)
+ * Ruby: [Hermann](https://github.com/reiseburo/hermann)
+ * Ruby: [rdkafka-ruby](https://github.com/appsignal/rdkafka-ruby)
+ * Rust: [rust-rdkafka](https://github.com/fede1024/rust-rdkafka)
+ * Tcl: [KafkaTcl](https://github.com/flightaware/kafkatcl)
+ * Shell: [kafkacat](https://github.com/edenhill/kafkacat) - Apache Kafka command line tool
+ * Swift: [Perfect-Kafka](https://github.com/PerfectlySoft/Perfect-Kafka)
+
+
+See [Powered by librdkafka](https://github.com/edenhill/librdkafka/wiki/Powered-by-librdkafka) for an incomplete list of librdkafka users.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/README.win32 b/fluent-bit/lib/librdkafka-2.1.0/README.win32
new file mode 100644
index 000000000..7c8bbef88
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/README.win32
@@ -0,0 +1,26 @@
+
+Native win32 build instructions using Microsoft Visual Studio 2015 (MSVC).
+
+Requirements:
+ * zlib is installed automatically from NuGet,
+ but probably requires the NuGet VS extension.
+ * OpenSSL-win32 must be installed in C:\OpenSSL-win32 and C:\OpenSSL-Win64
+ depending on your architecture.
+ Download and install the latest v1.0.2 non-light package from:
+ https://slproweb.com/products/Win32OpenSSL.html
+ (This would be using NuGet too but the current
+ OpenSSL packages are outdated and with broken
+ dependencies, so no luck)
+
+The Visual Studio solution file for librdkafka resides in win32/librdkafka.sln
+
+Artifacts:
+ - C library
+ - C++ library
+ - rdkafka_example, rdkafka_performance
+ - tests
+
+
+If you build librdkafka with an external tool (ie CMake) you can get rid of the
+__declspec(dllexport) / __declspec(dllimport) decorations by adding a define
+-DLIBRDKAFKA_STATICLIB to your CFLAGS
diff --git a/fluent-bit/lib/librdkafka-2.1.0/STATISTICS.md b/fluent-bit/lib/librdkafka-2.1.0/STATISTICS.md
new file mode 100644
index 000000000..db2cb437b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/STATISTICS.md
@@ -0,0 +1,624 @@
+# Statistics
+
+librdkafka may be configured to emit internal metrics at a fixed interval
+by setting the `statistics.interval.ms` configuration property to a value > 0
+and registering a `stats_cb` (or similar, depending on language).
+
+The stats are provided as a JSON object string.
+
+**Note**: The metrics returned may not be completely consistent between
+ brokers, toppars and totals, due to the internal asynchronous
+ nature of librdkafka.
+ E.g., the top level `tx` total may be less than the sum of
+ the broker `tx` values which it represents.
+
+
+## General structure
+
+All fields that contain sizes are in bytes unless otherwise noted.
+
+```
+{
+ <Top-level fields>
+ "brokers": {
+ <brokers fields>,
+ "toppars": { <toppars fields> }
+ },
+ "topics": {
+ <topic fields>,
+ "partitions": {
+ <partitions fields>
+ }
+ }
+[, "cgrp": { <cgrp fields> } ]
+[, "eos": { <eos fields> } ]
+}
+```
+
+## Field type
+
+Fields are represented as follows:
+ * string - UTF8 string.
+ * int - Integer counter (64 bits wide). Ever increasing.
+ * int gauge - Integer gauge (64 bits wide). Will be reset to 0 on each stats emit.
+ * object - Nested JSON object.
+ * bool - `true` or `false`.
+
+
+## Top-level
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+name | string | `"rdkafka#producer-1"` | Handle instance name
+client_id | string | `"rdkafka"` | The configured (or default) `client.id`
+type | string | `"producer"` | Instance type (producer or consumer)
+ts | int | 12345678912345 | librdkafka's internal monotonic clock (microseconds)
+time | int | | Wall clock time in seconds since the epoch
+age | int | | Time since this client instance was created (microseconds)
+replyq | int gauge | | Number of ops (callbacks, events, etc) waiting in queue for application to serve with rd_kafka_poll()
+msg_cnt | int gauge | | Current number of messages in producer queues
+msg_size | int gauge | | Current total size of messages in producer queues
+msg_max | int | | Threshold: maximum number of messages allowed allowed on the producer queues
+msg_size_max | int | | Threshold: maximum total size of messages allowed on the producer queues
+tx | int | | Total number of requests sent to Kafka brokers
+tx_bytes | int | | Total number of bytes transmitted to Kafka brokers
+rx | int | | Total number of responses received from Kafka brokers
+rx_bytes | int | | Total number of bytes received from Kafka brokers
+txmsgs | int | | Total number of messages transmitted (produced) to Kafka brokers
+txmsg_bytes | int | | Total number of message bytes (including framing, such as per-Message framing and MessageSet/batch framing) transmitted to Kafka brokers
+rxmsgs | int | | Total number of messages consumed, not including ignored messages (due to offset, etc), from Kafka brokers.
+rxmsg_bytes | int | | Total number of message bytes (including framing) received from Kafka brokers
+simple_cnt | int gauge | | Internal tracking of legacy vs new consumer API state
+metadata_cache_cnt | int gauge | | Number of topics in the metadata cache.
+brokers | object | | Dict of brokers, key is broker name, value is object. See **brokers** below
+topics | object | | Dict of topics, key is topic name, value is object. See **topics** below
+cgrp | object | | Consumer group metrics. See **cgrp** below
+eos | object | | EOS / Idempotent producer state and metrics. See **eos** below
+
+## brokers
+
+Per broker statistics.
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+name | string | `"example.com:9092/13"` | Broker hostname, port and broker id
+nodeid | int | 13 | Broker id (-1 for bootstraps)
+nodename | string | `"example.com:9092"` | Broker hostname
+source | string | `"configured"` | Broker source (learned, configured, internal, logical)
+state | string | `"UP"` | Broker state (INIT, DOWN, CONNECT, AUTH, APIVERSION_QUERY, AUTH_HANDSHAKE, UP, UPDATE)
+stateage | int gauge | | Time since last broker state change (microseconds)
+outbuf_cnt | int gauge | | Number of requests awaiting transmission to broker
+outbuf_msg_cnt | int gauge | | Number of messages awaiting transmission to broker
+waitresp_cnt | int gauge | | Number of requests in-flight to broker awaiting response
+waitresp_msg_cnt | int gauge | | Number of messages in-flight to broker awaiting response
+tx | int | | Total number of requests sent
+txbytes | int | | Total number of bytes sent
+txerrs | int | | Total number of transmission errors
+txretries | int | | Total number of request retries
+txidle | int | | Microseconds since last socket send (or -1 if no sends yet for current connection).
+req_timeouts | int | | Total number of requests timed out
+rx | int | | Total number of responses received
+rxbytes | int | | Total number of bytes received
+rxerrs | int | | Total number of receive errors
+rxcorriderrs | int | | Total number of unmatched correlation ids in response (typically for timed out requests)
+rxpartial | int | | Total number of partial MessageSets received. The broker may return partial responses if the full MessageSet could not fit in the remaining Fetch response size.
+rxidle | int | | Microseconds since last socket receive (or -1 if no receives yet for current connection).
+req | object | | Request type counters. Object key is the request name, value is the number of requests sent.
+zbuf_grow | int | | Total number of decompression buffer size increases
+buf_grow | int | | Total number of buffer size increases (deprecated, unused)
+wakeups | int | | Broker thread poll loop wakeups
+connects | int | | Number of connection attempts, including successful and failed, and name resolution failures.
+disconnects | int | | Number of disconnects (triggered by broker, network, load-balancer, etc.).
+int_latency | object | | Internal producer queue latency in microseconds. See *Window stats* below
+outbuf_latency | object | | Internal request queue latency in microseconds. This is the time between a request is enqueued on the transmit (outbuf) queue and the time the request is written to the TCP socket. Additional buffering and latency may be incurred by the TCP stack and network. See *Window stats* below
+rtt | object | | Broker latency / round-trip time in microseconds. See *Window stats* below
+throttle | object | | Broker throttling time in milliseconds. See *Window stats* below
+toppars | object | | Partitions handled by this broker handle. Key is "topic-partition". See *brokers.toppars* below
+
+
+## Window stats
+
+Rolling window statistics. The values are in microseconds unless otherwise stated.
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+min | int gauge | | Smallest value
+max | int gauge | | Largest value
+avg | int gauge | | Average value
+sum | int gauge | | Sum of values
+cnt | int gauge | | Number of values sampled
+stddev | int gauge | | Standard deviation (based on histogram)
+hdrsize | int gauge | | Memory size of Hdr Histogram
+p50 | int gauge | | 50th percentile
+p75 | int gauge | | 75th percentile
+p90 | int gauge | | 90th percentile
+p95 | int gauge | | 95th percentile
+p99 | int gauge | | 99th percentile
+p99_99 | int gauge | | 99.99th percentile
+outofrange | int gauge | | Values skipped due to out of histogram range
+
+
+## brokers.toppars
+
+Topic partition assigned to broker.
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+topic | string | `"mytopic"` | Topic name
+partition | int | 3 | Partition id
+
+## topics
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+topic | string | `"myatopic"` | Topic name
+age | int gauge | | Age of client's topic object (milliseconds)
+metadata_age | int gauge | | Age of metadata from broker for this topic (milliseconds)
+batchsize | object | | Batch sizes in bytes. See *Window stats*·
+batchcnt | object | | Batch message counts. See *Window stats*·
+partitions | object | | Partitions dict, key is partition id. See **partitions** below.
+
+
+## partitions
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+partition | int | 3 | Partition Id (-1 for internal UA/UnAssigned partition)
+broker | int | | The id of the broker that messages are currently being fetched from
+leader | int | | Current leader broker id
+desired | bool | | Partition is explicitly desired by application
+unknown | bool | | Partition not seen in topic metadata from broker
+msgq_cnt | int gauge | | Number of messages waiting to be produced in first-level queue
+msgq_bytes | int gauge | | Number of bytes in msgq_cnt
+xmit_msgq_cnt | int gauge | | Number of messages ready to be produced in transmit queue
+xmit_msgq_bytes | int gauge | | Number of bytes in xmit_msgq
+fetchq_cnt | int gauge | | Number of pre-fetched messages in fetch queue
+fetchq_size | int gauge | | Bytes in fetchq
+fetch_state | string | `"active"` | Consumer fetch state for this partition (none, stopping, stopped, offset-query, offset-wait, active).
+query_offset | int gauge | | Current/Last logical offset query
+next_offset | int gauge | | Next offset to fetch
+app_offset | int gauge | | Offset of last message passed to application + 1
+stored_offset | int gauge | | Offset to be committed
+stored_leader_epoch | int | | Partition leader epoch of stored offset
+committed_offset | int gauge | | Last committed offset
+committed_leader_epoch | int | | Partition leader epoch of committed offset
+eof_offset | int gauge | | Last PARTITION_EOF signaled offset
+lo_offset | int gauge | | Partition's low watermark offset on broker
+hi_offset | int gauge | | Partition's high watermark offset on broker
+ls_offset | int gauge | | Partition's last stable offset on broker, or same as hi_offset is broker version is less than 0.11.0.0.
+consumer_lag | int gauge | | Difference between (hi_offset or ls_offset) and committed_offset). hi_offset is used when isolation.level=read_uncommitted, otherwise ls_offset.
+consumer_lag_stored | int gauge | | Difference between (hi_offset or ls_offset) and stored_offset. See consumer_lag and stored_offset.
+leader_epoch | int | | Last known partition leader epoch, or -1 if unknown.
+txmsgs | int | | Total number of messages transmitted (produced)
+txbytes | int | | Total number of bytes transmitted for txmsgs
+rxmsgs | int | | Total number of messages consumed, not including ignored messages (due to offset, etc).
+rxbytes | int | | Total number of bytes received for rxmsgs
+msgs | int | | Total number of messages received (consumer, same as rxmsgs), or total number of messages produced (possibly not yet transmitted) (producer).
+rx_ver_drops | int | | Dropped outdated messages
+msgs_inflight | int gauge | | Current number of messages in-flight to/from broker
+next_ack_seq | int gauge | | Next expected acked sequence (idempotent producer)
+next_err_seq | int gauge | | Next expected errored sequence (idempotent producer)
+acked_msgid | int | | Last acked internal message id (idempotent producer)
+
+## cgrp
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+state | string | "up" | Local consumer group handler's state.
+stateage | int gauge | | Time elapsed since last state change (milliseconds).
+join_state | string | "assigned" | Local consumer group handler's join state.
+rebalance_age | int gauge | | Time elapsed since last rebalance (assign or revoke) (milliseconds).
+rebalance_cnt | int | | Total number of rebalances (assign or revoke).
+rebalance_reason | string | | Last rebalance reason, or empty string.
+assignment_size | int gauge | | Current assignment's partition count.
+
+
+## eos
+
+Field | Type | Example | Description
+----- | ---- | ------- | -----------
+idemp_state | string | "Assigned" | Current idempotent producer id state.
+idemp_stateage | int gauge | | Time elapsed since last idemp_state change (milliseconds).
+txn_state | string | "InTransaction" | Current transactional producer state.
+txn_stateage | int gauge | | Time elapsed since last txn_state change (milliseconds).
+txn_may_enq | bool | | Transactional state allows enqueuing (producing) new messages.
+producer_id | int gauge | | The currently assigned Producer ID (or -1).
+producer_epoch | int gauge | | The current epoch (or -1).
+epoch_cnt | int | | The number of Producer ID assignments since start.
+
+
+# Example output
+
+This (prettified) example output is from a short-lived producer using the following command:
+`rdkafka_performance -b localhost -P -t test -T 1000 -Y 'cat >> stats.json'`.
+
+Note: this output is prettified using `jq .`, the JSON object emitted by librdkafka does not contain line breaks.
+
+```json
+{
+ "name": "rdkafka#producer-1",
+ "client_id": "rdkafka",
+ "type": "producer",
+ "ts": 5016483227792,
+ "time": 1527060869,
+ "replyq": 0,
+ "msg_cnt": 22710,
+ "msg_size": 704010,
+ "msg_max": 500000,
+ "msg_size_max": 1073741824,
+ "simple_cnt": 0,
+ "metadata_cache_cnt": 1,
+ "brokers": {
+ "localhost:9092/2": {
+ "name": "localhost:9092/2",
+ "nodeid": 2,
+ "nodename": "localhost:9092",
+ "source": "learned",
+ "state": "UP",
+ "stateage": 9057234,
+ "outbuf_cnt": 0,
+ "outbuf_msg_cnt": 0,
+ "waitresp_cnt": 0,
+ "waitresp_msg_cnt": 0,
+ "tx": 320,
+ "txbytes": 84283332,
+ "txerrs": 0,
+ "txretries": 0,
+ "req_timeouts": 0,
+ "rx": 320,
+ "rxbytes": 15708,
+ "rxerrs": 0,
+ "rxcorriderrs": 0,
+ "rxpartial": 0,
+ "zbuf_grow": 0,
+ "buf_grow": 0,
+ "wakeups": 591067,
+ "int_latency": {
+ "min": 86,
+ "max": 59375,
+ "avg": 23726,
+ "sum": 5694616664,
+ "stddev": 13982,
+ "p50": 28031,
+ "p75": 36095,
+ "p90": 39679,
+ "p95": 43263,
+ "p99": 48639,
+ "p99_99": 59391,
+ "outofrange": 0,
+ "hdrsize": 11376,
+ "cnt": 240012
+ },
+ "rtt": {
+ "min": 1580,
+ "max": 3389,
+ "avg": 2349,
+ "sum": 79868,
+ "stddev": 474,
+ "p50": 2319,
+ "p75": 2543,
+ "p90": 3183,
+ "p95": 3199,
+ "p99": 3391,
+ "p99_99": 3391,
+ "outofrange": 0,
+ "hdrsize": 13424,
+ "cnt": 34
+ },
+ "throttle": {
+ "min": 0,
+ "max": 0,
+ "avg": 0,
+ "sum": 0,
+ "stddev": 0,
+ "p50": 0,
+ "p75": 0,
+ "p90": 0,
+ "p95": 0,
+ "p99": 0,
+ "p99_99": 0,
+ "outofrange": 0,
+ "hdrsize": 17520,
+ "cnt": 34
+ },
+ "toppars": {
+ "test-1": {
+ "topic": "test",
+ "partition": 1
+ }
+ }
+ },
+ "localhost:9093/3": {
+ "name": "localhost:9093/3",
+ "nodeid": 3,
+ "nodename": "localhost:9093",
+ "source": "learned",
+ "state": "UP",
+ "stateage": 9057209,
+ "outbuf_cnt": 0,
+ "outbuf_msg_cnt": 0,
+ "waitresp_cnt": 0,
+ "waitresp_msg_cnt": 0,
+ "tx": 310,
+ "txbytes": 84301122,
+ "txerrs": 0,
+ "txretries": 0,
+ "req_timeouts": 0,
+ "rx": 310,
+ "rxbytes": 15104,
+ "rxerrs": 0,
+ "rxcorriderrs": 0,
+ "rxpartial": 0,
+ "zbuf_grow": 0,
+ "buf_grow": 0,
+ "wakeups": 607956,
+ "int_latency": {
+ "min": 82,
+ "max": 58069,
+ "avg": 23404,
+ "sum": 5617432101,
+ "stddev": 14021,
+ "p50": 27391,
+ "p75": 35839,
+ "p90": 39679,
+ "p95": 42751,
+ "p99": 48639,
+ "p99_99": 58111,
+ "outofrange": 0,
+ "hdrsize": 11376,
+ "cnt": 240016
+ },
+ "rtt": {
+ "min": 1704,
+ "max": 3572,
+ "avg": 2493,
+ "sum": 87289,
+ "stddev": 559,
+ "p50": 2447,
+ "p75": 2895,
+ "p90": 3375,
+ "p95": 3407,
+ "p99": 3583,
+ "p99_99": 3583,
+ "outofrange": 0,
+ "hdrsize": 13424,
+ "cnt": 35
+ },
+ "throttle": {
+ "min": 0,
+ "max": 0,
+ "avg": 0,
+ "sum": 0,
+ "stddev": 0,
+ "p50": 0,
+ "p75": 0,
+ "p90": 0,
+ "p95": 0,
+ "p99": 0,
+ "p99_99": 0,
+ "outofrange": 0,
+ "hdrsize": 17520,
+ "cnt": 35
+ },
+ "toppars": {
+ "test-0": {
+ "topic": "test",
+ "partition": 0
+ }
+ }
+ },
+ "localhost:9094/4": {
+ "name": "localhost:9094/4",
+ "nodeid": 4,
+ "nodename": "localhost:9094",
+ "source": "learned",
+ "state": "UP",
+ "stateage": 9057207,
+ "outbuf_cnt": 0,
+ "outbuf_msg_cnt": 0,
+ "waitresp_cnt": 0,
+ "waitresp_msg_cnt": 0,
+ "tx": 1,
+ "txbytes": 25,
+ "txerrs": 0,
+ "txretries": 0,
+ "req_timeouts": 0,
+ "rx": 1,
+ "rxbytes": 272,
+ "rxerrs": 0,
+ "rxcorriderrs": 0,
+ "rxpartial": 0,
+ "zbuf_grow": 0,
+ "buf_grow": 0,
+ "wakeups": 4,
+ "int_latency": {
+ "min": 0,
+ "max": 0,
+ "avg": 0,
+ "sum": 0,
+ "stddev": 0,
+ "p50": 0,
+ "p75": 0,
+ "p90": 0,
+ "p95": 0,
+ "p99": 0,
+ "p99_99": 0,
+ "outofrange": 0,
+ "hdrsize": 11376,
+ "cnt": 0
+ },
+ "rtt": {
+ "min": 0,
+ "max": 0,
+ "avg": 0,
+ "sum": 0,
+ "stddev": 0,
+ "p50": 0,
+ "p75": 0,
+ "p90": 0,
+ "p95": 0,
+ "p99": 0,
+ "p99_99": 0,
+ "outofrange": 0,
+ "hdrsize": 13424,
+ "cnt": 0
+ },
+ "throttle": {
+ "min": 0,
+ "max": 0,
+ "avg": 0,
+ "sum": 0,
+ "stddev": 0,
+ "p50": 0,
+ "p75": 0,
+ "p90": 0,
+ "p95": 0,
+ "p99": 0,
+ "p99_99": 0,
+ "outofrange": 0,
+ "hdrsize": 17520,
+ "cnt": 0
+ },
+ "toppars": {}
+ }
+ },
+ "topics": {
+ "test": {
+ "topic": "test",
+ "metadata_age": 9060,
+ "batchsize": {
+ "min": 99,
+ "max": 391805,
+ "avg": 272593,
+ "sum": 18808985,
+ "stddev": 180408,
+ "p50": 393215,
+ "p75": 393215,
+ "p90": 393215,
+ "p95": 393215,
+ "p99": 393215,
+ "p99_99": 393215,
+ "outofrange": 0,
+ "hdrsize": 14448,
+ "cnt": 69
+ },
+ "batchcnt": {
+ "min": 1,
+ "max": 10000,
+ "avg": 6956,
+ "sum": 480028,
+ "stddev": 4608,
+ "p50": 10047,
+ "p75": 10047,
+ "p90": 10047,
+ "p95": 10047,
+ "p99": 10047,
+ "p99_99": 10047,
+ "outofrange": 0,
+ "hdrsize": 8304,
+ "cnt": 69
+ },
+ "partitions": {
+ "0": {
+ "partition": 0,
+ "broker": 3,
+ "leader": 3,
+ "desired": false,
+ "unknown": false,
+ "msgq_cnt": 1,
+ "msgq_bytes": 31,
+ "xmit_msgq_cnt": 0,
+ "xmit_msgq_bytes": 0,
+ "fetchq_cnt": 0,
+ "fetchq_size": 0,
+ "fetch_state": "none",
+ "query_offset": 0,
+ "next_offset": 0,
+ "app_offset": -1001,
+ "stored_offset": -1001,
+ "commited_offset": -1001,
+ "committed_offset": -1001,
+ "eof_offset": -1001,
+ "lo_offset": -1001,
+ "hi_offset": -1001,
+ "consumer_lag": -1,
+ "txmsgs": 2150617,
+ "txbytes": 66669127,
+ "rxmsgs": 0,
+ "rxbytes": 0,
+ "msgs": 2160510,
+ "rx_ver_drops": 0
+ },
+ "1": {
+ "partition": 1,
+ "broker": 2,
+ "leader": 2,
+ "desired": false,
+ "unknown": false,
+ "msgq_cnt": 0,
+ "msgq_bytes": 0,
+ "xmit_msgq_cnt": 0,
+ "xmit_msgq_bytes": 0,
+ "fetchq_cnt": 0,
+ "fetchq_size": 0,
+ "fetch_state": "none",
+ "query_offset": 0,
+ "next_offset": 0,
+ "app_offset": -1001,
+ "stored_offset": -1001,
+ "commited_offset": -1001,
+ "committed_offset": -1001,
+ "eof_offset": -1001,
+ "lo_offset": -1001,
+ "hi_offset": -1001,
+ "consumer_lag": -1,
+ "txmsgs": 2150136,
+ "txbytes": 66654216,
+ "rxmsgs": 0,
+ "rxbytes": 0,
+ "msgs": 2159735,
+ "rx_ver_drops": 0
+ },
+ "-1": {
+ "partition": -1,
+ "broker": -1,
+ "leader": -1,
+ "desired": false,
+ "unknown": false,
+ "msgq_cnt": 0,
+ "msgq_bytes": 0,
+ "xmit_msgq_cnt": 0,
+ "xmit_msgq_bytes": 0,
+ "fetchq_cnt": 0,
+ "fetchq_size": 0,
+ "fetch_state": "none",
+ "query_offset": 0,
+ "next_offset": 0,
+ "app_offset": -1001,
+ "stored_offset": -1001,
+ "commited_offset": -1001,
+ "committed_offset": -1001,
+ "eof_offset": -1001,
+ "lo_offset": -1001,
+ "hi_offset": -1001,
+ "consumer_lag": -1,
+ "txmsgs": 0,
+ "txbytes": 0,
+ "rxmsgs": 0,
+ "rxbytes": 0,
+ "msgs": 1177,
+ "rx_ver_drops": 0
+ }
+ }
+ }
+ },
+ "tx": 631,
+ "tx_bytes": 168584479,
+ "rx": 631,
+ "rx_bytes": 31084,
+ "txmsgs": 4300753,
+ "txmsg_bytes": 133323343,
+ "rxmsgs": 0,
+ "rxmsg_bytes": 0
+}
+```
diff --git a/fluent-bit/lib/librdkafka-2.1.0/configure b/fluent-bit/lib/librdkafka-2.1.0/configure
new file mode 100755
index 000000000..d27408cc8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/configure
@@ -0,0 +1,214 @@
+#!/usr/bin/env bash
+#
+
+BASHVER=$(expr ${BASH_VERSINFO[0]} \* 1000 + ${BASH_VERSINFO[1]})
+
+if [ "$BASHVER" -lt 3002 ]; then
+ echo "ERROR: mklove requires bash version 3.2 or later but you are using $BASH_VERSION ($BASHVER)"
+ echo " See https://github.com/edenhill/mklove/issues/15"
+ exit 1
+fi
+
+MKL_CONFIGURE_ARGS="$0 $*"
+
+# Load base module
+source mklove/modules/configure.base
+
+# Read some special command line options right away that must be known prior to
+# sourcing modules.
+mkl_in_list "$*" "--no-download" && MKL_NO_DOWNLOAD=1
+# Disable downloads when --help is used to avoid blocking calls.
+mkl_in_list "$*" "--help" && MKL_NO_DOWNLOAD=1
+mkl_in_list "$*" "--debug" && MKL_DEBUG=1
+
+# This is the earliest possible time to check for color support in
+# terminal because mkl_check_terminal_color_support uses mkl_dbg which
+# needs to know if MKL_DEBUG is set
+mkl_check_terminal_color_support
+
+# Delete temporary Makefile and header files on exit.
+trap "{ rm -f $MKL_OUTMK $MKL_OUTH; }" EXIT
+
+
+
+##
+## Load builtin modules
+##
+
+# Builtin options, etc.
+mkl_require builtin
+
+# Host/target support
+mkl_require host
+
+# Compiler detection
+mkl_require cc
+
+
+# Load application provided modules (in current directory), if any.
+for fname in configure.* ; do
+ if [[ $fname = 'configure.*' ]]; then
+ continue
+ fi
+
+ # Skip temporary files
+ if [[ $fname = *~ ]]; then
+ continue
+ fi
+
+ mkl_require $fname
+done
+
+
+
+
+##
+## Argument parsing (options)
+##
+##
+
+_SAVE_ARGS="$*"
+
+# Parse arguments
+while [[ ! -z $@ ]]; do
+ if [[ $1 != --* ]]; then
+ mkl_err "Unknown non-option argument: $1"
+ mkl_usage
+ exit 1
+ fi
+
+ opt=${1#--}
+ shift
+
+ if [[ $opt = *=* ]]; then
+ name="${opt%%=*}"
+ arg="${opt#*=}"
+ eqarg=1
+ else
+ name="$opt"
+ arg=""
+ eqarg=0
+ fi
+
+ safeopt="$(mkl_env_esc $name)"
+
+ if ! mkl_func_exists opt_$safeopt ; then
+ mkl_err "Unknown option $opt"
+ mkl_usage
+ exit 1
+ fi
+
+ # Check if this option needs an argument.
+ reqarg=$(mkl_meta_get "MKL_OPT_ARGS" "$(mkl_env_esc $name)")
+ if [[ ! -z $reqarg ]]; then
+ if [[ $eqarg == 0 && -z $arg ]]; then
+ arg="$1"
+ shift
+
+ if [[ -z $arg && $reqarg != '\*' ]]; then
+ mkl_err "Missing argument to option --$name $reqarg"
+ exit 1
+ fi
+ fi
+ else
+ if [[ ! -z $arg ]]; then
+ mkl_err "Option --$name expects no argument"
+ exit 1
+ fi
+ arg=y
+ fi
+
+ case $name in
+ re|reconfigure)
+ oldcmd=$(head -1 config.log | grep '^# configure exec: ' | \
+ sed -e 's/^\# configure exec: [^ ]*configure//')
+ echo "Reconfiguring: $0 $oldcmd"
+ exec $0 $oldcmd
+ ;;
+
+ list-modules)
+ echo "Modules loaded:"
+ for mod in $MKL_MODULES ; do
+ echo " $mod"
+ done
+ exit 0
+ ;;
+
+ list-checks)
+ echo "Check functions in calling order:"
+ for mf in $MKL_CHECKS ; do
+ mod=${mf%:*}
+ func=${mf#*:}
+ echo -e "${MKL_GREEN}From module $mod:$MKL_CLR_RESET"
+ declare -f $func
+ echo ""
+ done
+ exit 0
+ ;;
+
+ update-modules)
+ fails=0
+ echo "Updating modules"
+ for mod in $MKL_MODULES ; do
+ echo -n "Updating $mod..."
+ if mkl_module_download "$mod" > /dev/null ; then
+ echo -e "${MKL_GREEN}ok${MKL_CLR_RESET}"
+ else
+ echo -e "${MKL_RED}failed${MKL_CLR_RESET}"
+ fails=$(expr $fails + 1)
+ fi
+ done
+ exit $fails
+ ;;
+
+ help)
+ mkl_usage
+ exit 0
+ ;;
+
+ *)
+ opt_$safeopt "$arg" || exit 1
+ mkl_var_append MKL_OPTS_SET "$safeopt"
+ ;;
+ esac
+done
+
+if [[ ! -z $MKL_CLEAN ]]; then
+ mkl_clean
+ exit 0
+fi
+
+# Move away previous log file
+[[ -f $MKL_OUTDBG ]] && mv $MKL_OUTDBG ${MKL_OUTDBG}.old
+
+
+# Create output files
+echo "# configure exec: $0 $_SAVE_ARGS" >> $MKL_OUTDBG
+echo "# On $(date)" >> $MKL_OUTDBG
+
+rm -f $MKL_OUTMK $MKL_OUTH
+
+
+# Load cache file
+mkl_cache_read
+
+# Run checks
+mkl_checks_run
+
+# Check accumulated failures, will not return on failure.
+mkl_check_fails
+
+# Generate outputs
+mkl_generate
+
+# Summarize what happened
+mkl_summary
+
+# Write cache file
+mkl_cache_write
+
+
+echo ""
+echo "Now type 'make' to build"
+trap - EXIT
+exit 0
diff --git a/fluent-bit/lib/librdkafka-2.1.0/configure.self b/fluent-bit/lib/librdkafka-2.1.0/configure.self
new file mode 100644
index 000000000..bb0a975c9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/configure.self
@@ -0,0 +1,331 @@
+#!/bin/bash
+#
+
+mkl_meta_set "description" "name" "librdkafka"
+mkl_meta_set "description" "oneline" "The Apache Kafka C/C++ library"
+mkl_meta_set "description" "long" "Full Apache Kafka protocol support, including producer and consumer"
+mkl_meta_set "description" "copyright" "Copyright (c) 2012-2019 Magnus Edenhill"
+
+# Enable generation of pkg-config .pc file
+mkl_mkvar_set "" GEN_PKG_CONFIG y
+
+
+mkl_require cxx
+mkl_require lib
+mkl_require pic
+mkl_require atomics
+mkl_require good_cflags
+mkl_require socket
+mkl_require zlib
+mkl_require libzstd
+mkl_require libssl
+mkl_require libsasl2
+mkl_require libcurl
+
+# Generate version variables from rdkafka.h hex version define
+# so we can use it as string version when generating a pkg-config file.
+
+verdef=$(grep '^#define *RD_KAFKA_VERSION *0x' src/rdkafka.h | sed 's/^#define *RD_KAFKA_VERSION *\(0x[a-f0-9]*\)\.*$/\1/')
+mkl_require parseversion hex2str "%d.%d.%d" "$verdef" RDKAFKA_VERSION_STR
+
+
+mkl_toggle_option "Development" ENABLE_DEVEL "--enable-devel" "Enable development asserts, checks, etc" "n"
+mkl_toggle_option "Development" ENABLE_VALGRIND "--enable-valgrind" "Enable in-code valgrind suppressions" "n"
+
+mkl_toggle_option "Development" ENABLE_REFCNT_DEBUG "--enable-refcnt-debug" "Enable refcnt debugging" "n"
+
+mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4-ext" "Enable external LZ4 library support (builtin version 1.9.3)" "y"
+mkl_toggle_option "Feature" ENABLE_LZ4_EXT "--enable-lz4" "Deprecated: alias for --enable-lz4-ext" "y"
+
+mkl_toggle_option "Feature" ENABLE_REGEX_EXT "--enable-regex-ext" "Enable external (libc) regex (else use builtin)" "y"
+
+# librdkafka with TSAN won't work with glibc C11 threads on Ubuntu 19.04.
+# This option allows disabling libc-based C11 threads and instead
+# use the builtin tinycthread alternative.
+mkl_toggle_option "Feature" ENABLE_C11THREADS "--enable-c11threads" "Enable detection of C11 threads support in libc" "try"
+
+mkl_toggle_option "Feature" ENABLE_SYSLOG "--enable-syslog" "Enable logging to syslog" "y"
+
+
+function checks {
+
+ # -lrt is needed on linux for clock_gettime: link it if it exists.
+ mkl_lib_check "librt" "" cont CC "-lrt"
+
+ # pthreads required (even if C11 threads available) for rwlocks.
+ mkl_lib_check "libpthread" "" fail CC "-lpthread" \
+ "#include <pthread.h>"
+
+ if [[ $ENABLE_C11THREADS != n ]]; then
+ case "$ENABLE_C11THREADS" in
+ y) local action=fail ;;
+ try) local action=disable ;;
+ *) mkl_err "mklove internal error: invalid value for ENABLE_C11THREADS: $ENABLE_C11THREADS"; exit 1 ;;
+ esac
+ # Use internal tinycthread if C11 threads not available.
+ # Requires -lpthread on glibc c11 threads, thus the use of $LIBS.
+ mkl_lib_check "c11threads" WITH_C11THREADS $action CC "$LIBS" \
+ "
+#include <threads.h>
+
+
+static int start_func (void *arg) {
+ int iarg = *(int *)arg;
+ return iarg;
+}
+
+void foo (void) {
+ thrd_t thr;
+ int arg = 1;
+ if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) {
+ ;
+ }
+}
+"
+ fi
+
+ # Check if dlopen() is available
+ mkl_lib_check "libdl" "WITH_LIBDL" disable CC "-ldl" \
+"
+#include <stdlib.h>
+#include <dlfcn.h>
+void foo (void) {
+ void *h = dlopen(\"__bad_lib\", 0);
+ void *p = dlsym(h, \"sym\");
+ if (p)
+ p = NULL;
+ dlclose(h);
+}"
+
+ if [[ $WITH_LIBDL == "y" ]]; then
+ mkl_allvar_set WITH_PLUGINS WITH_PLUGINS y
+ fi
+
+ # optional libs
+ mkl_check "zlib"
+ mkl_check "libssl"
+ mkl_check "libsasl2"
+ mkl_check "libzstd"
+ mkl_check "libcurl"
+
+ if mkl_lib_check "libm" "" disable CC "-lm" \
+ "#include <math.h>"; then
+ mkl_allvar_set WITH_HDRHISTOGRAM WITH_HDRHISTOGRAM y
+ fi
+
+ # Use builtin lz4 if linking statically or if --disable-lz4-ext is used.
+ if [[ $MKL_SOURCE_DEPS_ONLY != y ]] && [[ $WITH_STATIC_LINKING != y ]] && [[ $ENABLE_LZ4_EXT == y ]]; then
+ mkl_meta_set "liblz4" "static" "liblz4.a"
+ mkl_lib_check "liblz4" "WITH_LZ4_EXT" disable CC "-llz4" \
+ "#include <lz4frame.h>"
+ fi
+
+ if [[ $ENABLE_SYSLOG == y ]]; then
+ mkl_compile_check "syslog" "WITH_SYSLOG" disable CC "" \
+ '
+#include <syslog.h>
+void foo (void) {
+ syslog(LOG_INFO, "test");
+}'
+ fi
+
+ # rapidjson (>=1.1.0) is used in tests to verify statistics data, not used
+ # by librdkafka itself.
+ mkl_compile_check "rapidjson" "WITH_RAPIDJSON" disable CXX "" \
+ "#include <rapidjson/schema.h>"
+
+ # Snappy support is built-in
+ mkl_allvar_set WITH_SNAPPY WITH_SNAPPY y
+
+ # Enable sockem (tests)
+ mkl_allvar_set WITH_SOCKEM WITH_SOCKEM y
+
+ if [[ "$WITH_SSL" == "y" ]]; then
+ # SASL SCRAM requires base64 encoding from OpenSSL
+ mkl_allvar_set WITH_SASL_SCRAM WITH_SASL_SCRAM y
+ # SASL OAUTHBEARER's default unsecured JWS implementation
+ # requires base64 encoding from OpenSSL
+ mkl_allvar_set WITH_SASL_OAUTHBEARER WITH_SASL_OAUTHBEARER y
+
+ if [[ $WITH_CURL == y ]]; then
+ mkl_allvar_set WITH_OAUTHBEARER_OIDC WITH_OAUTHBEARER_OIDC y
+ fi
+ fi
+
+ # CRC32C: check for crc32 instruction support.
+ # This is also checked during runtime using cpuid.
+ mkl_compile_check crc32chw WITH_CRC32C_HW disable CC "" \
+ "
+#include <inttypes.h>
+#include <stdio.h>
+#define LONGx1 \"8192\"
+#define LONGx2 \"16384\"
+void foo (void) {
+ const char *n = \"abcdefghijklmnopqrstuvwxyz0123456789\";
+ uint64_t c0 = 0, c1 = 1, c2 = 2;
+ uint64_t s;
+ uint32_t eax = 1, ecx;
+ __asm__(\"cpuid\"
+ : \"=c\"(ecx)
+ : \"a\"(eax)
+ : \"%ebx\", \"%edx\");
+ __asm__(\"crc32b\t\" \"(%1), %0\"
+ : \"=r\"(c0)
+ : \"r\"(n), \"0\"(c0));
+ __asm__(\"crc32q\t\" \"(%3), %0\n\t\"
+ \"crc32q\t\" LONGx1 \"(%3), %1\n\t\"
+ \"crc32q\t\" LONGx2 \"(%3), %2\"
+ : \"=r\"(c0), \"=r\"(c1), \"=r\"(c2)
+ : \"r\"(n), \"0\"(c0), \"1\"(c1), \"2\"(c2));
+ s = c0 + c1 + c2;
+ printf(\"avoiding unused code removal by printing %d, %d, %d\n\", (int)s, (int)eax, (int)ecx);
+}
+"
+
+
+ # Check for libc regex
+ if [[ $ENABLE_REGEX_EXT == y ]]; then
+ mkl_compile_check "regex" "HAVE_REGEX" disable CC "" \
+"
+#include <stddef.h>
+#include <regex.h>
+void foo (void) {
+ regcomp(NULL, NULL, 0);
+ regexec(NULL, NULL, 0, NULL, 0);
+ regerror(0, NULL, NULL, 0);
+ regfree(NULL);
+}"
+ fi
+
+ # Older g++ (<=4.1?) gives invalid warnings for the C++ code.
+ mkl_mkvar_append CXXFLAGS CXXFLAGS "-Wno-non-virtual-dtor"
+
+ # Required on SunOS
+ if [[ $MKL_DISTRO == "sunos" ]]; then
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-D_POSIX_PTHREAD_SEMANTICS -D_REENTRANT -D__EXTENSIONS__"
+ # Source defines _POSIX_C_SOURCE to 200809L for Solaris, and this is
+ # incompatible on that platform with compilers < c99.
+ mkl_mkvar_append CFLAGS CFLAGS "-std=c99"
+ fi
+
+ # Check if rand_r() is available
+ mkl_compile_check "rand_r" "HAVE_RAND_R" disable CC "" \
+"#include <stdlib.h>
+void foo (void) {
+ unsigned int seed = 0xbeaf;
+ (void)rand_r(&seed);
+}"
+
+ # Check if strndup() is available (isn't on Solaris 10)
+ mkl_compile_check "strndup" "HAVE_STRNDUP" disable CC "" \
+"#include <string.h>
+int foo (void) {
+ return strndup(\"hi\", 2) ? 0 : 1;
+}"
+
+ # Check if strlcpy() is available
+ mkl_compile_check "strlcpy" "HAVE_STRLCPY" disable CC "" \
+"
+#define _DARWIN_C_SOURCE
+#include <string.h>
+int foo (void) {
+ char dest[4];
+ return strlcpy(dest, \"something\", sizeof(dest));
+}"
+
+ # Check if strerror_r() is available.
+ # The check for GNU vs XSI is done in rdposix.h since
+ # we can't rely on all defines to be set here (_GNU_SOURCE).
+ mkl_compile_check "strerror_r" "HAVE_STRERROR_R" disable CC "" \
+"#include <string.h>
+const char *foo (void) {
+ static char buf[64];
+ strerror_r(1, buf, sizeof(buf));
+ return buf;
+}"
+
+ # Check if strcasestr() is available.
+ mkl_compile_check "strcasestr" "HAVE_STRCASESTR" disable CC "" \
+"
+#define _GNU_SOURCE
+#include <string.h>
+char *foo (const char *needle) {
+ return strcasestr(\"the hay\", needle);
+}"
+
+
+ # See if GNU's pthread_setname_np() is available, and in what form.
+ mkl_compile_check "pthread_setname_gnu" "HAVE_PTHREAD_SETNAME_GNU" disable CC "-D_GNU_SOURCE -lpthread" \
+'
+#include <pthread.h>
+
+void foo (void) {
+ pthread_setname_np(pthread_self(), "abc");
+}
+' || \
+ mkl_compile_check "pthread_setname_darwin" "HAVE_PTHREAD_SETNAME_DARWIN" disable CC "-D_DARWIN_C_SOURCE -lpthread" \
+'
+#include <pthread.h>
+
+void foo (void) {
+ pthread_setname_np("abc");
+}
+' || \
+ mkl_compile_check "pthread_setname_freebsd" "HAVE_PTHREAD_SETNAME_FREEBSD" disable CC "-lpthread" \
+'
+#include <pthread.h>
+#include <pthread_np.h>
+
+void foo (void) {
+ pthread_set_name_np(pthread_self(), "abc");
+}
+'
+
+ # Figure out what tool to use for dumping public symbols.
+ # We rely on configure.cc setting up $NM if it exists.
+ if mkl_env_check "nm" "" cont "NM" ; then
+ # nm by future mk var
+ if [[ $MKL_DISTRO == "osx" || $MKL_DISTRO == "aix" ]]; then
+ mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -g'
+ else
+ mkl_mkvar_set SYMDUMPER SYMDUMPER '$(NM) -D'
+ fi
+ else
+ # Fake symdumper
+ mkl_mkvar_set SYMDUMPER SYMDUMPER 'echo'
+ fi
+
+ # The linker-script generator (lds-gen.py) requires python3
+ if [[ $WITH_LDS == y ]]; then
+ if ! mkl_command_check python3 "HAVE_PYTHON" "disable" "python3 -V"; then
+ mkl_err "disabling linker-script since python3 is not available"
+ mkl_mkvar_set WITH_LDS WITH_LDS "n"
+ fi
+ fi
+
+ if [[ "$ENABLE_VALGRIND" == "y" ]]; then
+ mkl_compile_check valgrind WITH_VALGRIND fail CC "" \
+ "#include <valgrind/memcheck.h>"
+ fi
+
+ # getrusage() is used by the test framework
+ mkl_compile_check "getrusage" "HAVE_GETRUSAGE" disable CC "" \
+'
+#include <stdio.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+
+
+void foo (void) {
+ struct rusage ru;
+ if (getrusage(RUSAGE_SELF, &ru) == -1)
+ return;
+ printf("ut %ld, st %ld, maxrss %ld, nvcsw %ld\n",
+ (long int)ru.ru_utime.tv_usec,
+ (long int)ru.ru_stime.tv_usec,
+ (long int)ru.ru_maxrss,
+ (long int)ru.ru_nvcsw);
+}'
+
+}
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/debian/.gitignore
new file mode 100644
index 000000000..eb66d4d31
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/.gitignore
@@ -0,0 +1,6 @@
+*.log
+files
+librdkafka-dev
+librdkafka1-dbg
+librdkafka1
+tmp
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/changelog b/fluent-bit/lib/librdkafka-2.1.0/debian/changelog
new file mode 100644
index 000000000..352f22cfe
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/changelog
@@ -0,0 +1,111 @@
+librdkafka (0.9.3-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Add Build-Depends to liblz4-dev to enable LZ4 compression.
+ * Add copyright for xxhash.[hc] and regexp.[hc].
+ * Update librdkafka1.symbols for 0.9.2 and 0.9.3.
+ * Use hardening build flags.
+ * Add myself to Uploaders.
+ * Switch to debhelper 10.
+ * Move packaging to alioth, under the pkg-kafka team.
+
+ -- Christos Trochalakis <ctrochalakis@debian.org> Tue, 24 Jan 2017 17:33:40 +0200
+
+librdkafka (0.9.1-1) unstable; urgency=medium
+
+ * New upstream release (Closes: #816047).
+ - Remove 0001-mklove-update-add-disable-silent-rules patch (not needed)
+ - Remove 0002_hexdump_use_size_t_instead_of_int patch (not needed)
+ - Fixes a GCC6-identified warning and possible FTBFS. (Closes: #811596)
+ * Add Build-Depends to libssl-dev/libsasl2-dev, to enable Kafka 0.9
+ encryption and authentication.
+ * Update the long description to mention 0.9 protocol support and also the
+ high-performance aspect, by copying upstream's description almost
+ verbatim.
+ * Bump Standards-Version to 3.9.8 (no changes needed).
+ * Switch Vcs-Git to GitHub's https (from git://)
+ * Migrate from our own -dbg package to the automatic -dbgsym package.
+ * Update librdkafka1.symbols with 0.9.0' and 0.9.1's new symbols.
+ * Ship the new C++ library, by shipping a new binary package, librdkafka++1.
+ * Ship pkg-config files in the -dev package for both the C and C++
+ libraries.
+ * Replace the perl build-dependency by python, as the build system (among
+ others) now requires it.
+
+ -- Faidon Liambotis <paravoid@debian.org> Mon, 30 May 2016 16:07:33 +0300
+
+librdkafka (0.8.6-1.1) unstable; urgency=medium
+
+ * Non-maintainer upload.
+ * Use size_t instead of int for hexdump to fix FTBFS (Closes: 799993):
+ assuming signed overflow does not occur when
+ assuming that (X + c) >= X is always true
+
+ -- YunQiang Su <syq@debian.org> Mon, 21 Dec 2015 21:20:59 +0800
+
+librdkafka (0.8.6-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Backport upstream commit f6fd0da, adding --disable-silent-rules
+ compatibility support to mklove. (Closes: #788742)
+
+ -- Faidon Liambotis <paravoid@debian.org> Sun, 19 Jul 2015 01:36:18 +0300
+
+librdkafka (0.8.5-2) unstable; urgency=medium
+
+ * Install rdkafka.pc in the right, multiarch location. (Closes: #766759)
+
+ -- Faidon Liambotis <paravoid@debian.org> Sun, 26 Oct 2014 06:47:07 +0200
+
+librdkafka (0.8.5-1) unstable; urgency=medium
+
+ * New upstream release.
+ - Fixes kFreeBSD FTBFS.
+ * Ship rdkafka.pc pkg-config in librdkafka-dev.
+
+ -- Faidon Liambotis <paravoid@debian.org> Fri, 24 Oct 2014 18:03:22 +0300
+
+librdkafka (0.8.4-1) unstable; urgency=medium
+
+ * New upstream release, including a new build system.
+ - Add Build-Depends on perl, required by configure.
+ - Support multiarch library paths.
+ - Better detection of architecture atomic builtins, supporting more
+ architectures. (Closes: #739930)
+ - Various portability bugs fixed. (Closes: #730506)
+ - Update debian/librdkafka1.symbols.
+ * Convert to a multiarch package.
+ * Switch to Architecture: any, because of renewed upstream portability.
+ * Update debian/copyright to add src/ before Files: paths.
+ * Update Standards-Version to 3.9.6, no changes needed.
+ * Ship only the C library for now, not the new C++ library; the latter is
+ still in flux in some ways and will probably be shipped in a separate
+ package in a future release.
+
+ -- Faidon Liambotis <paravoid@debian.org> Wed, 22 Oct 2014 23:57:24 +0300
+
+librdkafka (0.8.3-1) unstable; urgency=medium
+
+ * New upstream release.
+ - Multiple internal symbols hidden; breaks ABI without a SONAME bump, but
+ these were internal and should not break any applications, packaged or
+ not.
+ * Update Standards-Version to 3.9.5, no changes needed.
+
+ -- Faidon Liambotis <paravoid@debian.org> Tue, 18 Feb 2014 02:21:43 +0200
+
+librdkafka (0.8.1-1) unstable; urgency=medium
+
+ * New upstream release.
+ - Multiple fixes to FTBFS on various architectures. (Closes: #730506)
+ - Remove dh_auto_clean override, fixed upstream.
+ * Limit the set of architectures: upstream currently relies on 64-bit atomic
+ operations that several Debian architectures do not support.
+
+ -- Faidon Liambotis <paravoid@debian.org> Thu, 05 Dec 2013 16:53:28 +0200
+
+librdkafka (0.8.0-1) unstable; urgency=low
+
+ * Initial release. (Closes: #710271)
+
+ -- Faidon Liambotis <paravoid@debian.org> Mon, 04 Nov 2013 16:50:07 +0200
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/compat b/fluent-bit/lib/librdkafka-2.1.0/debian/compat
new file mode 100644
index 000000000..ec635144f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/control b/fluent-bit/lib/librdkafka-2.1.0/debian/control
new file mode 100644
index 000000000..bddaf4724
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/control
@@ -0,0 +1,71 @@
+Source: librdkafka
+Priority: optional
+Maintainer: Faidon Liambotis <paravoid@debian.org>
+Uploaders: Christos Trochalakis <ctrochalakis@debian.org>
+Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, liblz4-dev, python3
+Standards-Version: 3.9.7
+Section: libs
+Homepage: https://github.com/edenhill/librdkafka
+Vcs-Git: https://anonscm.debian.org/cgit/pkg-kafka/librdkafka.git -b debian
+Vcs-Browser: https://anonscm.debian.org/cgit/pkg-kafka/librdkafka.git
+
+Package: librdkafka1
+Architecture: any
+Multi-Arch: same
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: library implementing the Apache Kafka protocol
+ librdkafka is a C library implementation of the Apache Kafka protocol,
+ containing both Producer and Consumer support. It was designed with message
+ delivery reliability and high performance in mind, current figures exceed
+ 800000 msgs/second for the producer and 3 million msgs/second for the
+ consumer. Supports broker version 0.8 and later.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the C shared library.
+
+Package: librdkafka++1
+Architecture: any
+Multi-Arch: same
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: library implementing the Apache Kafka protocol (C++ bindings)
+ librdkafka is a C library implementation of the Apache Kafka protocol,
+ containing both Producer and Consumer support. It was designed with message
+ delivery reliability and high performance in mind, current figures exceed
+ 800000 msgs/second for the producer and 3 million msgs/second for the
+ consumer. Supports broker version 0.8 and later.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the C++ shared library.
+
+Package: librdkafka-dev
+Section: libdevel
+Architecture: any
+Multi-Arch: same
+Depends: librdkafka1 (= ${binary:Version}), librdkafka++1 (= ${binary:Version}), ${misc:Depends}
+Description: library implementing the Apache Kafka protocol (development headers)
+ librdkafka is a C library implementation of the Apache Kafka protocol,
+ containing both Producer and Consumer support. It was designed with message
+ delivery reliability and high performance in mind, current figures exceed
+ 800000 msgs/second for the producer and 3 million msgs/second for the
+ consumer. Supports broker version 0.8 and later.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the development headers.
+
+Package: librdkafka1-dbg
+Section: debug
+Priority: extra
+Architecture: any
+Multi-Arch: same
+Depends: librdkafka1 (= ${binary:Version}), ${misc:Depends}
+Description: library implementing the Apache Kafka protocol (debugging symbols)
+ librdkafka is a C implementation of the Apache Kafka protocol. It currently
+ implements the 0.8 version of the protocol and can be used to develop both
+ Producers and Consumers.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the debugging symbols.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/copyright b/fluent-bit/lib/librdkafka-2.1.0/debian/copyright
new file mode 100644
index 000000000..aa6c33cce
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/copyright
@@ -0,0 +1,99 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: librdkafka
+Source: https://github.com/edenhill/librdkafka
+
+License: BSD-2-clause
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ .
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+Files: *
+Copyright: 2012-2015, Magnus Edenhill
+License: BSD-2-clause
+
+Files: src/rdcrc32.c src/rdcrc32.h
+Copyright: 2006-2012, Thomas Pircher <tehpeh@gmx.net>
+License: MIT
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ .
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+Files: src/snappy.c src/snappy.h src/snappy_compat.h
+Copyright: 2005, Google Inc.
+ 2011, Intel Corporation
+License: BSD-3-clause
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ .
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Files: src/rdxxhash.h src/rdxxhash.c
+Copyright: 2012-2014, Yann Collet
+License: BSD-2-clause
+
+Files: src/regexp.h src/regexp.c
+Copyright: n/a
+License: public-domain
+
+License: public-domain
+ The files tagged with this license contain the following paragraphs:
+ .
+ These libraries are in the public domain (or the equivalent where that is not
+ possible). You can do anything you want with them. You have no legal
+ obligation to do anything else, although I appreciate attribution.
+
+Files: debian/*
+Copyright: 2013 Faidon Liambotis <paravoid@debian.org>
+License: BSD-2-clause
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/gbp.conf b/fluent-bit/lib/librdkafka-2.1.0/debian/gbp.conf
new file mode 100644
index 000000000..b2a0f02e3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/gbp.conf
@@ -0,0 +1,9 @@
+[buildpackage]
+upstream-tree=tag
+upstream-branch=master
+debian-branch=debian
+upstream-tag=%(version)s
+debian-tag=debian/%(version)s
+no-create-orig = True
+tarball-dir = ../tarballs
+export-dir = ../build-area
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka++1.install b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka++1.install
new file mode 100644
index 000000000..897ddc166
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka++1.install
@@ -0,0 +1 @@
+usr/lib/*/librdkafka++.so.*
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.examples b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.examples
new file mode 100644
index 000000000..b45032efe
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.examples
@@ -0,0 +1,2 @@
+examples/rdkafka_example.c
+examples/rdkafka_performance.c
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.install b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.install
new file mode 100644
index 000000000..fd0c8f721
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka-dev.install
@@ -0,0 +1,9 @@
+usr/include/*/rdkafka.h
+usr/include/*/rdkafkacpp.h
+usr/include/*/rdkafka_mock.h
+usr/lib/*/librdkafka.a
+usr/lib/*/librdkafka.so
+usr/lib/*/librdkafka++.a
+usr/lib/*/librdkafka++.so
+usr/lib/*/pkgconfig/rdkafka.pc
+usr/lib/*/pkgconfig/rdkafka++.pc
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.docs b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.docs
new file mode 100644
index 000000000..316807c6c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.docs
@@ -0,0 +1,5 @@
+README.md
+INTRODUCTION.md
+CONFIGURATION.md
+STATISTICS.md
+CHANGELOG.md
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.install b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.install
new file mode 100644
index 000000000..72e443030
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.install
@@ -0,0 +1 @@
+usr/lib/*/librdkafka.so.*
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.symbols b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.symbols
new file mode 100644
index 000000000..1b0ad9b37
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/librdkafka1.symbols
@@ -0,0 +1,135 @@
+librdkafka.so.1 librdkafka1 #MINVER#
+* Build-Depends-Package: librdkafka-dev
+ rd_kafka_assign@Base 0.9.0
+ rd_kafka_assignment@Base 0.9.0
+ rd_kafka_brokers_add@Base 0.8.0
+ rd_kafka_commit@Base 0.9.0
+ rd_kafka_commit_message@Base 0.9.0
+ rd_kafka_commit_queue@Base 0.9.2
+ rd_kafka_committed@Base 0.9.1
+ rd_kafka_conf_destroy@Base 0.8.0
+ rd_kafka_conf_dump@Base 0.8.3
+ rd_kafka_conf_dump_free@Base 0.8.3
+ rd_kafka_conf_dup@Base 0.8.3
+ rd_kafka_conf_get@Base 0.9.0
+ rd_kafka_conf_new@Base 0.8.0
+ rd_kafka_conf_properties_show@Base 0.8.0
+ rd_kafka_conf_set@Base 0.8.0
+ rd_kafka_conf_set_closesocket_cb@Base 0.9.3
+ rd_kafka_conf_set_connect_cb@Base 0.9.3
+ rd_kafka_conf_set_consume_cb@Base 0.9.0
+ rd_kafka_conf_set_default_topic_conf@Base 0.9.0
+ rd_kafka_conf_set_dr_cb@Base 0.8.0
+ rd_kafka_conf_set_dr_msg_cb@Base 0.8.4
+ rd_kafka_conf_set_error_cb@Base 0.8.0
+ rd_kafka_conf_set_events@Base 0.9.2
+ rd_kafka_conf_set_log_cb@Base 0.8.4
+ rd_kafka_conf_set_offset_commit_cb@Base 0.9.0
+ rd_kafka_conf_set_opaque@Base 0.8.0
+ rd_kafka_conf_set_open_cb@Base 0.8.4
+ rd_kafka_conf_set_rebalance_cb@Base 0.9.0
+ rd_kafka_conf_set_socket_cb@Base 0.8.4
+ rd_kafka_conf_set_stats_cb@Base 0.8.0
+ rd_kafka_conf_set_throttle_cb@Base 0.9.0
+ rd_kafka_consume@Base 0.8.0
+ rd_kafka_consume_batch@Base 0.8.0
+ rd_kafka_consume_batch_queue@Base 0.8.4
+ rd_kafka_consume_callback@Base 0.8.0
+ rd_kafka_consume_callback_queue@Base 0.8.4
+ rd_kafka_consume_queue@Base 0.8.4
+ rd_kafka_consume_start@Base 0.8.0
+ rd_kafka_consume_start_queue@Base 0.8.4
+ rd_kafka_consume_stop@Base 0.8.0
+ rd_kafka_consumer_close@Base 0.9.0
+ rd_kafka_consumer_poll@Base 0.9.0
+ rd_kafka_destroy@Base 0.8.0
+ rd_kafka_dump@Base 0.8.0
+ rd_kafka_err2name@Base 0.9.1
+ rd_kafka_err2str@Base 0.8.0
+ rd_kafka_errno2err@Base 0.8.3
+ rd_kafka_errno@Base 0.9.1
+ rd_kafka_event_destroy@Base 0.9.2
+ rd_kafka_event_error@Base 0.9.2
+ rd_kafka_event_error_string@Base 0.9.2
+ rd_kafka_event_log@Base 0.9.2
+ rd_kafka_event_message_array@Base 0.9.2
+ rd_kafka_event_message_count@Base 0.9.2
+ rd_kafka_event_message_next@Base 0.9.2
+ rd_kafka_event_name@Base 0.9.2
+ rd_kafka_event_opaque@Base 0.9.2
+ rd_kafka_event_topic_partition@Base 0.9.2
+ rd_kafka_event_topic_partition_list@Base 0.9.2
+ rd_kafka_event_type@Base 0.9.2
+ rd_kafka_flush@Base 0.9.2
+ rd_kafka_get_debug_contexts@Base 0.9.0
+ rd_kafka_get_err_descs@Base 0.9.1
+ rd_kafka_get_watermark_offsets@Base 0.9.1
+ rd_kafka_group_list_destroy@Base 0.9.1
+ rd_kafka_last_error@Base 0.9.1
+ rd_kafka_list_groups@Base 0.9.1
+ rd_kafka_log_print@Base 0.8.0
+ rd_kafka_log_syslog@Base 0.8.0
+ rd_kafka_mem_free@Base 0.9.1
+ rd_kafka_memberid@Base 0.9.0
+ rd_kafka_message_destroy@Base 0.8.0
+ rd_kafka_message_timestamp@Base 0.9.1
+ rd_kafka_metadata@Base 0.8.4
+ rd_kafka_metadata_destroy@Base 0.8.4
+ rd_kafka_msg_partitioner_consistent@Base 0.9.0
+ rd_kafka_msg_partitioner_consistent_random@Base 0.9.1
+ rd_kafka_msg_partitioner_random@Base 0.8.0
+ rd_kafka_name@Base 0.8.0
+ rd_kafka_new@Base 0.8.0
+ rd_kafka_offset_store@Base 0.8.3
+ rd_kafka_opaque@Base 0.8.4
+ rd_kafka_outq_len@Base 0.8.0
+ rd_kafka_pause_partitions@Base 0.9.1
+ rd_kafka_poll@Base 0.8.0
+ rd_kafka_poll_set_consumer@Base 0.9.0
+ rd_kafka_position@Base 0.9.1
+ rd_kafka_produce@Base 0.8.0
+ rd_kafka_produce_batch@Base 0.8.4
+ rd_kafka_query_watermark_offsets@Base 0.9.1
+ rd_kafka_queue_destroy@Base 0.8.4
+ rd_kafka_queue_forward@Base 0.9.2
+ rd_kafka_queue_get_consumer@Base 0.9.2
+ rd_kafka_queue_get_main@Base 0.9.2
+ rd_kafka_queue_io_event_enable@Base 0.9.2
+ rd_kafka_queue_length@Base 0.9.2
+ rd_kafka_queue_new@Base 0.8.4
+ rd_kafka_queue_poll@Base 0.9.2
+ rd_kafka_resume_partitions@Base 0.9.1
+ rd_kafka_seek@Base 0.9.0
+ rd_kafka_set_log_level@Base 0.8.0
+ rd_kafka_set_logger@Base 0.8.0
+ rd_kafka_subscribe@Base 0.9.0
+ rd_kafka_subscription@Base 0.9.0
+ rd_kafka_thread_cnt@Base 0.8.0
+ rd_kafka_topic_conf_destroy@Base 0.8.0
+ rd_kafka_topic_conf_dump@Base 0.8.3
+ rd_kafka_topic_conf_dup@Base 0.8.3
+ rd_kafka_topic_conf_get@Base 0.9.0
+ rd_kafka_topic_conf_new@Base 0.8.0
+ rd_kafka_topic_conf_set@Base 0.8.0
+ rd_kafka_topic_conf_set_opaque@Base 0.8.0
+ rd_kafka_topic_conf_set_partitioner_cb@Base 0.8.0
+ rd_kafka_topic_destroy@Base 0.8.0
+ rd_kafka_topic_name@Base 0.8.0
+ rd_kafka_topic_new@Base 0.8.0
+ rd_kafka_topic_opaque@Base 0.9.0
+ rd_kafka_topic_partition_available@Base 0.8.0
+ rd_kafka_topic_partition_destroy@Base 0.9.2
+ rd_kafka_topic_partition_list_add@Base 0.9.0
+ rd_kafka_topic_partition_list_add_range@Base 0.9.0
+ rd_kafka_topic_partition_list_copy@Base 0.9.0
+ rd_kafka_topic_partition_list_del@Base 0.9.1
+ rd_kafka_topic_partition_list_del_by_idx@Base 0.9.1
+ rd_kafka_topic_partition_list_destroy@Base 0.9.0
+ rd_kafka_topic_partition_list_find@Base 0.9.1
+ rd_kafka_topic_partition_list_new@Base 0.9.0
+ rd_kafka_topic_partition_list_set_offset@Base 0.9.1
+ rd_kafka_unsubscribe@Base 0.9.0
+ rd_kafka_version@Base 0.8.1
+ rd_kafka_version_str@Base 0.8.1
+ rd_kafka_wait_destroyed@Base 0.8.0
+ rd_kafka_yield@Base 0.9.0
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/rules b/fluent-bit/lib/librdkafka-2.1.0/debian/rules
new file mode 100755
index 000000000..a712cbd70
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/rules
@@ -0,0 +1,19 @@
+#!/usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+export DPKG_EXPORT_BUILDFLAGS=1
+export DEB_BUILD_MAINT_OPTIONS=hardening=+bindnow,-pie
+include /usr/share/dpkg/buildflags.mk
+
+%:
+ dh $@ --without systemd,autoreconf
+
+override_dh_auto_configure:
+ # Use dependency installation for missing dependencies, zstd in particular.
+ # These will be statically linked.
+ dh_auto_configure -- --install-deps
+
+override_dh_strip:
+ dh_strip --dbg-package=librdkafka1-dbg
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/source/format b/fluent-bit/lib/librdkafka-2.1.0/debian/source/format
new file mode 100644
index 000000000..163aaf8d8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/debian/watch b/fluent-bit/lib/librdkafka-2.1.0/debian/watch
new file mode 100644
index 000000000..7b3bdea11
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/debian/watch
@@ -0,0 +1,2 @@
+version=3
+https://github.com/edenhill/librdkafka/tags .*/v?(\d[\d\.]*)\.tar\.gz
diff --git a/fluent-bit/lib/librdkafka-2.1.0/dev-conf.sh b/fluent-bit/lib/librdkafka-2.1.0/dev-conf.sh
new file mode 100755
index 000000000..23931a77e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/dev-conf.sh
@@ -0,0 +1,123 @@
+#!/bin/bash
+#
+# librdkafka - Apache Kafka C library
+#
+# Copyright (c) 2018 Magnus Edenhill
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+#
+
+#
+# Configure librdkafka for development
+#
+# Usage:
+# ./dev-conf.sh - Build with settings in dev-conf.sh
+# ./dev-conf.sh asan|tsan - ... and ASAN or TSAN
+# ./dev-conf.sh clean - Non-development clean build
+#
+
+set -e
+
+build () {
+ local btype="$1"
+ local opts="$2"
+
+ echo "$btype configuration options: $opts"
+ ./configure --clean
+ ./configure $opts
+
+ make clean
+ make -j
+ (cd tests ; make -j build)
+
+ echo "$btype build done"
+}
+
+OPTS=""
+
+case "$1" in
+ clean)
+ build Clean
+ exit $?
+ ;;
+ asan)
+ FSAN='-fsanitize=address'
+ ;;
+ tsan)
+ FSAN='-fsanitize=thread'
+ # C11 threads in glibc don't play nice with TSAN,
+ # so use the builtin tinycthreads instead.
+ OPTS="$OPTS --disable-c11threads"
+ ;;
+ ubsan)
+ FSAN='-fsanitize=undefined -fsanitize-undefined-trap-on-error -fno-omit-frame-pointer'
+ ;;
+ gprof)
+ # gprof
+ OPTS="$OPTS --enable-profiling"
+ ;;
+ "")
+ ;;
+ *)
+ echo "Usage: $0 [clean|asan|tsan|ubsan|gprof]"
+ exit 1
+ ;;
+esac
+
+
+if [[ $1 != clean ]]; then
+ # enable strict C99, C++98 checks.
+ export CFLAGS="$CFLAGS -std=c99"
+ export CXXFLAGS="$CXXFLAGS -std=c++98"
+fi
+
+# enable variable shadow warnings
+#export CFLAGS="$CFLAGS -Wshadow=compatible-local -Wshadow=local"
+#export CXXFLAGS="$CXXFLAGS -Wshadow=compatible-local -Wshadow=local"
+
+# enable pedantic
+#export CFLAGS='-pedantic'
+#export CXXFLAGS='-pedantic'
+
+if [[ ! -z $FSAN ]]; then
+ export CPPFLAGS="$CPPFLAGS $FSAN"
+ export LDFLAGS="$LDFLAGS $FSAN"
+fi
+
+# enable devel asserts
+OPTS="$OPTS --enable-devel"
+
+# disable optimizations
+OPTS="$OPTS --disable-optimization"
+
+# disable lz4
+#OPTS="$OPTS --disable-lz4"
+
+# disable cyrus-sasl
+#OPTS="$OPTS --disable-sasl"
+
+#enable refcnt debugging
+#OPTS="$OPTS --enable-refcnt-debug"
+
+build Development "$OPTS"
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore
new file mode 100644
index 000000000..4190608c4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/.gitignore
@@ -0,0 +1,19 @@
+consumer
+delete_records
+idempotent_producer
+kafkatest_verifiable_client
+misc
+openssl_engine_example_cpp
+producer
+producer_cpp
+rdkafka_complex_consumer_example
+rdkafka_complex_consumer_example_cpp
+rdkafka_consume_batch
+rdkafka_example
+rdkafka_example_cpp
+rdkafka_performance
+transactions
+list_consumer_groups
+describe_consumer_groups
+list_consumer_group_offsets
+alter_consumer_group_offsets
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt
new file mode 100644
index 000000000..bbbb89ad9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/CMakeLists.txt
@@ -0,0 +1,40 @@
+if(WIN32)
+ set(win32_sources ../win32/wingetopt.c ../win32/wingetopt.h)
+endif(WIN32)
+
+add_executable(producer producer.c ${win32_sources})
+target_link_libraries(producer PUBLIC rdkafka)
+
+add_executable(producer_cpp producer.cpp ${win32_sources})
+target_link_libraries(producer_cpp PUBLIC rdkafka++)
+
+add_executable(consumer consumer.c ${win32_sources})
+target_link_libraries(consumer PUBLIC rdkafka)
+
+add_executable(rdkafka_performance rdkafka_performance.c ${win32_sources})
+target_link_libraries(rdkafka_performance PUBLIC rdkafka)
+
+add_executable(rdkafka_example_cpp rdkafka_example.cpp ${win32_sources})
+target_link_libraries(rdkafka_example_cpp PUBLIC rdkafka++)
+
+add_executable(rdkafka_complex_consumer_example_cpp rdkafka_complex_consumer_example.cpp ${win32_sources})
+target_link_libraries(rdkafka_complex_consumer_example_cpp PUBLIC rdkafka++)
+
+add_executable(openssl_engine_example_cpp openssl_engine_example.cpp ${win32_sources})
+target_link_libraries(openssl_engine_example_cpp PUBLIC rdkafka++)
+
+add_executable(misc misc.c ${win32_sources})
+target_link_libraries(misc PUBLIC rdkafka)
+
+
+# The targets below has Unix include dirs and do not compile on Windows.
+if(NOT WIN32)
+ add_executable(rdkafka_example rdkafka_example.c)
+ target_link_libraries(rdkafka_example PUBLIC rdkafka)
+
+ add_executable(rdkafka_complex_consumer_example rdkafka_complex_consumer_example.c)
+ target_link_libraries(rdkafka_complex_consumer_example PUBLIC rdkafka)
+
+ add_executable(kafkatest_verifiable_client kafkatest_verifiable_client.cpp)
+ target_link_libraries(kafkatest_verifiable_client PUBLIC rdkafka++)
+endif(NOT WIN32)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/Makefile b/fluent-bit/lib/librdkafka-2.1.0/examples/Makefile
new file mode 100644
index 000000000..15fba3c2a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/Makefile
@@ -0,0 +1,137 @@
+EXAMPLES ?= rdkafka_example rdkafka_performance rdkafka_example_cpp \
+ rdkafka_complex_consumer_example rdkafka_complex_consumer_example_cpp \
+ kafkatest_verifiable_client \
+ producer consumer idempotent_producer transactions \
+ delete_records \
+ openssl_engine_example_cpp \
+ list_consumer_groups \
+ describe_consumer_groups \
+ list_consumer_group_offsets \
+ alter_consumer_group_offsets \
+ misc
+
+all: $(EXAMPLES)
+
+include ../mklove/Makefile.base
+
+CFLAGS += -I../src
+CXXFLAGS += -I../src-cpp
+
+# librdkafka must be compiled with -gstrict-dwarf, but rdkafka_example must not,
+# due to some clang bug on OSX 10.9
+CPPFLAGS := $(subst strict-dwarf,,$(CPPFLAGS))
+
+rdkafka_example: ../src/librdkafka.a rdkafka_example.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_example.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+ @echo "# $@ is ready"
+ @echo "#"
+ @echo "# Run producer (write messages on stdin)"
+ @echo "./$@ -P -t <topic> -p <partition>"
+ @echo ""
+ @echo "# or consumer"
+ @echo "./$@ -C -t <topic> -p <partition>"
+ @echo ""
+ @echo "#"
+ @echo "# More usage options:"
+ @echo "./$@ -h"
+
+producer: ../src/librdkafka.a producer.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+producer_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a producer.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) producer.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+consumer: ../src/librdkafka.a consumer.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+idempotent_producer: ../src/librdkafka.a idempotent_producer.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+transactions: ../src/librdkafka.a transactions.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+transactions-older-broker.c: ../src/librdkafka.a transactions-older-broker.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+delete_records: ../src/librdkafka.a delete_records.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+list_consumer_groups: ../src/librdkafka.a list_consumer_groups.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+describe_consumer_groups: ../src/librdkafka.a describe_consumer_groups.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+list_consumer_group_offsets: ../src/librdkafka.a list_consumer_group_offsets.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+alter_consumer_group_offsets: ../src/librdkafka.a alter_consumer_group_offsets.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+rdkafka_complex_consumer_example: ../src/librdkafka.a rdkafka_complex_consumer_example.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_complex_consumer_example.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+ @echo "# $@ is ready"
+ @echo "#"
+ @echo "./$@ <topic[:part]> <topic2[:part]> .."
+ @echo ""
+ @echo "#"
+ @echo "# More usage options:"
+ @echo "./$@ -h"
+
+rdkafka_performance: ../src/librdkafka.a rdkafka_performance.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) rdkafka_performance.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+ @echo "# $@ is ready"
+ @echo "#"
+ @echo "# Run producer"
+ @echo "./$@ -P -t <topic> -p <partition> -s <msgsize>"
+ @echo ""
+ @echo "# or consumer"
+ @echo "./$@ -C -t <topic> -p <partition>"
+ @echo ""
+ @echo "#"
+ @echo "# More usage options:"
+ @echo "./$@ -h"
+
+
+rdkafka_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_example.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_example.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+kafkatest_verifiable_client: ../src-cpp/librdkafka++.a ../src/librdkafka.a kafkatest_verifiable_client.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) kafkatest_verifiable_client.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+
+rdkafka_complex_consumer_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_complex_consumer_example.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_complex_consumer_example.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+rdkafka_consume_batch: ../src-cpp/librdkafka++.a ../src/librdkafka.a rdkafka_consume_batch.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) rdkafka_consume_batch.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+openssl_engine_example_cpp: ../src-cpp/librdkafka++.a ../src/librdkafka.a openssl_engine_example.cpp
+ $(CXX) $(CPPFLAGS) $(CXXFLAGS) openssl_engine_example.cpp -o $@ $(LDFLAGS) \
+ ../src-cpp/librdkafka++.a ../src/librdkafka.a $(LIBS)
+
+misc: ../src/librdkafka.a misc.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) $@.c -o $@ $(LDFLAGS) \
+ ../src/librdkafka.a $(LIBS)
+
+clean:
+ rm -f $(EXAMPLES)
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/README.md b/fluent-bit/lib/librdkafka-2.1.0/examples/README.md
new file mode 100644
index 000000000..3caee3b86
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/README.md
@@ -0,0 +1,38 @@
+# librdkafka examples
+
+This directory contains example applications utilizing librdkafka.
+The examples are built by running `make` and they will be be linked
+statically or dynamically to librdkafka in the parent `../src` directory.
+
+Begin with the following examples:
+
+ * [consumer.c](consumer.c) - a typical C high-level consumer application.
+ * [producer.c](producer.c) - a typical C producer application.
+ * [producer.cpp](producer.cpp) - a typical C++ producer application.
+ * [idempotent_producer.c](idempotent_producer.c) - Idempotent producer.
+ * [transactions.c](transactions.c) - Full exactly once semantics (EOS)
+ transactional consumer-producer exammple.
+ Requires Apache Kafka 2.5 or later.
+ * [transactions-older-broker.c](transactions-older-broker.c) - Same as
+ `transactions.c` but for Apache Kafka versions 2.4.x and older which
+ lack KIP-447 support.
+ * [misc.c](misc.c) - a collection of miscellaneous usage examples.
+
+
+For more complex uses, see:
+ * [rdkafka_example.c](rdkafka_example.c) - simple consumer, producer, metadata listing, kitchen sink, etc.
+ * [rdkafka_example.cpp](rdkafka_example.cpp) - simple consumer, producer, metadata listing in C++.
+ * [rdkafka_complex_consumer_example.c](rdkafka_complex_consumer_example.c) - a more contrived high-level C consumer example.
+ * [rdkafka_complex_consumer_example.cpp](rdkafka_complex_consumer_example.cpp) - a more contrived high-level C++ consumer example.
+ * [rdkafka_consume_batch.cpp](rdkafka_consume_batch.cpp) - batching high-level C++ consumer example.
+ * [rdkafka_performance.c](rdkafka_performance.c) - performance, benchmark, latency producer and consumer tool.
+ * [kafkatest_verifiable_client.cpp](kafkatest_verifiable_client.cpp) - for use with the official Apache Kafka client system tests.
+ * [openssl_engine_example.cpp](openssl_engine_example.cpp) - metadata listing in C++ over SSL channel established using OpenSSL engine.
+
+
+ For Admin API examples see:
+ * [delete_records.c](delete_records.c) - Delete records.
+ * [list_consumer_groups.c](list_consumer_groups.c) - List consumer groups.
+ * [describe_consumer_groups.c](describe_consumer_groups.c) - Describe consumer groups.
+ * [list_consumer_group_offsets.c](list_consumer_group_offsets.c) - List offsets of a consumer group.
+ * [alter_consumer_group_offsets.c](alter_consumer_group_offsets.c) - Alter offsets of a consumer group.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c
new file mode 100644
index 000000000..09a52fd7e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/alter_consumer_group_offsets.c
@@ -0,0 +1,338 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * AlterConsumerGroupOffsets usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "Alter consumer group offsets usage examples\n"
+ "\n"
+ "Usage: %s <options> <group_id> <topic>\n"
+ " <partition1> <offset1>\n"
+ " <partition2> <offset2>\n"
+ " ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions,
+ int print_offset,
+ const char *prefix) {
+ int i;
+
+ if (partitions->cnt == 0) {
+ fprintf(fp, "%sNo partition found", prefix);
+ }
+ for (i = 0; i < partitions->cnt; i++) {
+ char offset_string[512] = {};
+ *offset_string = '\0';
+ if (print_offset) {
+ snprintf(offset_string, sizeof(offset_string),
+ " offset %" PRId64,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s",
+ i > 0 ? "\n" : "", prefix, partitions->elems[i].topic,
+ partitions->elems[i].partition, offset_string,
+ rd_kafka_err2str(partitions->elems[i].err));
+ }
+ fprintf(fp, "\n");
+}
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+ unsigned long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+static void
+cmd_alter_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) {
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ rd_kafka_t *rk; /* Admin client instance */
+ rd_kafka_AdminOptions_t *options; /* (Optional) Options for
+ * AlterConsumerGroupOffsets() */
+ rd_kafka_event_t *event; /* AlterConsumerGroupOffsets result event */
+ const int min_argc = 2;
+ int i, num_partitions = 0;
+ const char *group_id, *topic;
+ rd_kafka_AlterConsumerGroupOffsets_t *alter_consumer_group_offsets;
+
+ /*
+ * Argument validation
+ */
+ if (argc < min_argc || (argc - min_argc) % 2 != 0) {
+ usage("Wrong number of arguments");
+ }
+
+ num_partitions = (argc - min_argc) / 2;
+ group_id = argv[0];
+ topic = argv[1];
+
+ /*
+ * Create an admin client, it can be created using any client type,
+ * so we choose producer since it requires no extra configuration
+ * and is more light-weight than the consumer.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* The Admin API is completely asynchronous, results are emitted
+ * on the result queue that is passed to AlterConsumerGroupOffsets() */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Set timeout (optional) */
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ exit(1);
+ }
+
+ /* Read passed partition-offsets */
+ rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_topic_partition_list_new(num_partitions);
+ for (i = 0; i < num_partitions; i++) {
+ rd_kafka_topic_partition_list_add(
+ partitions, topic,
+ parse_int("partition", argv[min_argc + i * 2]))
+ ->offset = parse_int("offset", argv[min_argc + 1 + i * 2]);
+ }
+
+ /* Create argument */
+ alter_consumer_group_offsets =
+ rd_kafka_AlterConsumerGroupOffsets_new(group_id, partitions);
+ /* Call AlterConsumerGroupOffsets */
+ rd_kafka_AlterConsumerGroupOffsets(rk, &alter_consumer_group_offsets, 1,
+ options, queue);
+
+ /* Clean up input arguments */
+ rd_kafka_AlterConsumerGroupOffsets_destroy(
+ alter_consumer_group_offsets);
+ rd_kafka_AdminOptions_destroy(options);
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (30s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ /* AlterConsumerGroupOffsets request failed */
+ fprintf(stderr, "%% AlterConsumerGroupOffsets failed: %s\n",
+ rd_kafka_event_error_string(event));
+ exit(1);
+
+ } else {
+ /* AlterConsumerGroupOffsets request succeeded, but individual
+ * partitions may have errors. */
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *result;
+ const rd_kafka_group_result_t **groups;
+ size_t n_groups, i;
+
+ result = rd_kafka_event_AlterConsumerGroupOffsets_result(event);
+ groups = rd_kafka_AlterConsumerGroupOffsets_result_groups(
+ result, &n_groups);
+
+ printf("AlterConsumerGroupOffsets results:\n");
+ for (i = 0; i < n_groups; i++) {
+ const rd_kafka_group_result_t *group = groups[i];
+ const rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_group_result_partitions(group);
+ print_partition_list(stderr, partitions, 1, " ");
+ }
+ }
+
+ /* Destroy event object when we're done with it.
+ * Note: rd_kafka_event_destroy() allows a NULL event. */
+ rd_kafka_event_destroy(event);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(queue);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_alter_consumer_group_offsets(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c b/fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c
new file mode 100644
index 000000000..21b27ca78
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/consumer.c
@@ -0,0 +1,260 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Simple high-level balanced Apache Kafka consumer
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <ctype.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+//#include <librdkafka/rdkafka.h>
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+
+/**
+ * @returns 1 if all bytes are printable, else 0.
+ */
+static int is_printable(const char *buf, size_t size) {
+ size_t i;
+
+ for (i = 0; i < size; i++)
+ if (!isprint((int)buf[i]))
+ return 0;
+
+ return 1;
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *rk; /* Consumer instance handle */
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ rd_kafka_resp_err_t err; /* librdkafka API error code */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ const char *brokers; /* Argument: broker list */
+ const char *groupid; /* Argument: Consumer group id */
+ char **topics; /* Argument: list of topics to subscribe to */
+ int topic_cnt; /* Number of topics to subscribe to */
+ rd_kafka_topic_partition_list_t *subscription; /* Subscribed topics */
+ int i;
+
+ /*
+ * Argument validation
+ */
+ if (argc < 4) {
+ fprintf(stderr,
+ "%% Usage: "
+ "%s <broker> <group.id> <topic1> <topic2>..\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ groupid = argv[2];
+ topics = &argv[3];
+ topic_cnt = argc - 3;
+
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* Set the consumer group id.
+ * All consumers sharing the same group id will join the same
+ * group, and the subscribed topic' partitions will be assigned
+ * according to the partition.assignment.strategy
+ * (consumer config property) to the consumers in the group. */
+ if (rd_kafka_conf_set(conf, "group.id", groupid, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* If there is no previously committed offset for a partition
+ * the auto.offset.reset strategy will be used to decide where
+ * in the partition to start fetching messages.
+ * By setting this to earliest the consumer will read all messages
+ * in the partition if there was no previously committed offset. */
+ if (rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /*
+ * Create consumer instance.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new consumer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ conf = NULL; /* Configuration object is now owned, and freed,
+ * by the rd_kafka_t instance. */
+
+
+ /* Redirect all messages from per-partition queues to
+ * the main queue so that messages can be consumed with one
+ * call from all assigned partitions.
+ *
+ * The alternative is to poll the main queue (for events)
+ * and each partition queue separately, which requires setting
+ * up a rebalance callback and keeping track of the assignment:
+ * but that is more complex and typically not recommended. */
+ rd_kafka_poll_set_consumer(rk);
+
+
+ /* Convert the list of topics to a format suitable for librdkafka */
+ subscription = rd_kafka_topic_partition_list_new(topic_cnt);
+ for (i = 0; i < topic_cnt; i++)
+ rd_kafka_topic_partition_list_add(subscription, topics[i],
+ /* the partition is ignored
+ * by subscribe() */
+ RD_KAFKA_PARTITION_UA);
+
+ /* Subscribe to the list of topics */
+ err = rd_kafka_subscribe(rk, subscription);
+ if (err) {
+ fprintf(stderr, "%% Failed to subscribe to %d topics: %s\n",
+ subscription->cnt, rd_kafka_err2str(err));
+ rd_kafka_topic_partition_list_destroy(subscription);
+ rd_kafka_destroy(rk);
+ return 1;
+ }
+
+ fprintf(stderr,
+ "%% Subscribed to %d topic(s), "
+ "waiting for rebalance and messages...\n",
+ subscription->cnt);
+
+ rd_kafka_topic_partition_list_destroy(subscription);
+
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Subscribing to topics will trigger a group rebalance
+ * which may take some time to finish, but there is no need
+ * for the application to handle this idle period in a special way
+ * since a rebalance may happen at any time.
+ * Start polling for messages. */
+
+ while (run) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(rk, 100);
+ if (!rkm)
+ continue; /* Timeout: no message within 100ms,
+ * try again. This short timeout allows
+ * checking for `run` at frequent intervals.
+ */
+
+ /* consumer_poll() will return either a proper message
+ * or a consumer error (rkm->err is set). */
+ if (rkm->err) {
+ /* Consumer errors are generally to be considered
+ * informational as the consumer will automatically
+ * try to recover from all types of errors. */
+ fprintf(stderr, "%% Consumer error: %s\n",
+ rd_kafka_message_errstr(rkm));
+ rd_kafka_message_destroy(rkm);
+ continue;
+ }
+
+ /* Proper message. */
+ printf("Message on %s [%" PRId32 "] at offset %" PRId64
+ " (leader epoch %" PRId32 "):\n",
+ rd_kafka_topic_name(rkm->rkt), rkm->partition,
+ rkm->offset, rd_kafka_message_leader_epoch(rkm));
+
+ /* Print the message key. */
+ if (rkm->key && is_printable(rkm->key, rkm->key_len))
+ printf(" Key: %.*s\n", (int)rkm->key_len,
+ (const char *)rkm->key);
+ else if (rkm->key)
+ printf(" Key: (%d bytes)\n", (int)rkm->key_len);
+
+ /* Print the message value/payload. */
+ if (rkm->payload && is_printable(rkm->payload, rkm->len))
+ printf(" Value: %.*s\n", (int)rkm->len,
+ (const char *)rkm->payload);
+ else if (rkm->payload)
+ printf(" Value: (%d bytes)\n", (int)rkm->len);
+
+ rd_kafka_message_destroy(rkm);
+ }
+
+
+ /* Close the consumer: commit final offsets and leave the group. */
+ fprintf(stderr, "%% Closing consumer\n");
+ rd_kafka_consumer_close(rk);
+
+
+ /* Destroy the consumer */
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c b/fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c
new file mode 100644
index 000000000..2660996a5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/delete_records.c
@@ -0,0 +1,233 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Example utility that shows how to use DeleteRecords (AdminAPI)
+ * do delete all messages/records up to (but not including) a specific offset
+ * from one or more topic partitions.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+ unsigned long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ const char *brokers; /* Argument: broker list */
+ rd_kafka_t *rk; /* Admin client instance */
+ rd_kafka_topic_partition_list_t *offsets_before; /* Delete messages up
+ * to but not
+ * including these
+ * offsets */
+ rd_kafka_DeleteRecords_t *del_records; /* Container for offsets_before*/
+ rd_kafka_AdminOptions_t *options; /* (Optional) Options for
+ * DeleteRecords() */
+ rd_kafka_event_t *event; /* DeleteRecords result event */
+ int exitcode = 0;
+ int i;
+
+ /*
+ * Argument validation
+ */
+ if (argc < 5 || (argc - 2) % 3 != 0) {
+ fprintf(stderr,
+ "%% Usage: %s <broker> "
+ "<topic> <partition> <offset_before> "
+ "<topic2> <partition2> <offset_before2> ...\n"
+ "\n"
+ "Delete all messages up to but not including the "
+ "specified offset(s).\n"
+ "\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+
+ /* Parse topic partition offset tuples and add to offsets list */
+ offsets_before = rd_kafka_topic_partition_list_new((argc - 2) / 3);
+ for (i = 2; i < argc; i += 3) {
+ const char *topic = argv[i];
+ int partition = parse_int("partition", argv[i + 1]);
+ int64_t offset = parse_int("offset_before", argv[i + 2]);
+
+ rd_kafka_topic_partition_list_add(offsets_before, topic,
+ partition)
+ ->offset = offset;
+ }
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ return 1;
+ }
+ rd_kafka_conf_set(conf, "debug", "admin,topic,metadata", NULL, 0);
+
+ /*
+ * Create an admin client, it can be created using any client type,
+ * so we choose producer since it requires no extra configuration
+ * and is more light-weight than the consumer.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ /* The Admin API is completely asynchronous, results are emitted
+ * on the result queue that is passed to DeleteRecords() */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Set timeout (optional) */
+ options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS);
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ return 1;
+ }
+
+ /* Create argument */
+ del_records = rd_kafka_DeleteRecords_new(offsets_before);
+ /* We're now done with offsets_before */
+ rd_kafka_topic_partition_list_destroy(offsets_before);
+
+ /* Call DeleteRecords */
+ rd_kafka_DeleteRecords(rk, &del_records, 1, options, queue);
+
+ /* Clean up input arguments */
+ rd_kafka_DeleteRecords_destroy(del_records);
+ rd_kafka_AdminOptions_destroy(options);
+
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /*indefinitely*/);
+
+ if (!event) {
+ /* User hit Ctrl-C */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ /* DeleteRecords request failed */
+ fprintf(stderr, "%% DeleteRecords failed: %s\n",
+ rd_kafka_event_error_string(event));
+ exitcode = 2;
+
+ } else {
+ /* DeleteRecords request succeeded, but individual
+ * partitions may have errors. */
+ const rd_kafka_DeleteRecords_result_t *result;
+ const rd_kafka_topic_partition_list_t *offsets;
+ int i;
+
+ result = rd_kafka_event_DeleteRecords_result(event);
+ offsets = rd_kafka_DeleteRecords_result_offsets(result);
+
+ printf("DeleteRecords results:\n");
+ for (i = 0; i < offsets->cnt; i++)
+ printf(" %s [%" PRId32 "] offset %" PRId64 ": %s\n",
+ offsets->elems[i].topic,
+ offsets->elems[i].partition,
+ offsets->elems[i].offset,
+ rd_kafka_err2str(offsets->elems[i].err));
+ }
+
+ /* Destroy event object when we're done with it.
+ * Note: rd_kafka_event_destroy() allows a NULL event. */
+ rd_kafka_event_destroy(event);
+
+ signal(SIGINT, SIG_DFL);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(queue);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+
+ return exitcode;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c b/fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c
new file mode 100644
index 000000000..45b6b8d0b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/describe_consumer_groups.c
@@ -0,0 +1,373 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * DescribeConsumerGroups usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "Describe groups usage examples\n"
+ "\n"
+ "Usage: %s <options> <group1> <group2> ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions,
+ int print_offset,
+ const char *prefix) {
+ int i;
+
+ if (partitions->cnt == 0) {
+ fprintf(fp, "%sNo partition found", prefix);
+ }
+ for (i = 0; i < partitions->cnt; i++) {
+ char offset_string[512] = {};
+ *offset_string = '\0';
+ if (print_offset) {
+ snprintf(offset_string, sizeof(offset_string),
+ " offset %" PRId64,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s",
+ i > 0 ? "\n" : "", prefix, partitions->elems[i].topic,
+ partitions->elems[i].partition, offset_string,
+ rd_kafka_err2str(partitions->elems[i].err));
+ }
+ fprintf(fp, "\n");
+}
+
+/**
+ * @brief Print group information.
+ */
+static int
+print_groups_info(const rd_kafka_DescribeConsumerGroups_result_t *grpdesc,
+ int groups_cnt) {
+ size_t i;
+ const rd_kafka_ConsumerGroupDescription_t **result_groups;
+ size_t result_groups_cnt;
+ result_groups = rd_kafka_DescribeConsumerGroups_result_groups(
+ grpdesc, &result_groups_cnt);
+
+ if (result_groups_cnt == 0) {
+ if (groups_cnt > 0) {
+ fprintf(stderr, "No matching groups found\n");
+ return 1;
+ } else {
+ fprintf(stderr, "No groups in cluster\n");
+ }
+ }
+
+ for (i = 0; i < result_groups_cnt; i++) {
+ int j, member_cnt;
+ const rd_kafka_error_t *error;
+ const rd_kafka_ConsumerGroupDescription_t *group =
+ result_groups[i];
+ char coordinator_desc[512];
+ const rd_kafka_Node_t *coordinator = NULL;
+ const char *group_id =
+ rd_kafka_ConsumerGroupDescription_group_id(group);
+ const char *partition_assignor =
+ rd_kafka_ConsumerGroupDescription_partition_assignor(group);
+ rd_kafka_consumer_group_state_t state =
+ rd_kafka_ConsumerGroupDescription_state(group);
+ member_cnt =
+ rd_kafka_ConsumerGroupDescription_member_count(group);
+ error = rd_kafka_ConsumerGroupDescription_error(group);
+ coordinator =
+ rd_kafka_ConsumerGroupDescription_coordinator(group);
+ *coordinator_desc = '\0';
+
+ if (coordinator != NULL) {
+ snprintf(coordinator_desc, sizeof(coordinator_desc),
+ ", coordinator [id: %" PRId32
+ ", host: %s"
+ ", port: %" PRIu16 "]",
+ rd_kafka_Node_id(coordinator),
+ rd_kafka_Node_host(coordinator),
+ rd_kafka_Node_port(coordinator));
+ }
+ printf(
+ "Group \"%s\", partition assignor \"%s\", "
+ "state %s%s, with %" PRId32 " member(s)",
+ group_id, partition_assignor,
+ rd_kafka_consumer_group_state_name(state), coordinator_desc,
+ member_cnt);
+ if (error)
+ printf(" error[%" PRId32 "]: %s",
+ rd_kafka_error_code(error),
+ rd_kafka_error_string(error));
+ printf("\n");
+ for (j = 0; j < member_cnt; j++) {
+ const rd_kafka_MemberDescription_t *member =
+ rd_kafka_ConsumerGroupDescription_member(group, j);
+ printf(
+ " Member \"%s\" with client-id %s,"
+ " group instance id: %s, host %s\n",
+ rd_kafka_MemberDescription_consumer_id(member),
+ rd_kafka_MemberDescription_client_id(member),
+ rd_kafka_MemberDescription_group_instance_id(
+ member),
+ rd_kafka_MemberDescription_host(member));
+ const rd_kafka_MemberAssignment_t *assignment =
+ rd_kafka_MemberDescription_assignment(member);
+ const rd_kafka_topic_partition_list_t
+ *topic_partitions =
+ rd_kafka_MemberAssignment_partitions(
+ assignment);
+ if (!topic_partitions) {
+ printf(" No assignment\n");
+ } else if (topic_partitions->cnt == 0) {
+ printf(" Empty assignment\n");
+ } else {
+ printf(" Assignment:\n");
+ print_partition_list(stdout, topic_partitions,
+ 0, " ");
+ }
+ }
+ }
+ return 0;
+}
+
+/**
+ * @brief Call rd_kafka_DescribeConsumerGroups() with a list of
+ * groups.
+ */
+static void
+cmd_describe_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char **groups = NULL;
+ char errstr[512];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_event_t *event = NULL;
+ int retval = 0;
+ int groups_cnt = 0;
+
+ if (argc >= 1) {
+ groups = (const char **)&argv[0];
+ groups_cnt = argc;
+ }
+
+ /*
+ * Create consumer instance
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ fatal("Failed to create new consumer: %s", errstr);
+
+ /*
+ * Describe consumer groups
+ */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS);
+
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ goto exit;
+ }
+
+ rd_kafka_DescribeConsumerGroups(rk, groups, groups_cnt, options, queue);
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (10s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ rd_kafka_resp_err_t err = rd_kafka_event_error(event);
+ /* DescribeConsumerGroups request failed */
+ fprintf(stderr,
+ "%% DescribeConsumerGroups failed[%" PRId32 "]: %s\n",
+ err, rd_kafka_event_error_string(event));
+ goto exit;
+
+ } else {
+ /* DescribeConsumerGroups request succeeded, but individual
+ * groups may have errors. */
+ const rd_kafka_DescribeConsumerGroups_result_t *result;
+
+ result = rd_kafka_event_DescribeConsumerGroups_result(event);
+ printf("DescribeConsumerGroups results:\n");
+ retval = print_groups_info(result, groups_cnt);
+ }
+
+
+exit:
+ if (event)
+ rd_kafka_event_destroy(event);
+ rd_kafka_AdminOptions_destroy(options);
+ rd_kafka_queue_destroy(queue);
+ /* Destroy the client instance */
+ rd_kafka_destroy(rk);
+
+ exit(retval);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_describe_consumer_groups(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/globals.json b/fluent-bit/lib/librdkafka-2.1.0/examples/globals.json
new file mode 100644
index 000000000..527e1262b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/globals.json
@@ -0,0 +1,11 @@
+{"VerifiableConsumer":
+ {
+ "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
+ "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --consumer --debug cgrp,topic,protocol,broker"
+ },
+ "VerifiableProducer":
+ {
+ "class": "kafkatest.services.verifiable_client.VerifiableClientApp",
+ "exec_cmd": "/vagrant/tests/c/kafkatest_verifiable_client --producer --debug topic,broker"
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c b/fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c
new file mode 100644
index 000000000..91b42a4b9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/idempotent_producer.c
@@ -0,0 +1,344 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Idempotent Producer example.
+ *
+ * The idempotent producer provides strict ordering and
+ * exactly-once producing guarantees.
+ *
+ * From the application developer's perspective, the only difference
+ * from a standard producer is the enabling of the feature by setting
+ * the `enable.idempotence` configuration property to `true`, and
+ * handling fatal (RD_KAFKA_RESP_ERR__FATAL) errors which are raised when
+ * the idempotent guarantees can't be satisfied.
+ */
+
+#define _DEFAULT_SOURCE /* avoid glibc deprecation warning of _BSD_SOURCE */
+#define _BSD_SOURCE /* vsnprintf() */
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+static int deliveredcnt = 0;
+static int msgerrcnt = 0;
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was succesfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll() or rd_kafka_flush() and
+ * executes on the application's thread.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err) {
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+ msgerrcnt++;
+ } else {
+ fprintf(stderr,
+ "%% Message delivered (%zd bytes, topic %s, "
+ "partition %" PRId32 ", offset %" PRId64 ")\n",
+ rkmessage->len, rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+ deliveredcnt++;
+ }
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+/**
+ * @brief Generic error handling callback.
+ *
+ * This callback is triggered by rd_kafka_poll() or rd_kafka_flush()
+ * for client instance-level errors, such as broker connection failures,
+ * authentication issues, etc.
+ *
+ * These errors should generally be considered informational as
+ * the underlying client will automatically try to recover from
+ * any errors encountered, the application does not need to take
+ * action on them.
+ *
+ * But with idempotence truly fatal errors can be raised when
+ * the idempotence guarantees can't be satisfied, these errors
+ * are identified by a the `RD_KAFKA_RESP_ERR__FATAL` error code.
+ */
+static void
+error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ rd_kafka_resp_err_t orig_err;
+ char errstr[512];
+
+ fprintf(stderr, "%% Error: %s: %s\n", rd_kafka_err2name(err), reason);
+
+ if (err != RD_KAFKA_RESP_ERR__FATAL)
+ return;
+
+ /* Fatal error handling.
+ *
+ * When a fatal error is detected by the producer instance,
+ * it will trigger an error_cb with ERR__FATAL set.
+ * The application should use rd_kafka_fatal_error() to extract
+ * the actual underlying error code and description, propagate it
+ * to the user (for troubleshooting), and then terminate the
+ * producer since it will no longer accept any new messages to
+ * produce().
+ *
+ * Note:
+ * After a fatal error has been raised, rd_kafka_produce*() will
+ * fail with the original error code.
+ *
+ * Note:
+ * As an alternative to an error_cb, the application may call
+ * rd_kafka_fatal_error() at any time to check if a fatal error
+ * has occurred, typically after a failing rd_kafka_produce*() call.
+ */
+
+ orig_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ fprintf(stderr, "%% FATAL ERROR: %s: %s\n", rd_kafka_err2name(orig_err),
+ errstr);
+
+ /* Clean termination to get delivery results (from rd_kafka_flush())
+ * for all outstanding/in-transit/queued messages. */
+ fprintf(stderr, "%% Terminating on fatal error\n");
+ run = 0;
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *rk; /* Producer instance handle */
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ rd_kafka_resp_err_t err; /* librdkafka API error code */
+ const char *brokers; /* Argument: broker list */
+ const char *topic; /* Argument: topic to produce to */
+ int msgcnt = 0; /* Number of messages produced */
+
+ /*
+ * Argument validation
+ */
+ if (argc != 3) {
+ fprintf(stderr, "%% Usage: %s <broker> <topic>\n", argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ topic = argv[2];
+
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* Enable the idempotent producer */
+ if (rd_kafka_conf_set(conf, "enable.idempotence", "true", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ return 1;
+ }
+
+ /* Set the delivery report callback.
+ * This callback will be called once per message to inform
+ * the application if delivery succeeded or failed.
+ * See dr_msg_cb() above. */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /* Set an error handler callback to catch generic instance-level
+ * errors.
+ *
+ * See the `error_cb()` handler above for how to handle the
+ * fatal errors.
+ */
+ rd_kafka_conf_set_error_cb(conf, error_cb);
+
+
+ /*
+ * Create producer instance.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ fprintf(stderr, "%% Running producer loop. Press Ctrl-C to exit\n");
+
+ while (run) {
+ char buf[64];
+
+ snprintf(buf, sizeof(buf),
+ "Idempotent Producer example message #%d", msgcnt);
+
+ /*
+ * Produce message.
+ * This is an asynchronous call, on success it will only
+ * enqueue the message on the internal producer queue.
+ * The actual delivery attempts to the broker are handled
+ * by background threads.
+ * The previously registered delivery report callback
+ * (dr_msg_cb) is used to signal back to the application
+ * when the message has been delivered (or failed),
+ * and is triggered when the application calls
+ * rd_kafka_poll() or rd_kafka_flush().
+ */
+ retry:
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_VALUE(buf, strlen(buf)),
+ /* Copy the message payload so the `buf` can
+ * be reused for the next message. */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+
+ if (err) {
+ /**
+ * Failed to *enqueue* message for producing.
+ */
+ fprintf(stderr,
+ "%% Failed to produce to topic %s: %s\n", topic,
+ rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If the internal queue is full, wait for
+ * messages to be delivered and then retry.
+ * The internal queue represents both
+ * messages to be sent and messages that have
+ * been sent or failed, awaiting their
+ * delivery report callback to be called.
+ *
+ * The internal queue is limited by the
+ * configuration property
+ * queue.buffering.max.messages and
+ * queue.buffering.max.kbytes */
+ rd_kafka_poll(rk,
+ 1000 /*block for max 1000ms*/);
+ goto retry;
+ } else {
+ /* Produce failed, most likely due to a
+ * fatal error (will be handled by error_cb()),
+ * bail out. */
+
+ /* Instead of using the error_cb(), an
+ * application may check for fatal errors here
+ * by calling rd_kafka_fatal_error(). */
+ break;
+ }
+ }
+
+ /* A producer application should continually serve
+ * the delivery report queue by calling rd_kafka_poll()
+ * at frequent intervals.
+ * Either put the poll call in your main loop, or in a
+ * dedicated thread, or call it after or before every
+ * rd_kafka_produce*() call.
+ * Just make sure that rd_kafka_poll() is still called
+ * during periods where you are not producing any messages
+ * to make sure previously produced messages have their
+ * delivery report callback served (and any other callbacks
+ * you register). */
+ rd_kafka_poll(rk, 0 /*non-blocking*/);
+
+ msgcnt++;
+
+ /* Since fatal errors can't be triggered in practice,
+ * use the test API to trigger a fabricated error after
+ * some time. */
+ if (msgcnt == 13)
+ rd_kafka_test_fatal_error(
+ rk, RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
+ "This is a fabricated error to test the "
+ "fatal error handling");
+
+ /* Short sleep to rate-limit this example.
+ * A real application should not do this. */
+ usleep(500 * 1000); /* 500ms */
+ }
+
+
+ /* Wait for final messages to be delivered or fail.
+ * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
+ * waits for all messages to be delivered. */
+ fprintf(stderr, "%% Flushing outstanding messages..\n");
+ rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */);
+ fprintf(stderr, "%% %d message(s) produced, %d delivered, %d failed\n",
+ msgcnt, deliveredcnt, msgerrcnt);
+
+ /* Save fatal error prior for using with exit status below. */
+ err = rd_kafka_fatal_error(rk, NULL, 0);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+
+ /* Exit application with an error (1) if there was a fatal error. */
+ if (err)
+ return 1;
+ else
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp
new file mode 100644
index 000000000..bdb8607a3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/kafkatest_verifiable_client.cpp
@@ -0,0 +1,961 @@
+/*
+ * Copyright (c) 2015, Confluent Inc
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * librdkafka version of the Java VerifiableProducer and VerifiableConsumer
+ * for use with the official Kafka client tests.
+ */
+
+
+#include <iostream>
+#include <fstream>
+#include <sstream>
+#include <map>
+#include <string>
+#include <algorithm>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+#include <unistd.h>
+#include <sys/time.h>
+#include <assert.h>
+#include <ctype.h>
+#include <strings.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+static volatile sig_atomic_t run = 1;
+static bool exit_eof = false;
+static int verbosity = 1;
+static std::string value_prefix;
+
+class Assignment {
+ public:
+ static std::string name(const std::string &t, int partition) {
+ std::stringstream stm;
+ stm << t << "." << partition;
+ return stm.str();
+ }
+
+ Assignment() :
+ topic(""),
+ partition(-1),
+ consumedMessages(0),
+ minOffset(-1),
+ maxOffset(0) {
+ printf("Created assignment\n");
+ }
+ Assignment(const Assignment &a) {
+ topic = a.topic;
+ partition = a.partition;
+ consumedMessages = a.consumedMessages;
+ minOffset = a.minOffset;
+ maxOffset = a.maxOffset;
+ }
+
+ Assignment &operator=(const Assignment &a) {
+ this->topic = a.topic;
+ this->partition = a.partition;
+ this->consumedMessages = a.consumedMessages;
+ this->minOffset = a.minOffset;
+ this->maxOffset = a.maxOffset;
+ return *this;
+ }
+
+ int operator==(const Assignment &a) const {
+ return !(this->topic == a.topic && this->partition == a.partition);
+ }
+
+ int operator<(const Assignment &a) const {
+ if (this->topic < a.topic)
+ return 1;
+ if (this->topic >= a.topic)
+ return 0;
+ return (this->partition < a.partition);
+ }
+
+ void setup(std::string t, int32_t p) {
+ assert(!t.empty());
+ assert(topic.empty() || topic == t);
+ assert(partition == -1 || partition == p);
+ topic = t;
+ partition = p;
+ }
+
+ std::string topic;
+ int partition;
+ int consumedMessages;
+ int64_t minOffset;
+ int64_t maxOffset;
+};
+
+
+
+static struct {
+ int maxMessages;
+
+ struct {
+ int numAcked;
+ int numSent;
+ int numErr;
+ } producer;
+
+ struct {
+ int consumedMessages;
+ int consumedMessagesLastReported;
+ int consumedMessagesAtLastCommit;
+ bool useAutoCommit;
+ std::map<std::string, Assignment> assignments;
+ } consumer;
+} state = {
+ /* .maxMessages = */ -1};
+
+
+static RdKafka::KafkaConsumer *consumer;
+
+
+static std::string now() {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ time_t t = tv.tv_sec;
+ struct tm tm;
+ char buf[64];
+
+ localtime_r(&t, &tm);
+ strftime(buf, sizeof(buf), "%H:%M:%S", &tm);
+ snprintf(buf + strlen(buf), sizeof(buf) - strlen(buf), ".%03d",
+ (int)(tv.tv_usec / 1000));
+
+ return buf;
+}
+
+
+static time_t watchdog_last_kick;
+static const int watchdog_timeout = 20; /* Must be > socket.timeout.ms */
+static void sigwatchdog(int sig) {
+ time_t t = time(NULL);
+ if (watchdog_last_kick + watchdog_timeout <= t) {
+ std::cerr << now() << ": WATCHDOG TIMEOUT ("
+ << (int)(t - watchdog_last_kick) << "s): TERMINATING"
+ << std::endl;
+ int *i = NULL;
+ *i = 100;
+ abort();
+ }
+}
+
+static void watchdog_kick() {
+ watchdog_last_kick = time(NULL);
+
+ /* Safe guard against hangs-on-exit */
+ alarm(watchdog_timeout);
+}
+
+
+
+static void errorString(const std::string &name,
+ const std::string &errmsg,
+ const std::string &topic,
+ const std::string *key,
+ const std::string &value) {
+ std::cout << "{ "
+ << "\"name\": \"" << name << "\", "
+ << "\"_time\": \"" << now() << "\", "
+ << "\"message\": \"" << errmsg << "\", "
+ << "\"topic\": \"" << topic << "\", "
+ << "\"key\": \"" << (key ? *key : "NULL") << "\", "
+ << "\"value\": \"" << value << "\" "
+ << "}" << std::endl;
+}
+
+
+static void successString(const std::string &name,
+ const std::string &topic,
+ int partition,
+ int64_t offset,
+ const std::string *key,
+ const std::string &value) {
+ std::cout << "{ "
+ << "\"name\": \"" << name << "\", "
+ << "\"_time\": \"" << now() << "\", "
+ << "\"topic\": \"" << topic << "\", "
+ << "\"partition\": " << partition << ", "
+ << "\"offset\": " << offset << ", "
+ << "\"key\": \"" << (key ? *key : "NULL") << "\", "
+ << "\"value\": \"" << value << "\" "
+ << "}" << std::endl;
+}
+
+
+#if FIXME
+static void offsetStatus(bool success,
+ const std::string &topic,
+ int partition,
+ int64_t offset,
+ const std::string &errstr) {
+ std::cout << "{ "
+ "\"name\": \"offsets_committed\", "
+ << "\"success\": " << success << ", "
+ << "\"offsets\": [ "
+ << " { "
+ << " \"topic\": \"" << topic << "\", "
+ << " \"partition\": " << partition << ", "
+ << " \"offset\": " << (int)offset << ", "
+ << " \"error\": \"" << errstr << "\" "
+ << " } "
+ << "] }" << std::endl;
+}
+#endif
+
+
+static void sigterm(int sig) {
+ std::cerr << now() << ": Terminating because of signal " << sig << std::endl;
+
+ if (!run) {
+ std::cerr << now() << ": Forced termination" << std::endl;
+ exit(1);
+ }
+ run = 0;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &message) {
+ if (message.err()) {
+ state.producer.numErr++;
+ errorString("producer_send_error", message.errstr(), message.topic_name(),
+ message.key(),
+ std::string(static_cast<const char *>(message.payload()),
+ message.len()));
+ } else {
+ successString("producer_send_success", message.topic_name(),
+ (int)message.partition(), message.offset(), message.key(),
+ std::string(static_cast<const char *>(message.payload()),
+ message.len()));
+ state.producer.numAcked++;
+ }
+ }
+};
+
+
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ std::cerr << now() << ": ERROR (" << RdKafka::err2str(event.err())
+ << "): " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_STATS:
+ std::cerr << now() << ": \"STATS\": " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ std::cerr << now() << ": LOG-" << event.severity() << "-" << event.fac()
+ << ": " << event.str() << std::endl;
+ break;
+
+ default:
+ std::cerr << now() << ": EVENT " << event.type() << " ("
+ << RdKafka::err2str(event.err()) << "): " << event.str()
+ << std::endl;
+ break;
+ }
+ }
+};
+
+
+/* Use of this partitioner is pretty pointless since no key is provided
+ * in the produce() call. */
+class MyHashPartitionerCb : public RdKafka::PartitionerCb {
+ public:
+ int32_t partitioner_cb(const RdKafka::Topic *topic,
+ const std::string *key,
+ int32_t partition_cnt,
+ void *msg_opaque) {
+ return djb_hash(key->c_str(), key->size()) % partition_cnt;
+ }
+
+ private:
+ static inline unsigned int djb_hash(const char *str, size_t len) {
+ unsigned int hash = 5381;
+ for (size_t i = 0; i < len; i++)
+ hash = ((hash << 5) + hash) + str[i];
+ return hash;
+ }
+};
+
+
+
+/**
+ * Print number of records consumed, every 100 messages or on timeout.
+ */
+static void report_records_consumed(int immediate) {
+ std::map<std::string, Assignment> *assignments = &state.consumer.assignments;
+
+ if (state.consumer.consumedMessages <=
+ state.consumer.consumedMessagesLastReported + (immediate ? 0 : 999))
+ return;
+
+ std::cout << "{ "
+ "\"name\": \"records_consumed\", "
+ << "\"_totcount\": " << state.consumer.consumedMessages << ", "
+ << "\"count\": "
+ << (state.consumer.consumedMessages -
+ state.consumer.consumedMessagesLastReported)
+ << ", "
+ << "\"partitions\": [ ";
+
+ for (std::map<std::string, Assignment>::iterator ii = assignments->begin();
+ ii != assignments->end(); ii++) {
+ Assignment *a = &(*ii).second;
+ assert(!a->topic.empty());
+ std::cout << (ii == assignments->begin() ? "" : ", ") << " { "
+ << " \"topic\": \"" << a->topic << "\", "
+ << " \"partition\": " << a->partition << ", "
+ << " \"minOffset\": " << a->minOffset << ", "
+ << " \"maxOffset\": " << a->maxOffset << " "
+ << " } ";
+ a->minOffset = -1;
+ }
+
+ std::cout << "] }" << std::endl;
+
+ state.consumer.consumedMessagesLastReported = state.consumer.consumedMessages;
+}
+
+
+class ExampleOffsetCommitCb : public RdKafka::OffsetCommitCb {
+ public:
+ void offset_commit_cb(RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &offsets) {
+ std::cerr << now() << ": Propagate offset for " << offsets.size()
+ << " partitions, error: " << RdKafka::err2str(err) << std::endl;
+
+ /* No offsets to commit, dont report anything. */
+ if (err == RdKafka::ERR__NO_OFFSET)
+ return;
+
+ /* Send up-to-date records_consumed report to make sure consumed > committed
+ */
+ report_records_consumed(1);
+
+ std::cout << "{ "
+ << "\"name\": \"offsets_committed\", "
+ << "\"success\": " << (err ? "false" : "true") << ", "
+ << "\"error\": \"" << (err ? RdKafka::err2str(err) : "") << "\", "
+ << "\"_autocommit\": "
+ << (state.consumer.useAutoCommit ? "true" : "false") << ", "
+ << "\"offsets\": [ ";
+ assert(offsets.size() > 0);
+ for (unsigned int i = 0; i < offsets.size(); i++) {
+ std::cout << (i == 0 ? "" : ", ") << "{ "
+ << " \"topic\": \"" << offsets[i]->topic() << "\", "
+ << " \"partition\": " << offsets[i]->partition() << ", "
+ << " \"offset\": " << (int)offsets[i]->offset() << ", "
+ << " \"error\": \""
+ << (offsets[i]->err() ? RdKafka::err2str(offsets[i]->err())
+ : "")
+ << "\" "
+ << " }";
+ }
+ std::cout << " ] }" << std::endl;
+ }
+};
+
+static ExampleOffsetCommitCb ex_offset_commit_cb;
+
+
+/**
+ * Commit every 1000 messages or whenever there is a consume timeout.
+ */
+static void do_commit(RdKafka::KafkaConsumer *consumer, int immediate) {
+ if (!immediate && (state.consumer.useAutoCommit ||
+ state.consumer.consumedMessagesAtLastCommit + 1000 >
+ state.consumer.consumedMessages))
+ return;
+
+ /* Make sure we report consumption before commit,
+ * otherwise tests may fail because of commit > consumed. */
+ if (state.consumer.consumedMessagesLastReported <
+ state.consumer.consumedMessages)
+ report_records_consumed(1);
+
+ std::cerr << now() << ": committing "
+ << (state.consumer.consumedMessages -
+ state.consumer.consumedMessagesAtLastCommit)
+ << " messages" << std::endl;
+
+ RdKafka::ErrorCode err;
+ err = consumer->commitSync(&ex_offset_commit_cb);
+
+ std::cerr << now() << ": "
+ << "sync commit returned " << RdKafka::err2str(err) << std::endl;
+
+ state.consumer.consumedMessagesAtLastCommit = state.consumer.consumedMessages;
+}
+
+
+void msg_consume(RdKafka::KafkaConsumer *consumer,
+ RdKafka::Message *msg,
+ void *opaque) {
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ /* Try reporting consumed messages */
+ report_records_consumed(1);
+ /* Commit one every consume() timeout instead of on every message.
+ * Also commit on every 1000 messages, whichever comes first. */
+ do_commit(consumer, 1);
+ break;
+
+
+ case RdKafka::ERR_NO_ERROR: {
+ /* Real message */
+ if (verbosity > 2)
+ std::cerr << now() << ": Read msg from " << msg->topic_name() << " ["
+ << (int)msg->partition() << "] at offset " << msg->offset()
+ << std::endl;
+
+ if (state.maxMessages >= 0 &&
+ state.consumer.consumedMessages >= state.maxMessages)
+ return;
+
+
+ Assignment *a = &state.consumer.assignments[Assignment::name(
+ msg->topic_name(), msg->partition())];
+ a->setup(msg->topic_name(), msg->partition());
+
+ a->consumedMessages++;
+ if (a->minOffset == -1)
+ a->minOffset = msg->offset();
+ if (a->maxOffset < msg->offset())
+ a->maxOffset = msg->offset();
+
+ if (msg->key()) {
+ if (verbosity >= 3)
+ std::cerr << now() << ": Key: " << *msg->key() << std::endl;
+ }
+
+ if (verbosity >= 3)
+ fprintf(stderr, "%.*s\n", static_cast<int>(msg->len()),
+ static_cast<const char *>(msg->payload()));
+
+ state.consumer.consumedMessages++;
+
+ report_records_consumed(0);
+
+ do_commit(consumer, 0);
+ } break;
+
+ case RdKafka::ERR__PARTITION_EOF:
+ /* Last message */
+ if (exit_eof) {
+ std::cerr << now() << ": Terminate: exit on EOF" << std::endl;
+ run = 0;
+ }
+ break;
+
+ case RdKafka::ERR__UNKNOWN_TOPIC:
+ case RdKafka::ERR__UNKNOWN_PARTITION:
+ std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+ run = 0;
+ break;
+
+ case RdKafka::ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
+ std::cerr << now() << ": Warning: " << msg->errstr() << std::endl;
+ break;
+
+ default:
+ /* Errors */
+ std::cerr << now() << ": Consume failed: " << msg->errstr() << std::endl;
+ run = 0;
+ }
+}
+
+
+
+class ExampleConsumeCb : public RdKafka::ConsumeCb {
+ public:
+ void consume_cb(RdKafka::Message &msg, void *opaque) {
+ msg_consume(consumer_, &msg, opaque);
+ }
+ RdKafka::KafkaConsumer *consumer_;
+};
+
+class ExampleRebalanceCb : public RdKafka::RebalanceCb {
+ private:
+ static std::string part_list_json(
+ const std::vector<RdKafka::TopicPartition *> &partitions) {
+ std::ostringstream out;
+ for (unsigned int i = 0; i < partitions.size(); i++)
+ out << (i == 0 ? "" : ", ") << "{ "
+ << " \"topic\": \"" << partitions[i]->topic() << "\", "
+ << " \"partition\": " << partitions[i]->partition() << " }";
+ return out.str();
+ }
+
+ public:
+ void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ std::cerr << now() << ": rebalance_cb " << RdKafka::err2str(err) << " for "
+ << partitions.size() << " partitions" << std::endl;
+ /* Send message report prior to rebalancing event to make sure they
+ * are accounted for on the "right side" of the rebalance. */
+ report_records_consumed(1);
+
+ if (err == RdKafka::ERR__ASSIGN_PARTITIONS)
+ consumer->assign(partitions);
+ else {
+ do_commit(consumer, 1);
+ consumer->unassign();
+ }
+
+ std::cout << "{ "
+ << "\"name\": \"partitions_"
+ << (err == RdKafka::ERR__ASSIGN_PARTITIONS ? "assigned"
+ : "revoked")
+ << "\", "
+ << "\"partitions\": [ " << part_list_json(partitions) << "] }"
+ << std::endl;
+ }
+};
+
+
+
+/**
+ * @brief Read (Java client) configuration file
+ */
+static void read_conf_file(RdKafka::Conf *conf, const std::string &conf_file) {
+ std::ifstream inf(conf_file.c_str());
+
+ if (!inf) {
+ std::cerr << now() << ": " << conf_file << ": could not open file"
+ << std::endl;
+ exit(1);
+ }
+
+ std::cerr << now() << ": " << conf_file << ": read config file" << std::endl;
+
+ std::string line;
+ int linenr = 0;
+
+ while (std::getline(inf, line)) {
+ linenr++;
+
+ // Ignore comments and empty lines
+ if (line[0] == '#' || line.length() == 0)
+ continue;
+
+ // Match on key=value..
+ size_t d = line.find("=");
+ if (d == 0 || d == std::string::npos) {
+ std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << line
+ << ": ignoring invalid line (expect key=value): "
+ << ::std::endl;
+ continue;
+ }
+
+ std::string key = line.substr(0, d);
+ std::string val = line.substr(d + 1);
+
+ std::string errstr;
+ if (conf->set(key, val, errstr)) {
+ std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key
+ << "=" << val << ": " << errstr << ": ignoring error"
+ << std::endl;
+ } else {
+ std::cerr << now() << ": " << conf_file << ":" << linenr << ": " << key
+ << "=" << val << ": applied to configuration" << std::endl;
+ }
+ }
+
+ inf.close();
+}
+
+
+
+int main(int argc, char **argv) {
+ std::string brokers = "localhost";
+ std::string errstr;
+ std::vector<std::string> topics;
+ std::string mode = "P";
+ int throughput = 0;
+ int32_t partition = RdKafka::Topic::PARTITION_UA;
+ MyHashPartitionerCb hash_partitioner;
+ int64_t create_time = -1;
+
+ std::cerr << now() << ": librdkafka version " << RdKafka::version_str()
+ << " (" << RdKafka::version() << ")" << std::endl;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ /* Java VerifiableProducer defaults to acks=all */
+ if (conf->set("acks", "all", errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+
+ /* Avoid slow shutdown on error */
+ if (conf->set("message.timeout.ms", "60000", errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+
+ {
+ char hostname[128];
+ gethostname(hostname, sizeof(hostname) - 1);
+ conf->set("client.id", std::string("rdkafka@") + hostname, errstr);
+ }
+
+ conf->set("log.thread.name", "true", errstr);
+
+ /* auto commit is explicitly enabled with --enable-autocommit */
+ conf->set("enable.auto.commit", "false", errstr);
+
+ /* keep protocol request timeouts under the watchdog timeout
+ * to make sure things like commitSync() dont fall victim to the watchdog. */
+ conf->set("socket.timeout.ms", "10000", errstr);
+
+ conf->set("fetch.wait.max.ms", "500", errstr);
+ conf->set("fetch.min.bytes", "4096", errstr);
+
+ conf->set("enable.partition.eof", "true", errstr);
+
+ for (int i = 1; i < argc; i++) {
+ const char *name = argv[i];
+ const char *val = i + 1 < argc ? argv[i + 1] : NULL;
+
+ if (val && !strncmp(val, "-", 1))
+ val = NULL;
+
+ std::cout << now() << ": argument: " << name << " " << (val ? val : "")
+ << std::endl;
+
+ if (val) {
+ if (!strcmp(name, "--topic"))
+ topics.push_back(val);
+ else if (!strcmp(name, "--broker-list"))
+ brokers = val;
+ else if (!strcmp(name, "--max-messages"))
+ state.maxMessages = atoi(val);
+ else if (!strcmp(name, "--throughput"))
+ throughput = atoi(val);
+ else if (!strcmp(name, "--producer.config") ||
+ !strcmp(name, "--consumer.config"))
+ read_conf_file(conf, val);
+ else if (!strcmp(name, "--group-id"))
+ conf->set("group.id", val, errstr);
+ else if (!strcmp(name, "--session-timeout"))
+ conf->set("session.timeout.ms", val, errstr);
+ else if (!strcmp(name, "--reset-policy")) {
+ if (conf->set("auto.offset.reset", val, errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ } else if (!strcmp(name, "--assignment-strategy")) {
+ /* The system tests pass the Java class name(s) rather than
+ * the configuration value. Fix it.
+ * "org.apache.kafka.clients.consumer.RangeAssignor,.." -> "range,.."
+ */
+ std::string s = val;
+ size_t pos;
+
+ while ((pos = s.find("org.apache.kafka.clients.consumer.")) !=
+ std::string::npos)
+ s.erase(pos, strlen("org.apache.kafka.clients.consumer."));
+
+ while ((pos = s.find("Assignor")) != std::string::npos)
+ s.erase(pos, strlen("Assignor"));
+
+ std::transform(s.begin(), s.end(), s.begin(), tolower);
+
+ std::cerr << now() << ": converted " << name << " " << val << " to "
+ << s << std::endl;
+
+ if (conf->set("partition.assignment.strategy", s.c_str(), errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ } else if (!strcmp(name, "--value-prefix")) {
+ value_prefix = std::string(val) + ".";
+ } else if (!strcmp(name, "--acks")) {
+ if (conf->set("acks", val, errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ } else if (!strcmp(name, "--message-create-time")) {
+ create_time = (int64_t)atoi(val);
+ } else if (!strcmp(name, "--debug")) {
+ conf->set("debug", val, errstr);
+ } else if (!strcmp(name, "-X")) {
+ char *s = strdup(val);
+ char *t = strchr(s, '=');
+ if (!t)
+ t = (char *)"";
+ else {
+ *t = '\0';
+ t++;
+ }
+ if (conf->set(s, t, errstr)) {
+ std::cerr << now() << ": " << errstr << std::endl;
+ exit(1);
+ }
+ free(s);
+ } else {
+ std::cerr << now() << ": Unknown option " << name << std::endl;
+ exit(1);
+ }
+
+ i++;
+
+ } else {
+ if (!strcmp(name, "--consumer"))
+ mode = "C";
+ else if (!strcmp(name, "--producer"))
+ mode = "P";
+ else if (!strcmp(name, "--enable-autocommit")) {
+ state.consumer.useAutoCommit = true;
+ conf->set("enable.auto.commit", "true", errstr);
+ } else if (!strcmp(name, "-v"))
+ verbosity++;
+ else if (!strcmp(name, "-q"))
+ verbosity--;
+ else {
+ std::cerr << now() << ": Unknown option or missing argument to " << name
+ << std::endl;
+ exit(1);
+ }
+ }
+ }
+
+ if (topics.empty() || brokers.empty()) {
+ std::cerr << now() << ": Missing --topic and --broker-list" << std::endl;
+ exit(1);
+ }
+
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("metadata.broker.list", brokers, errstr);
+
+ ExampleEventCb ex_event_cb;
+ conf->set("event_cb", &ex_event_cb, errstr);
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+ signal(SIGALRM, sigwatchdog);
+
+
+ if (mode == "P") {
+ /*
+ * Producer mode
+ */
+
+ ExampleDeliveryReportCb ex_dr_cb;
+
+ /* Set delivery report callback */
+ conf->set("dr_cb", &ex_dr_cb, errstr);
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << now() << ": Failed to create producer: " << errstr
+ << std::endl;
+ exit(1);
+ }
+
+ std::cerr << now() << ": % Created producer " << producer->name()
+ << std::endl;
+
+ /*
+ * Create topic handle.
+ */
+ RdKafka::Topic *topic =
+ RdKafka::Topic::create(producer, topics[0], NULL, errstr);
+ if (!topic) {
+ std::cerr << now() << ": Failed to create topic: " << errstr << std::endl;
+ exit(1);
+ }
+
+ static const int delay_us = throughput ? 1000000 / throughput : 10;
+
+ if (state.maxMessages == -1)
+ state.maxMessages = 1000000; /* Avoid infinite produce */
+
+ for (int i = 0; run && i < state.maxMessages; i++) {
+ /*
+ * Produce message
+ */
+ std::ostringstream msg;
+ msg << value_prefix << i;
+ while (true) {
+ RdKafka::ErrorCode resp;
+ if (create_time == -1) {
+ resp = producer->produce(
+ topic, partition,
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL,
+ NULL);
+ } else {
+ resp = producer->produce(
+ topics[0], partition,
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ const_cast<char *>(msg.str().c_str()), msg.str().size(), NULL, 0,
+ create_time, NULL);
+ }
+
+ if (resp == RdKafka::ERR__QUEUE_FULL) {
+ producer->poll(100);
+ continue;
+ } else if (resp != RdKafka::ERR_NO_ERROR) {
+ errorString("producer_send_error", RdKafka::err2str(resp),
+ topic->name(), NULL, msg.str());
+ state.producer.numErr++;
+ } else {
+ state.producer.numSent++;
+ }
+ break;
+ }
+
+ producer->poll(delay_us / 1000);
+ usleep(1000);
+ watchdog_kick();
+ }
+ run = 1;
+
+ while (run && producer->outq_len() > 0) {
+ std::cerr << now() << ": Waiting for " << producer->outq_len()
+ << std::endl;
+ producer->poll(1000);
+ watchdog_kick();
+ }
+
+ std::cerr << now() << ": " << state.producer.numAcked << "/"
+ << state.producer.numSent << "/" << state.maxMessages
+ << " msgs acked/sent/max, " << state.producer.numErr << " errored"
+ << std::endl;
+
+ delete topic;
+ delete producer;
+
+
+ } else if (mode == "C") {
+ /*
+ * Consumer mode
+ */
+
+ conf->set("auto.offset.reset", "smallest", errstr);
+
+ ExampleRebalanceCb ex_rebalance_cb;
+ conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
+
+ conf->set("offset_commit_cb", &ex_offset_commit_cb, errstr);
+
+
+ /*
+ * Create consumer using accumulated global configuration.
+ */
+ consumer = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << now() << ": Failed to create consumer: " << errstr
+ << std::endl;
+ exit(1);
+ }
+
+ std::cerr << now() << ": % Created consumer " << consumer->name()
+ << std::endl;
+
+ /*
+ * Subscribe to topic(s)
+ */
+ RdKafka::ErrorCode resp = consumer->subscribe(topics);
+ if (resp != RdKafka::ERR_NO_ERROR) {
+ std::cerr << now() << ": Failed to subscribe to " << topics.size()
+ << " topics: " << RdKafka::err2str(resp) << std::endl;
+ exit(1);
+ }
+
+ watchdog_kick();
+
+ /*
+ * Consume messages
+ */
+ while (run) {
+ RdKafka::Message *msg = consumer->consume(500);
+ msg_consume(consumer, msg, NULL);
+ delete msg;
+ watchdog_kick();
+ }
+
+ std::cerr << now() << ": Final commit on termination" << std::endl;
+
+ /* Final commit */
+ do_commit(consumer, 1);
+
+ /*
+ * Stop consumer
+ */
+ consumer->close();
+
+ delete consumer;
+ }
+
+ std::cout << "{ \"name\": \"shutdown_complete\" }" << std::endl;
+
+ /*
+ * Wait for RdKafka to decommission.
+ * This is not strictly needed (when check outq_len() above), but
+ * allows RdKafka to clean up all its resources before the application
+ * exits so that memory profilers such as valgrind wont complain about
+ * memory leaks.
+ */
+ RdKafka::wait_destroyed(5000);
+
+ std::cerr << now() << ": EXITING WITH RETURN VALUE 0" << std::endl;
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c
new file mode 100644
index 000000000..03e878ee1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_group_offsets.c
@@ -0,0 +1,359 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * ListConsumerGroupOffsets usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "List consumer group offsets usage examples\n"
+ "\n"
+ "Usage: %s <options> <group_id> "
+ "<require_stable_offsets>\n"
+ " <topic1> <partition1>\n"
+ " <topic2> <partition2>\n"
+ " ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions,
+ int print_offset,
+ const char *prefix) {
+ int i;
+
+ if (partitions->cnt == 0) {
+ fprintf(fp, "%sNo partition found", prefix);
+ }
+ for (i = 0; i < partitions->cnt; i++) {
+ char offset_string[512] = {};
+ *offset_string = '\0';
+ if (print_offset) {
+ snprintf(offset_string, sizeof(offset_string),
+ " offset %" PRId64,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "%s%s %s [%" PRId32 "]%s error %s",
+ i > 0 ? "\n" : "", prefix, partitions->elems[i].topic,
+ partitions->elems[i].partition, offset_string,
+ rd_kafka_err2str(partitions->elems[i].err));
+ }
+ fprintf(fp, "\n");
+}
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+ unsigned long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+static void
+cmd_list_consumer_group_offsets(rd_kafka_conf_t *conf, int argc, char **argv) {
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ rd_kafka_t *rk; /* Admin client instance */
+ rd_kafka_AdminOptions_t *options; /* (Optional) Options for
+ * ListConsumerGroupOffsets() */
+ rd_kafka_event_t *event; /* ListConsumerGroupOffsets result event */
+ const int min_argc = 2;
+ char *topic;
+ int partition;
+ int require_stable_offsets = 0, num_partitions = 0;
+ rd_kafka_ListConsumerGroupOffsets_t *list_cgrp_offsets;
+ rd_kafka_error_t *error;
+ const char *group;
+
+ /*
+ * Argument validation
+ */
+ if (argc < min_argc || (argc - min_argc) % 2 != 0)
+ usage("Wrong number of arguments");
+ else {
+ require_stable_offsets =
+ parse_int("require_stable_offsets", argv[1]);
+ if (require_stable_offsets < 0 || require_stable_offsets > 1)
+ usage("Require stable not a 0-1 int");
+ }
+
+ num_partitions = (argc - min_argc) / 2;
+ group = argv[0];
+
+ /*
+ * Create an admin client, it can be created using any client type,
+ * so we choose producer since it requires no extra configuration
+ * and is more light-weight than the consumer.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* The Admin API is completely asynchronous, results are emitted
+ * on the result queue that is passed to ListConsumerGroupOffsets() */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ /* Set timeout (optional) */
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 30 * 1000 /* 30s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ exit(1);
+ }
+ /* Set requested require stable offsets */
+ if ((error = rd_kafka_AdminOptions_set_require_stable_offsets(
+ options, require_stable_offsets))) {
+ fprintf(stderr, "%% Failed to set require stable offsets: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ exit(1);
+ }
+
+ /* Read passed partition-offsets */
+ rd_kafka_topic_partition_list_t *partitions = NULL;
+ if (num_partitions > 0) {
+ int i;
+ partitions = rd_kafka_topic_partition_list_new(num_partitions);
+ for (i = 0; i < num_partitions; i++) {
+ topic = argv[min_argc + i * 2];
+ partition =
+ parse_int("partition", argv[min_argc + i * 2 + 1]);
+ rd_kafka_topic_partition_list_add(partitions, topic,
+ partition);
+ }
+ }
+
+ /* Create argument */
+ list_cgrp_offsets =
+ rd_kafka_ListConsumerGroupOffsets_new(group, partitions);
+ /* Call ListConsumerGroupOffsets */
+ rd_kafka_ListConsumerGroupOffsets(rk, &list_cgrp_offsets, 1, options,
+ queue);
+
+ /* Clean up input arguments */
+ rd_kafka_ListConsumerGroupOffsets_destroy(list_cgrp_offsets);
+ rd_kafka_AdminOptions_destroy(options);
+
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (30s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ /* ListConsumerGroupOffsets request failed */
+ fprintf(stderr, "%% ListConsumerGroupOffsets failed: %s\n",
+ rd_kafka_event_error_string(event));
+ exit(1);
+
+ } else {
+ /* ListConsumerGroupOffsets request succeeded, but individual
+ * partitions may have errors. */
+ const rd_kafka_ListConsumerGroupOffsets_result_t *result;
+ const rd_kafka_group_result_t **groups;
+ size_t n_groups, i;
+
+ result = rd_kafka_event_ListConsumerGroupOffsets_result(event);
+ groups = rd_kafka_ListConsumerGroupOffsets_result_groups(
+ result, &n_groups);
+
+ printf("ListConsumerGroupOffsets results:\n");
+ for (i = 0; i < n_groups; i++) {
+ const rd_kafka_group_result_t *group = groups[i];
+ const rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_group_result_partitions(group);
+ print_partition_list(stderr, partitions, 1, " ");
+ }
+ }
+
+ if (partitions)
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ /* Destroy event object when we're done with it.
+ * Note: rd_kafka_event_destroy() allows a NULL event. */
+ rd_kafka_event_destroy(event);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(queue);
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_list_consumer_group_offsets(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c
new file mode 100644
index 000000000..13656cd66
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/list_consumer_groups.c
@@ -0,0 +1,330 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * ListConsumerGroups usage example.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+static rd_kafka_queue_t *queue; /** Admin result queue.
+ * This is a global so we can
+ * yield in stop() */
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ if (!run) {
+ fprintf(stderr, "%% Forced termination\n");
+ exit(2);
+ }
+ run = 0;
+ rd_kafka_queue_yield(queue);
+}
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "List groups usage examples\n"
+ "\n"
+ "Usage: %s <options> <state1> <state2> ...\n"
+ "\n"
+ "Options:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+/**
+ * @brief Print group information.
+ */
+static int print_groups_info(const rd_kafka_ListConsumerGroups_result_t *list) {
+ size_t i;
+ const rd_kafka_ConsumerGroupListing_t **result_groups;
+ const rd_kafka_error_t **errors;
+ size_t result_groups_cnt;
+ size_t result_error_cnt;
+ result_groups =
+ rd_kafka_ListConsumerGroups_result_valid(list, &result_groups_cnt);
+ errors =
+ rd_kafka_ListConsumerGroups_result_errors(list, &result_error_cnt);
+
+ if (result_groups_cnt == 0) {
+ fprintf(stderr, "No matching groups found\n");
+ }
+
+ for (i = 0; i < result_groups_cnt; i++) {
+ const rd_kafka_ConsumerGroupListing_t *group = result_groups[i];
+ const char *group_id =
+ rd_kafka_ConsumerGroupListing_group_id(group);
+ rd_kafka_consumer_group_state_t state =
+ rd_kafka_ConsumerGroupListing_state(group);
+ int is_simple_consumer_group =
+ rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
+ group);
+
+ printf("Group \"%s\", is simple %" PRId32
+ ", "
+ "state %s",
+ group_id, is_simple_consumer_group,
+ rd_kafka_consumer_group_state_name(state));
+ printf("\n");
+ }
+ for (i = 0; i < result_error_cnt; i++) {
+ const rd_kafka_error_t *error = errors[i];
+ printf("Error[%" PRId32 "]: %s\n", rd_kafka_error_code(error),
+ rd_kafka_error_string(error));
+ }
+ return 0;
+}
+
+/**
+ * @brief Parse an integer or fail.
+ */
+int64_t parse_int(const char *what, const char *str) {
+ char *end;
+ unsigned long n = strtoull(str, &end, 0);
+
+ if (end != str + strlen(str)) {
+ fprintf(stderr, "%% Invalid input for %s: %s: not an integer\n",
+ what, str);
+ exit(1);
+ }
+
+ return (int64_t)n;
+}
+
+/**
+ * @brief Call rd_kafka_ListConsumerGroups() with a list of
+ * groups.
+ */
+static void
+cmd_list_consumer_groups(rd_kafka_conf_t *conf, int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char **states_str = NULL;
+ char errstr[512];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_event_t *event = NULL;
+ rd_kafka_error_t *error = NULL;
+ int i;
+ int retval = 0;
+ int states_cnt = 0;
+ rd_kafka_consumer_group_state_t *states;
+
+
+ if (argc >= 1) {
+ states_str = (const char **)&argv[0];
+ states_cnt = argc;
+ }
+ states = calloc(states_cnt, sizeof(rd_kafka_consumer_group_state_t));
+ for (i = 0; i < states_cnt; i++) {
+ states[i] = parse_int("state code", states_str[i]);
+ }
+
+ /*
+ * Create consumer instance
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ fatal("Failed to create new consumer: %s", errstr);
+
+ /*
+ * List consumer groups
+ */
+ queue = rd_kafka_queue_new(rk);
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
+
+ if (rd_kafka_AdminOptions_set_request_timeout(
+ options, 10 * 1000 /* 10s */, errstr, sizeof(errstr))) {
+ fprintf(stderr, "%% Failed to set timeout: %s\n", errstr);
+ goto exit;
+ }
+
+ if ((error = rd_kafka_AdminOptions_set_match_consumer_group_states(
+ options, states, states_cnt))) {
+ fprintf(stderr, "%% Failed to set states: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ goto exit;
+ }
+ free(states);
+
+ rd_kafka_ListConsumerGroups(rk, options, queue);
+ rd_kafka_AdminOptions_destroy(options);
+
+ /* Wait for results */
+ event = rd_kafka_queue_poll(queue, -1 /* indefinitely but limited by
+ * the request timeout set
+ * above (10s) */);
+
+ if (!event) {
+ /* User hit Ctrl-C,
+ * see yield call in stop() signal handler */
+ fprintf(stderr, "%% Cancelled by user\n");
+
+ } else if (rd_kafka_event_error(event)) {
+ rd_kafka_resp_err_t err = rd_kafka_event_error(event);
+ /* ListConsumerGroups request failed */
+ fprintf(stderr,
+ "%% ListConsumerGroups failed[%" PRId32 "]: %s\n", err,
+ rd_kafka_event_error_string(event));
+ goto exit;
+
+ } else {
+ /* ListConsumerGroups request succeeded, but individual
+ * groups may have errors. */
+ const rd_kafka_ListConsumerGroups_result_t *result;
+
+ result = rd_kafka_event_ListConsumerGroups_result(event);
+ printf("ListConsumerGroups results:\n");
+ retval = print_groups_info(result);
+ }
+
+
+exit:
+ if (event)
+ rd_kafka_event_destroy(event);
+ rd_kafka_queue_destroy(queue);
+ /* Destroy the client instance */
+ rd_kafka_destroy(rk);
+
+ exit(retval);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt;
+ argv0 = argv[0];
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+ cmd_list_consumer_groups(conf, argc - optind, &argv[optind]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/misc.c b/fluent-bit/lib/librdkafka-2.1.0/examples/misc.c
new file mode 100644
index 000000000..b63ab577d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/misc.c
@@ -0,0 +1,287 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * A collection of smaller usage examples
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdarg.h>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#else
+#include <getopt.h>
+#endif
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+const char *argv0;
+
+
+static void usage(const char *reason, ...) {
+
+ fprintf(stderr,
+ "Miscellaneous librdkafka usage examples\n"
+ "\n"
+ "Usage: %s <options> <command> [<command arguments>]\n"
+ "\n"
+ "Commands:\n"
+ " List groups:\n"
+ " %s -b <brokers> list_groups <group>\n"
+ "\n"
+ " Show librdkafka version:\n"
+ " %s version\n"
+ "\n"
+ "Common options for all commands:\n"
+ " -b <brokers> Bootstrap server list to connect to.\n"
+ " -X <prop=val> Set librdkafka configuration property.\n"
+ " See CONFIGURATION.md for full list.\n"
+ " -d <dbg,..> Enable librdkafka debugging (%s).\n"
+ "\n",
+ argv0, argv0, argv0, rd_kafka_get_debug_contexts());
+
+ if (reason) {
+ va_list ap;
+ char reasonbuf[512];
+
+ va_start(ap, reason);
+ vsnprintf(reasonbuf, sizeof(reasonbuf), reason, ap);
+ va_end(ap);
+
+ fprintf(stderr, "ERROR: %s\n", reasonbuf);
+ }
+
+ exit(reason ? 1 : 0);
+}
+
+
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(2); \
+ } while (0)
+
+
+/**
+ * @brief Set config property. Exit on failure.
+ */
+static void conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ fatal("Failed to set %s=%s: %s", name, val, errstr);
+}
+
+
+/**
+ * Commands
+ *
+ */
+
+/**
+ * @brief Just print the librdkafka version
+ */
+static void cmd_version(rd_kafka_conf_t *conf, int argc, char **argv) {
+ if (argc)
+ usage("version command takes no arguments");
+
+ printf("librdkafka v%s\n", rd_kafka_version_str());
+ rd_kafka_conf_destroy(conf);
+}
+
+
+/**
+ * @brief Call rd_kafka_list_groups() with an optional groupid argument.
+ */
+static void cmd_list_groups(rd_kafka_conf_t *conf, int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char *groupid = NULL;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ const struct rd_kafka_group_list *grplist;
+ int i;
+ int retval = 0;
+
+ if (argc > 1)
+ usage("too many arguments to list_groups");
+
+ if (argc == 1)
+ groupid = argv[0];
+
+ /*
+ * Create consumer instance
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ fatal("Failed to create new consumer: %s", errstr);
+
+ /*
+ * List groups
+ */
+ err = rd_kafka_list_groups(rk, groupid, &grplist, 10 * 1000 /*10s*/);
+ if (err)
+ fatal("rd_kafka_list_groups(%s) failed: %s", groupid,
+ rd_kafka_err2str(err));
+
+ if (grplist->group_cnt == 0) {
+ if (groupid) {
+ fprintf(stderr, "Group %s not found\n", groupid);
+ retval = 1;
+ } else {
+ fprintf(stderr, "No groups in cluster\n");
+ }
+ }
+
+ /*
+ * Print group information
+ */
+ for (i = 0; i < grplist->group_cnt; i++) {
+ int j;
+ const struct rd_kafka_group_info *grp = &grplist->groups[i];
+
+ printf(
+ "Group \"%s\" protocol-type %s, protocol %s, "
+ "state %s, with %d member(s))",
+ grp->group, grp->protocol_type, grp->protocol, grp->state,
+ grp->member_cnt);
+ if (grp->err)
+ printf(" error: %s", rd_kafka_err2str(grp->err));
+ printf("\n");
+ for (j = 0; j < grp->member_cnt; j++) {
+ const struct rd_kafka_group_member_info *mb =
+ &grp->members[j];
+ printf(
+ " Member \"%s\" with client-id %s, host %s, "
+ "%d bytes of metadat, %d bytes of assignment\n",
+ mb->member_id, mb->client_id, mb->client_host,
+ mb->member_metadata_size,
+ mb->member_assignment_size);
+ }
+ }
+
+ rd_kafka_group_list_destroy(grplist);
+
+ /* Destroy the client instance */
+ rd_kafka_destroy(rk);
+
+ exit(retval);
+}
+
+
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf; /**< Client configuration object */
+ int opt, i;
+ const char *cmd;
+ static const struct {
+ const char *cmd;
+ void (*func)(rd_kafka_conf_t *conf, int argc, char **argv);
+ } cmds[] = {
+ {"version", cmd_version},
+ {"list_groups", cmd_list_groups},
+ {NULL},
+ };
+
+ argv0 = argv[0];
+
+ if (argc == 1)
+ usage(NULL);
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+
+ /*
+ * Parse common options
+ */
+ while ((opt = getopt(argc, argv, "b:X:d:")) != -1) {
+ switch (opt) {
+ case 'b':
+ conf_set(conf, "bootstrap.servers", optarg);
+ break;
+
+ case 'X': {
+ char *name = optarg, *val;
+
+ if (!(val = strchr(name, '=')))
+ fatal("-X expects a name=value argument");
+
+ *val = '\0';
+ val++;
+
+ conf_set(conf, name, val);
+ break;
+ }
+
+ case 'd':
+ conf_set(conf, "debug", optarg);
+ break;
+
+ default:
+ usage("Unknown option %c", (char)opt);
+ }
+ }
+
+
+ if (optind == argc)
+ usage("No command specified");
+
+
+ cmd = argv[optind++];
+
+ /*
+ * Find matching command and run it
+ */
+ for (i = 0; cmds[i].cmd; i++) {
+ if (!strcmp(cmds[i].cmd, cmd)) {
+ cmds[i].func(conf, argc - optind, &argv[optind]);
+ exit(0);
+ }
+ }
+
+ usage("Unknown command: %s", cmd);
+
+ /* NOTREACHED */
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp
new file mode 100644
index 000000000..401857e6b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/openssl_engine_example.cpp
@@ -0,0 +1,249 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * OpenSSL engine integration example. This example fetches metadata
+ * over SSL connection with broker, established using OpenSSL engine.
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+static void metadata_print(const RdKafka::Metadata *metadata) {
+ std::cout << "Number of topics: " << metadata->topics()->size() << std::endl;
+
+ /* Iterate topics */
+ RdKafka::Metadata::TopicMetadataIterator it;
+ for (it = metadata->topics()->begin(); it != metadata->topics()->end(); ++it)
+ std::cout << " " << (*it)->topic() << " has "
+ << (*it)->partitions()->size() << " partitions." << std::endl;
+}
+
+
+class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb {
+ /* This SSL cert verification callback simply prints the incoming
+ * parameters. It provides no validation, everything is ok. */
+ public:
+ bool ssl_cert_verify_cb(const std::string &broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ std::string &errstr) {
+ std::cout << "ssl_cert_verify_cb :"
+ << ": broker_name=" << broker_name << ", broker_id=" << broker_id
+ << ", x509_error=" << *x509_error << ", depth=" << depth
+ << ", buf size=" << size << std::endl;
+
+ return true;
+ }
+};
+
+
+int main(int argc, char **argv) {
+ std::string brokers;
+ std::string errstr;
+ std::string engine_path;
+ std::string ca_location;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ std::string engine_id;
+ std::string engine_callback_data;
+ int opt;
+
+ if (conf->set("security.protocol", "ssl", errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ while ((opt = getopt(argc, argv, "b:p:c:t:d:i:e:X:")) != -1) {
+ switch (opt) {
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'p':
+ engine_path = optarg;
+ break;
+ case 'c':
+ ca_location = optarg;
+ break;
+ case 'i':
+ engine_id = optarg;
+ break;
+ case 'e':
+ engine_callback_data = optarg;
+ break;
+ case 'd':
+ if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ if (brokers.empty() || engine_path.empty() || optind != argc) {
+ usage:
+ std::string features;
+ conf->get("builtin.features", features);
+ fprintf(stderr,
+ "Usage: %s [options] -b <brokers> -p <engine-path> \n"
+ "\n"
+ "OpenSSL engine integration example. This example fetches\n"
+ "metadata over SSL connection with broker, established using\n"
+ "OpenSSL engine.\n"
+ "\n"
+ "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
+ "\n"
+ " Options:\n"
+ " -b <brokers> Broker address\n"
+ " -p <engine-path> Path to OpenSSL engine\n"
+ " -i <engine-id> OpenSSL engine id\n"
+ " -e <engine-callback-data> OpenSSL engine callback_data\n"
+ " -c <ca-cert-location> File path to ca cert\n"
+ " -d [facs..] Enable debugging contexts: %s\n"
+ " -X <prop=name> Set arbitrary librdkafka configuration"
+ " property\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ features.c_str(), RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+ if (conf->set("bootstrap.servers", brokers, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ if (conf->set("ssl.engine.location", engine_path, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ if (ca_location.length() > 0 && conf->set("ssl.ca.location", ca_location,
+ errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ if (engine_id.length() > 0 &&
+ conf->set("ssl.engine.id", engine_id, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /* engine_callback_data needs to be persistent
+ * and outlive the lifetime of the Kafka client handle. */
+ if (engine_callback_data.length() > 0 &&
+ conf->set_engine_callback_data((void *)engine_callback_data.c_str(),
+ errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /* We use the Certificiate verification callback to print the
+ * certificate name being used. */
+ PrintingSSLVerifyCb ssl_verify_cb;
+
+ if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created producer " << producer->name() << std::endl;
+
+ class RdKafka::Metadata *metadata;
+
+ /* Fetch metadata */
+ RdKafka::ErrorCode err = producer->metadata(true, NULL, &metadata, 5000);
+ if (err != RdKafka::ERR_NO_ERROR)
+ std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
+ << std::endl;
+
+ metadata_print(metadata);
+
+ delete metadata;
+ delete producer;
+ delete conf;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/producer.c b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.c
new file mode 100644
index 000000000..b6fb71150
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.c
@@ -0,0 +1,251 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Simple Apache Kafka producer
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <string.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+ fclose(stdin); /* abort fgets() */
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was succesfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll() and executes on
+ * the application's thread.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+ else
+ fprintf(stderr,
+ "%% Message delivered (%zd bytes, "
+ "partition %" PRId32 ")\n",
+ rkmessage->len, rkmessage->partition);
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *rk; /* Producer instance handle */
+ rd_kafka_conf_t *conf; /* Temporary configuration object */
+ char errstr[512]; /* librdkafka API error reporting buffer */
+ char buf[512]; /* Message value temporary buffer */
+ const char *brokers; /* Argument: broker list */
+ const char *topic; /* Argument: topic to produce to */
+
+ /*
+ * Argument validation
+ */
+ if (argc != 3) {
+ fprintf(stderr, "%% Usage: %s <broker> <topic>\n", argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ topic = argv[2];
+
+
+ /*
+ * Create Kafka client configuration place-holder
+ */
+ conf = rd_kafka_conf_new();
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%s\n", errstr);
+ return 1;
+ }
+
+ /* Set the delivery report callback.
+ * This callback will be called once per message to inform
+ * the application if delivery succeeded or failed.
+ * See dr_msg_cb() above.
+ * The callback is only triggered from rd_kafka_poll() and
+ * rd_kafka_flush(). */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /*
+ * Create producer instance.
+ *
+ * NOTE: rd_kafka_new() takes ownership of the conf object
+ * and the application must not reference it again after
+ * this call.
+ */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "%% Failed to create new producer: %s\n",
+ errstr);
+ return 1;
+ }
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ fprintf(stderr,
+ "%% Type some text and hit enter to produce message\n"
+ "%% Or just hit enter to only serve delivery reports\n"
+ "%% Press Ctrl-C or Ctrl-D to exit\n");
+
+ while (run && fgets(buf, sizeof(buf), stdin)) {
+ size_t len = strlen(buf);
+ rd_kafka_resp_err_t err;
+
+ if (buf[len - 1] == '\n') /* Remove newline */
+ buf[--len] = '\0';
+
+ if (len == 0) {
+ /* Empty line: only serve delivery reports */
+ rd_kafka_poll(rk, 0 /*non-blocking */);
+ continue;
+ }
+
+ /*
+ * Send/Produce message.
+ * This is an asynchronous call, on success it will only
+ * enqueue the message on the internal producer queue.
+ * The actual delivery attempts to the broker are handled
+ * by background threads.
+ * The previously registered delivery report callback
+ * (dr_msg_cb) is used to signal back to the application
+ * when the message has been delivered (or failed).
+ */
+ retry:
+ err = rd_kafka_producev(
+ /* Producer handle */
+ rk,
+ /* Topic name */
+ RD_KAFKA_V_TOPIC(topic),
+ /* Make a copy of the payload. */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ /* Message value and length */
+ RD_KAFKA_V_VALUE(buf, len),
+ /* Per-Message opaque, provided in
+ * delivery report callback as
+ * msg_opaque. */
+ RD_KAFKA_V_OPAQUE(NULL),
+ /* End sentinel */
+ RD_KAFKA_V_END);
+
+ if (err) {
+ /*
+ * Failed to *enqueue* message for producing.
+ */
+ fprintf(stderr,
+ "%% Failed to produce to topic %s: %s\n", topic,
+ rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If the internal queue is full, wait for
+ * messages to be delivered and then retry.
+ * The internal queue represents both
+ * messages to be sent and messages that have
+ * been sent or failed, awaiting their
+ * delivery report callback to be called.
+ *
+ * The internal queue is limited by the
+ * configuration property
+ * queue.buffering.max.messages and
+ * queue.buffering.max.kbytes */
+ rd_kafka_poll(rk,
+ 1000 /*block for max 1000ms*/);
+ goto retry;
+ }
+ } else {
+ fprintf(stderr,
+ "%% Enqueued message (%zd bytes) "
+ "for topic %s\n",
+ len, topic);
+ }
+
+
+ /* A producer application should continually serve
+ * the delivery report queue by calling rd_kafka_poll()
+ * at frequent intervals.
+ * Either put the poll call in your main loop, or in a
+ * dedicated thread, or call it after every
+ * rd_kafka_produce() call.
+ * Just make sure that rd_kafka_poll() is still called
+ * during periods where you are not producing any messages
+ * to make sure previously produced messages have their
+ * delivery report callback served (and any other callbacks
+ * you register). */
+ rd_kafka_poll(rk, 0 /*non-blocking*/);
+ }
+
+
+ /* Wait for final messages to be delivered or fail.
+ * rd_kafka_flush() is an abstraction over rd_kafka_poll() which
+ * waits for all messages to be delivered. */
+ fprintf(stderr, "%% Flushing final messages..\n");
+ rd_kafka_flush(rk, 10 * 1000 /* wait for max 10 seconds */);
+
+ /* If the output queue is still not empty there is an issue
+ * with producing messages to the clusters. */
+ if (rd_kafka_outq_len(rk) > 0)
+ fprintf(stderr, "%% %d message(s) were not delivered\n",
+ rd_kafka_outq_len(rk));
+
+ /* Destroy the producer instance */
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp
new file mode 100755
index 000000000..d4a8a0c49
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/producer.cpp
@@ -0,0 +1,228 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka producer
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#if _AIX
+#include <unistd.h>
+#endif
+
+/*
+ * Typical include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &message) {
+ /* If message.err() is non-zero the message delivery failed permanently
+ * for the message. */
+ if (message.err())
+ std::cerr << "% Message delivery failed: " << message.errstr()
+ << std::endl;
+ else
+ std::cerr << "% Message delivered to topic " << message.topic_name()
+ << " [" << message.partition() << "] at offset "
+ << message.offset() << std::endl;
+ }
+};
+
+int main(int argc, char **argv) {
+ if (argc != 3) {
+ std::cerr << "Usage: " << argv[0] << " <brokers> <topic>\n";
+ exit(1);
+ }
+
+ std::string brokers = argv[1];
+ std::string topic = argv[2];
+
+ /*
+ * Create configuration object
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ std::string errstr;
+
+ /* Set bootstrap broker(s) as a comma-separated list of
+ * host or host:port (default port 9092).
+ * librdkafka will use the bootstrap brokers to acquire the full
+ * set of brokers from the cluster. */
+ if (conf->set("bootstrap.servers", brokers, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+ /* Set the delivery report callback.
+ * This callback will be called once per message to inform
+ * the application if delivery succeeded or failed.
+ * See dr_msg_cb() above.
+ * The callback is only triggered from ::poll() and ::flush().
+ *
+ * IMPORTANT:
+ * Make sure the DeliveryReport instance outlives the Producer object,
+ * either by putting it on the heap or as in this case as a stack variable
+ * that will NOT go out of scope for the duration of the Producer object.
+ */
+ ExampleDeliveryReportCb ex_dr_cb;
+
+ if (conf->set("dr_cb", &ex_dr_cb, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Create producer instance.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ delete conf;
+
+ /*
+ * Read messages from stdin and produce to broker.
+ */
+ std::cout << "% Type message value and hit enter "
+ << "to produce message." << std::endl;
+
+ for (std::string line; run && std::getline(std::cin, line);) {
+ if (line.empty()) {
+ producer->poll(0);
+ continue;
+ }
+
+ /*
+ * Send/Produce message.
+ * This is an asynchronous call, on success it will only
+ * enqueue the message on the internal producer queue.
+ * The actual delivery attempts to the broker are handled
+ * by background threads.
+ * The previously registered delivery report callback
+ * is used to signal back to the application when the message
+ * has been delivered (or failed permanently after retries).
+ */
+ retry:
+ RdKafka::ErrorCode err = producer->produce(
+ /* Topic name */
+ topic,
+ /* Any Partition: the builtin partitioner will be
+ * used to assign the message to a topic based
+ * on the message key, or random partition if
+ * the key is not set. */
+ RdKafka::Topic::PARTITION_UA,
+ /* Make a copy of the value */
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ /* Value */
+ const_cast<char *>(line.c_str()), line.size(),
+ /* Key */
+ NULL, 0,
+ /* Timestamp (defaults to current time) */
+ 0,
+ /* Message headers, if any */
+ NULL,
+ /* Per-message opaque value passed to
+ * delivery report */
+ NULL);
+
+ if (err != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "% Failed to produce to topic " << topic << ": "
+ << RdKafka::err2str(err) << std::endl;
+
+ if (err == RdKafka::ERR__QUEUE_FULL) {
+ /* If the internal queue is full, wait for
+ * messages to be delivered and then retry.
+ * The internal queue represents both
+ * messages to be sent and messages that have
+ * been sent or failed, awaiting their
+ * delivery report callback to be called.
+ *
+ * The internal queue is limited by the
+ * configuration property
+ * queue.buffering.max.messages and queue.buffering.max.kbytes */
+ producer->poll(1000 /*block for max 1000ms*/);
+ goto retry;
+ }
+
+ } else {
+ std::cerr << "% Enqueued message (" << line.size() << " bytes) "
+ << "for topic " << topic << std::endl;
+ }
+
+ /* A producer application should continually serve
+ * the delivery report queue by calling poll()
+ * at frequent intervals.
+ * Either put the poll call in your main loop, or in a
+ * dedicated thread, or call it after every produce() call.
+ * Just make sure that poll() is still called
+ * during periods where you are not producing any messages
+ * to make sure previously produced messages have their
+ * delivery report callback served (and any other callbacks
+ * you register). */
+ producer->poll(0);
+ }
+
+ /* Wait for final messages to be delivered or fail.
+ * flush() is an abstraction over poll() which
+ * waits for all messages to be delivered. */
+ std::cerr << "% Flushing final messages..." << std::endl;
+ producer->flush(10 * 1000 /* wait for max 10 seconds */);
+
+ if (producer->outq_len() > 0)
+ std::cerr << "% " << producer->outq_len()
+ << " message(s) were not delivered" << std::endl;
+
+ delete producer;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c
new file mode 100644
index 000000000..1632b3030
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.c
@@ -0,0 +1,617 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka high level consumer example program
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <sys/time.h>
+#include <errno.h>
+#include <getopt.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static volatile sig_atomic_t run = 1;
+static rd_kafka_t *rk;
+static int exit_eof = 0;
+static int wait_eof = 0; /* number of partitions awaiting EOF */
+static int quiet = 0;
+static enum {
+ OUTPUT_HEXDUMP,
+ OUTPUT_RAW,
+} output = OUTPUT_HEXDUMP;
+
+static void stop(int sig) {
+ if (!run)
+ exit(1);
+ run = 0;
+ fclose(stdin); /* abort fgets() */
+}
+
+
+static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) {
+ const char *p = (const char *)ptr;
+ unsigned int of = 0;
+
+
+ if (name)
+ fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
+
+ for (of = 0; of < len; of += 16) {
+ char hexen[16 * 3 + 1];
+ char charen[16 + 1];
+ int hof = 0;
+
+ int cof = 0;
+ int i;
+
+ for (i = of; i < (int)of + 16 && i < (int)len; i++) {
+ hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff);
+ cof += sprintf(charen + cof, "%c",
+ isprint((int)p[i]) ? p[i] : '.');
+ }
+ fprintf(fp, "%08x: %-48s %-16s\n", of, hexen, charen);
+ }
+}
+
+/**
+ * Kafka logger callback (optional)
+ */
+static void
+logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ fprintf(stdout, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec,
+ (int)(tv.tv_usec / 1000), level, fac, rd_kafka_name(rk), buf);
+}
+
+
+
+/**
+ * Handle and print a consumed message.
+ * Internally crafted messages are also used to propagate state from
+ * librdkafka to the application. The application needs to check
+ * the `rkmessage->err` field for this purpose.
+ */
+static void msg_consume(rd_kafka_message_t *rkmessage) {
+ if (rkmessage->err) {
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ fprintf(stderr,
+ "%% Consumer reached end of %s [%" PRId32
+ "] "
+ "message queue at offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+
+ if (exit_eof && --wait_eof == 0) {
+ fprintf(stderr,
+ "%% All partition(s) reached EOF: "
+ "exiting\n");
+ run = 0;
+ }
+
+ return;
+ }
+
+ if (rkmessage->rkt)
+ fprintf(stderr,
+ "%% Consume error for "
+ "topic \"%s\" [%" PRId32
+ "] "
+ "offset %" PRId64 ": %s\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+ else
+ fprintf(stderr, "%% Consumer error: %s: %s\n",
+ rd_kafka_err2str(rkmessage->err),
+ rd_kafka_message_errstr(rkmessage));
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+ rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ run = 0;
+ return;
+ }
+
+ if (!quiet)
+ fprintf(stdout,
+ "%% Message (topic %s [%" PRId32
+ "], "
+ "offset %" PRId64 ", %zd bytes):\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rkmessage->len);
+
+ if (rkmessage->key_len) {
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Key", rkmessage->key,
+ rkmessage->key_len);
+ else
+ printf("Key: %.*s\n", (int)rkmessage->key_len,
+ (char *)rkmessage->key);
+ }
+
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Payload", rkmessage->payload,
+ rkmessage->len);
+ else
+ printf("%.*s\n", (int)rkmessage->len,
+ (char *)rkmessage->payload);
+}
+
+
+static void
+print_partition_list(FILE *fp,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ int i;
+ for (i = 0; i < partitions->cnt; i++) {
+ fprintf(fp, "%s %s [%" PRId32 "] offset %" PRId64,
+ i > 0 ? "," : "", partitions->elems[i].topic,
+ partitions->elems[i].partition,
+ partitions->elems[i].offset);
+ }
+ fprintf(fp, "\n");
+}
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ rd_kafka_error_t *error = NULL;
+ rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ fprintf(stderr, "%% Consumer group rebalanced: ");
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ fprintf(stderr, "assigned (%s):\n",
+ rd_kafka_rebalance_protocol(rk));
+ print_partition_list(stderr, partitions);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ error = rd_kafka_incremental_assign(rk, partitions);
+ else
+ ret_err = rd_kafka_assign(rk, partitions);
+ wait_eof += partitions->cnt;
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ fprintf(stderr, "revoked (%s):\n",
+ rd_kafka_rebalance_protocol(rk));
+ print_partition_list(stderr, partitions);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
+ error = rd_kafka_incremental_unassign(rk, partitions);
+ wait_eof -= partitions->cnt;
+ } else {
+ ret_err = rd_kafka_assign(rk, NULL);
+ wait_eof = 0;
+ }
+ break;
+
+ default:
+ fprintf(stderr, "failed: %s\n", rd_kafka_err2str(err));
+ rd_kafka_assign(rk, NULL);
+ break;
+ }
+
+ if (error) {
+ fprintf(stderr, "incremental assign failure: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ } else if (ret_err) {
+ fprintf(stderr, "assign failure: %s\n",
+ rd_kafka_err2str(ret_err));
+ }
+}
+
+
+static int describe_groups(rd_kafka_t *rk, const char *group) {
+ rd_kafka_resp_err_t err;
+ const struct rd_kafka_group_list *grplist;
+ int i;
+
+ err = rd_kafka_list_groups(rk, group, &grplist, 10000);
+
+ if (err) {
+ fprintf(stderr, "%% Failed to acquire group list: %s\n",
+ rd_kafka_err2str(err));
+ return -1;
+ }
+
+ for (i = 0; i < grplist->group_cnt; i++) {
+ const struct rd_kafka_group_info *gi = &grplist->groups[i];
+ int j;
+
+ printf("Group \"%s\" in state %s on broker %d (%s:%d)\n",
+ gi->group, gi->state, gi->broker.id, gi->broker.host,
+ gi->broker.port);
+ if (gi->err)
+ printf(" Error: %s\n", rd_kafka_err2str(gi->err));
+ printf(
+ " Protocol type \"%s\", protocol \"%s\", "
+ "with %d member(s):\n",
+ gi->protocol_type, gi->protocol, gi->member_cnt);
+
+ for (j = 0; j < gi->member_cnt; j++) {
+ const struct rd_kafka_group_member_info *mi;
+ mi = &gi->members[j];
+
+ printf(" \"%s\", client id \"%s\" on host %s\n",
+ mi->member_id, mi->client_id, mi->client_host);
+ printf(" metadata: %d bytes\n",
+ mi->member_metadata_size);
+ printf(" assignment: %d bytes\n",
+ mi->member_assignment_size);
+ }
+ printf("\n");
+ }
+
+ if (group && !grplist->group_cnt)
+ fprintf(stderr, "%% No matching group (%s)\n", group);
+
+ rd_kafka_group_list_destroy(grplist);
+
+ return 0;
+}
+
+
+
+static void sig_usr1(int sig) {
+ rd_kafka_dump(stdout, rk);
+}
+
+int main(int argc, char **argv) {
+ char mode = 'C';
+ char *brokers = "localhost:9092";
+ int opt;
+ rd_kafka_conf_t *conf;
+ char errstr[512];
+ const char *debug = NULL;
+ int do_conf_dump = 0;
+ char tmp[16];
+ rd_kafka_resp_err_t err;
+ char *group = NULL;
+ rd_kafka_topic_partition_list_t *topics;
+ int is_subscription;
+ int i;
+
+ quiet = !isatty(STDIN_FILENO);
+
+ /* Kafka configuration */
+ conf = rd_kafka_conf_new();
+
+ /* Set logger */
+ rd_kafka_conf_set_log_cb(conf, logger);
+
+ /* Quick termination */
+ snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+ rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+
+ while ((opt = getopt(argc, argv, "g:b:qd:eX:ADO")) != -1) {
+ switch (opt) {
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'g':
+ group = optarg;
+ break;
+ case 'e':
+ exit_eof = 1;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'A':
+ output = OUTPUT_RAW;
+ break;
+ case 'X': {
+ char *name, *val;
+ rd_kafka_conf_res_t res;
+
+ if (!strcmp(optarg, "list") ||
+ !strcmp(optarg, "help")) {
+ rd_kafka_conf_properties_show(stdout);
+ exit(0);
+ }
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = 1;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ fprintf(stderr,
+ "%% Expected "
+ "-X property=value, not %s\n",
+ name);
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ res = rd_kafka_conf_set(conf, name, val, errstr,
+ sizeof(errstr));
+
+ if (res != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ } break;
+
+ case 'D':
+ case 'O':
+ mode = opt;
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+
+ if (do_conf_dump) {
+ const char **arr;
+ size_t cnt;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ if (pass == 0) {
+ arr = rd_kafka_conf_dump(conf, &cnt);
+ printf("# Global config\n");
+ } else {
+ rd_kafka_topic_conf_t *topic_conf =
+ rd_kafka_conf_get_default_topic_conf(conf);
+ if (topic_conf) {
+ printf("# Topic config\n");
+ arr = rd_kafka_topic_conf_dump(
+ topic_conf, &cnt);
+ } else {
+ arr = NULL;
+ }
+ }
+
+ if (!arr)
+ continue;
+
+ for (i = 0; i < (int)cnt; i += 2)
+ printf("%s = %s\n", arr[i], arr[i + 1]);
+
+ printf("\n");
+ rd_kafka_conf_dump_free(arr, cnt);
+ }
+
+ exit(0);
+ }
+
+
+ if (strchr("OC", mode) && optind == argc) {
+ usage:
+ fprintf(stderr,
+ "Usage: %s [options] <topic[:part]> <topic[:part]>..\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -g <group> Consumer group (%s)\n"
+ " -b <brokers> Broker address (%s)\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -D Describe group.\n"
+ " -O Get commmitted offset(s)\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -q Be quiet\n"
+ " -A Raw payload output (consumer)\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Use '-X list' to see the full list\n"
+ " of supported properties.\n"
+ "\n"
+ "For balanced consumer groups use the 'topic1 topic2..'"
+ " format\n"
+ "and for static assignment use "
+ "'topic1:part1 topic1:part2 topic2:part1..'\n"
+ "\n",
+ argv[0], rd_kafka_version_str(), rd_kafka_version(),
+ group, brokers, RD_KAFKA_DEBUG_CONTEXTS);
+ exit(1);
+ }
+
+
+ signal(SIGINT, stop);
+ signal(SIGUSR1, sig_usr1);
+
+ if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% Debug configuration failed: %s: %s\n",
+ errstr, debug);
+ exit(1);
+ }
+
+ /*
+ * Client/Consumer group
+ */
+
+ if (strchr("CO", mode)) {
+ /* Consumer groups require a group id */
+ if (!group)
+ group = "rdkafka_consumer_example";
+ if (rd_kafka_conf_set(conf, "group.id", group, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ /* Callback called on partition assignment changes */
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+
+ rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL,
+ 0);
+ }
+
+ /* Set bootstrap servers */
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr, "%% Failed to create new consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ if (mode == 'D') {
+ int r;
+ /* Describe groups */
+ r = describe_groups(rk, group);
+
+ rd_kafka_destroy(rk);
+ exit(r == -1 ? 1 : 0);
+ }
+
+ /* Redirect rd_kafka_poll() to consumer_poll() */
+ rd_kafka_poll_set_consumer(rk);
+
+ topics = rd_kafka_topic_partition_list_new(argc - optind);
+ is_subscription = 1;
+ for (i = optind; i < argc; i++) {
+ /* Parse "topic[:part] */
+ char *topic = argv[i];
+ char *t;
+ int32_t partition = -1;
+
+ if ((t = strstr(topic, ":"))) {
+ *t = '\0';
+ partition = atoi(t + 1);
+ is_subscription = 0; /* is assignment */
+ wait_eof++;
+ }
+
+ rd_kafka_topic_partition_list_add(topics, topic, partition);
+ }
+
+ if (mode == 'O') {
+ /* Offset query */
+
+ err = rd_kafka_committed(rk, topics, 5000);
+ if (err) {
+ fprintf(stderr, "%% Failed to fetch offsets: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+
+ for (i = 0; i < topics->cnt; i++) {
+ rd_kafka_topic_partition_t *p = &topics->elems[i];
+ printf("Topic \"%s\" partition %" PRId32, p->topic,
+ p->partition);
+ if (p->err)
+ printf(" error %s", rd_kafka_err2str(p->err));
+ else {
+ printf(" offset %" PRId64 "", p->offset);
+
+ if (p->metadata_size)
+ printf(" (%d bytes of metadata)",
+ (int)p->metadata_size);
+ }
+ printf("\n");
+ }
+
+ goto done;
+ }
+
+
+ if (is_subscription) {
+ fprintf(stderr, "%% Subscribing to %d topics\n", topics->cnt);
+
+ if ((err = rd_kafka_subscribe(rk, topics))) {
+ fprintf(stderr,
+ "%% Failed to start consuming topics: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+ } else {
+ fprintf(stderr, "%% Assigning %d partitions\n", topics->cnt);
+
+ if ((err = rd_kafka_assign(rk, topics))) {
+ fprintf(stderr, "%% Failed to assign partitions: %s\n",
+ rd_kafka_err2str(err));
+ }
+ }
+
+ while (run) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consumer_poll(rk, 1000);
+ if (rkmessage) {
+ msg_consume(rkmessage);
+ rd_kafka_message_destroy(rkmessage);
+ }
+ }
+
+done:
+ err = rd_kafka_consumer_close(rk);
+ if (err)
+ fprintf(stderr, "%% Failed to close consumer: %s\n",
+ rd_kafka_err2str(err));
+ else
+ fprintf(stderr, "%% Consumer closed\n");
+
+ rd_kafka_topic_partition_list_destroy(topics);
+
+ /* Destroy handle */
+ rd_kafka_destroy(rk);
+
+ /* Let background threads clean up and terminate cleanly. */
+ run = 5;
+ while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
+ printf("Waiting for librdkafka to decommission\n");
+ if (run <= 0)
+ rd_kafka_dump(stdout, rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp
new file mode 100644
index 000000000..b4f158cbd
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_complex_consumer_example.cpp
@@ -0,0 +1,467 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifndef _WIN32
+#include <sys/time.h>
+#else
+#include <windows.h> /* for GetLocalTime */
+#endif
+
+#ifdef _MSC_VER
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+static volatile sig_atomic_t run = 1;
+static bool exit_eof = false;
+static int eof_cnt = 0;
+static int partition_cnt = 0;
+static int verbosity = 1;
+static long msg_cnt = 0;
+static int64_t msg_bytes = 0;
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+/**
+ * @brief format a string timestamp from the current time
+ */
+static void print_time() {
+#ifndef _WIN32
+ struct timeval tv;
+ char buf[64];
+ gettimeofday(&tv, NULL);
+ strftime(buf, sizeof(buf) - 1, "%Y-%m-%d %H:%M:%S", localtime(&tv.tv_sec));
+ fprintf(stderr, "%s.%03d: ", buf, (int)(tv.tv_usec / 1000));
+#else
+ SYSTEMTIME lt = {0};
+ GetLocalTime(&lt);
+ // %Y-%m-%d %H:%M:%S.xxx:
+ fprintf(stderr, "%04d-%02d-%02d %02d:%02d:%02d.%03d: ", lt.wYear, lt.wMonth,
+ lt.wDay, lt.wHour, lt.wMinute, lt.wSecond, lt.wMilliseconds);
+#endif
+}
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+ void event_cb(RdKafka::Event &event) {
+ print_time();
+
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ if (event.fatal()) {
+ std::cerr << "FATAL ";
+ run = 0;
+ }
+ std::cerr << "ERROR (" << RdKafka::err2str(event.err())
+ << "): " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_STATS:
+ std::cerr << "\"STATS\": " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(),
+ event.str().c_str());
+ break;
+
+ case RdKafka::Event::EVENT_THROTTLE:
+ std::cerr << "THROTTLED: " << event.throttle_time() << "ms by "
+ << event.broker_name() << " id " << (int)event.broker_id()
+ << std::endl;
+ break;
+
+ default:
+ std::cerr << "EVENT " << event.type() << " ("
+ << RdKafka::err2str(event.err()) << "): " << event.str()
+ << std::endl;
+ break;
+ }
+ }
+};
+
+
+class ExampleRebalanceCb : public RdKafka::RebalanceCb {
+ private:
+ static void part_list_print(
+ const std::vector<RdKafka::TopicPartition *> &partitions) {
+ for (unsigned int i = 0; i < partitions.size(); i++)
+ std::cerr << partitions[i]->topic() << "[" << partitions[i]->partition()
+ << "], ";
+ std::cerr << "\n";
+ }
+
+ public:
+ void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ std::cerr << "RebalanceCb: " << RdKafka::err2str(err) << ": ";
+
+ part_list_print(partitions);
+
+ RdKafka::Error *error = NULL;
+ RdKafka::ErrorCode ret_err = RdKafka::ERR_NO_ERROR;
+
+ if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+ if (consumer->rebalance_protocol() == "COOPERATIVE")
+ error = consumer->incremental_assign(partitions);
+ else
+ ret_err = consumer->assign(partitions);
+ partition_cnt += (int)partitions.size();
+ } else {
+ if (consumer->rebalance_protocol() == "COOPERATIVE") {
+ error = consumer->incremental_unassign(partitions);
+ partition_cnt -= (int)partitions.size();
+ } else {
+ ret_err = consumer->unassign();
+ partition_cnt = 0;
+ }
+ }
+ eof_cnt = 0; /* FIXME: Won't work with COOPERATIVE */
+
+ if (error) {
+ std::cerr << "incremental assign failed: " << error->str() << "\n";
+ delete error;
+ } else if (ret_err)
+ std::cerr << "assign failed: " << RdKafka::err2str(ret_err) << "\n";
+ }
+};
+
+
+void msg_consume(RdKafka::Message *message, void *opaque) {
+ switch (message->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ /* Real message */
+ msg_cnt++;
+ msg_bytes += message->len();
+ if (verbosity >= 3)
+ std::cerr << "Read msg at offset " << message->offset() << std::endl;
+ RdKafka::MessageTimestamp ts;
+ ts = message->timestamp();
+ if (verbosity >= 2 &&
+ ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_NOT_AVAILABLE) {
+ std::string tsname = "?";
+ if (ts.type == RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
+ tsname = "create time";
+ else if (ts.type ==
+ RdKafka::MessageTimestamp::MSG_TIMESTAMP_LOG_APPEND_TIME)
+ tsname = "log append time";
+ std::cout << "Timestamp: " << tsname << " " << ts.timestamp << std::endl;
+ }
+ if (verbosity >= 2 && message->key()) {
+ std::cout << "Key: " << *message->key() << std::endl;
+ }
+ if (verbosity >= 1) {
+ printf("%.*s\n", static_cast<int>(message->len()),
+ static_cast<const char *>(message->payload()));
+ }
+ break;
+
+ case RdKafka::ERR__PARTITION_EOF:
+ /* Last message */
+ if (exit_eof && ++eof_cnt == partition_cnt) {
+ std::cerr << "%% EOF reached for all " << partition_cnt << " partition(s)"
+ << std::endl;
+ run = 0;
+ }
+ break;
+
+ case RdKafka::ERR__UNKNOWN_TOPIC:
+ case RdKafka::ERR__UNKNOWN_PARTITION:
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ break;
+
+ default:
+ /* Errors */
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ }
+}
+
+int main(int argc, char **argv) {
+ std::string brokers = "localhost";
+ std::string errstr;
+ std::string topic_str;
+ std::string mode;
+ std::string debug;
+ std::vector<std::string> topics;
+ bool do_conf_dump = false;
+ int opt;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ ExampleRebalanceCb ex_rebalance_cb;
+ conf->set("rebalance_cb", &ex_rebalance_cb, errstr);
+
+ conf->set("enable.partition.eof", "true", errstr);
+
+ while ((opt = getopt(argc, argv, "g:b:z:qd:eX:AM:qv")) != -1) {
+ switch (opt) {
+ case 'g':
+ if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'z':
+ if (conf->set("compression.codec", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'e':
+ exit_eof = true;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'M':
+ if (conf->set("statistics.interval.ms", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = true;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ RdKafka::Conf::ConfResult res = conf->set(name, val, errstr);
+ if (res != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ case 'q':
+ verbosity--;
+ break;
+
+ case 'v':
+ verbosity++;
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ for (; optind < argc; optind++)
+ topics.push_back(std::string(argv[optind]));
+
+ if (topics.empty() || optind != argc) {
+ usage:
+ fprintf(stderr,
+ "Usage: %s -g <group-id> [options] topic1 topic2..\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -g <group-id> Consumer group id\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -M <intervalms> Enable statistics\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Use '-X list' to see the full list\n"
+ " of supported properties.\n"
+ " -q Quiet / Decrease verbosity\n"
+ " -v Increase verbosity\n"
+ "\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+ if (exit_eof) {
+ std::string strategy;
+ if (conf->get("partition.assignment.strategy", strategy) ==
+ RdKafka::Conf::CONF_OK &&
+ strategy == "cooperative-sticky") {
+ std::cerr
+ << "Error: this example has not been modified to "
+ << "support -e (exit on EOF) when the partition.assignment.strategy "
+ << "is set to " << strategy << ": remove -e from the command line\n";
+ exit(1);
+ }
+ }
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("metadata.broker.list", brokers, errstr);
+
+ if (!debug.empty()) {
+ if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ }
+
+ ExampleEventCb ex_event_cb;
+ conf->set("event_cb", &ex_event_cb, errstr);
+
+ if (do_conf_dump) {
+ std::list<std::string> *dump;
+ dump = conf->dump();
+ std::cout << "# Global config" << std::endl;
+
+ for (std::list<std::string>::iterator it = dump->begin();
+ it != dump->end();) {
+ std::cout << *it << " = ";
+ it++;
+ std::cout << *it << std::endl;
+ it++;
+ }
+ std::cout << std::endl;
+
+ exit(0);
+ }
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+
+ /*
+ * Consumer mode
+ */
+
+ /*
+ * Create consumer using accumulated global configuration.
+ */
+ RdKafka::KafkaConsumer *consumer =
+ RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << "Failed to create consumer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ delete conf;
+
+ std::cout << "% Created consumer " << consumer->name() << std::endl;
+
+
+ /*
+ * Subscribe to topics
+ */
+ RdKafka::ErrorCode err = consumer->subscribe(topics);
+ if (err) {
+ std::cerr << "Failed to subscribe to " << topics.size()
+ << " topics: " << RdKafka::err2str(err) << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Consume messages
+ */
+ while (run) {
+ RdKafka::Message *msg = consumer->consume(1000);
+ msg_consume(msg, NULL);
+ delete msg;
+ }
+
+#ifndef _WIN32
+ alarm(10);
+#endif
+
+ /*
+ * Stop consumer
+ */
+ consumer->close();
+ delete consumer;
+
+ std::cerr << "% Consumed " << msg_cnt << " messages (" << msg_bytes
+ << " bytes)" << std::endl;
+
+ /*
+ * Wait for RdKafka to decommission.
+ * This is not strictly needed (with check outq_len() above), but
+ * allows RdKafka to clean up all its resources before the application
+ * exits so that memory profilers such as valgrind wont complain about
+ * memory leaks.
+ */
+ RdKafka::wait_destroyed(5000);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp
new file mode 100644
index 000000000..576b396f8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_consume_batch.cpp
@@ -0,0 +1,264 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ *
+ * This example shows how to read batches of messages.
+ * Note that messages are fetched from the broker in batches regardless
+ * of how the application polls messages from librdkafka, this example
+ * merely shows how to accumulate a set of messages in the application.
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifndef _WIN32
+#include <sys/time.h>
+#endif
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#include <atltime.h>
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#include <unistd.h>
+#endif
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+static volatile sig_atomic_t run = 1;
+
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+
+/**
+ * @returns the current wall-clock time in milliseconds
+ */
+static int64_t now() {
+#ifndef _WIN32
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((int64_t)tv.tv_sec * 1000) + (tv.tv_usec / 1000);
+#else
+#error "now() not implemented for Windows, please submit a PR"
+#endif
+}
+
+
+
+/**
+ * @brief Accumulate a batch of \p batch_size messages, but wait
+ * no longer than \p batch_tmout milliseconds.
+ */
+static std::vector<RdKafka::Message *> consume_batch(
+ RdKafka::KafkaConsumer *consumer,
+ size_t batch_size,
+ int batch_tmout) {
+ std::vector<RdKafka::Message *> msgs;
+ msgs.reserve(batch_size);
+
+ int64_t end = now() + batch_tmout;
+ int remaining_timeout = batch_tmout;
+
+ while (msgs.size() < batch_size) {
+ RdKafka::Message *msg = consumer->consume(remaining_timeout);
+
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ delete msg;
+ return msgs;
+
+ case RdKafka::ERR_NO_ERROR:
+ msgs.push_back(msg);
+ break;
+
+ default:
+ std::cerr << "%% Consumer error: " << msg->errstr() << std::endl;
+ run = 0;
+ delete msg;
+ return msgs;
+ }
+
+ remaining_timeout = end - now();
+ if (remaining_timeout < 0)
+ break;
+ }
+
+ return msgs;
+}
+
+
+int main(int argc, char **argv) {
+ std::string errstr;
+ std::string topic_str;
+ std::vector<std::string> topics;
+ int batch_size = 100;
+ int batch_tmout = 1000;
+
+ /* Create configuration objects */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ if (conf->set("enable.partition.eof", "false", errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /* Read command line arguments */
+ int opt;
+ while ((opt = getopt(argc, argv, "g:B:T:b:X:")) != -1) {
+ switch (opt) {
+ case 'g':
+ if (conf->set("group.id", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+
+ case 'B':
+ batch_size = atoi(optarg);
+ break;
+
+ case 'T':
+ batch_tmout = atoi(optarg);
+ break;
+
+ case 'b':
+ if (conf->set("bootstrap.servers", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+
+ case 'X': {
+ char *name, *val;
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ /* Topics to consume */
+ for (; optind < argc; optind++)
+ topics.push_back(std::string(argv[optind]));
+
+ if (topics.empty() || optind != argc) {
+ usage:
+ fprintf(
+ stderr,
+ "Usage: %s -g <group-id> -B <batch-size> [options] topic1 topic2..\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -g <group-id> Consumer group id\n"
+ " -B <batch-size> How many messages to batch (default: 100).\n"
+ " -T <batch-tmout> How long to wait for batch-size to accumulate in "
+ "milliseconds. (default 1000 ms)\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+ " -X <prop=name> Set arbitrary librdkafka configuration property\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version());
+ exit(1);
+ }
+
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+ /* Create consumer */
+ RdKafka::KafkaConsumer *consumer =
+ RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << "Failed to create consumer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ delete conf;
+
+ /* Subscribe to topics */
+ RdKafka::ErrorCode err = consumer->subscribe(topics);
+ if (err) {
+ std::cerr << "Failed to subscribe to " << topics.size()
+ << " topics: " << RdKafka::err2str(err) << std::endl;
+ exit(1);
+ }
+
+ /* Consume messages in batches of \p batch_size */
+ while (run) {
+ auto msgs = consume_batch(consumer, batch_size, batch_tmout);
+ std::cout << "Accumulated " << msgs.size() << " messages:" << std::endl;
+
+ for (auto &msg : msgs) {
+ std::cout << " Message in " << msg->topic_name() << " ["
+ << msg->partition() << "] at offset " << msg->offset()
+ << std::endl;
+ delete msg;
+ }
+ }
+
+ /* Close and destroy consumer */
+ consumer->close();
+ delete consumer;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c
new file mode 100644
index 000000000..91415318a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.c
@@ -0,0 +1,853 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <syslog.h>
+#include <time.h>
+#include <sys/time.h>
+#include <getopt.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static volatile sig_atomic_t run = 1;
+static rd_kafka_t *rk;
+static int exit_eof = 0;
+static int quiet = 0;
+static enum {
+ OUTPUT_HEXDUMP,
+ OUTPUT_RAW,
+} output = OUTPUT_HEXDUMP;
+
+static void stop(int sig) {
+ run = 0;
+ fclose(stdin); /* abort fgets() */
+}
+
+
+static void hexdump(FILE *fp, const char *name, const void *ptr, size_t len) {
+ const char *p = (const char *)ptr;
+ size_t of = 0;
+
+
+ if (name)
+ fprintf(fp, "%s hexdump (%zd bytes):\n", name, len);
+
+ for (of = 0; of < len; of += 16) {
+ char hexen[16 * 3 + 1];
+ char charen[16 + 1];
+ int hof = 0;
+
+ int cof = 0;
+ int i;
+
+ for (i = of; i < (int)of + 16 && i < (int)len; i++) {
+ hof += sprintf(hexen + hof, "%02x ", p[i] & 0xff);
+ cof += sprintf(charen + cof, "%c",
+ isprint((int)p[i]) ? p[i] : '.');
+ }
+ fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen);
+ }
+}
+
+/**
+ * Kafka logger callback (optional)
+ */
+static void
+logger(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ fprintf(stderr, "%u.%03u RDKAFKA-%i-%s: %s: %s\n", (int)tv.tv_sec,
+ (int)(tv.tv_usec / 1000), level, fac,
+ rk ? rd_kafka_name(rk) : NULL, buf);
+}
+
+
+/**
+ * Message delivery report callback using the richer rd_kafka_message_t object.
+ */
+static void msg_delivered(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr,
+ "%% Message delivery failed (broker %" PRId32 "): %s\n",
+ rd_kafka_message_broker_id(rkmessage),
+ rd_kafka_err2str(rkmessage->err));
+ else if (!quiet)
+ fprintf(stderr,
+ "%% Message delivered (%zd bytes, offset %" PRId64
+ ", "
+ "partition %" PRId32 ", broker %" PRId32 "): %.*s\n",
+ rkmessage->len, rkmessage->offset, rkmessage->partition,
+ rd_kafka_message_broker_id(rkmessage),
+ (int)rkmessage->len, (const char *)rkmessage->payload);
+}
+
+
+static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err) {
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ fprintf(stderr,
+ "%% Consumer reached end of %s [%" PRId32
+ "] "
+ "message queue at offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+
+ if (exit_eof)
+ run = 0;
+
+ return;
+ }
+
+ fprintf(stderr,
+ "%% Consume error for topic \"%s\" [%" PRId32
+ "] "
+ "offset %" PRId64 ": %s\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+ rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ run = 0;
+ return;
+ }
+
+ if (!quiet) {
+ rd_kafka_timestamp_type_t tstype;
+ int64_t timestamp;
+ rd_kafka_headers_t *hdrs;
+
+ fprintf(stdout,
+ "%% Message (offset %" PRId64
+ ", %zd bytes, "
+ "broker %" PRId32 "):\n",
+ rkmessage->offset, rkmessage->len,
+ rd_kafka_message_broker_id(rkmessage));
+
+ timestamp = rd_kafka_message_timestamp(rkmessage, &tstype);
+ if (tstype != RD_KAFKA_TIMESTAMP_NOT_AVAILABLE) {
+ const char *tsname = "?";
+ if (tstype == RD_KAFKA_TIMESTAMP_CREATE_TIME)
+ tsname = "create time";
+ else if (tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME)
+ tsname = "log append time";
+
+ fprintf(stdout,
+ "%% Message timestamp: %s %" PRId64
+ " (%ds ago)\n",
+ tsname, timestamp,
+ !timestamp ? 0
+ : (int)time(NULL) -
+ (int)(timestamp / 1000));
+ }
+
+ if (!rd_kafka_message_headers(rkmessage, &hdrs)) {
+ size_t idx = 0;
+ const char *name;
+ const void *val;
+ size_t size;
+
+ fprintf(stdout, "%% Headers:");
+
+ while (!rd_kafka_header_get_all(hdrs, idx++, &name,
+ &val, &size)) {
+ fprintf(stdout, "%s%s=", idx == 1 ? " " : ", ",
+ name);
+ if (val)
+ fprintf(stdout, "\"%.*s\"", (int)size,
+ (const char *)val);
+ else
+ fprintf(stdout, "NULL");
+ }
+ fprintf(stdout, "\n");
+ }
+ }
+
+ if (rkmessage->key_len) {
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Key", rkmessage->key,
+ rkmessage->key_len);
+ else
+ printf("Key: %.*s\n", (int)rkmessage->key_len,
+ (char *)rkmessage->key);
+ }
+
+ if (output == OUTPUT_HEXDUMP)
+ hexdump(stdout, "Message Payload", rkmessage->payload,
+ rkmessage->len);
+ else
+ printf("%.*s\n", (int)rkmessage->len,
+ (char *)rkmessage->payload);
+}
+
+
+static void metadata_print(const char *topic,
+ const struct rd_kafka_metadata *metadata) {
+ int i, j, k;
+ int32_t controllerid;
+
+ printf("Metadata for %s (from broker %" PRId32 ": %s):\n",
+ topic ?: "all topics", metadata->orig_broker_id,
+ metadata->orig_broker_name);
+
+ controllerid = rd_kafka_controllerid(rk, 0);
+
+
+ /* Iterate brokers */
+ printf(" %i brokers:\n", metadata->broker_cnt);
+ for (i = 0; i < metadata->broker_cnt; i++)
+ printf(" broker %" PRId32 " at %s:%i%s\n",
+ metadata->brokers[i].id, metadata->brokers[i].host,
+ metadata->brokers[i].port,
+ controllerid == metadata->brokers[i].id ? " (controller)"
+ : "");
+
+ /* Iterate topics */
+ printf(" %i topics:\n", metadata->topic_cnt);
+ for (i = 0; i < metadata->topic_cnt; i++) {
+ const struct rd_kafka_metadata_topic *t = &metadata->topics[i];
+ printf(" topic \"%s\" with %i partitions:", t->topic,
+ t->partition_cnt);
+ if (t->err) {
+ printf(" %s", rd_kafka_err2str(t->err));
+ if (t->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE)
+ printf(" (try again)");
+ }
+ printf("\n");
+
+ /* Iterate topic's partitions */
+ for (j = 0; j < t->partition_cnt; j++) {
+ const struct rd_kafka_metadata_partition *p;
+ p = &t->partitions[j];
+ printf(" partition %" PRId32
+ ", "
+ "leader %" PRId32 ", replicas: ",
+ p->id, p->leader);
+
+ /* Iterate partition's replicas */
+ for (k = 0; k < p->replica_cnt; k++)
+ printf("%s%" PRId32, k > 0 ? "," : "",
+ p->replicas[k]);
+
+ /* Iterate partition's ISRs */
+ printf(", isrs: ");
+ for (k = 0; k < p->isr_cnt; k++)
+ printf("%s%" PRId32, k > 0 ? "," : "",
+ p->isrs[k]);
+ if (p->err)
+ printf(", %s\n", rd_kafka_err2str(p->err));
+ else
+ printf("\n");
+ }
+ }
+}
+
+
+static void sig_usr1(int sig) {
+ rd_kafka_dump(stdout, rk);
+}
+
+int main(int argc, char **argv) {
+ rd_kafka_topic_t *rkt;
+ char *brokers = "localhost:9092";
+ char mode = 'C';
+ char *topic = NULL;
+ int partition = RD_KAFKA_PARTITION_UA;
+ int opt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ int64_t start_offset = 0;
+ int do_conf_dump = 0;
+ char tmp[16];
+ int64_t seek_offset = 0;
+ int64_t tmp_offset = 0;
+ int get_wmarks = 0;
+ rd_kafka_headers_t *hdrs = NULL;
+ rd_kafka_resp_err_t err;
+
+ /* Kafka configuration */
+ conf = rd_kafka_conf_new();
+
+ /* Set logger */
+ rd_kafka_conf_set_log_cb(conf, logger);
+
+ /* Quick termination */
+ snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+ rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+
+ /* Topic configuration */
+ topic_conf = rd_kafka_topic_conf_new();
+
+ while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:As:H:")) != -1) {
+ switch (opt) {
+ case 'P':
+ case 'C':
+ case 'L':
+ mode = opt;
+ break;
+ case 't':
+ topic = optarg;
+ break;
+ case 'p':
+ partition = atoi(optarg);
+ break;
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'z':
+ if (rd_kafka_conf_set(conf, "compression.codec", optarg,
+ errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ break;
+ case 'o':
+ case 's':
+ if (!strcmp(optarg, "end"))
+ tmp_offset = RD_KAFKA_OFFSET_END;
+ else if (!strcmp(optarg, "beginning"))
+ tmp_offset = RD_KAFKA_OFFSET_BEGINNING;
+ else if (!strcmp(optarg, "stored"))
+ tmp_offset = RD_KAFKA_OFFSET_STORED;
+ else if (!strcmp(optarg, "wmark"))
+ get_wmarks = 1;
+ else {
+ tmp_offset = strtoll(optarg, NULL, 10);
+
+ if (tmp_offset < 0)
+ tmp_offset =
+ RD_KAFKA_OFFSET_TAIL(-tmp_offset);
+ }
+
+ if (opt == 'o')
+ start_offset = tmp_offset;
+ else if (opt == 's')
+ seek_offset = tmp_offset;
+ break;
+ case 'e':
+ exit_eof = 1;
+ break;
+ case 'd':
+ if (rd_kafka_conf_set(conf, "debug", optarg, errstr,
+ sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr,
+ "%% Debug configuration failed: "
+ "%s: %s\n",
+ errstr, optarg);
+ exit(1);
+ }
+ break;
+ case 'q':
+ quiet = 1;
+ break;
+ case 'A':
+ output = OUTPUT_RAW;
+ break;
+ case 'H': {
+ char *name, *val;
+ size_t name_sz = -1;
+
+ name = optarg;
+ val = strchr(name, '=');
+ if (val) {
+ name_sz = (size_t)(val - name);
+ val++; /* past the '=' */
+ }
+
+ if (!hdrs)
+ hdrs = rd_kafka_headers_new(8);
+
+ err = rd_kafka_header_add(hdrs, name, name_sz, val, -1);
+ if (err) {
+ fprintf(stderr,
+ "%% Failed to add header %s: %s\n",
+ name, rd_kafka_err2str(err));
+ exit(1);
+ }
+ } break;
+
+ case 'X': {
+ char *name, *val;
+ rd_kafka_conf_res_t res;
+
+ if (!strcmp(optarg, "list") ||
+ !strcmp(optarg, "help")) {
+ rd_kafka_conf_properties_show(stdout);
+ exit(0);
+ }
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = 1;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ char dest[512];
+ size_t dest_size = sizeof(dest);
+ /* Return current value for property. */
+
+ res = RD_KAFKA_CONF_UNKNOWN;
+ if (!strncmp(name, "topic.", strlen("topic.")))
+ res = rd_kafka_topic_conf_get(
+ topic_conf, name + strlen("topic."),
+ dest, &dest_size);
+ if (res == RD_KAFKA_CONF_UNKNOWN)
+ res = rd_kafka_conf_get(
+ conf, name, dest, &dest_size);
+
+ if (res == RD_KAFKA_CONF_OK) {
+ printf("%s = %s\n", name, dest);
+ exit(0);
+ } else {
+ fprintf(stderr, "%% %s property\n",
+ res == RD_KAFKA_CONF_UNKNOWN
+ ? "Unknown"
+ : "Invalid");
+ exit(1);
+ }
+ }
+
+ *val = '\0';
+ val++;
+
+ res = RD_KAFKA_CONF_UNKNOWN;
+ /* Try "topic." prefixed properties on topic
+ * conf first, and then fall through to global if
+ * it didnt match a topic configuration property. */
+ if (!strncmp(name, "topic.", strlen("topic.")))
+ res = rd_kafka_topic_conf_set(
+ topic_conf, name + strlen("topic."), val,
+ errstr, sizeof(errstr));
+
+ if (res == RD_KAFKA_CONF_UNKNOWN)
+ res = rd_kafka_conf_set(conf, name, val, errstr,
+ sizeof(errstr));
+
+ if (res != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ } break;
+
+ default:
+ goto usage;
+ }
+ }
+
+
+ if (do_conf_dump) {
+ const char **arr;
+ size_t cnt;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ int i;
+
+ if (pass == 0) {
+ arr = rd_kafka_conf_dump(conf, &cnt);
+ printf("# Global config\n");
+ } else {
+ printf("# Topic config\n");
+ arr =
+ rd_kafka_topic_conf_dump(topic_conf, &cnt);
+ }
+
+ for (i = 0; i < (int)cnt; i += 2)
+ printf("%s = %s\n", arr[i], arr[i + 1]);
+
+ printf("\n");
+
+ rd_kafka_conf_dump_free(arr, cnt);
+ }
+
+ exit(0);
+ }
+
+
+ if (optind != argc || (mode != 'L' && !topic)) {
+ usage:
+ fprintf(stderr,
+ "Usage: %s -C|-P|-L -t <topic> "
+ "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -C | -P Consumer or Producer mode\n"
+ " -L Metadata list mode\n"
+ " -t <topic> Topic to fetch / produce\n"
+ " -p <num> Partition (random partitioner)\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy|lz4|zstd\n"
+ " -o <offset> Start offset (consumer):\n"
+ " beginning, end, NNNNN or -NNNNN\n"
+ " wmark returns the current hi&lo "
+ "watermarks.\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -q Be quiet\n"
+ " -A Raw payload output (consumer)\n"
+ " -H <name[=value]> Add header to message (producer)\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Properties prefixed with \"topic.\" "
+ "will be set on topic object.\n"
+ " -X list Show full list of supported "
+ "properties.\n"
+ " -X dump Show configuration\n"
+ " -X <prop> Get single property value\n"
+ "\n"
+ " In Consumer mode:\n"
+ " writes fetched messages to stdout\n"
+ " In Producer mode:\n"
+ " reads messages from stdin and sends to broker\n"
+ " In List mode:\n"
+ " queries broker for metadata information, "
+ "topic is optional.\n"
+ "\n"
+ "\n"
+ "\n",
+ argv[0], rd_kafka_version_str(), rd_kafka_version(),
+ RD_KAFKA_DEBUG_CONTEXTS);
+ exit(1);
+ }
+
+ if ((mode == 'C' && !isatty(STDIN_FILENO)) ||
+ (mode != 'C' && !isatty(STDOUT_FILENO)))
+ quiet = 1;
+
+
+ signal(SIGINT, stop);
+ signal(SIGUSR1, sig_usr1);
+
+ /* Set bootstrap servers */
+ if (brokers &&
+ rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ if (mode == 'P') {
+ /*
+ * Producer
+ */
+ char buf[2048];
+ int sendcnt = 0;
+
+ /* Set up a message delivery report callback.
+ * It will be called once for each message, either on successful
+ * delivery to broker, or upon failure to deliver to broker. */
+ rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* Create topic */
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ topic_conf = NULL; /* Now owned by topic */
+
+ if (!quiet)
+ fprintf(stderr,
+ "%% Type stuff and hit enter to send\n");
+
+ while (run && fgets(buf, sizeof(buf), stdin)) {
+ size_t len = strlen(buf);
+ if (buf[len - 1] == '\n')
+ buf[--len] = '\0';
+
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Send/Produce message. */
+ if (hdrs) {
+ rd_kafka_headers_t *hdrs_copy;
+
+ hdrs_copy = rd_kafka_headers_copy(hdrs);
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_RKT(rkt),
+ RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_VALUE(buf, len),
+ RD_KAFKA_V_HEADERS(hdrs_copy),
+ RD_KAFKA_V_END);
+
+ if (err)
+ rd_kafka_headers_destroy(hdrs_copy);
+
+ } else {
+ if (rd_kafka_produce(
+ rkt, partition, RD_KAFKA_MSG_F_COPY,
+ /* Payload and length */
+ buf, len,
+ /* Optional key and its length */
+ NULL, 0,
+ /* Message opaque, provided in
+ * delivery report callback as
+ * msg_opaque. */
+ NULL) == -1) {
+ err = rd_kafka_last_error();
+ }
+ }
+
+ if (err) {
+ fprintf(stderr,
+ "%% Failed to produce to topic %s "
+ "partition %i: %s\n",
+ rd_kafka_topic_name(rkt), partition,
+ rd_kafka_err2str(err));
+
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 0);
+ continue;
+ }
+
+ if (!quiet)
+ fprintf(stderr,
+ "%% Sent %zd bytes to topic "
+ "%s partition %i\n",
+ len, rd_kafka_topic_name(rkt),
+ partition);
+ sendcnt++;
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 0);
+ }
+
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 0);
+
+ /* Wait for messages to be delivered */
+ while (run && rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 100);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+
+ } else if (mode == 'C') {
+ /*
+ * Consumer
+ */
+
+ rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL,
+ 0);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create new consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ if (get_wmarks) {
+ int64_t lo, hi;
+
+ /* Only query for hi&lo partition watermarks */
+
+ if ((err = rd_kafka_query_watermark_offsets(
+ rk, topic, partition, &lo, &hi, 5000))) {
+ fprintf(stderr,
+ "%% query_watermark_offsets() "
+ "failed: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+
+ printf(
+ "%s [%d]: low - high offsets: "
+ "%" PRId64 " - %" PRId64 "\n",
+ topic, partition, lo, hi);
+
+ rd_kafka_destroy(rk);
+ exit(0);
+ }
+
+
+ /* Create topic */
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ topic_conf = NULL; /* Now owned by topic */
+
+ /* Start consuming */
+ if (rd_kafka_consume_start(rkt, partition, start_offset) ==
+ -1) {
+ err = rd_kafka_last_error();
+ fprintf(stderr, "%% Failed to start consuming: %s\n",
+ rd_kafka_err2str(err));
+ if (err == RD_KAFKA_RESP_ERR__INVALID_ARG)
+ fprintf(stderr,
+ "%% Broker based offset storage "
+ "requires a group.id, "
+ "add: -X group.id=yourGroup\n");
+ exit(1);
+ }
+
+ while (run) {
+ rd_kafka_message_t *rkmessage;
+
+ /* Poll for errors, etc. */
+ rd_kafka_poll(rk, 0);
+
+ /* Consume single message.
+ * See rdkafka_performance.c for high speed
+ * consuming of messages. */
+ rkmessage = rd_kafka_consume(rkt, partition, 1000);
+ if (!rkmessage) /* timeout */
+ continue;
+
+ msg_consume(rkmessage, NULL);
+
+ /* Return message to rdkafka */
+ rd_kafka_message_destroy(rkmessage);
+
+ if (seek_offset) {
+ err = rd_kafka_seek(rkt, partition, seek_offset,
+ 2000);
+ if (err)
+ printf("Seek failed: %s\n",
+ rd_kafka_err2str(err));
+ else
+ printf("Seeked to %" PRId64 "\n",
+ seek_offset);
+ seek_offset = 0;
+ }
+ }
+
+ /* Stop consuming */
+ rd_kafka_consume_stop(rkt, partition);
+
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 10);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy handle */
+ rd_kafka_destroy(rk);
+
+ } else if (mode == 'L') {
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create new producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* Create topic */
+ if (topic) {
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ topic_conf = NULL; /* Now owned by topic */
+ } else
+ rkt = NULL;
+
+ while (run) {
+ const struct rd_kafka_metadata *metadata;
+
+ /* Fetch metadata */
+ err = rd_kafka_metadata(rk, rkt ? 0 : 1, rkt, &metadata,
+ 5000);
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR) {
+ fprintf(stderr,
+ "%% Failed to acquire metadata: %s\n",
+ rd_kafka_err2str(err));
+ run = 0;
+ break;
+ }
+
+ metadata_print(topic, metadata);
+
+ rd_kafka_metadata_destroy(metadata);
+ run = 0;
+ }
+
+ /* Destroy topic */
+ if (rkt)
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+
+ if (topic_conf)
+ rd_kafka_topic_conf_destroy(topic_conf);
+
+
+ /* Exit right away, dont wait for background cleanup, we haven't
+ * done anything important anyway. */
+ exit(err ? 2 : 0);
+ }
+
+ if (hdrs)
+ rd_kafka_headers_destroy(hdrs);
+
+ if (topic_conf)
+ rd_kafka_topic_conf_destroy(topic_conf);
+
+ /* Let background threads clean up and terminate cleanly. */
+ run = 5;
+ while (run-- > 0 && rd_kafka_wait_destroyed(1000) == -1)
+ printf("Waiting for librdkafka to decommission\n");
+ if (run <= 0)
+ rd_kafka_dump(stdout, rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp
new file mode 100644
index 000000000..91c3440b3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_example.cpp
@@ -0,0 +1,679 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer example programs
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#elif _AIX
+#include <unistd.h>
+#else
+#include <getopt.h>
+#endif
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+static void metadata_print(const std::string &topic,
+ const RdKafka::Metadata *metadata) {
+ std::cout << "Metadata for " << (topic.empty() ? "" : "all topics")
+ << "(from broker " << metadata->orig_broker_id() << ":"
+ << metadata->orig_broker_name() << std::endl;
+
+ /* Iterate brokers */
+ std::cout << " " << metadata->brokers()->size() << " brokers:" << std::endl;
+ RdKafka::Metadata::BrokerMetadataIterator ib;
+ for (ib = metadata->brokers()->begin(); ib != metadata->brokers()->end();
+ ++ib) {
+ std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":"
+ << (*ib)->port() << std::endl;
+ }
+ /* Iterate topics */
+ std::cout << metadata->topics()->size() << " topics:" << std::endl;
+ RdKafka::Metadata::TopicMetadataIterator it;
+ for (it = metadata->topics()->begin(); it != metadata->topics()->end();
+ ++it) {
+ std::cout << " topic \"" << (*it)->topic() << "\" with "
+ << (*it)->partitions()->size() << " partitions:";
+
+ if ((*it)->err() != RdKafka::ERR_NO_ERROR) {
+ std::cout << " " << err2str((*it)->err());
+ if ((*it)->err() == RdKafka::ERR_LEADER_NOT_AVAILABLE)
+ std::cout << " (try again)";
+ }
+ std::cout << std::endl;
+
+ /* Iterate topic's partitions */
+ RdKafka::TopicMetadata::PartitionMetadataIterator ip;
+ for (ip = (*it)->partitions()->begin(); ip != (*it)->partitions()->end();
+ ++ip) {
+ std::cout << " partition " << (*ip)->id() << ", leader "
+ << (*ip)->leader() << ", replicas: ";
+
+ /* Iterate partition's replicas */
+ RdKafka::PartitionMetadata::ReplicasIterator ir;
+ for (ir = (*ip)->replicas()->begin(); ir != (*ip)->replicas()->end();
+ ++ir) {
+ std::cout << (ir == (*ip)->replicas()->begin() ? "" : ",") << *ir;
+ }
+
+ /* Iterate partition's ISRs */
+ std::cout << ", isrs: ";
+ RdKafka::PartitionMetadata::ISRSIterator iis;
+ for (iis = (*ip)->isrs()->begin(); iis != (*ip)->isrs()->end(); ++iis)
+ std::cout << (iis == (*ip)->isrs()->begin() ? "" : ",") << *iis;
+
+ if ((*ip)->err() != RdKafka::ERR_NO_ERROR)
+ std::cout << ", " << RdKafka::err2str((*ip)->err()) << std::endl;
+ else
+ std::cout << std::endl;
+ }
+ }
+}
+
+static volatile sig_atomic_t run = 1;
+static bool exit_eof = false;
+
+static void sigterm(int sig) {
+ run = 0;
+}
+
+
+class ExampleDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &message) {
+ std::string status_name;
+ switch (message.status()) {
+ case RdKafka::Message::MSG_STATUS_NOT_PERSISTED:
+ status_name = "NotPersisted";
+ break;
+ case RdKafka::Message::MSG_STATUS_POSSIBLY_PERSISTED:
+ status_name = "PossiblyPersisted";
+ break;
+ case RdKafka::Message::MSG_STATUS_PERSISTED:
+ status_name = "Persisted";
+ break;
+ default:
+ status_name = "Unknown?";
+ break;
+ }
+ std::cout << "Message delivery for (" << message.len()
+ << " bytes): " << status_name << ": " << message.errstr()
+ << std::endl;
+ if (message.key())
+ std::cout << "Key: " << *(message.key()) << ";" << std::endl;
+ }
+};
+
+
+class ExampleEventCb : public RdKafka::EventCb {
+ public:
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ if (event.fatal()) {
+ std::cerr << "FATAL ";
+ run = 0;
+ }
+ std::cerr << "ERROR (" << RdKafka::err2str(event.err())
+ << "): " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_STATS:
+ std::cerr << "\"STATS\": " << event.str() << std::endl;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ fprintf(stderr, "LOG-%i-%s: %s\n", event.severity(), event.fac().c_str(),
+ event.str().c_str());
+ break;
+
+ default:
+ std::cerr << "EVENT " << event.type() << " ("
+ << RdKafka::err2str(event.err()) << "): " << event.str()
+ << std::endl;
+ break;
+ }
+ }
+};
+
+
+/* Use of this partitioner is pretty pointless since no key is provided
+ * in the produce() call. */
+class MyHashPartitionerCb : public RdKafka::PartitionerCb {
+ public:
+ int32_t partitioner_cb(const RdKafka::Topic *topic,
+ const std::string *key,
+ int32_t partition_cnt,
+ void *msg_opaque) {
+ return djb_hash(key->c_str(), key->size()) % partition_cnt;
+ }
+
+ private:
+ static inline unsigned int djb_hash(const char *str, size_t len) {
+ unsigned int hash = 5381;
+ for (size_t i = 0; i < len; i++)
+ hash = ((hash << 5) + hash) + str[i];
+ return hash;
+ }
+};
+
+void msg_consume(RdKafka::Message *message, void *opaque) {
+ const RdKafka::Headers *headers;
+
+ switch (message->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ /* Real message */
+ std::cout << "Read msg at offset " << message->offset() << std::endl;
+ if (message->key()) {
+ std::cout << "Key: " << *message->key() << std::endl;
+ }
+ headers = message->headers();
+ if (headers) {
+ std::vector<RdKafka::Headers::Header> hdrs = headers->get_all();
+ for (size_t i = 0; i < hdrs.size(); i++) {
+ const RdKafka::Headers::Header hdr = hdrs[i];
+
+ if (hdr.value() != NULL)
+ printf(" Header: %s = \"%.*s\"\n", hdr.key().c_str(),
+ (int)hdr.value_size(), (const char *)hdr.value());
+ else
+ printf(" Header: %s = NULL\n", hdr.key().c_str());
+ }
+ }
+ printf("%.*s\n", static_cast<int>(message->len()),
+ static_cast<const char *>(message->payload()));
+ break;
+
+ case RdKafka::ERR__PARTITION_EOF:
+ /* Last message */
+ if (exit_eof) {
+ run = 0;
+ }
+ break;
+
+ case RdKafka::ERR__UNKNOWN_TOPIC:
+ case RdKafka::ERR__UNKNOWN_PARTITION:
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ break;
+
+ default:
+ /* Errors */
+ std::cerr << "Consume failed: " << message->errstr() << std::endl;
+ run = 0;
+ }
+}
+
+
+class ExampleConsumeCb : public RdKafka::ConsumeCb {
+ public:
+ void consume_cb(RdKafka::Message &msg, void *opaque) {
+ msg_consume(&msg, opaque);
+ }
+};
+
+
+
+int main(int argc, char **argv) {
+ std::string brokers = "localhost";
+ std::string errstr;
+ std::string topic_str;
+ std::string mode;
+ std::string debug;
+ int32_t partition = RdKafka::Topic::PARTITION_UA;
+ int64_t start_offset = RdKafka::Topic::OFFSET_BEGINNING;
+ bool do_conf_dump = false;
+ int opt;
+ MyHashPartitionerCb hash_partitioner;
+ int use_ccb = 0;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+
+
+ while ((opt = getopt(argc, argv, "PCLt:p:b:z:qd:o:eX:AM:f:")) != -1) {
+ switch (opt) {
+ case 'P':
+ case 'C':
+ case 'L':
+ mode = opt;
+ break;
+ case 't':
+ topic_str = optarg;
+ break;
+ case 'p':
+ if (!strcmp(optarg, "random"))
+ /* default */;
+ else if (!strcmp(optarg, "hash")) {
+ if (tconf->set("partitioner_cb", &hash_partitioner, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } else
+ partition = std::atoi(optarg);
+ break;
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'z':
+ if (conf->set("compression.codec", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'o':
+ if (!strcmp(optarg, "end"))
+ start_offset = RdKafka::Topic::OFFSET_END;
+ else if (!strcmp(optarg, "beginning"))
+ start_offset = RdKafka::Topic::OFFSET_BEGINNING;
+ else if (!strcmp(optarg, "stored"))
+ start_offset = RdKafka::Topic::OFFSET_STORED;
+ else
+ start_offset = strtoll(optarg, NULL, 10);
+ break;
+ case 'e':
+ exit_eof = true;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'M':
+ if (conf->set("statistics.interval.ms", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = true;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ /* Try "topic." prefixed properties on topic
+ * conf first, and then fall through to global if
+ * it didnt match a topic configuration property. */
+ RdKafka::Conf::ConfResult res;
+ if (!strncmp(name, "topic.", strlen("topic.")))
+ res = tconf->set(name + strlen("topic."), val, errstr);
+ else
+ res = conf->set(name, val, errstr);
+
+ if (res != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ case 'f':
+ if (!strcmp(optarg, "ccb"))
+ use_ccb = 1;
+ else {
+ std::cerr << "Unknown option: " << optarg << std::endl;
+ exit(1);
+ }
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ if (mode.empty() || (topic_str.empty() && mode != "L") || optind != argc) {
+ usage:
+ std::string features;
+ conf->get("builtin.features", features);
+ fprintf(stderr,
+ "Usage: %s [-C|-P] -t <topic> "
+ "[-p <partition>] [-b <host1:port1,host2:port2,..>]\n"
+ "\n"
+ "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
+ "\n"
+ " Options:\n"
+ " -C | -P Consumer or Producer mode\n"
+ " -L Metadata list mode\n"
+ " -t <topic> Topic to fetch / produce\n"
+ " -p <num> Partition (random partitioner)\n"
+ " -p <func> Use partitioner:\n"
+ " random (default), hash\n"
+ " -b <brokers> Broker address (localhost:9092)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy|lz4|zstd\n"
+ " -o <offset> Start offset (consumer)\n"
+ " -e Exit consumer when last message\n"
+ " in partition has been received.\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -M <intervalms> Enable statistics\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ " Properties prefixed with \"topic.\" "
+ "will be set on topic object.\n"
+ " Use '-X list' to see the full list\n"
+ " of supported properties.\n"
+ " -f <flag> Set option:\n"
+ " ccb - use consume_callback\n"
+ "\n"
+ " In Consumer mode:\n"
+ " writes fetched messages to stdout\n"
+ " In Producer mode:\n"
+ " reads messages from stdin and sends to broker\n"
+ "\n"
+ "\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ features.c_str(), RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("metadata.broker.list", brokers, errstr);
+
+ if (!debug.empty()) {
+ if (conf->set("debug", debug, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ }
+
+ ExampleEventCb ex_event_cb;
+ conf->set("event_cb", &ex_event_cb, errstr);
+
+ if (do_conf_dump) {
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ std::list<std::string> *dump;
+ if (pass == 0) {
+ dump = conf->dump();
+ std::cout << "# Global config" << std::endl;
+ } else {
+ dump = tconf->dump();
+ std::cout << "# Topic config" << std::endl;
+ }
+
+ for (std::list<std::string>::iterator it = dump->begin();
+ it != dump->end();) {
+ std::cout << *it << " = ";
+ it++;
+ std::cout << *it << std::endl;
+ it++;
+ }
+ std::cout << std::endl;
+ }
+ exit(0);
+ }
+
+ signal(SIGINT, sigterm);
+ signal(SIGTERM, sigterm);
+
+
+ if (mode == "P") {
+ /*
+ * Producer mode
+ */
+
+ if (topic_str.empty())
+ goto usage;
+
+ ExampleDeliveryReportCb ex_dr_cb;
+
+ /* Set delivery report callback */
+ conf->set("dr_cb", &ex_dr_cb, errstr);
+
+ conf->set("default_topic_conf", tconf, errstr);
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created producer " << producer->name() << std::endl;
+
+
+ /*
+ * Read messages from stdin and produce to broker.
+ */
+ for (std::string line; run && std::getline(std::cin, line);) {
+ if (line.empty()) {
+ producer->poll(0);
+ continue;
+ }
+
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+ headers->add("my header", "header value");
+ headers->add("other header", "yes");
+
+ /*
+ * Produce message
+ */
+ RdKafka::ErrorCode resp =
+ producer->produce(topic_str, partition,
+ RdKafka::Producer::RK_MSG_COPY /* Copy payload */,
+ /* Value */
+ const_cast<char *>(line.c_str()), line.size(),
+ /* Key */
+ NULL, 0,
+ /* Timestamp (defaults to now) */
+ 0,
+ /* Message headers, if any */
+ headers,
+ /* Per-message opaque value passed to
+ * delivery report */
+ NULL);
+ if (resp != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "% Produce failed: " << RdKafka::err2str(resp)
+ << std::endl;
+ delete headers; /* Headers are automatically deleted on produce()
+ * success. */
+ } else {
+ std::cerr << "% Produced message (" << line.size() << " bytes)"
+ << std::endl;
+ }
+
+ producer->poll(0);
+ }
+ run = 1;
+
+ while (run && producer->outq_len() > 0) {
+ std::cerr << "Waiting for " << producer->outq_len() << std::endl;
+ producer->poll(1000);
+ }
+
+ delete producer;
+
+
+ } else if (mode == "C") {
+ /*
+ * Consumer mode
+ */
+
+ conf->set("enable.partition.eof", "true", errstr);
+
+ if (topic_str.empty())
+ goto usage;
+
+ /*
+ * Create consumer using accumulated global configuration.
+ */
+ RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
+ if (!consumer) {
+ std::cerr << "Failed to create consumer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created consumer " << consumer->name() << std::endl;
+
+ /*
+ * Create topic handle.
+ */
+ RdKafka::Topic *topic =
+ RdKafka::Topic::create(consumer, topic_str, tconf, errstr);
+ if (!topic) {
+ std::cerr << "Failed to create topic: " << errstr << std::endl;
+ exit(1);
+ }
+
+ /*
+ * Start consumer for topic+partition at start offset
+ */
+ RdKafka::ErrorCode resp = consumer->start(topic, partition, start_offset);
+ if (resp != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "Failed to start consumer: " << RdKafka::err2str(resp)
+ << std::endl;
+ exit(1);
+ }
+
+ ExampleConsumeCb ex_consume_cb;
+
+ /*
+ * Consume messages
+ */
+ while (run) {
+ if (use_ccb) {
+ consumer->consume_callback(topic, partition, 1000, &ex_consume_cb,
+ &use_ccb);
+ } else {
+ RdKafka::Message *msg = consumer->consume(topic, partition, 1000);
+ msg_consume(msg, NULL);
+ delete msg;
+ }
+ consumer->poll(0);
+ }
+
+ /*
+ * Stop consumer
+ */
+ consumer->stop(topic, partition);
+
+ consumer->poll(1000);
+
+ delete topic;
+ delete consumer;
+ } else {
+ /* Metadata mode */
+
+ /*
+ * Create producer using accumulated global configuration.
+ */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ std::cout << "% Created producer " << producer->name() << std::endl;
+
+ /*
+ * Create topic handle.
+ */
+ RdKafka::Topic *topic = NULL;
+ if (!topic_str.empty()) {
+ topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
+ if (!topic) {
+ std::cerr << "Failed to create topic: " << errstr << std::endl;
+ exit(1);
+ }
+ }
+
+ while (run) {
+ class RdKafka::Metadata *metadata;
+
+ /* Fetch metadata */
+ RdKafka::ErrorCode err =
+ producer->metadata(!topic, topic, &metadata, 5000);
+ if (err != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
+ << std::endl;
+ run = 0;
+ break;
+ }
+
+ metadata_print(topic_str, metadata);
+
+ delete metadata;
+ run = 0;
+ }
+ }
+
+ delete conf;
+ delete tconf;
+
+ /*
+ * Wait for RdKafka to decommission.
+ * This is not strictly needed (when check outq_len() above), but
+ * allows RdKafka to clean up all its resources before the application
+ * exits so that memory profilers such as valgrind wont complain about
+ * memory leaks.
+ */
+ RdKafka::wait_destroyed(5000);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c
new file mode 100644
index 000000000..a12bb7471
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/rdkafka_performance.c
@@ -0,0 +1,1780 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Apache Kafka consumer & producer performance tester
+ * using the Kafka driver from librdkafka
+ * (https://github.com/edenhill/librdkafka)
+ */
+
+#ifdef _MSC_VER
+#define _CRT_SECURE_NO_WARNINGS /* Silence nonsense on MSVC */
+#endif
+
+#include "../src/rd.h"
+
+#define _GNU_SOURCE /* for strndup() */
+#include <ctype.h>
+#include <signal.h>
+#include <string.h>
+#include <errno.h>
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+/* Do not include these defines from your program, they will not be
+ * provided by librdkafka. */
+#include "rd.h"
+#include "rdtime.h"
+
+#ifdef _WIN32
+#include "../win32/wingetopt.h"
+#include "../win32/wintime.h"
+#endif
+
+
+static volatile sig_atomic_t run = 1;
+static int forever = 1;
+static rd_ts_t dispintvl = 1000;
+static int do_seq = 0;
+static int exit_after = 0;
+static int exit_eof = 0;
+static FILE *stats_fp;
+static int dr_disp_div;
+static int verbosity = 1;
+static int latency_mode = 0;
+static FILE *latency_fp = NULL;
+static int msgcnt = -1;
+static int incremental_mode = 0;
+static int partition_cnt = 0;
+static int eof_cnt = 0;
+static int with_dr = 1;
+static int read_hdrs = 0;
+
+
+static void stop(int sig) {
+ if (!run)
+ exit(0);
+ run = 0;
+}
+
+static long int msgs_wait_cnt = 0;
+static long int msgs_wait_produce_cnt = 0;
+static rd_ts_t t_end;
+static rd_kafka_t *global_rk;
+
+struct avg {
+ int64_t val;
+ int cnt;
+ uint64_t ts_start;
+};
+
+static struct {
+ rd_ts_t t_start;
+ rd_ts_t t_end;
+ rd_ts_t t_end_send;
+ uint64_t msgs;
+ uint64_t msgs_last;
+ uint64_t msgs_dr_ok;
+ uint64_t msgs_dr_err;
+ uint64_t bytes_dr_ok;
+ uint64_t bytes;
+ uint64_t bytes_last;
+ uint64_t tx;
+ uint64_t tx_err;
+ uint64_t avg_rtt;
+ uint64_t offset;
+ rd_ts_t t_fetch_latency;
+ rd_ts_t t_last;
+ rd_ts_t t_enobufs_last;
+ rd_ts_t t_total;
+ rd_ts_t latency_last;
+ rd_ts_t latency_lo;
+ rd_ts_t latency_hi;
+ rd_ts_t latency_sum;
+ int latency_cnt;
+ int64_t last_offset;
+} cnt;
+
+
+uint64_t wall_clock(void) {
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((uint64_t)tv.tv_sec * 1000000LLU) + ((uint64_t)tv.tv_usec);
+}
+
+static void err_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ if (err == RD_KAFKA_RESP_ERR__FATAL) {
+ char errstr[512];
+ err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ printf("%% FATAL ERROR CALLBACK: %s: %s: %s\n",
+ rd_kafka_name(rk), rd_kafka_err2str(err), errstr);
+ } else {
+ printf("%% ERROR CALLBACK: %s: %s: %s\n", rd_kafka_name(rk),
+ rd_kafka_err2str(err), reason);
+ }
+}
+
+static void throttle_cb(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int throttle_time_ms,
+ void *opaque) {
+ printf("%% THROTTLED %dms by %s (%" PRId32 ")\n", throttle_time_ms,
+ broker_name, broker_id);
+}
+
+static void offset_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ int i;
+
+ if (err || verbosity >= 2)
+ printf("%% Offset commit of %d partition(s): %s\n",
+ offsets->cnt, rd_kafka_err2str(err));
+
+ for (i = 0; i < offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
+ if (rktpar->err || verbosity >= 2)
+ printf("%% %s [%" PRId32 "] @ %" PRId64 ": %s\n",
+ rktpar->topic, rktpar->partition, rktpar->offset,
+ rd_kafka_err2str(err));
+ }
+}
+
+/**
+ * @brief Add latency measurement
+ */
+static void latency_add(int64_t ts, const char *who) {
+ if (ts > cnt.latency_hi)
+ cnt.latency_hi = ts;
+ if (!cnt.latency_lo || ts < cnt.latency_lo)
+ cnt.latency_lo = ts;
+ cnt.latency_last = ts;
+ cnt.latency_cnt++;
+ cnt.latency_sum += ts;
+ if (latency_fp)
+ fprintf(latency_fp, "%" PRIu64 "\n", ts);
+}
+
+
+static void msg_delivered(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ static rd_ts_t last;
+ rd_ts_t now = rd_clock();
+ static int msgs;
+
+ msgs++;
+
+ msgs_wait_cnt--;
+
+ if (rkmessage->err)
+ cnt.msgs_dr_err++;
+ else {
+ cnt.msgs_dr_ok++;
+ cnt.bytes_dr_ok += rkmessage->len;
+ }
+
+ if (latency_mode) {
+ /* Extract latency */
+ int64_t source_ts;
+ if (sscanf(rkmessage->payload, "LATENCY:%" SCNd64,
+ &source_ts) == 1)
+ latency_add(wall_clock() - source_ts, "producer");
+ }
+
+
+ if ((rkmessage->err && (cnt.msgs_dr_err < 50 ||
+ !(cnt.msgs_dr_err % (dispintvl / 1000)))) ||
+ !last || msgs_wait_cnt < 5 || !(msgs_wait_cnt % dr_disp_div) ||
+ (now - last) >= dispintvl * 1000 || verbosity >= 3) {
+ if (rkmessage->err && verbosity >= 2)
+ printf("%% Message delivery failed (broker %" PRId32
+ "): "
+ "%s [%" PRId32
+ "]: "
+ "%s (%li remain)\n",
+ rd_kafka_message_broker_id(rkmessage),
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition,
+ rd_kafka_err2str(rkmessage->err), msgs_wait_cnt);
+ else if (verbosity > 2)
+ printf("%% Message delivered (offset %" PRId64
+ ", broker %" PRId32
+ "): "
+ "%li remain\n",
+ rkmessage->offset,
+ rd_kafka_message_broker_id(rkmessage),
+ msgs_wait_cnt);
+ if (verbosity >= 3 && do_seq)
+ printf(" --> \"%.*s\"\n", (int)rkmessage->len,
+ (const char *)rkmessage->payload);
+ last = now;
+ }
+
+ cnt.last_offset = rkmessage->offset;
+
+ if (msgs_wait_produce_cnt == 0 && msgs_wait_cnt == 0 && !forever) {
+ if (verbosity >= 2 && cnt.msgs > 0) {
+ double error_percent =
+ (double)(cnt.msgs - cnt.msgs_dr_ok) / cnt.msgs *
+ 100;
+ printf(
+ "%% Messages delivered with failure "
+ "percentage of %.5f%%\n",
+ error_percent);
+ }
+ t_end = rd_clock();
+ run = 0;
+ }
+
+ if (exit_after && exit_after <= msgs) {
+ printf("%% Hard exit after %i messages, as requested\n",
+ exit_after);
+ exit(0);
+ }
+}
+
+
+static void msg_consume(rd_kafka_message_t *rkmessage, void *opaque) {
+
+ if (rkmessage->err) {
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ cnt.offset = rkmessage->offset;
+
+ if (verbosity >= 1)
+ printf(
+ "%% Consumer reached end of "
+ "%s [%" PRId32
+ "] "
+ "message queue at offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+
+ if (exit_eof && ++eof_cnt == partition_cnt)
+ run = 0;
+
+ return;
+ }
+
+ printf("%% Consume error for topic \"%s\" [%" PRId32
+ "] "
+ "offset %" PRId64 ": %s\n",
+ rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
+ : "",
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION ||
+ rkmessage->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ run = 0;
+
+ cnt.msgs_dr_err++;
+ return;
+ }
+
+ /* Start measuring from first message received */
+ if (!cnt.t_start)
+ cnt.t_start = cnt.t_last = rd_clock();
+
+ cnt.offset = rkmessage->offset;
+ cnt.msgs++;
+ cnt.bytes += rkmessage->len;
+
+ if (verbosity >= 3 || (verbosity >= 2 && !(cnt.msgs % 1000000)))
+ printf("@%" PRId64 ": %.*s: %.*s\n", rkmessage->offset,
+ (int)rkmessage->key_len, (char *)rkmessage->key,
+ (int)rkmessage->len, (char *)rkmessage->payload);
+
+
+ if (latency_mode) {
+ int64_t remote_ts, ts;
+
+ if (rkmessage->len > 8 &&
+ !memcmp(rkmessage->payload, "LATENCY:", 8) &&
+ sscanf(rkmessage->payload, "LATENCY:%" SCNd64,
+ &remote_ts) == 1) {
+ ts = wall_clock() - remote_ts;
+ if (ts > 0 && ts < (1000000 * 60 * 5)) {
+ latency_add(ts, "consumer");
+ } else {
+ if (verbosity >= 1)
+ printf(
+ "Received latency timestamp is too "
+ "far off: %" PRId64
+ "us (message offset %" PRId64
+ "): ignored\n",
+ ts, rkmessage->offset);
+ }
+ } else if (verbosity > 1)
+ printf("not a LATENCY payload: %.*s\n",
+ (int)rkmessage->len, (char *)rkmessage->payload);
+ }
+
+ if (read_hdrs) {
+ rd_kafka_headers_t *hdrs;
+ /* Force parsing of headers but don't do anything with them. */
+ rd_kafka_message_headers(rkmessage, &hdrs);
+ }
+
+ if (msgcnt != -1 && (int)cnt.msgs >= msgcnt)
+ run = 0;
+}
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ rd_kafka_error_t *error = NULL;
+ rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (exit_eof && !strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ fprintf(stderr,
+ "%% This example has not been modified to "
+ "support -e (exit on EOF) when "
+ "partition.assignment.strategy "
+ "is set to an incremental/cooperative strategy: "
+ "-e will not behave as expected\n");
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ fprintf(stderr,
+ "%% Group rebalanced (%s): "
+ "%d new partition(s) assigned\n",
+ rd_kafka_rebalance_protocol(rk), partitions->cnt);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
+ error = rd_kafka_incremental_assign(rk, partitions);
+ } else {
+ ret_err = rd_kafka_assign(rk, partitions);
+ eof_cnt = 0;
+ }
+
+ partition_cnt += partitions->cnt;
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ fprintf(stderr,
+ "%% Group rebalanced (%s): %d partition(s) revoked\n",
+ rd_kafka_rebalance_protocol(rk), partitions->cnt);
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
+ error = rd_kafka_incremental_unassign(rk, partitions);
+ partition_cnt -= partitions->cnt;
+ } else {
+ ret_err = rd_kafka_assign(rk, NULL);
+ partition_cnt = 0;
+ }
+
+ eof_cnt = 0; /* FIXME: Not correct for incremental case */
+ break;
+
+ default:
+ break;
+ }
+
+ if (error) {
+ fprintf(stderr, "%% incremental assign failure: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ } else if (ret_err) {
+ fprintf(stderr, "%% assign failure: %s\n",
+ rd_kafka_err2str(ret_err));
+ }
+}
+
+
+/**
+ * Find and extract single value from a two-level search.
+ * First find 'field1', then find 'field2' and extract its value.
+ * Returns 0 on miss else the value.
+ */
+static uint64_t json_parse_fields(const char *json,
+ const char **end,
+ const char *field1,
+ const char *field2) {
+ const char *t = json;
+ const char *t2;
+ int len1 = (int)strlen(field1);
+ int len2 = (int)strlen(field2);
+
+ while ((t2 = strstr(t, field1))) {
+ uint64_t v;
+
+ t = t2;
+ t += len1;
+
+ /* Find field */
+ if (!(t2 = strstr(t, field2)))
+ continue;
+ t2 += len2;
+
+ while (isspace((int)*t2))
+ t2++;
+
+ v = strtoull(t2, (char **)&t, 10);
+ if (t2 == t)
+ continue;
+
+ *end = t;
+ return v;
+ }
+
+ *end = t + strlen(t);
+ return 0;
+}
+
+/**
+ * Parse various values from rdkafka stats
+ */
+static void json_parse_stats(const char *json) {
+ const char *t;
+#define MAX_AVGS 100 /* max number of brokers to scan for rtt */
+ uint64_t avg_rtt[MAX_AVGS + 1];
+ int avg_rtt_i = 0;
+
+ /* Store totals at end of array */
+ avg_rtt[MAX_AVGS] = 0;
+
+ /* Extract all broker RTTs */
+ t = json;
+ while (avg_rtt_i < MAX_AVGS && *t) {
+ avg_rtt[avg_rtt_i] =
+ json_parse_fields(t, &t, "\"rtt\":", "\"avg\":");
+
+ /* Skip low RTT values, means no messages are passing */
+ if (avg_rtt[avg_rtt_i] < 100 /*0.1ms*/)
+ continue;
+
+
+ avg_rtt[MAX_AVGS] += avg_rtt[avg_rtt_i];
+ avg_rtt_i++;
+ }
+
+ if (avg_rtt_i > 0)
+ avg_rtt[MAX_AVGS] /= avg_rtt_i;
+
+ cnt.avg_rtt = avg_rtt[MAX_AVGS];
+}
+
+
+static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
+
+ /* Extract values for our own stats */
+ json_parse_stats(json);
+
+ if (stats_fp)
+ fprintf(stats_fp, "%s\n", json);
+ return 0;
+}
+
+#define _OTYPE_TAB 0x1 /* tabular format */
+#define _OTYPE_SUMMARY 0x2 /* summary format */
+#define _OTYPE_FORCE 0x4 /* force output regardless of interval timing */
+static void
+print_stats(rd_kafka_t *rk, int mode, int otype, const char *compression) {
+ rd_ts_t now = rd_clock();
+ rd_ts_t t_total;
+ static int rows_written = 0;
+ int print_header;
+ double latency_avg = 0.0f;
+ char extra[512];
+ int extra_of = 0;
+ *extra = '\0';
+
+ if (!(otype & _OTYPE_FORCE) &&
+ (((otype & _OTYPE_SUMMARY) && verbosity == 0) ||
+ cnt.t_last + dispintvl > now))
+ return;
+
+ print_header = !rows_written || (verbosity > 0 && !(rows_written % 20));
+
+ if (cnt.t_end_send)
+ t_total = cnt.t_end_send - cnt.t_start;
+ else if (cnt.t_end)
+ t_total = cnt.t_end - cnt.t_start;
+ else if (cnt.t_start)
+ t_total = now - cnt.t_start;
+ else
+ t_total = 1;
+
+ if (latency_mode && cnt.latency_cnt)
+ latency_avg = (double)cnt.latency_sum / (double)cnt.latency_cnt;
+
+ if (mode == 'P') {
+
+ if (otype & _OTYPE_TAB) {
+#define ROW_START() \
+ do { \
+ } while (0)
+#define COL_HDR(NAME) printf("| %10.10s ", (NAME))
+#define COL_PR64(NAME, VAL) printf("| %10" PRIu64 " ", (VAL))
+#define COL_PRF(NAME, VAL) printf("| %10.2f ", (VAL))
+#define ROW_END() \
+ do { \
+ printf("\n"); \
+ rows_written++; \
+ } while (0)
+
+ if (print_header) {
+ /* First time, print header */
+ ROW_START();
+ COL_HDR("elapsed");
+ COL_HDR("msgs");
+ COL_HDR("bytes");
+ COL_HDR("rtt");
+ COL_HDR("dr");
+ COL_HDR("dr_m/s");
+ COL_HDR("dr_MB/s");
+ COL_HDR("dr_err");
+ COL_HDR("tx_err");
+ COL_HDR("outq");
+ COL_HDR("offset");
+ if (latency_mode) {
+ COL_HDR("lat_curr");
+ COL_HDR("lat_avg");
+ COL_HDR("lat_lo");
+ COL_HDR("lat_hi");
+ }
+
+ ROW_END();
+ }
+
+ ROW_START();
+ COL_PR64("elapsed", t_total / 1000);
+ COL_PR64("msgs", cnt.msgs);
+ COL_PR64("bytes", cnt.bytes);
+ COL_PR64("rtt", cnt.avg_rtt / 1000);
+ COL_PR64("dr", cnt.msgs_dr_ok);
+ COL_PR64("dr_m/s",
+ ((cnt.msgs_dr_ok * 1000000) / t_total));
+ COL_PRF("dr_MB/s",
+ (float)((cnt.bytes_dr_ok) / (float)t_total));
+ COL_PR64("dr_err", cnt.msgs_dr_err);
+ COL_PR64("tx_err", cnt.tx_err);
+ COL_PR64("outq",
+ rk ? (uint64_t)rd_kafka_outq_len(rk) : 0);
+ COL_PR64("offset", (uint64_t)cnt.last_offset);
+ if (latency_mode) {
+ COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
+ COL_PRF("lat_avg", latency_avg / 1000.0f);
+ COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
+ COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
+ }
+ ROW_END();
+ }
+
+ if (otype & _OTYPE_SUMMARY) {
+ printf("%% %" PRIu64
+ " messages produced "
+ "(%" PRIu64
+ " bytes), "
+ "%" PRIu64
+ " delivered "
+ "(offset %" PRId64 ", %" PRIu64
+ " failed) "
+ "in %" PRIu64 "ms: %" PRIu64
+ " msgs/s and "
+ "%.02f MB/s, "
+ "%" PRIu64
+ " produce failures, %i in queue, "
+ "%s compression\n",
+ cnt.msgs, cnt.bytes, cnt.msgs_dr_ok,
+ cnt.last_offset, cnt.msgs_dr_err, t_total / 1000,
+ ((cnt.msgs_dr_ok * 1000000) / t_total),
+ (float)((cnt.bytes_dr_ok) / (float)t_total),
+ cnt.tx_err, rk ? rd_kafka_outq_len(rk) : 0,
+ compression);
+ }
+
+ } else {
+
+ if (otype & _OTYPE_TAB) {
+ if (print_header) {
+ /* First time, print header */
+ ROW_START();
+ COL_HDR("elapsed");
+ COL_HDR("msgs");
+ COL_HDR("bytes");
+ COL_HDR("rtt");
+ COL_HDR("m/s");
+ COL_HDR("MB/s");
+ COL_HDR("rx_err");
+ COL_HDR("offset");
+ if (latency_mode) {
+ COL_HDR("lat_curr");
+ COL_HDR("lat_avg");
+ COL_HDR("lat_lo");
+ COL_HDR("lat_hi");
+ }
+ ROW_END();
+ }
+
+ ROW_START();
+ COL_PR64("elapsed", t_total / 1000);
+ COL_PR64("msgs", cnt.msgs);
+ COL_PR64("bytes", cnt.bytes);
+ COL_PR64("rtt", cnt.avg_rtt / 1000);
+ COL_PR64("m/s", ((cnt.msgs * 1000000) / t_total));
+ COL_PRF("MB/s", (float)((cnt.bytes) / (float)t_total));
+ COL_PR64("rx_err", cnt.msgs_dr_err);
+ COL_PR64("offset", cnt.offset);
+ if (latency_mode) {
+ COL_PRF("lat_curr", cnt.latency_last / 1000.0f);
+ COL_PRF("lat_avg", latency_avg / 1000.0f);
+ COL_PRF("lat_lo", cnt.latency_lo / 1000.0f);
+ COL_PRF("lat_hi", cnt.latency_hi / 1000.0f);
+ }
+ ROW_END();
+ }
+
+ if (otype & _OTYPE_SUMMARY) {
+ if (latency_avg >= 1.0f)
+ extra_of += rd_snprintf(
+ extra + extra_of, sizeof(extra) - extra_of,
+ ", latency "
+ "curr/avg/lo/hi "
+ "%.2f/%.2f/%.2f/%.2fms",
+ cnt.latency_last / 1000.0f,
+ latency_avg / 1000.0f,
+ cnt.latency_lo / 1000.0f,
+ cnt.latency_hi / 1000.0f);
+ printf("%% %" PRIu64 " messages (%" PRIu64
+ " bytes) "
+ "consumed in %" PRIu64 "ms: %" PRIu64
+ " msgs/s "
+ "(%.02f MB/s)"
+ "%s\n",
+ cnt.msgs, cnt.bytes, t_total / 1000,
+ ((cnt.msgs * 1000000) / t_total),
+ (float)((cnt.bytes) / (float)t_total), extra);
+ }
+
+ if (incremental_mode && now > cnt.t_last) {
+ uint64_t i_msgs = cnt.msgs - cnt.msgs_last;
+ uint64_t i_bytes = cnt.bytes - cnt.bytes_last;
+ uint64_t i_time = cnt.t_last ? now - cnt.t_last : 0;
+
+ printf("%% INTERVAL: %" PRIu64
+ " messages "
+ "(%" PRIu64
+ " bytes) "
+ "consumed in %" PRIu64 "ms: %" PRIu64
+ " msgs/s "
+ "(%.02f MB/s)"
+ "%s\n",
+ i_msgs, i_bytes, i_time / 1000,
+ ((i_msgs * 1000000) / i_time),
+ (float)((i_bytes) / (float)i_time), extra);
+ }
+ }
+
+ cnt.t_last = now;
+ cnt.msgs_last = cnt.msgs;
+ cnt.bytes_last = cnt.bytes;
+}
+
+
+static void sig_usr1(int sig) {
+ rd_kafka_dump(stdout, global_rk);
+}
+
+
+/**
+ * @brief Read config from file
+ * @returns -1 on error, else 0.
+ */
+static int read_conf_file(rd_kafka_conf_t *conf, const char *path) {
+ FILE *fp;
+ char buf[512];
+ int line = 0;
+ char errstr[512];
+
+ if (!(fp = fopen(path, "r"))) {
+ fprintf(stderr, "%% Failed to open %s: %s\n", path,
+ strerror(errno));
+ return -1;
+ }
+
+ while (fgets(buf, sizeof(buf), fp)) {
+ char *s = buf;
+ char *t;
+ rd_kafka_conf_res_t r = RD_KAFKA_CONF_UNKNOWN;
+
+ line++;
+
+ while (isspace((int)*s))
+ s++;
+
+ if (!*s || *s == '#')
+ continue;
+
+ if ((t = strchr(buf, '\n')))
+ *t = '\0';
+
+ t = strchr(buf, '=');
+ if (!t || t == s || !*(t + 1)) {
+ fprintf(stderr, "%% %s:%d: expected key=value\n", path,
+ line);
+ fclose(fp);
+ return -1;
+ }
+
+ *(t++) = '\0';
+
+ /* Try global config */
+ r = rd_kafka_conf_set(conf, s, t, errstr, sizeof(errstr));
+
+ if (r == RD_KAFKA_CONF_OK)
+ continue;
+
+ fprintf(stderr, "%% %s:%d: %s=%s: %s\n", path, line, s, t,
+ errstr);
+ fclose(fp);
+ return -1;
+ }
+
+ fclose(fp);
+
+ return 0;
+}
+
+
+static rd_kafka_resp_err_t do_produce(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t size,
+ const void *key,
+ size_t key_size,
+ const rd_kafka_headers_t *hdrs) {
+
+ /* Send/Produce message. */
+ if (hdrs) {
+ rd_kafka_headers_t *hdrs_copy;
+ rd_kafka_resp_err_t err;
+
+ hdrs_copy = rd_kafka_headers_copy(hdrs);
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_RKT(rkt), RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_MSGFLAGS(msgflags),
+ RD_KAFKA_V_VALUE(payload, size),
+ RD_KAFKA_V_KEY(key, key_size),
+ RD_KAFKA_V_HEADERS(hdrs_copy), RD_KAFKA_V_END);
+
+ if (err)
+ rd_kafka_headers_destroy(hdrs_copy);
+
+ return err;
+
+ } else {
+ if (rd_kafka_produce(rkt, partition, msgflags, payload, size,
+ key, key_size, NULL) == -1)
+ return rd_kafka_last_error();
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Sleep for \p sleep_us microseconds.
+ */
+static void do_sleep(int sleep_us) {
+ if (sleep_us > 100) {
+#ifdef _WIN32
+ Sleep(sleep_us / 1000);
+#else
+ usleep(sleep_us);
+#endif
+ } else {
+ rd_ts_t next = rd_clock() + (rd_ts_t)sleep_us;
+ while (next > rd_clock())
+ ;
+ }
+}
+
+
+int main(int argc, char **argv) {
+ char *brokers = NULL;
+ char mode = 'C';
+ char *topic = NULL;
+ const char *key = NULL;
+ int *partitions = NULL;
+ int opt;
+ int sendflags = 0;
+ char *msgpattern = "librdkafka_performance testing!";
+ int msgsize = -1;
+ const char *debug = NULL;
+ int do_conf_dump = 0;
+ rd_ts_t now;
+ char errstr[512];
+ uint64_t seq = 0;
+ int seed = (int)time(NULL);
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *rkqu = NULL;
+ const char *compression = "no";
+ int64_t start_offset = 0;
+ int batch_size = 0;
+ int idle = 0;
+ const char *stats_cmd = NULL;
+ char *stats_intvlstr = NULL;
+ char tmp[128];
+ char *tmp2;
+ int otype = _OTYPE_SUMMARY;
+ double dtmp;
+ int rate_sleep = 0;
+ rd_kafka_topic_partition_list_t *topics;
+ int exitcode = 0;
+ rd_kafka_headers_t *hdrs = NULL;
+ rd_kafka_resp_err_t err;
+
+ /* Kafka configuration */
+ conf = rd_kafka_conf_new();
+ rd_kafka_conf_set_error_cb(conf, err_cb);
+ rd_kafka_conf_set_throttle_cb(conf, throttle_cb);
+ rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
+
+#ifdef SIGIO
+ /* Quick termination */
+ rd_snprintf(tmp, sizeof(tmp), "%i", SIGIO);
+ rd_kafka_conf_set(conf, "internal.termination.signal", tmp, NULL, 0);
+#endif
+
+ /* Producer config */
+ rd_kafka_conf_set(conf, "linger.ms", "1000", NULL, 0);
+ rd_kafka_conf_set(conf, "message.send.max.retries", "3", NULL, 0);
+ rd_kafka_conf_set(conf, "retry.backoff.ms", "500", NULL, 0);
+
+ /* Consumer config */
+ /* Tell rdkafka to (try to) maintain 1M messages
+ * in its internal receive buffers. This is to avoid
+ * application -> rdkafka -> broker per-message ping-pong
+ * latency.
+ * The larger the local queue, the higher the performance.
+ * Try other values with: ... -X queued.min.messages=1000
+ */
+ rd_kafka_conf_set(conf, "queued.min.messages", "1000000", NULL, 0);
+ rd_kafka_conf_set(conf, "session.timeout.ms", "6000", NULL, 0);
+ rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", NULL, 0);
+
+ topics = rd_kafka_topic_partition_list_new(1);
+
+ while ((opt = getopt(argc, argv,
+ "PCG:t:p:b:s:k:c:fi:MDd:m:S:x:"
+ "R:a:z:o:X:B:eT:Y:qvIur:lA:OwNH:")) != -1) {
+ switch (opt) {
+ case 'G':
+ if (rd_kafka_conf_set(conf, "group.id", optarg, errstr,
+ sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ /* FALLTHRU */
+ case 'P':
+ case 'C':
+ mode = opt;
+ break;
+ case 't':
+ rd_kafka_topic_partition_list_add(
+ topics, optarg, RD_KAFKA_PARTITION_UA);
+ break;
+ case 'p':
+ partition_cnt++;
+ partitions = realloc(partitions, sizeof(*partitions) *
+ partition_cnt);
+ partitions[partition_cnt - 1] = atoi(optarg);
+ break;
+
+ case 'b':
+ brokers = optarg;
+ break;
+ case 's':
+ msgsize = atoi(optarg);
+ break;
+ case 'k':
+ key = optarg;
+ break;
+ case 'c':
+ msgcnt = atoi(optarg);
+ break;
+ case 'D':
+ sendflags |= RD_KAFKA_MSG_F_FREE;
+ break;
+ case 'i':
+ dispintvl = atoi(optarg);
+ break;
+ case 'm':
+ msgpattern = optarg;
+ break;
+ case 'S':
+ seq = strtoull(optarg, NULL, 10);
+ do_seq = 1;
+ break;
+ case 'x':
+ exit_after = atoi(optarg);
+ break;
+ case 'R':
+ seed = atoi(optarg);
+ break;
+ case 'a':
+ if (rd_kafka_conf_set(conf, "acks", optarg, errstr,
+ sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ break;
+ case 'B':
+ batch_size = atoi(optarg);
+ break;
+ case 'z':
+ if (rd_kafka_conf_set(conf, "compression.codec", optarg,
+ errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ compression = optarg;
+ break;
+ case 'o':
+ if (!strcmp(optarg, "end"))
+ start_offset = RD_KAFKA_OFFSET_END;
+ else if (!strcmp(optarg, "beginning"))
+ start_offset = RD_KAFKA_OFFSET_BEGINNING;
+ else if (!strcmp(optarg, "stored"))
+ start_offset = RD_KAFKA_OFFSET_STORED;
+ else {
+ start_offset = strtoll(optarg, NULL, 10);
+
+ if (start_offset < 0)
+ start_offset =
+ RD_KAFKA_OFFSET_TAIL(-start_offset);
+ }
+
+ break;
+ case 'e':
+ exit_eof = 1;
+ break;
+ case 'd':
+ debug = optarg;
+ break;
+ case 'H':
+ if (!strcmp(optarg, "parse"))
+ read_hdrs = 1;
+ else {
+ char *name, *val;
+ size_t name_sz = -1;
+
+ name = optarg;
+ val = strchr(name, '=');
+ if (val) {
+ name_sz = (size_t)(val - name);
+ val++; /* past the '=' */
+ }
+
+ if (!hdrs)
+ hdrs = rd_kafka_headers_new(8);
+
+ err = rd_kafka_header_add(hdrs, name, name_sz,
+ val, -1);
+ if (err) {
+ fprintf(
+ stderr,
+ "%% Failed to add header %s: %s\n",
+ name, rd_kafka_err2str(err));
+ exit(1);
+ }
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+ rd_kafka_conf_res_t res;
+
+ if (!strcmp(optarg, "list") ||
+ !strcmp(optarg, "help")) {
+ rd_kafka_conf_properties_show(stdout);
+ exit(0);
+ }
+
+ if (!strcmp(optarg, "dump")) {
+ do_conf_dump = 1;
+ continue;
+ }
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ fprintf(stderr,
+ "%% Expected "
+ "-X property=value, not %s\n",
+ name);
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (!strcmp(name, "file")) {
+ if (read_conf_file(conf, val) == -1)
+ exit(1);
+ break;
+ }
+
+ res = rd_kafka_conf_set(conf, name, val, errstr,
+ sizeof(errstr));
+
+ if (res != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+ } break;
+
+ case 'T':
+ stats_intvlstr = optarg;
+ break;
+ case 'Y':
+ stats_cmd = optarg;
+ break;
+
+ case 'q':
+ verbosity--;
+ break;
+
+ case 'v':
+ verbosity++;
+ break;
+
+ case 'I':
+ idle = 1;
+ break;
+
+ case 'u':
+ otype = _OTYPE_TAB;
+ verbosity--; /* remove some fluff */
+ break;
+
+ case 'r':
+ dtmp = strtod(optarg, &tmp2);
+ if (tmp2 == optarg ||
+ (dtmp >= -0.001 && dtmp <= 0.001)) {
+ fprintf(stderr, "%% Invalid rate: %s\n",
+ optarg);
+ exit(1);
+ }
+
+ rate_sleep = (int)(1000000.0 / dtmp);
+ break;
+
+ case 'l':
+ latency_mode = 1;
+ break;
+
+ case 'A':
+ if (!(latency_fp = fopen(optarg, "w"))) {
+ fprintf(stderr, "%% Cant open %s: %s\n", optarg,
+ strerror(errno));
+ exit(1);
+ }
+ break;
+
+ case 'M':
+ incremental_mode = 1;
+ break;
+
+ case 'N':
+ with_dr = 0;
+ break;
+
+ default:
+ fprintf(stderr, "Unknown option: %c\n", opt);
+ goto usage;
+ }
+ }
+
+ if (topics->cnt == 0 || optind != argc) {
+ if (optind < argc)
+ fprintf(stderr, "Unknown argument: %s\n", argv[optind]);
+ usage:
+ fprintf(
+ stderr,
+ "Usage: %s [-C|-P] -t <topic> "
+ "[-p <partition>] [-b <broker,broker..>] [options..]\n"
+ "\n"
+ "librdkafka version %s (0x%08x)\n"
+ "\n"
+ " Options:\n"
+ " -C | -P | Consumer or Producer mode\n"
+ " -G <groupid> High-level Kafka Consumer mode\n"
+ " -t <topic> Topic to consume / produce\n"
+ " -p <num> Partition (defaults to random). "
+ "Multiple partitions are allowed in -C consumer mode.\n"
+ " -M Print consumer interval stats\n"
+ " -b <brokers> Broker address list (host[:port],..)\n"
+ " -s <size> Message size (producer)\n"
+ " -k <key> Message key (producer)\n"
+ " -H <name[=value]> Add header to message (producer)\n"
+ " -H parse Read message headers (consumer)\n"
+ " -c <cnt> Messages to transmit/receive\n"
+ " -x <cnt> Hard exit after transmitting <cnt> "
+ "messages (producer)\n"
+ " -D Copy/Duplicate data buffer (producer)\n"
+ " -i <ms> Display interval\n"
+ " -m <msg> Message payload pattern\n"
+ " -S <start> Send a sequence number starting at "
+ "<start> as payload\n"
+ " -R <seed> Random seed value (defaults to time)\n"
+ " -a <acks> Required acks (producer): "
+ "-1, 0, 1, >1\n"
+ " -B <size> Consume batch size (# of msgs)\n"
+ " -z <codec> Enable compression:\n"
+ " none|gzip|snappy\n"
+ " -o <offset> Start offset (consumer)\n"
+ " beginning, end, NNNNN or -NNNNN\n"
+ " -d [facs..] Enable debugging contexts:\n"
+ " %s\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ " -X file=<path> Read config from file.\n"
+ " -X list Show full list of supported properties.\n"
+ " -X dump Show configuration\n"
+ " -T <intvl> Enable statistics from librdkafka at "
+ "specified interval (ms)\n"
+ " -Y <command> Pipe statistics to <command>\n"
+ " -I Idle: dont produce any messages\n"
+ " -q Decrease verbosity\n"
+ " -v Increase verbosity (default 1)\n"
+ " -u Output stats in table format\n"
+ " -r <rate> Producer msg/s limit\n"
+ " -l Latency measurement.\n"
+ " Needs two matching instances, one\n"
+ " consumer and one producer, both\n"
+ " running with the -l switch.\n"
+ " -l Producer: per-message latency stats\n"
+ " -A <file> Write per-message latency stats to "
+ "<file>. Requires -l\n"
+ " -O Report produced offset (producer)\n"
+ " -N No delivery reports (producer)\n"
+ "\n"
+ " In Consumer mode:\n"
+ " consumes messages and prints thruput\n"
+ " If -B <..> is supplied the batch consumer\n"
+ " mode is used, else the callback mode is used.\n"
+ "\n"
+ " In Producer mode:\n"
+ " writes messages of size -s <..> and prints thruput\n"
+ "\n",
+ argv[0], rd_kafka_version_str(), rd_kafka_version(),
+ RD_KAFKA_DEBUG_CONTEXTS);
+ exit(1);
+ }
+
+
+ dispintvl *= 1000; /* us */
+
+ if (verbosity > 1)
+ printf("%% Using random seed %i, verbosity level %i\n", seed,
+ verbosity);
+ srand(seed);
+ signal(SIGINT, stop);
+#ifdef SIGUSR1
+ signal(SIGUSR1, sig_usr1);
+#endif
+
+
+ if (debug && rd_kafka_conf_set(conf, "debug", debug, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ printf("%% Debug configuration failed: %s: %s\n", errstr,
+ debug);
+ exit(1);
+ }
+
+ /* Always enable stats (for RTT extraction), and if user supplied
+ * the -T <intvl> option we let her take part of the stats aswell. */
+ rd_kafka_conf_set_stats_cb(conf, stats_cb);
+
+ if (!stats_intvlstr) {
+ /* if no user-desired stats, adjust stats interval
+ * to the display interval. */
+ rd_snprintf(tmp, sizeof(tmp), "%" PRId64, dispintvl / 1000);
+ }
+
+ if (rd_kafka_conf_set(conf, "statistics.interval.ms",
+ stats_intvlstr ? stats_intvlstr : tmp, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ if (do_conf_dump) {
+ const char **arr;
+ size_t cnt;
+ int pass;
+
+ for (pass = 0; pass < 2; pass++) {
+ int i;
+
+ if (pass == 0) {
+ arr = rd_kafka_conf_dump(conf, &cnt);
+ printf("# Global config\n");
+ } else {
+ rd_kafka_topic_conf_t *topic_conf =
+ rd_kafka_conf_get_default_topic_conf(conf);
+
+ if (topic_conf) {
+ printf("# Topic config\n");
+ arr = rd_kafka_topic_conf_dump(
+ topic_conf, &cnt);
+ } else {
+ arr = NULL;
+ }
+ }
+
+ if (!arr)
+ continue;
+
+ for (i = 0; i < (int)cnt; i += 2)
+ printf("%s = %s\n", arr[i], arr[i + 1]);
+
+ printf("\n");
+
+ rd_kafka_conf_dump_free(arr, cnt);
+ }
+
+ exit(0);
+ }
+
+ if (latency_mode)
+ do_seq = 0;
+
+ if (stats_intvlstr) {
+ /* User enabled stats (-T) */
+
+#ifndef _WIN32
+ if (stats_cmd) {
+ if (!(stats_fp = popen(stats_cmd,
+#ifdef __linux__
+ "we"
+#else
+ "w"
+#endif
+ ))) {
+ fprintf(stderr,
+ "%% Failed to start stats command: "
+ "%s: %s",
+ stats_cmd, strerror(errno));
+ exit(1);
+ }
+ } else
+#endif
+ stats_fp = stdout;
+ }
+
+ if (msgcnt != -1)
+ forever = 0;
+
+ if (msgsize == -1)
+ msgsize = (int)strlen(msgpattern);
+
+ topic = topics->elems[0].topic;
+
+ if (mode == 'C' || mode == 'G')
+ rd_kafka_conf_set(conf, "enable.partition.eof", "true", NULL,
+ 0);
+
+ if (read_hdrs && mode == 'P') {
+ fprintf(stderr, "%% producer can not read headers\n");
+ exit(1);
+ }
+
+ if (hdrs && mode != 'P') {
+ fprintf(stderr, "%% consumer can not add headers\n");
+ exit(1);
+ }
+
+ /* Set bootstrap servers */
+ if (brokers &&
+ rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "%% %s\n", errstr);
+ exit(1);
+ }
+
+ if (mode == 'P') {
+ /*
+ * Producer
+ */
+ char *sbuf;
+ char *pbuf;
+ int outq;
+ int keylen = key ? (int)strlen(key) : 0;
+ off_t rof = 0;
+ size_t plen = strlen(msgpattern);
+ int partition =
+ partitions ? partitions[0] : RD_KAFKA_PARTITION_UA;
+
+ if (latency_mode) {
+ int minlen = (int)(strlen("LATENCY:") +
+ strlen("18446744073709551615 ") + 1);
+ msgsize = RD_MAX(minlen, msgsize);
+ sendflags |= RD_KAFKA_MSG_F_COPY;
+ } else if (do_seq) {
+ int minlen = (int)strlen("18446744073709551615 ") + 1;
+ if (msgsize < minlen)
+ msgsize = minlen;
+
+ /* Force duplication of payload */
+ sendflags |= RD_KAFKA_MSG_F_FREE;
+ }
+
+ sbuf = malloc(msgsize);
+
+ /* Copy payload content to new buffer */
+ while (rof < msgsize) {
+ size_t xlen = RD_MIN((size_t)msgsize - rof, plen);
+ memcpy(sbuf + rof, msgpattern, xlen);
+ rof += (off_t)xlen;
+ }
+
+ if (msgcnt == -1)
+ printf("%% Sending messages of size %i bytes\n",
+ msgsize);
+ else
+ printf("%% Sending %i messages of size %i bytes\n",
+ msgcnt, msgsize);
+
+ if (with_dr)
+ rd_kafka_conf_set_dr_msg_cb(conf, msg_delivered);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create Kafka producer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ global_rk = rk;
+
+ /* Explicitly create topic to avoid per-msg lookups. */
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+
+ if (rate_sleep && verbosity >= 2)
+ fprintf(stderr,
+ "%% Inter message rate limiter sleep %ius\n",
+ rate_sleep);
+
+ dr_disp_div = msgcnt / 50;
+ if (dr_disp_div == 0)
+ dr_disp_div = 10;
+
+ cnt.t_start = cnt.t_last = rd_clock();
+
+ msgs_wait_produce_cnt = msgcnt;
+
+ while (run && (msgcnt == -1 || (int)cnt.msgs < msgcnt)) {
+ /* Send/Produce message. */
+
+ if (idle) {
+ rd_kafka_poll(rk, 1000);
+ continue;
+ }
+
+ if (latency_mode) {
+ rd_snprintf(sbuf, msgsize - 1,
+ "LATENCY:%" PRIu64, wall_clock());
+ } else if (do_seq) {
+ rd_snprintf(sbuf, msgsize - 1, "%" PRIu64 ": ",
+ seq);
+ seq++;
+ }
+
+ if (sendflags & RD_KAFKA_MSG_F_FREE) {
+ /* Duplicate memory */
+ pbuf = malloc(msgsize);
+ memcpy(pbuf, sbuf, msgsize);
+ } else
+ pbuf = sbuf;
+
+ if (msgsize == 0)
+ pbuf = NULL;
+
+ cnt.tx++;
+ while (run && (err = do_produce(
+ rk, rkt, partition, sendflags, pbuf,
+ msgsize, key, keylen, hdrs))) {
+ if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ printf(
+ "%% No such partition: "
+ "%" PRId32 "\n",
+ partition);
+ else if (verbosity >= 3 ||
+ (err !=
+ RD_KAFKA_RESP_ERR__QUEUE_FULL &&
+ verbosity >= 1))
+ printf(
+ "%% produce error: %s%s\n",
+ rd_kafka_err2str(err),
+ err == RD_KAFKA_RESP_ERR__QUEUE_FULL
+ ? " (backpressure)"
+ : "");
+
+ cnt.tx_err++;
+ if (err != RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ run = 0;
+ break;
+ }
+ now = rd_clock();
+ if (verbosity >= 2 &&
+ cnt.t_enobufs_last + dispintvl <= now) {
+ printf(
+ "%% Backpressure %i "
+ "(tx %" PRIu64
+ ", "
+ "txerr %" PRIu64 ")\n",
+ rd_kafka_outq_len(rk), cnt.tx,
+ cnt.tx_err);
+ cnt.t_enobufs_last = now;
+ }
+
+ /* Poll to handle delivery reports */
+ rd_kafka_poll(rk, 10);
+
+ print_stats(rk, mode, otype, compression);
+ }
+
+ msgs_wait_cnt++;
+ if (msgs_wait_produce_cnt != -1)
+ msgs_wait_produce_cnt--;
+ cnt.msgs++;
+ cnt.bytes += msgsize;
+
+ /* Must poll to handle delivery reports */
+ if (rate_sleep) {
+ rd_ts_t next = rd_clock() + (rd_ts_t)rate_sleep;
+ do {
+ rd_kafka_poll(
+ rk,
+ (int)RD_MAX(0, (next - rd_clock()) /
+ 1000));
+ } while (next > rd_clock());
+ } else if (cnt.msgs % 1000 == 0) {
+ rd_kafka_poll(rk, 0);
+ }
+
+ print_stats(rk, mode, otype, compression);
+ }
+
+ forever = 0;
+ if (verbosity >= 2)
+ printf(
+ "%% All messages produced, "
+ "now waiting for %li deliveries\n",
+ msgs_wait_cnt);
+
+ /* Wait for messages to be delivered */
+ while (run && rd_kafka_poll(rk, 1000) != -1)
+ print_stats(rk, mode, otype, compression);
+
+
+ outq = rd_kafka_outq_len(rk);
+ if (verbosity >= 2)
+ printf("%% %i messages in outq\n", outq);
+ cnt.msgs -= outq;
+ cnt.t_end = t_end;
+
+ if (cnt.tx_err > 0)
+ printf("%% %" PRIu64 " backpressures for %" PRIu64
+ " produce calls: %.3f%% backpressure rate\n",
+ cnt.tx_err, cnt.tx,
+ ((double)cnt.tx_err / (double)cnt.tx) * 100.0);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+ global_rk = rk = NULL;
+
+ free(sbuf);
+
+ exitcode = cnt.msgs == cnt.msgs_dr_ok ? 0 : 1;
+
+ } else if (mode == 'C') {
+ /*
+ * Consumer
+ */
+
+ rd_kafka_message_t **rkmessages = NULL;
+ size_t i = 0;
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create Kafka consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ global_rk = rk;
+
+ /* Create topic to consume from */
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ /* Batch consumer */
+ if (batch_size)
+ rkmessages = malloc(sizeof(*rkmessages) * batch_size);
+
+ /* Start consuming */
+ rkqu = rd_kafka_queue_new(rk);
+ for (i = 0; i < (size_t)partition_cnt; ++i) {
+ const int r = rd_kafka_consume_start_queue(
+ rkt, partitions[i], start_offset, rkqu);
+
+ if (r == -1) {
+ fprintf(
+ stderr, "%% Error creating queue: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ exit(1);
+ }
+ }
+
+ while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
+ /* Consume messages.
+ * A message may either be a real message, or
+ * an error signaling (if rkmessage->err is set).
+ */
+ uint64_t fetch_latency;
+ ssize_t r;
+
+ fetch_latency = rd_clock();
+
+ if (batch_size) {
+ int partition = partitions
+ ? partitions[0]
+ : RD_KAFKA_PARTITION_UA;
+
+ /* Batch fetch mode */
+ r = rd_kafka_consume_batch(rkt, partition, 1000,
+ rkmessages,
+ batch_size);
+ if (r != -1) {
+ for (i = 0; (ssize_t)i < r; i++) {
+ msg_consume(rkmessages[i],
+ NULL);
+ rd_kafka_message_destroy(
+ rkmessages[i]);
+ }
+ }
+ } else {
+ /* Queue mode */
+ r = rd_kafka_consume_callback_queue(
+ rkqu, 1000, msg_consume, NULL);
+ }
+
+ cnt.t_fetch_latency += rd_clock() - fetch_latency;
+ if (r == -1)
+ fprintf(
+ stderr, "%% Error: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ else if (r > 0 && rate_sleep) {
+ /* Simulate processing time
+ * if `-r <rate>` was set. */
+ do_sleep(rate_sleep);
+ }
+
+
+ print_stats(rk, mode, otype, compression);
+
+ /* Poll to handle stats callbacks */
+ rd_kafka_poll(rk, 0);
+ }
+ cnt.t_end = rd_clock();
+
+ /* Stop consuming */
+ for (i = 0; i < (size_t)partition_cnt; ++i) {
+ int r = rd_kafka_consume_stop(rkt, (int32_t)i);
+ if (r == -1) {
+ fprintf(
+ stderr, "%% Error in consume_stop: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ }
+ }
+ rd_kafka_queue_destroy(rkqu);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ if (batch_size)
+ free(rkmessages);
+
+ /* Destroy the handle */
+ rd_kafka_destroy(rk);
+
+ global_rk = rk = NULL;
+
+ } else if (mode == 'G') {
+ /*
+ * High-level balanced Consumer
+ */
+ rd_kafka_message_t **rkmessages = NULL;
+
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+
+ /* Create Kafka handle */
+ if (!(rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr,
+ sizeof(errstr)))) {
+ fprintf(stderr,
+ "%% Failed to create Kafka consumer: %s\n",
+ errstr);
+ exit(1);
+ }
+
+ /* Forward all events to consumer queue */
+ rd_kafka_poll_set_consumer(rk);
+
+ global_rk = rk;
+
+ err = rd_kafka_subscribe(rk, topics);
+ if (err) {
+ fprintf(stderr, "%% Subscribe failed: %s\n",
+ rd_kafka_err2str(err));
+ exit(1);
+ }
+ fprintf(stderr, "%% Waiting for group rebalance..\n");
+
+ if (batch_size) {
+ rkmessages = malloc(sizeof(*rkmessages) * batch_size);
+ } else {
+ rkmessages = malloc(sizeof(*rkmessages));
+ }
+
+ rkqu = rd_kafka_queue_get_consumer(rk);
+
+ while (run && (msgcnt == -1 || msgcnt > (int)cnt.msgs)) {
+ /* Consume messages.
+ * A message may either be a real message, or
+ * an event (if rkmessage->err is set).
+ */
+ uint64_t fetch_latency;
+ ssize_t r;
+
+ fetch_latency = rd_clock();
+
+ if (batch_size) {
+ /* Batch fetch mode */
+ ssize_t i = 0;
+ r = rd_kafka_consume_batch_queue(
+ rkqu, 1000, rkmessages, batch_size);
+ if (r != -1) {
+ for (i = 0; i < r; i++) {
+ msg_consume(rkmessages[i],
+ NULL);
+ rd_kafka_message_destroy(
+ rkmessages[i]);
+ }
+ }
+
+ if (r == -1)
+ fprintf(stderr, "%% Error: %s\n",
+ rd_kafka_err2str(
+ rd_kafka_last_error()));
+ else if (r > 0 && rate_sleep) {
+ /* Simulate processing time
+ * if `-r <rate>` was set. */
+ do_sleep(rate_sleep);
+ }
+
+ } else {
+ rkmessages[0] =
+ rd_kafka_consumer_poll(rk, 1000);
+ if (rkmessages[0]) {
+ msg_consume(rkmessages[0], NULL);
+ rd_kafka_message_destroy(rkmessages[0]);
+
+ /* Simulate processing time
+ * if `-r <rate>` was set. */
+ if (rate_sleep)
+ do_sleep(rate_sleep);
+ }
+ }
+
+ cnt.t_fetch_latency += rd_clock() - fetch_latency;
+
+ print_stats(rk, mode, otype, compression);
+ }
+ cnt.t_end = rd_clock();
+
+ err = rd_kafka_consumer_close(rk);
+ if (err)
+ fprintf(stderr, "%% Failed to close consumer: %s\n",
+ rd_kafka_err2str(err));
+
+ free(rkmessages);
+ rd_kafka_queue_destroy(rkqu);
+ rd_kafka_destroy(rk);
+ }
+
+ if (hdrs)
+ rd_kafka_headers_destroy(hdrs);
+
+ print_stats(NULL, mode, otype | _OTYPE_FORCE, compression);
+
+ if (cnt.t_fetch_latency && cnt.msgs)
+ printf("%% Average application fetch latency: %" PRIu64 "us\n",
+ cnt.t_fetch_latency / cnt.msgs);
+
+ if (latency_fp)
+ fclose(latency_fp);
+
+ if (stats_fp) {
+#ifndef _WIN32
+ pclose(stats_fp);
+#endif
+ stats_fp = NULL;
+ }
+
+ if (partitions)
+ free(partitions);
+
+ rd_kafka_topic_partition_list_destroy(topics);
+
+ /* Let background threads clean up and terminate cleanly. */
+ rd_kafka_wait_destroyed(2000);
+
+ return exitcode;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c
new file mode 100644
index 000000000..e9f8d06f7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions-older-broker.c
@@ -0,0 +1,668 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Transactions example for Apache Kafka <= 2.4.0 (no KIP-447 support).
+ *
+ * This example show-cases a simple transactional consume-process-produce
+ * application that reads messages from an input topic, extracts all
+ * numbers from the message's value string, adds them up, and sends
+ * the sum to the output topic as part of a transaction.
+ * The transaction is committed every 5 seconds or 100 messages, whichever
+ * comes first. As the transaction is committed a new transaction is started.
+ *
+ * @remark This example does not yet support incremental rebalancing and thus
+ * not the cooperative-sticky partition.assignment.strategy.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <ctype.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+static rd_kafka_t *consumer;
+
+/* From command-line arguments */
+static const char *brokers, *input_topic, *output_topic;
+
+
+/**
+ * @struct This is the per input partition state, constisting of
+ * a transactional producer and the in-memory state for the current transaction.
+ * This demo simply finds all numbers (ascii string numbers) in the message
+ * payload and adds them.
+ */
+struct state {
+ rd_kafka_t *producer; /**< Per-input partition output producer */
+ rd_kafka_topic_partition_t *rktpar; /**< Back-pointer to the
+ * input partition. */
+ time_t last_commit; /**< Last transaction commit */
+ int msgcnt; /**< Number of messages processed in current txn */
+};
+/* Current assignment for the input consumer.
+ * The .opaque field of each partition points to an allocated 'struct state'.
+ */
+static rd_kafka_topic_partition_list_t *assigned_partitions;
+
+
+
+/**
+ * @brief A fatal error has occurred, immediately exit the application.
+ */
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
+ * error message, destroys the object and then exits fatally.
+ */
+#define fatal_error(what, error) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \
+ rd_kafka_error_name(error), \
+ rd_kafka_error_string(error)); \
+ rd_kafka_error_destroy(error); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was succesfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll(), rd_kafka_flush(),
+ * rd_kafka_abort_transaction() and rd_kafka_commit_transaction() and
+ * executes on the application's thread.
+ *
+ * The current transactional will enter the abortable state if any
+ * message permanently fails delivery and the application must then
+ * call rd_kafka_abort_transaction(). But it does not need to be done from
+ * here, this state is checked by all the transactional APIs and it is better
+ * to perform this error checking when calling
+ * rd_kafka_send_offsets_to_transaction() and rd_kafka_commit_transaction().
+ * In the case of transactional producing the delivery report callback is
+ * mostly useful for logging the produce failures.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+/**
+ * @brief Create a transactional producer for the given input pratition
+ * and begin a new transaction.
+ */
+static rd_kafka_t *
+create_transactional_producer(const rd_kafka_topic_partition_t *rktpar) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_error_t *error;
+ char transactional_id[256];
+
+ snprintf(transactional_id, sizeof(transactional_id),
+ "librdkafka_transactions_older_example_%s-%d", rktpar->topic,
+ rktpar->partition);
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "transactional.id", transactional_id,
+ errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "transaction.timeout.ms", "60000", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ fatal("Failed to configure producer: %s", errstr);
+
+ /* This callback will be called once per message to indicate
+ * final delivery status. */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /* Create producer */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create producer: %s", errstr);
+ }
+
+ /* Initialize transactions, this is only performed once
+ * per transactional producer to acquire its producer id, et.al. */
+ error = rd_kafka_init_transactions(rk, -1);
+ if (error)
+ fatal_error("init_transactions()", error);
+
+
+ /* Begin a new transaction */
+ error = rd_kafka_begin_transaction(rk);
+ if (error)
+ fatal_error("begin_transaction()", error);
+
+ return rk;
+}
+
+
+/**
+ * @brief Abort the current transaction and destroy the producer.
+ */
+static void destroy_transactional_producer(rd_kafka_t *rk) {
+ rd_kafka_error_t *error;
+
+ fprintf(stdout, "%s: aborting transaction and terminating producer\n",
+ rd_kafka_name(rk));
+
+ /* Abort the current transaction, ignore any errors
+ * since we're terminating the producer anyway. */
+ error = rd_kafka_abort_transaction(rk, -1);
+ if (error) {
+ fprintf(stderr,
+ "WARNING: Ignoring abort_transaction() error since "
+ "producer is being destroyed: %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ }
+
+ rd_kafka_destroy(rk);
+}
+
+
+
+/**
+ * @brief Abort the current transaction and rewind consumer offsets to
+ * position where the transaction last started, i.e., the committed
+ * consumer offset.
+ */
+static void abort_transaction_and_rewind(struct state *state) {
+ rd_kafka_topic_t *rkt =
+ rd_kafka_topic_new(consumer, state->rktpar->topic, NULL);
+ rd_kafka_topic_partition_list_t *offset;
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+
+ fprintf(stdout,
+ "Aborting transaction and rewinding offset for %s [%d]\n",
+ state->rktpar->topic, state->rktpar->partition);
+
+ /* Abort the current transaction */
+ error = rd_kafka_abort_transaction(state->producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction", error);
+
+ /* Begin a new transaction */
+ error = rd_kafka_begin_transaction(state->producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+
+ /* Get committed offset for this partition */
+ offset = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
+ state->rktpar->partition);
+
+ /* Note: Timeout must be lower than max.poll.interval.ms */
+ err = rd_kafka_committed(consumer, offset, 10 * 1000);
+ if (err)
+ fatal("Failed to acquire committed offset for %s [%d]: %s",
+ state->rktpar->topic, (int)state->rktpar->partition,
+ rd_kafka_err2str(err));
+
+ /* Seek to committed offset, or start of partition if no
+ * no committed offset is available. */
+ err = rd_kafka_seek(rkt, state->rktpar->partition,
+ offset->elems[0].offset < 0
+ ?
+ /* No committed offset, start from beginning */
+ RD_KAFKA_OFFSET_BEGINNING
+ :
+ /* Use committed offset */
+ offset->elems[0].offset,
+ 0);
+
+ if (err)
+ fatal("Failed to seek %s [%d]: %s", state->rktpar->topic,
+ (int)state->rktpar->partition, rd_kafka_err2str(err));
+
+ rd_kafka_topic_destroy(rkt);
+}
+
+
+/**
+ * @brief Commit the current transaction and start a new transaction.
+ */
+static void commit_transaction_and_start_new(struct state *state) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_consumer_group_metadata_t *cgmd;
+ rd_kafka_topic_partition_list_t *offset;
+
+ fprintf(stdout, "Committing transaction for %s [%d]\n",
+ state->rktpar->topic, state->rktpar->partition);
+
+ /* Send the input consumer's offset to transaction
+ * to commit those offsets along with the transaction itself,
+ * this is what guarantees exactly-once-semantics (EOS), that
+ * input (offsets) and output (messages) are committed atomically. */
+
+ /* Get the consumer's current group state */
+ cgmd = rd_kafka_consumer_group_metadata(consumer);
+
+ /* Get consumer's current position for this partition */
+ offset = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offset, state->rktpar->topic,
+ state->rktpar->partition);
+ err = rd_kafka_position(consumer, offset);
+ if (err)
+ fatal("Failed to get consumer position for %s [%d]: %s",
+ state->rktpar->topic, state->rktpar->partition,
+ rd_kafka_err2str(err));
+
+ /* Send offsets to transaction coordinator */
+ error = rd_kafka_send_offsets_to_transaction(state->producer, offset,
+ cgmd, -1);
+ rd_kafka_consumer_group_metadata_destroy(cgmd);
+ rd_kafka_topic_partition_list_destroy(offset);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to send offsets to "
+ "transaction: %s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ abort_transaction_and_rewind(state);
+ return;
+ } else {
+ fatal_error("Failed to send offsets to transaction",
+ error);
+ }
+ }
+
+ /* Commit the transaction */
+ error = rd_kafka_commit_transaction(state->producer, -1);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to commit transaction: "
+ "%s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ abort_transaction_and_rewind(state);
+ rd_kafka_error_destroy(error);
+ return;
+ } else {
+ fatal_error("Failed to commit transaction", error);
+ }
+ }
+
+ /* Begin new transaction */
+ error = rd_kafka_begin_transaction(state->producer);
+ if (error)
+ fatal_error("Failed to begin new transaction", error);
+}
+
+/**
+ * @brief The rebalance will be triggered (from rd_kafka_consumer_poll())
+ * when the consumer's partition assignment is assigned or revoked.
+ *
+ * Prior to KIP-447 being supported there must be one transactional output
+ * producer for each consumed input partition, so we create and destroy
+ * these producer's from this callback.
+ */
+static void
+consumer_group_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ int i;
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ fatal(
+ "This example has not yet been modified to work with "
+ "cooperative incremental rebalancing "
+ "(partition.assignment.strategy=cooperative-sticky)");
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ assigned_partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+
+ fprintf(stdout, "Consumer group rebalanced, new assignment:\n");
+
+ /* Create a transactional producer for each input partition */
+ for (i = 0; i < assigned_partitions->cnt; i++) {
+ /* Store the partition-to-producer mapping
+ * in the partition's opaque field. */
+ rd_kafka_topic_partition_t *rktpar =
+ &assigned_partitions->elems[i];
+ struct state *state = calloc(1, sizeof(*state));
+
+ state->producer = create_transactional_producer(rktpar);
+ state->rktpar = rktpar;
+ rktpar->opaque = state;
+ state->last_commit = time(NULL);
+
+ fprintf(stdout,
+ " %s [%d] with transactional producer %s\n",
+ rktpar->topic, rktpar->partition,
+ rd_kafka_name(state->producer));
+ }
+
+ /* Let the consumer know the rebalance has been handled
+ * by calling assign.
+ * This will also tell the consumer to start fetching messages
+ * for the assigned partitions. */
+ rd_kafka_assign(rk, partitions);
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ fprintf(stdout,
+ "Consumer group rebalanced, assignment revoked\n");
+
+ /* Abort the current transactions and destroy all producers */
+ for (i = 0; i < assigned_partitions->cnt; i++) {
+ /* Store the partition-to-producer mapping
+ * in the partition's opaque field. */
+ struct state *state =
+ (struct state *)assigned_partitions->elems[i]
+ .opaque;
+
+ destroy_transactional_producer(state->producer);
+ free(state);
+ }
+
+ rd_kafka_topic_partition_list_destroy(assigned_partitions);
+ assigned_partitions = NULL;
+
+ /* Let the consumer know the rebalance has been handled
+ * and revoke the current assignment. */
+ rd_kafka_assign(rk, NULL);
+ break;
+
+ default:
+ /* NOTREACHED */
+ fatal("Unexpected rebalance event: %s", rd_kafka_err2name(err));
+ }
+}
+
+
+/**
+ * @brief Create the input consumer.
+ */
+static rd_kafka_t *create_input_consumer(const char *brokers,
+ const char *input_topic) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *topics;
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "group.id",
+ "librdkafka_transactions_older_example_group",
+ errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ /* The input consumer's offsets are explicitly committed with the
+ * output producer's transaction using
+ * rd_kafka_send_offsets_to_transaction(), so auto commits
+ * must be disabled. */
+ rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fatal("Failed to configure consumer: %s", errstr);
+ }
+
+ /* This callback will be called when the consumer group is rebalanced
+ * and the consumer's partition assignment is assigned or revoked. */
+ rd_kafka_conf_set_rebalance_cb(conf, consumer_group_rebalance_cb);
+
+ /* Create consumer */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create consumer: %s", errstr);
+ }
+
+ /* Forward all partition messages to the main queue and
+ * rd_kafka_consumer_poll(). */
+ rd_kafka_poll_set_consumer(rk);
+
+ /* Subscribe to the input topic */
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, input_topic,
+ /* The partition is ignored in
+ * rd_kafka_subscribe() */
+ RD_KAFKA_PARTITION_UA);
+ err = rd_kafka_subscribe(rk, topics);
+ rd_kafka_topic_partition_list_destroy(topics);
+ if (err) {
+ rd_kafka_destroy(rk);
+ fatal("Failed to subscribe to %s: %s\n", input_topic,
+ rd_kafka_err2str(err));
+ }
+
+ return rk;
+}
+
+
+/**
+ * @brief Find and parse next integer string in \p start.
+ * @returns Pointer after found integer string, or NULL if not found.
+ */
+static const void *
+find_next_int(const void *start, const void *end, int *intp) {
+ const char *p;
+ int collecting = 0;
+ int num = 0;
+
+ for (p = (const char *)start; p < (const char *)end; p++) {
+ if (isdigit((int)(*p))) {
+ collecting = 1;
+ num = (num * 10) + ((int)*p - ((int)'0'));
+ } else if (collecting)
+ break;
+ }
+
+ if (!collecting)
+ return NULL; /* No integer string found */
+
+ *intp = num;
+
+ return p;
+}
+
+
+/**
+ * @brief Process a message from the input consumer by parsing all
+ * integer strings, adding them, and then producing the sum
+ * the output topic using the transactional producer for the given
+ * inut partition.
+ */
+static void process_message(struct state *state,
+ const rd_kafka_message_t *rkmessage) {
+ int num;
+ long unsigned sum = 0;
+ const void *p, *end;
+ rd_kafka_resp_err_t err;
+ char value[64];
+
+ if (rkmessage->len == 0)
+ return; /* Ignore empty messages */
+
+ p = rkmessage->payload;
+ end = ((const char *)rkmessage->payload) + rkmessage->len;
+
+ /* Find and sum all numbers in the message */
+ while ((p = find_next_int(p, end, &num)))
+ sum += num;
+
+ if (sum == 0)
+ return; /* No integers in message, ignore it. */
+
+ snprintf(value, sizeof(value), "%lu", sum);
+
+ /* Emit output message on transactional producer */
+ while (1) {
+ err = rd_kafka_producev(
+ state->producer, RD_KAFKA_V_TOPIC(output_topic),
+ /* Use same key as input message */
+ RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len),
+ /* Value is the current sum of this
+ * transaction. */
+ RD_KAFKA_V_VALUE(value, strlen(value)),
+ /* Copy value since it is allocated on the stack */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+
+ if (!err)
+ break;
+ else if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If output queue fills up we need to wait for
+ * some delivery reports and then retry. */
+ rd_kafka_poll(state->producer, 100);
+ continue;
+ } else {
+ fprintf(stderr,
+ "WARNING: Failed to produce message to %s: "
+ "%s: aborting transaction\n",
+ output_topic, rd_kafka_err2str(err));
+ abort_transaction_and_rewind(state);
+ return;
+ }
+ }
+}
+
+
+int main(int argc, char **argv) {
+ /*
+ * Argument validation
+ */
+ if (argc != 4) {
+ fprintf(stderr,
+ "%% Usage: %s <broker> <input-topic> <output-topic>\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ input_topic = argv[2];
+ output_topic = argv[3];
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ consumer = create_input_consumer(brokers, input_topic);
+
+ fprintf(stdout,
+ "Expecting integers to sum on input topic %s ...\n"
+ "To generate input messages you can use:\n"
+ " $ seq 1 100 | examples/producer %s %s\n",
+ input_topic, brokers, input_topic);
+
+ while (run) {
+ rd_kafka_message_t *msg;
+ struct state *state;
+ rd_kafka_topic_partition_t *rktpar;
+
+ /* Wait for new mesages or error events */
+ msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
+ if (!msg)
+ continue;
+
+ if (msg->err) {
+ /* Client errors are typically just informational
+ * since the client will automatically try to recover
+ * from all types of errors.
+ * It is thus sufficient for the application to log and
+ * continue operating when an error is received. */
+ fprintf(stderr, "WARNING: Consumer error: %s\n",
+ rd_kafka_message_errstr(msg));
+ rd_kafka_message_destroy(msg);
+ continue;
+ }
+
+ /* Find output producer for this input partition */
+ rktpar = rd_kafka_topic_partition_list_find(
+ assigned_partitions, rd_kafka_topic_name(msg->rkt),
+ msg->partition);
+ if (!rktpar)
+ fatal(
+ "BUG: No output producer for assigned "
+ "partition %s [%d]",
+ rd_kafka_topic_name(msg->rkt), (int)msg->partition);
+
+ /* Get state struct for this partition */
+ state = (struct state *)rktpar->opaque;
+
+ /* Process message */
+ process_message(state, msg);
+
+ rd_kafka_message_destroy(msg);
+
+ /* Commit transaction every 100 messages or 5 seconds */
+ if (++state->msgcnt > 100 ||
+ state->last_commit + 5 <= time(NULL)) {
+ commit_transaction_and_start_new(state);
+ state->msgcnt = 0;
+ state->last_commit = time(NULL);
+ }
+ }
+
+ fprintf(stdout, "Closing consumer\n");
+ rd_kafka_consumer_close(consumer);
+
+ rd_kafka_destroy(consumer);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c
new file mode 100644
index 000000000..0a8b9a8cf
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/transactions.c
@@ -0,0 +1,665 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Transactions example for Apache Kafka 2.5.0 (KIP-447) and later.
+ *
+ * This example show-cases a simple transactional consume-process-produce
+ * application that reads messages from an input topic, extracts all
+ * numbers from the message's value string, adds them up, and sends
+ * the sum to the output topic as part of a transaction.
+ * The transaction is committed every 5 seconds or 100 messages, whichever
+ * comes first. As the transaction is committed a new transaction is started.
+ *
+ * This example makes use of incremental rebalancing (KIP-429) and the
+ * cooperative-sticky partition.assignment.strategy on the consumer, providing
+ * hitless rebalances.
+ */
+
+#include <stdio.h>
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+#include <stdlib.h>
+#include <time.h>
+#include <ctype.h>
+
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is builtin from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+
+static volatile sig_atomic_t run = 1;
+
+/**
+ * @brief A fatal error has occurred, immediately exit the application.
+ */
+#define fatal(...) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Same as fatal() but takes an rd_kafka_error_t object, prints its
+ * error message, destroys the object and then exits fatally.
+ */
+#define fatal_error(what, error) \
+ do { \
+ fprintf(stderr, "FATAL ERROR: %s: %s: %s\n", what, \
+ rd_kafka_error_name(error), \
+ rd_kafka_error_string(error)); \
+ rd_kafka_error_destroy(error); \
+ exit(1); \
+ } while (0)
+
+/**
+ * @brief Signal termination of program
+ */
+static void stop(int sig) {
+ run = 0;
+}
+
+
+/**
+ * @brief Message delivery report callback.
+ *
+ * This callback is called exactly once per message, indicating if
+ * the message was succesfully delivered
+ * (rkmessage->err == RD_KAFKA_RESP_ERR_NO_ERROR) or permanently
+ * failed delivery (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR).
+ *
+ * The callback is triggered from rd_kafka_poll(), rd_kafka_flush(),
+ * rd_kafka_abort_transaction() and rd_kafka_commit_transaction() and
+ * executes on the application's thread.
+ *
+ * The current transactional will enter the abortable state if any
+ * message permanently fails delivery and the application must then
+ * call rd_kafka_abort_transaction(). But it does not need to be done from
+ * here, this state is checked by all the transactional APIs and it is better
+ * to perform this error checking when calling
+ * rd_kafka_send_offsets_to_transaction() and rd_kafka_commit_transaction().
+ * In the case of transactional producing the delivery report callback is
+ * mostly useful for logging the produce failures.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ if (rkmessage->err)
+ fprintf(stderr, "%% Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+
+ /* The rkmessage is destroyed automatically by librdkafka */
+}
+
+
+
+/**
+ * @brief Create a transactional producer.
+ */
+static rd_kafka_t *create_transactional_producer(const char *brokers,
+ const char *output_topic) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_error_t *error;
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "transactional.id",
+ "librdkafka_transactions_example", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ fatal("Failed to configure producer: %s", errstr);
+
+ /* This callback will be called once per message to indicate
+ * final delivery status. */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ /* Create producer */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create producer: %s", errstr);
+ }
+
+ /* Initialize transactions, this is only performed once
+ * per transactional producer to acquire its producer id, et.al. */
+ error = rd_kafka_init_transactions(rk, -1);
+ if (error)
+ fatal_error("init_transactions()", error);
+
+ return rk;
+}
+
+
+/**
+ * @brief Rewind consumer's consume position to the last committed offsets
+ * for the current assignment.
+ */
+static void rewind_consumer(rd_kafka_t *consumer) {
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+ int i;
+
+ /* Get committed offsets for the current assignment, if there
+ * is a current assignment. */
+ err = rd_kafka_assignment(consumer, &offsets);
+ if (err) {
+ fprintf(stderr, "No current assignment to rewind: %s\n",
+ rd_kafka_err2str(err));
+ return;
+ }
+
+ if (offsets->cnt == 0) {
+ fprintf(stderr, "No current assignment to rewind\n");
+ rd_kafka_topic_partition_list_destroy(offsets);
+ return;
+ }
+
+ /* Note: Timeout must be lower than max.poll.interval.ms */
+ err = rd_kafka_committed(consumer, offsets, 10 * 1000);
+ if (err)
+ fatal("Failed to acquire committed offsets: %s",
+ rd_kafka_err2str(err));
+
+ /* Seek to committed offset, or start of partition if no
+ * committed offset is available. */
+ for (i = 0; i < offsets->cnt; i++) {
+ /* No committed offset, start from beginning */
+ if (offsets->elems[i].offset < 0)
+ offsets->elems[i].offset = RD_KAFKA_OFFSET_BEGINNING;
+ }
+
+ /* Perform seek */
+ error = rd_kafka_seek_partitions(consumer, offsets, -1);
+ if (error)
+ fatal_error("Failed to seek", error);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+}
+
+/**
+ * @brief Abort the current transaction and rewind consumer offsets to
+ * position where the transaction last started, i.e., the committed
+ * consumer offset, then begin a new transaction.
+ */
+static void abort_transaction_and_rewind(rd_kafka_t *consumer,
+ rd_kafka_t *producer) {
+ rd_kafka_error_t *error;
+
+ fprintf(stdout, "Aborting transaction and rewinding offsets\n");
+
+ /* Abort the current transaction */
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction", error);
+
+ /* Rewind consumer */
+ rewind_consumer(consumer);
+
+ /* Begin a new transaction */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+}
+
+
+/**
+ * @brief Commit the current transaction.
+ *
+ * @returns 1 if transaction was successfully committed, or 0
+ * if the current transaction was aborted.
+ */
+static int commit_transaction(rd_kafka_t *consumer, rd_kafka_t *producer) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_consumer_group_metadata_t *cgmd;
+ rd_kafka_topic_partition_list_t *offsets;
+
+ fprintf(stdout, "Committing transaction\n");
+
+ /* Send the input consumer's offset to transaction
+ * to commit those offsets along with the transaction itself,
+ * this is what guarantees exactly-once-semantics (EOS), that
+ * input (offsets) and output (messages) are committed atomically. */
+
+ /* Get the consumer's current group metadata state */
+ cgmd = rd_kafka_consumer_group_metadata(consumer);
+
+ /* Get consumer's current assignment */
+ err = rd_kafka_assignment(consumer, &offsets);
+ if (err || offsets->cnt == 0) {
+ /* No partition offsets to commit because consumer
+ * (most likely) lost the assignment, abort transaction. */
+ if (err)
+ fprintf(stderr,
+ "Failed to get consumer assignment to commit: "
+ "%s\n",
+ rd_kafka_err2str(err));
+ else
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction", error);
+
+ return 0;
+ }
+
+ /* Get consumer's current position for this partition */
+ err = rd_kafka_position(consumer, offsets);
+ if (err)
+ fatal("Failed to get consumer position: %s",
+ rd_kafka_err2str(err));
+
+ /* Send offsets to transaction coordinator */
+ error =
+ rd_kafka_send_offsets_to_transaction(producer, offsets, cgmd, -1);
+ rd_kafka_consumer_group_metadata_destroy(cgmd);
+ rd_kafka_topic_partition_list_destroy(offsets);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to send offsets to "
+ "transaction: %s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ /* Abort transaction */
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction",
+ error);
+ return 0;
+ } else {
+ fatal_error("Failed to send offsets to transaction",
+ error);
+ }
+ }
+
+ /* Commit the transaction */
+ error = rd_kafka_commit_transaction(producer, -1);
+ if (error) {
+ if (rd_kafka_error_txn_requires_abort(error)) {
+ fprintf(stderr,
+ "WARNING: Failed to commit transaction: "
+ "%s: %s: aborting transaction\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ /* Abort transaction */
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction",
+ error);
+ return 0;
+ } else {
+ fatal_error("Failed to commit transaction", error);
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * @brief Commit the current transaction and start a new transaction.
+ */
+static void commit_transaction_and_start_new(rd_kafka_t *consumer,
+ rd_kafka_t *producer) {
+ rd_kafka_error_t *error;
+
+ /* Commit transaction.
+ * If commit failed the transaction is aborted and we need
+ * to rewind the consumer to the last committed offsets. */
+ if (!commit_transaction(consumer, producer))
+ rewind_consumer(consumer);
+
+ /* Begin new transaction */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin new transaction", error);
+}
+
+/**
+ * @brief The rebalance will be triggered (from rd_kafka_consumer_poll())
+ * when the consumer's partition assignment is assigned or revoked.
+ */
+static void
+consumer_group_rebalance_cb(rd_kafka_t *consumer,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ rd_kafka_t *producer = (rd_kafka_t *)opaque;
+ rd_kafka_error_t *error;
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ fprintf(stdout,
+ "Consumer group rebalanced: "
+ "%d new partition(s) assigned\n",
+ partitions->cnt);
+
+ /* Start fetching messages for the assigned partitions
+ * and add them to the consumer's local assignment. */
+ error = rd_kafka_incremental_assign(consumer, partitions);
+ if (error)
+ fatal_error("Incremental assign failed", error);
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ if (rd_kafka_assignment_lost(consumer)) {
+ fprintf(stdout,
+ "Consumer group rebalanced: assignment lost: "
+ "aborting current transaction\n");
+
+ error = rd_kafka_abort_transaction(producer, -1);
+ if (error)
+ fatal_error("Failed to abort transaction",
+ error);
+ } else {
+ fprintf(stdout,
+ "Consumer group rebalanced: %d partition(s) "
+ "revoked: committing current transaction\n",
+ partitions->cnt);
+
+ commit_transaction(consumer, producer);
+ }
+
+ /* Begin new transaction */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+
+ /* Stop fetching messages for the revoekd partitions
+ * and remove them from the consumer's local assignment. */
+ error = rd_kafka_incremental_unassign(consumer, partitions);
+ if (error)
+ fatal_error("Incremental unassign failed", error);
+ break;
+
+ default:
+ /* NOTREACHED */
+ fatal("Unexpected rebalance event: %s", rd_kafka_err2name(err));
+ }
+}
+
+
+/**
+ * @brief Create the input consumer.
+ */
+static rd_kafka_t *create_input_consumer(const char *brokers,
+ const char *input_topic,
+ rd_kafka_t *producer) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[256];
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *topics;
+
+ if (rd_kafka_conf_set(conf, "bootstrap.servers", brokers, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "group.id",
+ "librdkafka_transactions_example_group", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "partition.assignment.strategy",
+ "cooperative-sticky", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ rd_kafka_conf_set(conf, "auto.offset.reset", "earliest", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK ||
+ /* The input consumer's offsets are explicitly committed with the
+ * output producer's transaction using
+ * rd_kafka_send_offsets_to_transaction(), so auto commits
+ * must be disabled. */
+ rd_kafka_conf_set(conf, "enable.auto.commit", "false", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ fatal("Failed to configure consumer: %s", errstr);
+ }
+
+ /* This callback will be called when the consumer group is rebalanced
+ * and the consumer's partition assignment is assigned or revoked. */
+ rd_kafka_conf_set_rebalance_cb(conf, consumer_group_rebalance_cb);
+
+ /* The producer handle is needed in the consumer's rebalance callback
+ * to be able to abort and commit transactions, so we pass the
+ * producer as the consumer's opaque. */
+ rd_kafka_conf_set_opaque(conf, producer);
+
+ /* Create consumer */
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ rd_kafka_conf_destroy(conf);
+ fatal("Failed to create consumer: %s", errstr);
+ }
+
+ /* Forward all partition messages to the main queue and
+ * rd_kafka_consumer_poll(). */
+ rd_kafka_poll_set_consumer(rk);
+
+ /* Subscribe to the input topic */
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, input_topic,
+ /* The partition is ignored in
+ * rd_kafka_subscribe() */
+ RD_KAFKA_PARTITION_UA);
+ err = rd_kafka_subscribe(rk, topics);
+ rd_kafka_topic_partition_list_destroy(topics);
+ if (err) {
+ rd_kafka_destroy(rk);
+ fatal("Failed to subscribe to %s: %s\n", input_topic,
+ rd_kafka_err2str(err));
+ }
+
+ return rk;
+}
+
+
+/**
+ * @brief Find and parse next integer string in \p start.
+ * @returns Pointer after found integer string, or NULL if not found.
+ */
+static const void *
+find_next_int(const void *start, const void *end, int *intp) {
+ const char *p;
+ int collecting = 0;
+ int num = 0;
+
+ for (p = (const char *)start; p < (const char *)end; p++) {
+ if (isdigit((int)(*p))) {
+ collecting = 1;
+ num = (num * 10) + ((int)*p - ((int)'0'));
+ } else if (collecting)
+ break;
+ }
+
+ if (!collecting)
+ return NULL; /* No integer string found */
+
+ *intp = num;
+
+ return p;
+}
+
+
+/**
+ * @brief Process a message from the input consumer by parsing all
+ * integer strings, adding them, and then producing the sum
+ * the output topic using the transactional producer for the given
+ * inut partition.
+ */
+static void process_message(rd_kafka_t *consumer,
+ rd_kafka_t *producer,
+ const char *output_topic,
+ const rd_kafka_message_t *rkmessage) {
+ int num;
+ long unsigned sum = 0;
+ const void *p, *end;
+ rd_kafka_resp_err_t err;
+ char value[64];
+
+ if (rkmessage->len == 0)
+ return; /* Ignore empty messages */
+
+ p = rkmessage->payload;
+ end = ((const char *)rkmessage->payload) + rkmessage->len;
+
+ /* Find and sum all numbers in the message */
+ while ((p = find_next_int(p, end, &num)))
+ sum += num;
+
+ if (sum == 0)
+ return; /* No integers in message, ignore it. */
+
+ snprintf(value, sizeof(value), "%lu", sum);
+
+ /* Emit output message on transactional producer */
+ while (1) {
+ err = rd_kafka_producev(
+ producer, RD_KAFKA_V_TOPIC(output_topic),
+ /* Use same key as input message */
+ RD_KAFKA_V_KEY(rkmessage->key, rkmessage->key_len),
+ /* Value is the current sum of this
+ * transaction. */
+ RD_KAFKA_V_VALUE(value, strlen(value)),
+ /* Copy value since it is allocated on the stack */
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+
+ if (!err)
+ break;
+ else if (err == RD_KAFKA_RESP_ERR__QUEUE_FULL) {
+ /* If output queue fills up we need to wait for
+ * some delivery reports and then retry. */
+ rd_kafka_poll(producer, 100);
+ continue;
+ } else {
+ fprintf(stderr,
+ "WARNING: Failed to produce message to %s: "
+ "%s: aborting transaction\n",
+ output_topic, rd_kafka_err2str(err));
+ abort_transaction_and_rewind(consumer, producer);
+ return;
+ }
+ }
+}
+
+
+int main(int argc, char **argv) {
+ rd_kafka_t *producer, *consumer;
+ int msgcnt = 0;
+ time_t last_commit = 0;
+ const char *brokers, *input_topic, *output_topic;
+ rd_kafka_error_t *error;
+
+ /*
+ * Argument validation
+ */
+ if (argc != 4) {
+ fprintf(stderr,
+ "%% Usage: %s <broker> <input-topic> <output-topic>\n",
+ argv[0]);
+ return 1;
+ }
+
+ brokers = argv[1];
+ input_topic = argv[2];
+ output_topic = argv[3];
+
+ /* Signal handler for clean shutdown */
+ signal(SIGINT, stop);
+
+ producer = create_transactional_producer(brokers, output_topic);
+
+ consumer = create_input_consumer(brokers, input_topic, producer);
+
+ fprintf(stdout,
+ "Expecting integers to sum on input topic %s ...\n"
+ "To generate input messages you can use:\n"
+ " $ seq 1 100 | examples/producer %s %s\n"
+ "Observe summed integers on output topic %s:\n"
+ " $ examples/consumer %s just-watching %s\n"
+ "\n",
+ input_topic, brokers, input_topic, output_topic, brokers,
+ output_topic);
+
+ /* Begin transaction and start waiting for messages */
+ error = rd_kafka_begin_transaction(producer);
+ if (error)
+ fatal_error("Failed to begin transaction", error);
+
+ while (run) {
+ rd_kafka_message_t *msg;
+
+ /* Commit transaction every 100 messages or 5 seconds */
+ if (msgcnt > 0 &&
+ (msgcnt > 100 || last_commit + 5 <= time(NULL))) {
+ printf("msgcnt %d, elapsed %d\n", msgcnt,
+ (int)(time(NULL) - last_commit));
+ commit_transaction_and_start_new(consumer, producer);
+ msgcnt = 0;
+ last_commit = time(NULL);
+ }
+
+ /* Wait for new mesages or error events */
+ msg = rd_kafka_consumer_poll(consumer, 1000 /*1 second*/);
+ if (!msg)
+ continue; /* Poll timeout */
+
+ if (msg->err) {
+ /* Client errors are typically just informational
+ * since the client will automatically try to recover
+ * from all types of errors.
+ * It is thus sufficient for the application to log and
+ * continue operating when a consumer error is
+ * encountered. */
+ fprintf(stderr, "WARNING: Consumer error: %s\n",
+ rd_kafka_message_errstr(msg));
+ rd_kafka_message_destroy(msg);
+ continue;
+ }
+
+ /* Process message */
+ process_message(consumer, producer, output_topic, msg);
+
+ rd_kafka_message_destroy(msg);
+
+ msgcnt++;
+ }
+
+ fprintf(stdout, "Closing consumer\n");
+ rd_kafka_consumer_close(consumer);
+ rd_kafka_destroy(consumer);
+
+ fprintf(stdout, "Closing producer\n");
+ rd_kafka_destroy(producer);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp b/fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp
new file mode 100644
index 000000000..a80dfea30
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/examples/win_ssl_cert_store.cpp
@@ -0,0 +1,395 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Example of utilizing the Windows Certificate store with SSL.
+ */
+
+#include <iostream>
+#include <string>
+#include <cstdlib>
+#include <cstdio>
+#include <csignal>
+#include <cstring>
+#include <sstream>
+
+#include "../win32/wingetopt.h"
+#include <windows.h>
+#include <wincrypt.h>
+
+/*
+ * Typically include path in a real application would be
+ * #include <librdkafka/rdkafkacpp.h>
+ */
+#include "rdkafkacpp.h"
+
+
+
+class ExampleStoreRetriever {
+ public:
+ ExampleStoreRetriever(std::string const &subject, std::string const &pass) :
+ m_cert_subject(subject),
+ m_password(pass),
+ m_cert_store(NULL),
+ m_cert_ctx(NULL) {
+ load_certificate();
+ }
+
+ ~ExampleStoreRetriever() {
+ if (m_cert_ctx)
+ CertFreeCertificateContext(m_cert_ctx);
+
+ if (m_cert_store)
+ CertCloseStore(m_cert_store, 0);
+ }
+
+ /* @returns the public key in DER format */
+ const std::vector<unsigned char> get_public_key() {
+ std::vector<unsigned char> buf((size_t)m_cert_ctx->cbCertEncoded);
+ buf.assign((const char *)m_cert_ctx->pbCertEncoded,
+ (const char *)m_cert_ctx->pbCertEncoded +
+ (size_t)m_cert_ctx->cbCertEncoded);
+ return buf;
+ }
+
+ /* @returns the private key in PCKS#12 format */
+ const std::vector<unsigned char> get_private_key() {
+ ssize_t ret = 0;
+ /*
+ * In order to export the private key the certificate
+ * must first be marked as exportable.
+ *
+ * Steps to export the certificate
+ * 1) Create an in-memory cert store
+ * 2) Add the certificate to the store
+ * 3) Export the private key from the in-memory store
+ */
+
+ /* Create an in-memory cert store */
+ HCERTSTORE hMemStore =
+ CertOpenStore(CERT_STORE_PROV_MEMORY, 0, NULL, 0, NULL);
+ if (!hMemStore)
+ throw "Failed to create in-memory cert store: " +
+ GetErrorMsg(GetLastError());
+
+ /* Add certificate to store */
+ if (!CertAddCertificateContextToStore(hMemStore, m_cert_ctx,
+ CERT_STORE_ADD_USE_EXISTING, NULL))
+ throw "Failed to add certificate to store: " +
+ GetErrorMsg(GetLastError());
+
+ /*
+ * Export private key from cert
+ */
+ CRYPT_DATA_BLOB db = {NULL};
+
+ std::wstring w_password(m_password.begin(), m_password.end());
+
+ /* Acquire output size */
+ if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
+ EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
+ REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
+ throw "Failed to export private key: " + GetErrorMsg(GetLastError());
+
+ std::vector<unsigned char> buf;
+
+ buf.resize(db.cbData);
+ db.pbData = &buf[0];
+
+ /* Extract key */
+ if (!PFXExportCertStoreEx(hMemStore, &db, w_password.c_str(), NULL,
+ EXPORT_PRIVATE_KEYS | REPORT_NO_PRIVATE_KEY |
+ REPORT_NOT_ABLE_TO_EXPORT_PRIVATE_KEY))
+ throw "Failed to export private key (PFX): " +
+ GetErrorMsg(GetLastError());
+
+ CertCloseStore(hMemStore, 0);
+
+ buf.resize(db.cbData);
+
+ return buf;
+ }
+
+ private:
+ void load_certificate() {
+ if (m_cert_ctx)
+ return;
+
+ m_cert_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, NULL,
+ CERT_SYSTEM_STORE_CURRENT_USER, L"My");
+ if (!m_cert_store)
+ throw "Failed to open cert store: " + GetErrorMsg(GetLastError());
+
+ m_cert_ctx = CertFindCertificateInStore(
+ m_cert_store, X509_ASN_ENCODING, 0, CERT_FIND_SUBJECT_STR,
+ /* should probally do a better std::string to std::wstring conversion */
+ std::wstring(m_cert_subject.begin(), m_cert_subject.end()).c_str(),
+ NULL);
+ if (!m_cert_ctx) {
+ CertCloseStore(m_cert_store, 0);
+ m_cert_store = NULL;
+ throw "Certificate " + m_cert_subject +
+ " not found in cert store: " + GetErrorMsg(GetLastError());
+ }
+ }
+
+ std::string GetErrorMsg(unsigned long error) {
+ char *message = NULL;
+ size_t ret = FormatMessageA(
+ FORMAT_MESSAGE_ALLOCATE_BUFFER | FORMAT_MESSAGE_FROM_SYSTEM, nullptr,
+ error, 0, (char *)&message, 0, nullptr);
+ if (ret == 0) {
+ std::stringstream ss;
+
+ ss << std::string("could not format message for ") << error;
+ return ss.str();
+ } else {
+ std::string result(message, ret);
+ LocalFree(message);
+ return result;
+ }
+ }
+
+ private:
+ std::string m_cert_subject;
+ std::string m_password;
+ PCCERT_CONTEXT m_cert_ctx;
+ HCERTSTORE m_cert_store;
+};
+
+
+class PrintingSSLVerifyCb : public RdKafka::SslCertificateVerifyCb {
+ /* This SSL cert verification callback simply prints the certificates
+ * in the certificate chain.
+ * It provides no validation, everything is ok. */
+ public:
+ bool ssl_cert_verify_cb(const std::string &broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ std::string &errstr) {
+ PCCERT_CONTEXT ctx = CertCreateCertificateContext(
+ X509_ASN_ENCODING | PKCS_7_ASN_ENCODING, (const uint8_t *)buf,
+ static_cast<unsigned long>(size));
+
+ if (!ctx)
+ std::cerr << "Failed to parse certificate" << std::endl;
+
+ char subject[256] = "n/a";
+ char issuer[256] = "n/a";
+
+ CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE, 0, NULL, subject,
+ sizeof(subject));
+
+ CertGetNameStringA(ctx, CERT_NAME_FRIENDLY_DISPLAY_TYPE,
+ CERT_NAME_ISSUER_FLAG, NULL, issuer, sizeof(issuer));
+
+ std::cerr << "Broker " << broker_name << " (" << broker_id << "): "
+ << "certificate depth " << depth << ", X509 error " << *x509_error
+ << ", subject " << subject << ", issuer " << issuer << std::endl;
+
+ if (ctx)
+ CertFreeCertificateContext(ctx);
+
+ return true;
+ }
+};
+
+
+/**
+ * @brief Print the brokers in the cluster.
+ */
+static void print_brokers(RdKafka::Handle *handle,
+ const RdKafka::Metadata *md) {
+ std::cout << md->brokers()->size() << " broker(s) in cluster "
+ << handle->clusterid(0) << std::endl;
+
+ /* Iterate brokers */
+ RdKafka::Metadata::BrokerMetadataIterator ib;
+ for (ib = md->brokers()->begin(); ib != md->brokers()->end(); ++ib)
+ std::cout << " broker " << (*ib)->id() << " at " << (*ib)->host() << ":"
+ << (*ib)->port() << std::endl;
+}
+
+
+int main(int argc, char **argv) {
+ std::string brokers;
+ std::string errstr;
+ std::string cert_subject;
+ std::string priv_key_pass;
+
+ /*
+ * Create configuration objects
+ */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ RdKafka::Conf *tconf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+
+ int opt;
+ while ((opt = getopt(argc, argv, "b:d:X:s:p:")) != -1) {
+ switch (opt) {
+ case 'b':
+ brokers = optarg;
+ break;
+ case 'd':
+ if (conf->set("debug", optarg, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ break;
+ case 'X': {
+ char *name, *val;
+
+ name = optarg;
+ if (!(val = strchr(name, '='))) {
+ std::cerr << "%% Expected -X property=value, not " << name << std::endl;
+ exit(1);
+ }
+
+ *val = '\0';
+ val++;
+
+ if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+ } break;
+
+ case 's':
+ cert_subject = optarg;
+ break;
+
+ case 'p':
+ priv_key_pass = optarg;
+ if (conf->set("ssl.key.password", optarg, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ break;
+
+ default:
+ goto usage;
+ }
+ }
+
+ if (brokers.empty() || optind != argc) {
+ usage:
+ std::string features;
+ conf->get("builtin.features", features);
+ fprintf(stderr,
+ "Usage: %s [options] -b <brokers> -s <cert-subject> -p "
+ "<priv-key-password>\n"
+ "\n"
+ "Windows Certificate Store integration example.\n"
+ "Use certlm.msc or mmc to view your certificates.\n"
+ "\n"
+ "librdkafka version %s (0x%08x, builtin.features \"%s\")\n"
+ "\n"
+ " Options:\n"
+ " -b <brokers> Broker address\n"
+ " -s <cert> The subject name of the client's SSL "
+ "certificate to use\n"
+ " -p <pass> The private key password\n"
+ " -d [facs..] Enable debugging contexts: %s\n"
+ " -X <prop=name> Set arbitrary librdkafka "
+ "configuration property\n"
+ "\n",
+ argv[0], RdKafka::version_str().c_str(), RdKafka::version(),
+ features.c_str(), RdKafka::get_debug_contexts().c_str());
+ exit(1);
+ }
+
+ if (!cert_subject.empty()) {
+ try {
+ /* Load certificates from the Windows store */
+ ExampleStoreRetriever certStore(cert_subject, priv_key_pass);
+
+ std::vector<unsigned char> pubkey, privkey;
+
+ pubkey = certStore.get_public_key();
+ privkey = certStore.get_private_key();
+
+ if (conf->set_ssl_cert(RdKafka::CERT_PUBLIC_KEY, RdKafka::CERT_ENC_DER,
+ &pubkey[0], pubkey.size(),
+ errstr) != RdKafka::Conf::CONF_OK)
+ throw "Failed to set public key: " + errstr;
+
+ if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY,
+ RdKafka::CERT_ENC_PKCS12, &privkey[0],
+ privkey.size(), errstr) != RdKafka::Conf::CONF_OK)
+ throw "Failed to set private key: " + errstr;
+
+ } catch (const std::string &ex) {
+ std::cerr << ex << std::endl;
+ exit(1);
+ }
+ }
+
+
+ /*
+ * Set configuration properties
+ */
+ conf->set("bootstrap.servers", brokers, errstr);
+
+ /* We use the Certificiate verification callback to print the
+ * certificate chains being used. */
+ PrintingSSLVerifyCb ssl_verify_cb;
+
+ if (conf->set("ssl_cert_verify_cb", &ssl_verify_cb, errstr) !=
+ RdKafka::Conf::CONF_OK) {
+ std::cerr << errstr << std::endl;
+ exit(1);
+ }
+
+ /* Create any type of client, producering being the cheapest. */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Failed to create producer: " << errstr << std::endl;
+ exit(1);
+ }
+
+ RdKafka::Metadata *metadata;
+
+ /* Fetch metadata */
+ RdKafka::ErrorCode err = producer->metadata(false, NULL, &metadata, 5000);
+ if (err != RdKafka::ERR_NO_ERROR) {
+ std::cerr << "%% Failed to acquire metadata: " << RdKafka::err2str(err)
+ << std::endl;
+ exit(1);
+ }
+
+ print_brokers(producer, metadata);
+
+ delete metadata;
+ delete producer;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/lds-gen.py b/fluent-bit/lib/librdkafka-2.1.0/lds-gen.py
new file mode 100755
index 000000000..44c718d13
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/lds-gen.py
@@ -0,0 +1,73 @@
+#!/usr/bin/env python3
+#
+# librdkafka - Apache Kafka C library
+#
+# Copyright (c) 2018 Magnus Edenhill
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+#
+# Generate linker script to only expose symbols of the public API
+#
+
+import sys
+import re
+
+
+if __name__ == '__main__':
+
+ funcs = list()
+ last_line = ''
+
+ for line in sys.stdin:
+ if line.startswith('typedef'):
+ last_line = line
+ continue
+ m = re.match(r'^(\S+.*\s+\**)?(rd_kafka_[\w_]+)\s*\([^)]', line)
+ if m:
+ sym = m.group(2)
+ # Ignore static (unused) functions
+ m2 = re.match(
+ r'(RD_UNUSED|__attribute__\(\(unused\)\))',
+ last_line)
+ if not m2:
+ funcs.append(sym)
+ last_line = ''
+ else:
+ last_line = line
+
+ # Special symbols not covered by above matches or not exposed in
+ # the public header files.
+ funcs.append('rd_ut_coverage_check')
+
+ print('# Automatically generated by lds-gen.py - DO NOT EDIT')
+ print('{\n global:')
+ if len(funcs) == 0:
+ print(' *;')
+ else:
+ for f in sorted(funcs):
+ print(' %s;' % f)
+
+ print('local:\n *;')
+
+ print('};')
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mainpage.doxy b/fluent-bit/lib/librdkafka-2.1.0/mainpage.doxy
new file mode 100644
index 000000000..a02499744
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mainpage.doxy
@@ -0,0 +1,40 @@
+/**
+ * @mainpage librdkafka documentation
+ *
+ * librdkafka is the Apache Kafka C/C++ client library.
+ *
+ * @section intro Introduction
+ *
+ * For an introduction and manual to librdkafka see \ref INTRODUCTION.md
+ *
+ * @section conf Configuration
+ *
+ * librdkafka is highly configurable to meet any deployment demands.
+ * It is usually safe to leave most configuration properties to their default
+ * values.
+ *
+ * See \ref CONFIGURATION.md for the full list of supported configuration properties.
+*
+ * @remark Application developers are recommended to provide a non-hardcoded
+ * interface to librdkafka's string based name-value configuration
+ * property interface, allowing users to configure any librdkafka
+ * property directly without alterations to the application.
+ * This allows for seamless upgrades where linking to a new version
+ * of librdkafka automatically provides new configuration
+ * based features.
+ *
+ * @section stats Statistics
+ *
+ * librdkafka provides detailed metrics through its statistics interface.
+ *
+ * See \ref STATISTICS.md and \ref rd_kafka_conf_set_stats_cb.
+ *
+ * @section c_api C API
+ *
+ * The C API is documented in rdkafka.h
+ *
+ * @section cpp_api C++ API
+ *
+ * The C++ API is documented in rdkafkacpp.h
+ */
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/mklove/.gitignore
new file mode 100644
index 000000000..3f9cfafd3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/.gitignore
@@ -0,0 +1 @@
+deps
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/Makefile.base b/fluent-bit/lib/librdkafka-2.1.0/mklove/Makefile.base
new file mode 100755
index 000000000..91be43917
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/Makefile.base
@@ -0,0 +1,329 @@
+# Base Makefile providing various standard targets
+# Part of mklove suite but may be used independently.
+
+MKL_RED?= \033[031m
+MKL_GREEN?= \033[032m
+MKL_YELLOW?= \033[033m
+MKL_BLUE?= \033[034m
+MKL_CLR_RESET?= \033[0m
+
+DEPS= $(OBJS:%.o=%.d)
+
+# TOPDIR is "TOPDIR/mklove/../" i.e., TOPDIR.
+# We do it with two dir calls instead of /.. to support mklove being symlinked.
+MKLOVE_DIR := $(dir $(lastword $(MAKEFILE_LIST)))
+TOPDIR = $(MKLOVE_DIR:mklove/=.)
+
+
+# Convert LIBNAME ("libxyz") to "xyz"
+LIBNAME0=$(LIBNAME:lib%=%)
+
+# Silence lousy default ARFLAGS (rv)
+ARFLAGS=
+
+ifndef MKL_MAKEFILE_CONFIG
+-include $(TOPDIR)/Makefile.config
+endif
+
+# Use C compiler as default linker.
+# C++ libraries will need to override this with CXX after
+# including Makefile.base
+CC_LD?=$(CC)
+
+_UNAME_S := $(shell uname -s)
+ifeq ($(_UNAME_S),Darwin)
+ LIBFILENAME=$(LIBNAME).$(LIBVER)$(SOLIB_EXT)
+ LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT)
+ LIBFILENAMEDBG=$(LIBNAME)-dbg.$(LIBVER)$(SOLIB_EXT)
+ LDD_PRINT="otool -L"
+else
+ LIBFILENAME=$(LIBNAME)$(SOLIB_EXT).$(LIBVER)
+ LIBFILENAMELINK=$(LIBNAME)$(SOLIB_EXT)
+ LIBFILENAMEDBG=$(LIBNAME)-dbg$(SOLIB_EXT).$(LIBVER)
+ LDD_PRINT="ldd"
+endif
+
+# DESTDIR must be an absolute path
+ifneq ($(DESTDIR),)
+DESTDIR:=$(abspath $(DESTDIR))
+endif
+
+INSTALL?= install
+INSTALL_PROGRAM?= $(INSTALL)
+INSTALL_DATA?= $(INSTALL) -m 644
+
+prefix?= /usr/local
+exec_prefix?= $(prefix)
+bindir?= $(exec_prefix)/bin
+sbindir?= $(exec_prefix)/sbin
+libexecdir?= $(exec_prefix)/libexec/ # append PKGNAME on install
+datarootdir?= $(prefix)/share
+datadir?= $(datarootdir) # append PKGNAME on install
+sysconfdir?= $(prefix)/etc
+sharedstatedir?=$(prefix)/com
+localestatedir?=$(prefix)/var
+runstatedir?= $(localestatedir)/run
+includedir?= $(prefix)/include
+docdir?= $(datarootdir)/doc/$(PKGNAME)
+infodir?= $(datarootdir)/info
+libdir?= $(prefix)/lib
+localedir?= $(datarootdir)/locale
+pkgconfigdir?= $(libdir)/pkgconfig
+mandir?= $(datarootdir)/man
+man1dir?= $(mandir)/man1
+man2dir?= $(mandir)/man2
+man3dir?= $(mandir)/man3
+man4dir?= $(mandir)/man4
+man5dir?= $(mandir)/man5
+man6dir?= $(mandir)/man6
+man7dir?= $(mandir)/man7
+man8dir?= $(mandir)/man8
+
+# An application Makefile should set DISABLE_LDS=y prior to
+# including Makefile.base if it does not wish to have a linker-script.
+ifeq ($(WITH_LDS)-$(DISABLE_LDS),y-)
+# linker-script file
+LIBNAME_LDS?=$(LIBNAME).lds
+endif
+
+# Checks that mklove is set up and ready for building
+mklove-check:
+ @if [ ! -f "$(TOPDIR)/Makefile.config" ]; then \
+ printf "$(MKL_RED)$(TOPDIR)/Makefile.config missing: please run ./configure$(MKL_CLR_RESET)\n" ; \
+ exit 1 ; \
+ fi
+
+%.o: %.c
+ $(CC) -MD -MP $(CPPFLAGS) $(CFLAGS) -c $< -o $@
+
+%.o: %.cpp
+ $(CXX) -MD -MP $(CPPFLAGS) $(CXXFLAGS) -c $< -o $@
+
+
+lib: $(LIBFILENAME) $(LIBNAME).a $(LIBNAME)-static.a $(LIBFILENAMELINK) lib-gen-pkg-config
+
+# Linker-script (if WITH_LDS=y): overridable by application Makefile
+$(LIBNAME_LDS):
+
+$(LIBFILENAME): $(OBJS) $(LIBNAME_LDS)
+ @printf "$(MKL_YELLOW)Creating shared library $@$(MKL_CLR_RESET)\n"
+ $(CC_LD) $(LDFLAGS) $(LIB_LDFLAGS) $(OBJS) -o $@ $(LIBS)
+ cp $@ $(LIBFILENAMEDBG)
+ifeq ($(WITH_STRIP),y)
+ $(STRIP) -S $@
+endif
+
+$(LIBNAME).a: $(OBJS)
+ @printf "$(MKL_YELLOW)Creating static library $@$(MKL_CLR_RESET)\n"
+ $(AR) rcs$(ARFLAGS) $@ $(OBJS)
+ cp $@ $(LIBNAME)-dbg.a
+ifeq ($(WITH_STRIP),y)
+ $(STRIP) -S $@
+ $(RANLIB) $@
+endif
+
+ifeq ($(MKL_NO_SELFCONTAINED_STATIC_LIB),y)
+_STATIC_FILENAME=$(LIBNAME).a
+$(LIBNAME)-static.a:
+
+else # MKL_NO_SELFCONTAINED_STATIC_LIB
+
+ifneq ($(MKL_STATIC_LIBS),)
+_STATIC_FILENAME=$(LIBNAME)-static.a
+$(LIBNAME)-static.a: $(LIBNAME).a
+ @printf "$(MKL_YELLOW)Creating self-contained static library $@$(MKL_CLR_RESET)\n"
+ifeq ($(HAS_LIBTOOL_STATIC),y)
+ $(LIBTOOL) -static -o $@ - $(LIBNAME).a $(MKL_STATIC_LIBS)
+else ifeq ($(HAS_GNU_AR),y)
+ (_tmp=$$(mktemp arstaticXXXXXX) ; \
+ echo "CREATE $@" > $$_tmp ; \
+ for _f in $(LIBNAME).a $(MKL_STATIC_LIBS) ; do \
+ echo "ADDLIB $$_f" >> $$_tmp ; \
+ done ; \
+ echo "SAVE" >> $$_tmp ; \
+ echo "END" >> $$_tmp ; \
+ cat $$_tmp ; \
+ ar -M < $$_tmp || exit 1 ; \
+ rm $$_tmp)
+else
+ for _f in $(LIBNAME).a $(MKL_STATIC_LIBS) ; do \
+ ar -r $@ $$_f ; \
+ done
+endif
+ cp $@ $(LIBNAME)-static-dbg.a
+# The self-contained static library is always stripped, regardless
+# of --enable-strip, since otherwise it would become too big.
+ $(STRIP) -S $@
+ $(RANLIB) $@
+
+ifneq ($(MKL_DYNAMIC_LIBS),)
+ @printf "$(MKL_RED)WARNING:$(MKL_YELLOW) $@: The following libraries were not available as static libraries and need to be linked dynamically: $(MKL_DYNAMIC_LIBS)$(MKL_CLR_RESET)\n"
+endif # MKL_DYNAMIC_LIBS
+
+else # MKL_STATIC_LIBS is empty
+_STATIC_FILENAME=$(LIBNAME).a
+$(LIBNAME)-static.a: $(LIBNAME).a
+ @printf "$(MKL_RED)WARNING:$(MKL_YELLOW) $@: No static libraries available/enabled for inclusion in self-contained static library $@: this library will be identical to $(LIBNAME).a$(MKL_CLR_RESET)\n"
+ifneq ($(MKL_DYNAMIC_LIBS),)
+ @printf "$(MKL_RED)WARNING:$(MKL_YELLOW) $@: The following libraries were not available as static libraries and need to be linked dynamically: $(MKL_DYNAMIC_LIBS)$(MKL_CLR_RESET)\n"
+ cp $(LIBNAME).a $@
+ cp $(LIBNAME)-dbg.a $(LIBNAME)-static-dbg.a
+ cp $@ $(LIBNAME)-static-dbg.a
+endif # MKL_DYNAMIC_LIBS
+endif # MKL_STATIC_LIBS
+
+endif # MKL_NO_SELFCONTAINED_STATIC_LIB
+
+$(LIBFILENAMELINK): $(LIBFILENAME)
+ @printf "$(MKL_YELLOW)Creating $@ symlink$(MKL_CLR_RESET)\n"
+ rm -f "$@" && ln -s "$^" "$@"
+
+
+# pkg-config .pc file definition
+ifeq ($(GEN_PKG_CONFIG),y)
+define _PKG_CONFIG_DEF
+prefix=$(prefix)
+libdir=$(libdir)
+includedir=$(includedir)
+
+Name: $(LIBNAME)
+Description: $(MKL_APP_DESC_ONELINE)
+Version: $(MKL_APP_VERSION)
+Requires.private: $(MKL_PKGCONFIG_REQUIRES_PRIVATE)
+Cflags: -I$${includedir}
+Libs: -L$${libdir} -l$(LIBNAME0)
+Libs.private: $(MKL_PKGCONFIG_LIBS_PRIVATE)
+endef
+
+export _PKG_CONFIG_DEF
+
+define _PKG_CONFIG_STATIC_DEF
+prefix=$(prefix)
+libdir=$(libdir)
+includedir=$(includedir)
+
+Name: $(LIBNAME)-static
+Description: $(MKL_APP_DESC_ONELINE) (static)
+Version: $(MKL_APP_VERSION)
+Requires: $(MKL_PKGCONFIG_REQUIRES:rdkafka=rdkafka-static)
+Cflags: -I$${includedir}
+Libs: -L$${libdir} $${pc_sysrootdir}$${libdir}/$(_STATIC_FILENAME) $(MKL_PKGCONFIG_LIBS_PRIVATE)
+endef
+
+export _PKG_CONFIG_STATIC_DEF
+
+$(LIBNAME0).pc: $(TOPDIR)/Makefile.config
+ @printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n"
+ @echo "$$_PKG_CONFIG_DEF" > $@
+
+$(LIBNAME0)-static.pc: $(TOPDIR)/Makefile.config $(LIBNAME)-static.a
+ @printf "$(MKL_YELLOW)Generating pkg-config file $@$(MKL_CLR_RESET)\n"
+ @echo "$$_PKG_CONFIG_STATIC_DEF" > $@
+
+lib-gen-pkg-config: $(LIBNAME0).pc $(LIBNAME0)-static.pc
+
+lib-clean-pkg-config:
+ rm -f $(LIBNAME0).pc $(LIBNAME0)-static.pc
+else
+lib-gen-pkg-config:
+lib-clean-pkg-config:
+endif
+
+
+$(BIN): $(OBJS)
+ @printf "$(MKL_YELLOW)Creating program $@$(MKL_CLR_RESET)\n"
+ $(CC_LD) $(CPPFLAGS) $(LDFLAGS) $(OBJS) -o $@ $(LIBS)
+
+
+file-check:
+ @printf "$(MKL_YELLOW)Checking $(LIBNAME) integrity$(MKL_CLR_RESET)\n"
+ @RET=true ; \
+ for f in $(CHECK_FILES) ; do \
+ printf "%-30s " $$f ; \
+ if [ -f "$$f" ]; then \
+ printf "$(MKL_GREEN)OK$(MKL_CLR_RESET)\n" ; \
+ else \
+ printf "$(MKL_RED)MISSING$(MKL_CLR_RESET)\n" ; \
+ RET=false ; \
+ fi ; \
+ done ; \
+ $$RET
+
+copyright-check:
+ @(_exit=0 ; \
+ for f in $$(git ls-tree -r --name-only HEAD | \
+ egrep '\.(c|h|cpp|sh|py|pl)$$' ) ; do \
+ if [ -n "$(MKL_COPYRIGHT_SKIP)" ] && echo "$$f" | egrep -q "$(MKL_COPYRIGHT_SKIP)" ; then \
+ continue ; \
+ fi ; \
+ if ! head -40 $$f | grep -qi copyright $$f ; then \
+ echo error: Copyright missing in $$f ; \
+ _exit=1 ; \
+ fi; \
+ done ; \
+ exit $$_exit)
+
+
+lib-install:
+ @printf "$(MKL_YELLOW)Install $(LIBNAME) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+ $(INSTALL) -d $$DESTDIR$(includedir)/$(PKGNAME)
+ $(INSTALL) -d $$DESTDIR$(libdir)
+ $(INSTALL) $(HDRS) $$DESTDIR$(includedir)/$(PKGNAME)
+ $(INSTALL) $(LIBNAME).a $$DESTDIR$(libdir)
+ [ ! -f $(LIBNAME)-static.a ] || $(INSTALL) $(LIBNAME)-static.a $$DESTDIR$(libdir)
+ $(INSTALL) $(LIBFILENAME) $$DESTDIR$(libdir)
+ [ -f "$(LIBNAME0).pc" ] && ( \
+ $(INSTALL) -d $$DESTDIR$(pkgconfigdir) && \
+ $(INSTALL) -m 0644 $(LIBNAME0).pc $$DESTDIR$(pkgconfigdir) \
+ )
+ [ -f "$(LIBNAME0)-static.pc" ] && ( \
+ $(INSTALL) -d $$DESTDIR$(pkgconfigdir) && \
+ $(INSTALL) -m 0644 $(LIBNAME0)-static.pc $$DESTDIR$(pkgconfigdir) \
+ )
+ (cd $$DESTDIR$(libdir) && ln -sf $(LIBFILENAME) $(LIBFILENAMELINK))
+
+lib-uninstall:
+ @printf "$(MKL_YELLOW)Uninstall $(LIBNAME) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+ for hdr in $(HDRS) ; do \
+ rm -f $$DESTDIR$(includedir)/$(PKGNAME)/$$hdr ; done
+ rm -f $$DESTDIR$(libdir)/$(LIBNAME).a
+ rm -f $$DESTDIR$(libdir)/$(LIBNAME)-static.a
+ rm -f $$DESTDIR$(libdir)/$(LIBFILENAME)
+ rm -f $$DESTDIR$(libdir)/$(LIBFILENAMELINK)
+ rmdir $$DESTDIR$(includedir)/$(PKGNAME) || true
+ rm -f $$DESTDIR$(pkgconfigdir)/$(LIBNAME0).pc
+ rm -f $$DESTDIR$(pkgconfigdir)/$(LIBNAME0)-static.pc
+ rmdir $$DESTDIR$(pkgconfigdir) || true
+
+bin-install:
+ @printf "$(MKL_YELLOW)Install $(BIN) to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+ $(INSTALL) -d $$DESTDIR$(bindir) && \
+ $(INSTALL) $(BIN) $$DESTDIR$(bindir)
+
+bin-uninstall:
+ @printf "$(MKL_YELLOW)Uninstall $(BIN) from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+ rm -f $$DESTDIR$(bindir)/$(BIN)
+ rmdir $$DESTDIR$(bindir) || true
+
+doc-install: $(DOC_FILES)
+ @printf "$(MKL_YELLOW)Installing documentation to $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+ $(INSTALL) -d $$DESTDIR$(docdir)
+ $(INSTALL) $(DOC_FILES) $$DESTDIR$(docdir)
+
+doc-uninstall:
+ @printf "$(MKL_YELLOW)Uninstall documentation from $$DESTDIR$(prefix)$(MKL_CLR_RESET)\n"
+ for _f in $(DOC_FILES) ; do rm -f $$DESTDIR$(docdir)/$$_f ; done
+ rmdir $$DESTDIR$(docdir) || true
+
+generic-clean:
+ rm -f $(OBJS) $(DEPS)
+
+lib-clean: generic-clean lib-clean-pkg-config
+ rm -f $(LIBNAME)*.a $(LIBFILENAME) $(LIBFILENAMEDBG) \
+ $(LIBFILENAMELINK) $(LIBNAME_LDS)
+
+bin-clean: generic-clean
+ rm -f $(BIN)
+
+deps-clean:
+ rm -rf "$(MKLOVE_DIR)/deps"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.atomics b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.atomics
new file mode 100644
index 000000000..31639a7e4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.atomics
@@ -0,0 +1,144 @@
+#!/bin/bash
+#
+# Checks for atomic ops:
+# compiler builtin (__sync_..) and portable libatomic's (__atomic_..)
+# Will also provide abstraction by defining the prefix to use.
+#
+# Sets:
+# HAVE_ATOMICS
+# HAVE_ATOMICS_32
+# HAVE_ATOMICS_64
+# HAVE_ATOMICS_32_ATOMIC __atomic interface
+# HAVE_ATOMICS_32_SYNC __sync interface
+# HAVE_ATOMICS_64_ATOMIC __atomic interface
+# HAVE_ATOMICS_64_SYNC __sync interface
+# WITH_LIBATOMIC
+# LIBS
+#
+# ATOMIC_OP(OP1,OP2,PTR,VAL)
+# ATOMIC_OP32(OP1,OP2,PTR,VAL)
+# ATOMIC_OP64(OP1,OP2,PTR,VAL)
+# where op* is 'add,sub,fetch'
+# e.g: ATOMIC_OP32(add, fetch, &i, 10)
+# becomes __atomic_add_fetch(&i, 10, ..) or
+# __sync_add_and_fetch(&i, 10)
+#
+
+function checks {
+
+
+ # We prefer the newer __atomic stuff, but 64-bit atomics might
+ # require linking with -latomic, so we need to perform these tests
+ # in the proper order:
+ # __atomic 32
+ # __atomic 32 -latomic
+ # __sync 32
+ #
+ # __atomic 64
+ # __atomic 64 -latomic
+ # __sync 64
+
+ local _libs=
+ local _a32="__atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)"
+ local _a64="__atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)"
+
+ # 32-bit:
+ # Try fully builtin __atomic
+ if ! mkl_compile_check __atomic_32 HAVE_ATOMICS_32 cont CC "" \
+ "
+#include <inttypes.h>
+int32_t foo (int32_t i) {
+ return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+ then
+ # Try __atomic with -latomic
+ if mkl_compile_check --ldflags="-latomic" __atomic_32_lib HAVE_ATOMICS_32 \
+ cont CC "" \
+ "
+#include <inttypes.h>
+int32_t foo (int32_t i) {
+ return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+ then
+ _libs="-latomic"
+ mkl_allvar_set "__atomic_32_lib" "HAVE_ATOMICS_32_ATOMIC" "y"
+ else
+ # Try __sync interface
+ if mkl_compile_check __sync_32 HAVE_ATOMICS_32 disable CC "" \
+ "
+#include <inttypes.h>
+int32_t foo (int32_t i) {
+ return __sync_add_and_fetch(&i, 1);
+}"
+ then
+ _a32="__sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)"
+ mkl_allvar_set "__sync_32" "HAVE_ATOMICS_32_SYNC" "y"
+ else
+ _a32=""
+ fi
+ fi
+ else
+ mkl_allvar_set "__atomic_32" "HAVE_ATOMICS_32_ATOMIC" "y"
+ fi
+
+
+ if [[ ! -z $_a32 ]]; then
+ mkl_define_set "atomic_32" "ATOMIC_OP32(OP1,OP2,PTR,VAL)" "code:$_a32"
+ fi
+
+
+
+ # 64-bit:
+ # Try fully builtin __atomic
+ if ! mkl_compile_check __atomic_64 HAVE_ATOMICS_64 cont CC "" \
+ "
+#include <inttypes.h>
+int64_t foo (int64_t i) {
+ return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+ then
+ # Try __atomic with -latomic
+ if mkl_compile_check --ldflags="-latomic" __atomic_64_lib HAVE_ATOMICS_64 \
+ cont CC "" \
+ "
+#include <inttypes.h>
+int64_t foo (int64_t i) {
+ return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}"
+ then
+ _libs="-latomic"
+ mkl_allvar_set "__atomic_64_lib" "HAVE_ATOMICS_64_ATOMIC" "y"
+ else
+ # Try __sync interface
+ if mkl_compile_check __sync_64 HAVE_ATOMICS_64 disable CC "" \
+ "
+#include <inttypes.h>
+int64_t foo (int64_t i) {
+ return __sync_add_and_fetch(&i, 1);
+}"
+ then
+ _a64="__sync_ ## OP1 ## _and_ ## OP2 (PTR, VAL)"
+ mkl_allvar_set "__sync_64" "HAVE_ATOMICS_64_SYNC" "y"
+ else
+ _a64=""
+ fi
+ fi
+ else
+ mkl_allvar_set "__atomic_64" "HAVE_ATOMICS_64_ATOMIC" "y"
+ fi
+
+
+ if [[ ! -z $_a64 ]]; then
+ mkl_define_set "atomic_64" "ATOMIC_OP64(OP1,OP2,PTR,VAL)" "code:$_a64"
+
+ # Define generic ATOMIC() macro identical to 64-bit atomics"
+ mkl_define_set "atomic_64" "ATOMIC_OP(OP1,OP2,PTR,VAL)" "code:$_a64"
+ fi
+
+
+ if [[ ! -z $_libs ]]; then
+ mkl_mkvar_append LDFLAGS LDFLAGS "-Wl,--as-needed"
+ mkl_mkvar_append LIBS LIBS "$_libs"
+ fi
+
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.base b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.base
new file mode 100644
index 000000000..1e216692b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.base
@@ -0,0 +1,2484 @@
+#!/bin/bash
+#
+#
+# mklove base configure module, implements the mklove configure framework
+#
+
+MKL_MODULES="base"
+MKL_CACHEVARS="CFLAGS LDFLAGS PKG_CONFIG_PATH"
+MKL_MKVARS=""
+MKL_DEFINES=""
+MKL_CHECKS=""
+MKL_LOAD_STACK=""
+
+MKL_IDNEXT=1
+
+# Default mklove directory to PWD/mklove
+[[ -z "$MKLOVE_DIR" ]] && MKLOVE_DIR="$PWD/mklove"
+
+MKL_OUTMK="$PWD/_mklout.mk"
+MKL_OUTH="$PWD/_mklout.h"
+MKL_OUTDBG="$PWD/config.log"
+
+MKL_GENERATORS="base:mkl_generate_late_vars"
+MKL_CLEANERS=""
+
+MKL_FAILS=""
+MKL_LATE_VARS=""
+
+MKL_OPTS_SET=""
+
+MKL_RED=""
+MKL_GREEN=""
+MKL_YELLOW=""
+MKL_BLUE=""
+MKL_CLR_RESET=""
+
+
+MKL_NO_DOWNLOAD=0
+MKL_INSTALL_DEPS=n
+MKL_SOURCE_DEPS_ONLY=n
+
+MKL_DESTDIR_ADDED=n
+
+if [[ -z "$MKL_REPO_URL" ]]; then
+ MKL_REPO_URL="http://github.com/edenhill/mklove/raw/master"
+fi
+
+
+
+###########################################################################
+#
+# Variable types:
+# env - Standard environment variables.
+# var - mklove runtime variable, cached or not.
+# mkvar - Makefile variables, also sets runvar
+# define - config.h variables/defines
+#
+###########################################################################
+
+# Low level variable assignment
+# Arguments:
+# variable name
+# variable value
+function mkl_var0_set {
+ export "$1"="$2"
+}
+
+# Sets a runtime variable (only used during configure)
+# If "cache" is provided these variables are cached to config.cache.
+# Arguments:
+# variable name
+# variable value
+# [ "cache" ]
+function mkl_var_set {
+ mkl_var0_set "$1" "$2"
+ if [[ $3 == "cache" ]]; then
+ if ! mkl_in_list "$MKL_CACHEVARS" "$1" ; then
+ MKL_CACHEVARS="$MKL_CACHEVARS $1"
+ fi
+ fi
+}
+
+# Unsets a mkl variable
+# Arguments:
+# variable name
+function mkl_var_unset {
+ unset $1
+}
+
+# Appends to a mkl variable (space delimited)
+# Arguments:
+# variable name
+# variable value
+function mkl_var_append {
+ if [[ -z ${!1} ]]; then
+ mkl_var_set "$1" "$2"
+ else
+ mkl_var0_set "$1" "${!1} $2"
+ fi
+}
+
+
+# Prepends to a mkl variable (space delimited)
+# Arguments:
+# variable name
+# variable value
+function mkl_var_prepend {
+ if [[ -z ${!1} ]]; then
+ mkl_var_set "$1" "$2"
+ else
+ mkl_var0_set "$1" "$2 ${!1}"
+ fi
+}
+
+# Shift the first word off a variable.
+# Arguments:
+# variable name
+function mkl_var_shift {
+ local n="${!1}"
+ mkl_var0_set "$1" "${n#* }"
+ return 0
+}
+
+
+# Returns the contents of mkl variable
+# Arguments:
+# variable name
+function mkl_var_get {
+ echo "${!1}"
+}
+
+
+
+
+# Set environment variable (runtime)
+# These variables are not cached nor written to any of the output files,
+# its just simply a helper wrapper for standard envs.
+# Arguments:
+# varname
+# varvalue
+function mkl_env_set {
+ mkl_var0_set "$1" "$2"
+}
+
+# Append to environment variable
+# Arguments:
+# varname
+# varvalue
+# [ separator (" ") ]
+function mkl_env_append {
+ local sep=" "
+ if [[ -z ${!1} ]]; then
+ mkl_env_set "$1" "$2"
+ else
+ [ ! -z ${3} ] && sep="$3"
+ mkl_var0_set "$1" "${!1}${sep}$2"
+ fi
+
+}
+
+# Prepend to environment variable
+# Arguments:
+# varname
+# varvalue
+# [ separator (" ") ]
+function mkl_env_prepend {
+ local sep=" "
+ if [[ -z ${!1} ]]; then
+ mkl_env_set "$1" "$2"
+ else
+ [ ! -z ${3} ] && sep="$3"
+ mkl_var0_set "$1" "$2${sep}${!1}"
+ fi
+
+}
+
+
+
+
+# Set a make variable (Makefile.config)
+# Arguments:
+# config name
+# variable name
+# value
+function mkl_mkvar_set {
+ if [[ ! -z $2 ]]; then
+ mkl_env_set "$2" "$3"
+ mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+ fi
+}
+
+
+# Prepends to a make variable (Makefile.config)
+# Arguments:
+# config name
+# variable name
+# value
+# [ separator (" ") ]
+function mkl_mkvar_prepend {
+ if [[ ! -z $2 ]]; then
+ mkl_env_prepend "$2" "$3" "$4"
+ mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+ fi
+}
+
+
+# Appends to a make variable (Makefile.config)
+# Arguments:
+# config name
+# variable name
+# value
+# [ separator (" ") ]
+function mkl_mkvar_append {
+ if [[ ! -z $2 ]]; then
+ mkl_env_append "$2" "$3" "$4"
+ mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+ fi
+}
+
+
+# Prepends to a make variable (Makefile.config)
+# Arguments:
+# config name
+# variable name
+# value
+# [ separator (" ") ]
+function mkl_mkvar_prepend {
+ if [[ ! -z $2 ]]; then
+ mkl_env_prepend "$2" "$3" "$4"
+ mkl_in_list "$MKL_MKVARS" "$2"|| mkl_env_append MKL_MKVARS $2
+ fi
+}
+
+# Return mkvar variable value
+# Arguments:
+# variable name
+function mkl_mkvar_get {
+ [[ -z ${!1} ]] && return 1
+ echo ${!1}
+ return 0
+}
+
+
+
+# Defines a config header define (config.h)
+# Arguments:
+# config name
+# define name
+# define value (optional, default: 1)
+# if value starts with code: then no "" are added
+function mkl_define_set {
+
+ if [[ -z $2 ]]; then
+ return 0
+ fi
+
+ local stmt=""
+ local defid=
+ if [[ $2 = *\(* ]]; then
+ # macro
+ defid="def_${2%%(*}"
+ else
+ # define
+ defid="def_$2"
+ fi
+
+ [[ -z $1 ]] || stmt="// $1\n"
+
+ local val="$3"
+ if [[ -z "$val" ]]; then
+ val="$(mkl_def $2 1)"
+ fi
+
+ # Define as code, string or integer?
+ if [[ $val == code:* ]]; then
+ # Code block, copy verbatim without quotes, strip code: prefix
+ val=${val#code:}
+ elif [[ ! ( "$val" =~ ^[0-9]+([lL]?[lL][dDuU]?)?$ || \
+ "$val" =~ ^0x[0-9a-fA-F]+([lL]?[lL][dDuU]?)?$ ) ]]; then
+ # String: quote
+ val="\"$val\""
+ fi
+ # else: unquoted integer/hex
+
+ stmt="${stmt}#define $2 $val"
+ mkl_env_set "$defid" "$stmt"
+ mkl_env_append MKL_DEFINES "$defid"
+}
+
+
+
+
+
+# Sets "all" configuration variables, that is:
+# for name set: Makefile variable, config.h define
+# Will convert value "y"|"n" to 1|0 for config.h
+# Arguments:
+# config name
+# variable name
+# value
+function mkl_allvar_set {
+ mkl_mkvar_set "$1" "$2" "$3"
+ local val=$3
+ if [[ $3 = "y" ]]; then
+ val=1
+ elif [[ $3 = "n" ]]; then
+ val=0
+ fi
+ mkl_define_set "$1" "$2" "$val"
+}
+
+
+###########################################################################
+#
+# Dependency installation, et.al.
+#
+#
+###########################################################################
+
+# Returns the local dependency directory.
+function mkl_depdir {
+ local dir="$MKLOVE_DIR/deps"
+ [[ -d $dir ]] || mkdir -p "$dir"
+ if ! grep -q ^deps$ "$MKLOVE_DIR/.gitignore" 2>/dev/null ; then
+ echo "deps" >> "$MKLOVE_DIR/.gitignore"
+ fi
+
+ echo "$dir"
+}
+
+# Returns the package's installation directory / DESTDIR.
+function mkl_dep_destdir {
+ echo "$(mkl_depdir)/dest"
+}
+
+# Returns the package's source directory.
+function mkl_dep_srcdir {
+ echo "$(mkl_depdir)/src/$1"
+}
+
+
+# Get the static library file name(s) for a package.
+function mkl_lib_static_fnames {
+ local name=$1
+ mkl_meta_get $name "static" ""
+}
+
+
+# Returns true if previous ./configure ran a dep install for this package.
+function mkl_dep_install_cached {
+ local name=$1
+
+ if [[ -n $(mkl_var_get "MKL_STATUS_${1}_INSTALL") ]] ||
+ [[ -n $(mkl_var_get "MKL_STATUS_${1}_INSTALL_SRC") ]]; then
+ return 0 # true
+ else
+ return 1 # false
+ fi
+}
+
+# Install an external dependency using the platform's native
+# package manager.
+# Should only be called from mkl_dep_install
+#
+# Param 1: config name
+function mkl_dep_install_pkg {
+ local name=$1
+ local iname="${name}_INSTALL"
+ local retcode=1 # default to fail
+ local method="none"
+ local pkgs
+ local cmd
+
+ mkl_dbg "Attempting native install of dependency $name on $MKL_DISTRO with effective user $EUID"
+
+
+ # Try the platform specific installer first.
+ case ${MKL_DISTRO}-${EUID} in
+ debian-0|ubuntu-0)
+ method=apt
+ pkgs=$(mkl_meta_get $name deb)
+ cmd="apt install -y $pkgs"
+ ;;
+
+ centos-0|rhel-0|redhat-0|fedora-0)
+ method=yum
+ pkgs=$(mkl_meta_get $name rpm)
+ cmd="yum install -y $pkgs"
+ ;;
+
+ alpine-0)
+ method=apk
+ pkgs=$(mkl_meta_get $name apk)
+ cmd="apk add $pkgs"
+ ;;
+
+ osx-*)
+ method=brew
+ pkgs=$(mkl_meta_get $name brew)
+ cmd="brew install $pkgs"
+ ;;
+
+ *)
+ mkl_dbg "$name: No native installer set for $name on $MKL_DISTRO (euid $EUID)"
+ return 1
+ ;;
+ esac
+
+ if [[ -z $pkgs ]]; then
+ mkl_dbg "$name: No packages to install ($method)"
+ return 1
+ fi
+
+ mkl_check_begin --verb "installing dependencies ($cmd)" $iname "" no-cache "$name"
+ $cmd >>$MKL_OUTDBG 2>&1
+ retcode=$?
+
+ if [[ $retcode -eq 0 ]]; then
+ mkl_dbg "Native install of $name (using $method, $cmd) succeeded"
+ mkl_check_done "$iname" "" cont "using $method"
+ mkl_meta_set $name installed_with "$method"
+ elif [[ $method != "none" ]]; then
+ mkl_dbg "Native install of $name (using $method, $cmd) failed: retcode $retcode"
+ mkl_check_failed "$iname" "" cont "using $method"
+ fi
+
+ return $retcode
+}
+
+
+# Returns 0 (yes) if this dependency has a source builder, else 1 (no)
+function mkl_dep_has_builder {
+ local name=$1
+ local func="${name}_install_source"
+ mkl_func_exists $func
+}
+
+
+# Returns 0 (yes) if this dependency has a package installer, else 1 (no)
+function mkl_dep_has_installer {
+ local name=$1
+ if mkl_dep_has_builder "$name" || \
+ [[ -n $(mkl_meta_get $name deb) ]] || \
+ [[ -n $(mkl_meta_get $name rpm) ]] || \
+ [[ -n $(mkl_meta_get $name brew) ]] || \
+ [[ -n $(mkl_meta_get $name apk) ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+
+# Install an external dependency from source.
+#
+# The resulting libraries must be installed in $ddir/usr/lib (or lib64),
+# and include files in $ddir/usr/include.
+#
+# Any dependency installed from source will be linked statically
+# regardless of --enable-static, if the build produced static libraries.
+
+#
+# Param 1: config name
+function mkl_dep_install_source {
+ local name=$1
+ local iname="${name}_INSTALL_SRC"
+ local retcode=
+
+ local func="${name}_install_source"
+
+ if ! mkl_dep_has_builder $name ; then
+ mkl_dbg "No source builder for $name ($func) available"
+ return 1
+ fi
+
+ mkl_check_begin --verb "building dependency" $iname "" no-cache "$name"
+
+ # Create install directory / DESTDIR
+ local ddir=$(mkl_dep_destdir $name)
+ [[ -d $ddir ]] || mkdir -p "$ddir"
+
+ # Create and go to source directory
+ local sdir=$(mkl_dep_srcdir $name)
+ [[ -d $sdir ]] || mkdir -p "$sdir"
+ mkl_pushd "$sdir"
+
+ local ilog="${sdir}/_mkl_install.log"
+
+ # Build and install
+ mkl_dbg "Building $name from source in $sdir (func $func)"
+
+ $func $name "$ddir" >$ilog 2>&1
+ retcode=$?
+
+ mkl_popd # $sdir
+
+ if [[ $retcode -eq 0 ]]; then
+ mkl_dbg "Source install of $name succeeded"
+ mkl_check_done "$iname" "" cont "ok" "from source"
+ mkl_meta_set $name installed_with "source"
+ else
+ mkl_dbg "Source install of $name failed"
+ mkl_check_failed "$iname" "" disable "source installer failed (see $ilog)"
+ mkl_err "$name source build failed, see $ilog for details. First 50 and last 50 lines:"
+ head -50 "$ilog"
+ echo " .... and last 50 lines ...."
+ tail -50 "$ilog"
+ fi
+
+ return $retcode
+}
+
+
+# Tries to resolve/find full paths to static libraries for a module,
+# using the provided scan dir path.
+# Any found libraries are set as STATIC_LIB_.. defines.
+#
+# Param 1: config name
+# Param 2: scandir
+#
+# Returns 0 if libraries were found, else 1.
+function mkl_resolve_static_libs {
+ local name="$1"
+ local scandir="$2"
+ local stlibfnames=$(mkl_lib_static_fnames $name)
+ local stlibvar="STATIC_LIB_${name}"
+
+ if [[ -z $stlibfnames || -n "${!stlibvar}" ]]; then
+ mkl_dbg "$name: not resolving static libraries (stlibfnames=$stlibfnames, $stlibvar=${!stlibvar})"
+ mkl_allvar_set "$name" "WITH_STATIC_LIB_$name" y
+ return 1
+ fi
+
+ local fname=
+ local stlibs=""
+ mkl_dbg "$name: resolving static libraries from $stlibfnames in $scandir"
+ for fname in $stlibfnames ; do
+ local stlib=$(find "${scandir}" -name "$fname" 2>/dev/null | head -1)
+ if [[ -n $stlib ]]; then
+ stlibs="${stlibs} $stlib"
+ fi
+ done
+
+ # Trim leading whitespaces
+ stlibs=${stlibs# }
+
+ if [[ -n $stlibs ]]; then
+ mkl_dbg "$name: $stlibvar: found static libs: $stlibs"
+ mkl_var_set $stlibvar "$stlibs" "cache"
+ mkl_allvar_set "$name" "WITH_STATIC_LIB_$name" y
+ return 0
+ else
+ mkl_dbg "$name: did not find any static libraries for $stlibfnames in ${scandir}"
+ return 1
+ fi
+}
+
+
+# Install an external dependecy
+#
+# Param 1: config name (e.g zstd)
+function mkl_dep_install {
+ local name=$1
+ local retcode=
+
+ local ddir=$(mkl_dep_destdir $name)
+
+ if [[ $MKL_SOURCE_DEPS_ONLY != y ]] || ! mkl_dep_has_builder $name ; then
+ #
+ # Try native package manager first, or if no source builder
+ # is available for this dependency.
+ #
+ mkl_dep_install_pkg $name
+ retcode=$?
+
+ if [[ $retcode -eq 0 ]]; then
+ return $retcode
+ fi
+ fi
+
+ #
+ # Try source installer.
+ #
+ mkl_dep_install_source $name
+ retcode=$?
+
+ if [[ $retcode -ne 0 ]]; then
+ if [[ $MKL_SOURCE_DEPS_ONLY == y ]]; then
+ # Require dependencies, regardless of original action,
+ # if --source-deps-only is specified, to ensure
+ # that we do indeed link with the desired library.
+ mkl_fail "$name" "" fail "Failed to install dependency $name"
+ fi
+ return $retcode
+ fi
+
+ local ddir=$(mkl_dep_destdir $name)
+
+ # Find the static library(s), if any.
+ if ! mkl_resolve_static_libs "$name" "${ddir}/usr"; then
+ # No static libraries found, set up dynamic linker path
+ mkl_mkvar_prepend LDFLAGS LDFLAGS "-L${ddir}/usr/lib64 -L${ddir}/usr/lib"
+ fi
+
+ # Add the deps destdir to various build flags so that tools can pick
+ # up the artifacts (.pc files, includes, libs, etc) they need.
+ if [[ $MKL_DESTDIR_ADDED == n ]]; then
+ # Add environment variables so that later built dependencies
+ # can find this one.
+ mkl_env_prepend LDFLAGS "-L${ddir}/usr/lib64 -L${ddir}/usr/lib"
+ mkl_env_prepend CPPFLAGS "-I${ddir}/usr/include"
+ mkl_env_prepend PKG_CONFIG_PATH "${ddir}/usr/lib/pkgconfig" ":"
+ # And tell pkg-config to get static linker flags.
+ mkl_env_set PKG_CONFIG "${PKG_CONFIG} --static"
+ MKL_DESTDIR_ADDED=y
+ fi
+
+ # Append the package's install path to compiler and linker flags.
+ mkl_dbg "$name: Adding install-deps paths ($ddir) to compiler and linker flags"
+ mkl_mkvar_prepend CPPFLAGS CPPFLAGS "-I${ddir}/usr/include"
+
+ return $retcode
+}
+
+
+# Apply patch to a source dependency.
+#
+# Param 1: config name (e.g. libssl)
+# Param 2: patch number (optional, else all)
+#
+# Returns 0 on success or 1 on error.
+function mkl_patch {
+ local name=$1
+ local patchnr="$2"
+
+ if [[ -z $patchnr ]]; then
+ patchnr="????"
+ fi
+
+ local patchfile=
+ local cnt=0
+ for patchfile in $(echo ${MKLOVE_DIR}/modules/patches/${name}.${patchnr}-*.patch | sort); do
+ mkl_dbg "$1: applying patch $patchfile"
+ patch -p1 < $patchfile
+ local retcode=$?
+ if [[ $retcode != 0 ]]; then
+ mkl_err "mkl_patch: $1: failed to apply patch $patchfile: see source dep build log for details"
+ return 1
+ fi
+ cnt=$(($cnt + 1))
+ done
+
+ if [[ $cnt -lt 1 ]]; then
+ mkl_err "mkl_patch: $1: no patches matchign $patchnr found"
+ return 1
+ fi
+
+ return 0
+}
+
+
+###########################################################################
+#
+#
+# Check failure functionality
+#
+#
+###########################################################################
+
+
+# Summarize all fatal failures and then exits.
+function mkl_fail_summary {
+ echo "
+
+"
+
+ local pkg_cmd=""
+ local install_pkgs=""
+ mkl_err "###########################################################"
+ mkl_err "### Configure failed ###"
+ mkl_err "###########################################################"
+ mkl_err "### Accumulated failures: ###"
+ mkl_err "###########################################################"
+ local n
+ for n in $MKL_FAILS ; do
+ local conf=$(mkl_var_get MKL_FAIL__${n}__conf)
+ mkl_err " $conf ($(mkl_var_get MKL_FAIL__${n}__define)) $(mkl_meta_get $conf name)"
+ if mkl_meta_exists $conf desc; then
+ mkl_err0 " desc: $MKL_YELLOW$(mkl_meta_get $conf desc)$MKL_CLR_RESET"
+ fi
+ mkl_err0 " module: $(mkl_var_get MKL_FAIL__${n}__module)"
+ mkl_err0 " action: $(mkl_var_get MKL_FAIL__${n}__action)"
+ mkl_err0 " reason:
+$(mkl_var_get MKL_FAIL__${n}__reason)
+"
+ # Dig up some metadata to assist the user
+ case $MKL_DISTRO in
+ debian|ubuntu)
+ local debs=$(mkl_meta_get $conf "deb")
+ pkg_cmd="sudo apt install -y"
+ if [[ ${#debs} > 0 ]]; then
+ install_pkgs="$install_pkgs $debs"
+ fi
+ ;;
+ centos|rhel|redhat|fedora)
+ local rpms=$(mkl_meta_get $conf "rpm")
+ pkg_cmd="sudo yum install -y"
+ if [[ ${#rpms} > 0 ]]; then
+ install_pkgs="$install_pkgs $rpms"
+ fi
+ ;;
+ alpine)
+ local apks=$(mkl_meta_get $conf "apk")
+ pkg_cmd="apk add "
+ if [[ ${#apks} > 0 ]]; then
+ install_pkgs="$install_pkgs $apks"
+ fi
+ ;;
+ osx)
+ local pkgs=$(mkl_meta_get $conf "brew")
+ pkg_cmd="brew install"
+ if [[ ${#pkgs} > 0 ]]; then
+ install_pkgs="$install_pkgs $pkgs"
+ fi
+ ;;
+ esac
+ done
+
+ if [[ ! -z $install_pkgs ]]; then
+ mkl_err "###########################################################"
+ mkl_err "### Installing the following packages might help: ###"
+ mkl_err "###########################################################"
+ mkl_err0 "$pkg_cmd $install_pkgs"
+ mkl_err0 ""
+ fi
+ exit 1
+}
+
+
+# Checks if there were failures.
+# Returns 0 if there were no failures, else calls failure summary and exits.
+function mkl_check_fails {
+ if [[ ${#MKL_FAILS} = 0 ]]; then
+ return 0
+ fi
+ mkl_fail_summary
+}
+
+# A check has failed but we want to carry on (and we should!).
+# We fail it all later.
+# Arguments:
+# config name
+# define name
+# action
+# reason
+function mkl_fail {
+ local n="$(mkl_env_esc "$1")"
+ mkl_var_set "MKL_FAIL__${n}__conf" "$1"
+ mkl_var_set "MKL_FAIL__${n}__module" $MKL_MODULE
+ mkl_var_set "MKL_FAIL__${n}__define" $2
+ mkl_var_set "MKL_FAIL__${n}__action" "$3"
+ if [[ -z $(mkl_var_get "MKL_FAIL__${n}__reason") ]]; then
+ mkl_var_set "MKL_FAIL__${n}__reason" "$4"
+ else
+ mkl_var_append "MKL_FAIL__${n}__reason" "
+And also:
+$4"
+ fi
+ mkl_in_list "$MKL_FAILS" "$n" || mkl_var_append MKL_FAILS "$n"
+}
+
+
+# A check failed, handle it
+# Arguments:
+# config name
+# define name
+# action (fail|disable|ignore|cont)
+# reason
+function mkl_check_failed {
+ # Override action based on require directives, unless the action is
+ # set to cont (for fallthrough to sub-sequent tests).
+ local action="$3"
+ if [[ $3 != "cont" ]]; then
+ action=$(mkl_meta_get "MOD__$MKL_MODULE" "override_action" $3)
+ fi
+
+ # --fail-fatal option
+ [[ $MKL_FAILFATAL ]] && action="fail"
+
+ mkl_check_done "$1" "$2" "$action" "failed"
+ mkl_dbg "Check $1 ($2, action $action (originally $3)) failed: $4"
+
+
+ case $action in
+ fail)
+ # Check failed fatally, fail everything eventually
+ mkl_fail "$1" "$2" "$3" "$4"
+ return 1
+ ;;
+
+ disable)
+ # Check failed, disable
+ [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "n"
+ return 1
+ ;;
+ ignore)
+ # Check failed but we ignore the results and set it anyway.
+ [[ ! -z $2 ]] && mkl_define_set "$1" "$2" "1"
+ [[ ! -z $2 ]] && mkl_mkvar_set "$1" "$2" "y"
+ return 1
+ ;;
+ cont)
+ # Check failed but we ignore the results and do nothing.
+ return 0
+ ;;
+ esac
+}
+
+
+
+
+###########################################################################
+#
+#
+# Output generators
+#
+#
+###########################################################################
+
+# Generate late variables.
+# Late variables are those referenced in command line option defaults
+# but then never set by --option.
+function mkl_generate_late_vars {
+ local n
+ for n in $MKL_LATE_VARS ; do
+ local func=${n%:*}
+ local safeopt=${func#opt_}
+ local val=${n#*:}
+ if mkl_in_list "$MKL_OPTS_SET" "$safeopt" ; then
+ # Skip options set explicitly with --option
+ continue
+ fi
+ # Expand variable references "\$foo" by calling eval
+ # and pass it opt_... function.
+ $func "$(eval echo $val)"
+ done
+}
+
+
+# Generate MKL_DYNAMIC_LIBS and MKL_STATIC_LIBS for Makefile.config
+#
+# Params: $LIBS
+function mkl_generate_libs {
+ while [[ $# -gt 0 ]]; do
+ if [[ $1 == -l* ]]; then
+ mkl_mkvar_append "" MKL_DYNAMIC_LIBS $1
+ elif [[ $1 == *.a ]]; then
+ mkl_mkvar_append "" MKL_STATIC_LIBS $1
+ elif [[ $1 == -framework ]]; then
+ mkl_mkvar_append "" MKL_DYNAMIC_LIBS "$1 $2"
+ shift # two args
+ else
+ mkl_dbg "Ignoring arg $1 from LIBS while building STATIC and DYNAMIC lists"
+ fi
+ shift # remove arg
+ done
+}
+
+# Generate output files.
+# Must be called following a succesful configure run.
+function mkl_generate {
+
+ # Generate MKL_STATIC_LIBS and MKL_DYNAMIC_LIBS from LIBS
+ mkl_generate_libs $LIBS
+
+ local mf=
+ for mf in $MKL_GENERATORS ; do
+ MKL_MODULE=${mf%:*}
+ local func=${mf#*:}
+ $func || exit 1
+ done
+
+ # Generate a built-in options define based on WITH_..=y
+ local with_y=
+ for n in $MKL_MKVARS ; do
+ if [[ $n == WITH_* ]] && [[ $n != WITH_STATIC_LIB_* ]] && [[ ${!n} == y ]]; then
+ with_y="$with_y ${n#WITH_}"
+ fi
+ done
+ with_y="${with_y# }"
+
+ mkl_allvar_set "BUILT_WITH" "BUILT_WITH" "$with_y"
+
+ mkl_write_mk "# Automatically generated by $0 $*"
+ mkl_write_mk "# Config variables"
+ mkl_write_mk "#"
+ mkl_write_mk "# Generated by:"
+ mkl_write_mk "# $MKL_CONFIGURE_ARGS"
+ mkl_write_mk ""
+
+ # This variable is used by Makefile.base to avoid multiple inclusions.
+ mkl_write_mk "MKL_MAKEFILE_CONFIG=y"
+
+ # Export colors to Makefile.config
+ mkl_write_mk "MKL_RED=\t${MKL_RED}"
+ mkl_write_mk "MKL_GREEN=\t${MKL_GREEN}"
+ mkl_write_mk "MKL_YELLOW=\t${MKL_YELLOW}"
+ mkl_write_mk "MKL_BLUE=\t${MKL_BLUE}"
+ mkl_write_mk "MKL_CLR_RESET=\t${MKL_CLR_RESET}"
+
+ local n=
+ for n in $MKL_MKVARS ; do
+ # Some special variables should be prefixable by the caller, so
+ # define them in the makefile as appends.
+ local op="="
+ case $n in
+ CFLAGS|CPPFLAGS|CXXFLAGS|LDFLAGS|LIBS)
+ op="+="
+ ;;
+ esac
+ mkl_write_mk "$n$op\t${!n}"
+ done
+ mkl_write_mk "# End of config variables"
+
+ MKL_OUTMK_FINAL=Makefile.config
+ mv $MKL_OUTMK $MKL_OUTMK_FINAL
+
+ echo "Generated $MKL_OUTMK_FINAL"
+
+ # Generate config.h
+ mkl_write_h "// Automatically generated by $0 $*"
+ mkl_write_h "#ifndef _CONFIG_H_"
+ mkl_write_h "#define _CONFIG_H_"
+ for n in $MKL_DEFINES ; do
+ mkl_write_h "${!n}"
+ done
+ mkl_write_h "#endif /* _CONFIG_H_ */"
+
+ MKL_OUTH_FINAL=config.h
+ mv $MKL_OUTH $MKL_OUTH_FINAL
+
+ echo "Generated $MKL_OUTH_FINAL"
+}
+
+# Remove file noisily, if it exists
+function mkl_rm {
+ if [[ -f $fname ]]; then
+ echo "Removing $fname"
+ rm -f "$fname"
+ fi
+}
+
+# Remove files generated by configure
+function mkl_clean {
+ for fname in Makefile.config config.h config.cache config.log ; do
+ mkl_rm "$fname"
+ done
+
+ local mf=
+ for mf in $MKL_CLEANERS ; do
+ MKL_MODULE=${mf%:*}
+ local func=${mf#*:}
+ $func || exit 1
+ done
+
+}
+
+
+# Print summary of succesful configure run
+function mkl_summary {
+
+ echo "
+Configuration summary:"
+ local n=
+ for n in $MKL_MKVARS ; do
+ # Skip the boring booleans
+ if [[ $n == ENABLE_* || $n == WITH_* || $n == WITHOUT_* || $n == HAVE_* || $n == def_* ]]; then
+ continue
+ fi
+ printf " %-24s %s\n" "$n" "${!n}"
+ done
+}
+
+
+
+# Write to mk file
+# Argument:
+# string ..
+function mkl_write_mk {
+ echo -e "$*" >> $MKL_OUTMK
+}
+
+# Write to header file
+# Argument:
+# string ..
+function mkl_write_h {
+ echo -e "$*" >> $MKL_OUTH
+}
+
+
+
+###########################################################################
+#
+#
+# Logging and debugging
+#
+#
+###########################################################################
+
+# Debug print
+# Only visible on terminal if MKL_DEBUG is set.
+# Always written to config.log
+# Argument:
+# string ..
+function mkl_dbg {
+ if [[ ! -z $MKL_DEBUG ]]; then
+ echo -e "${MKL_BLUE}DBG:$$: $*${MKL_CLR_RESET}" 1>&2
+ fi
+ echo "DBG $$: $*" >> $MKL_OUTDBG
+}
+
+# Error print (with color)
+# Always printed to terminal and config.log
+# Argument:
+# string ..
+function mkl_err {
+ echo -e "${MKL_RED}$*${MKL_CLR_RESET}" 1>&2
+ echo "$*" >> $MKL_OUTDBG
+}
+
+# Same as mkl_err but without coloring
+# Argument:
+# string ..
+function mkl_err0 {
+ echo -e "$*" 1>&2
+ echo "$*" >> $MKL_OUTDBG
+}
+
+# Standard print
+# Always printed to terminal and config.log
+# Argument:
+# string ..
+function mkl_info {
+ echo -e "$*" 1>&2
+ echo -e "$*" >> $MKL_OUTDBG
+}
+
+
+
+
+
+
+
+###########################################################################
+#
+#
+# Misc helpers
+#
+#
+###########################################################################
+
+# Returns the absolute path (but not necesarily canonical) of the first argument
+function mkl_abspath {
+ echo $1 | sed -e "s|^\([^/]\)|$PWD/\1|"
+}
+
+# Returns true (0) if function $1 exists, else false (1)
+function mkl_func_exists {
+ declare -f "$1" > /dev/null
+ return $?
+}
+
+# Rename function.
+# Returns 0 on success or 1 if old function (origname) was not defined.
+# Arguments:
+# origname
+# newname
+function mkl_func_rename {
+ if ! mkl_func_exists $1 ; then
+ return 1
+ fi
+ local orig=$(declare -f $1)
+ local new="$2${orig#$1}"
+ eval "$new"
+ unset -f "$1"
+ return 0
+}
+
+
+# Push module function for later call by mklove.
+# The function is renamed to an internal name.
+# Arguments:
+# list variable name
+# module name
+# function name
+function mkl_func_push {
+ local newfunc="__mkl__f_${2}_$(( MKL_IDNEXT++ ))"
+ if mkl_func_rename "$3" "$newfunc" ; then
+ mkl_var_append "$1" "$2:$newfunc"
+ fi
+}
+
+
+
+# Returns value, or the default string if value is empty.
+# Arguments:
+# value
+# default
+function mkl_def {
+ if [[ ! -z $1 ]]; then
+ echo $1
+ else
+ echo $2
+ fi
+}
+
+
+# Render a string (e.g., evaluate its $varrefs)
+# Arguments:
+# string
+function mkl_render {
+ if [[ $* == *\$* ]]; then
+ eval "echo $*"
+ else
+ echo "$*"
+ fi
+}
+
+# Escape a string so that it becomes suitable for being an env variable.
+# This is a destructive operation and the original string cannot be restored.
+function mkl_env_esc {
+ echo $* | LC_ALL=C sed -e 's/[^a-zA-Z0-9_]/_/g'
+}
+
+# Convert arguments to upper case
+function mkl_upper {
+ echo "$*" | tr '[:lower:]' '[:upper:]'
+}
+
+# Convert arguments to lower case
+function mkl_lower {
+ echo "$*" | tr '[:upper:]' '[:lower:]'
+}
+
+
+# Checks if element is in list
+# Arguments:
+# list
+# element
+function mkl_in_list {
+ local n
+ for n in $1 ; do
+ [[ $n == $2 ]] && return 0
+ done
+ return 1
+}
+
+
+# Silent versions of pushd and popd
+function mkl_pushd {
+ pushd "$1" >/dev/null
+}
+
+function mkl_popd {
+ popd >/dev/null
+}
+
+
+###########################################################################
+#
+#
+# Cache functionality
+#
+#
+###########################################################################
+
+
+# Write cache file
+function mkl_cache_write {
+ [[ ! -z "$MKL_NOCACHE" ]] && return 0
+ echo "# mklove configure cache file generated at $(date)" > config.cache
+ for n in $MKL_CACHEVARS ; do
+ echo "$n=${!n}" >> config.cache
+ done
+ echo "Generated config.cache"
+}
+
+
+# Read cache file
+function mkl_cache_read {
+ [[ ! -z "$MKL_NOCACHE" ]] && return 0
+ [ -f config.cache ] || return 1
+
+ echo "using cache file config.cache"
+
+ local ORIG_IFS=$IFS
+ IFS="$IFS="
+ while read -r n v ; do
+ [[ -z $n || $n = \#* || -z $v ]] && continue
+ # Don't let cache overwrite variables
+ [[ -n ${n+r} ]] || mkl_var_set $n $v cache
+ done < config.cache
+ IFS=$ORIG_IFS
+}
+
+
+###########################################################################
+#
+#
+# Config name meta data
+#
+#
+###########################################################################
+
+# Set metadata for config name
+# This metadata is used by mkl in various situations
+# Arguments:
+# config name
+# metadata key
+# metadata value (appended)
+function mkl_meta_set {
+ local metaname="mkl__$1__$2"
+ eval "$metaname=\"\$$metaname $3\""
+}
+
+# Returns metadata for config name
+# Arguments:
+# config name
+# metadata key
+# default (optional)
+function mkl_meta_get {
+ local metaname="mkl__$1__$2"
+ if [[ ! -z ${!metaname} ]]; then
+ echo ${!metaname}
+ else
+ echo "$3"
+ fi
+}
+
+# Checks if metadata exists
+# Arguments:
+# config name
+# metadata key
+function mkl_meta_exists {
+ local metaname="mkl__$1__$2"
+ if [[ ! -z ${!metaname} ]]; then
+ return 0
+ else
+ return 1
+ fi
+}
+
+
+
+
+
+###########################################################################
+#
+#
+# Check framework
+#
+#
+###########################################################################
+
+
+# Print that a check is beginning to run
+# Returns 0 if a cached result was used (do not continue with your tests),
+# else 1.
+#
+# If the check should not be cachable then specify argument 3 as "no-cache",
+# this is useful when a check not only checks but actually sets config
+# variables itself (which is not recommended, but desired sometimes).
+#
+# Arguments:
+# [ --verb "verb.." ] (replace "checking for")
+# config name
+# define name
+# action (fail,cont,disable or no-cache)
+# [ display name ]
+function mkl_check_begin {
+ local verb="checking for"
+ if [[ $1 == "--verb" ]]; then
+ verb="$2"
+ shift
+ shift
+ fi
+
+ local name=$(mkl_meta_get $1 name "$4")
+ [[ -z $name ]] && name="$1"
+
+ echo -n "$verb $name..."
+ if [[ $3 != "no-cache" ]]; then
+ local status=$(mkl_var_get "MKL_STATUS_$1")
+ # Check cache (from previous run or this one).
+ # Only used cached value if the cached check succeeded:
+ # it is more likely that a failed check has been fixed than the other
+ # way around.
+ if [[ ! -z $status && ( $status = "ok" ) ]]; then
+ mkl_check_done "$1" "$2" "$3" $status "cached"
+ return 0
+ fi
+ fi
+ return 1
+}
+
+
+# Calls the manual_checks function for the given module.
+# Use this for modules that provide check hooks that require
+# certain call ordering, such as dependent library checks.
+#
+# Param 1: module name
+function mkl_check {
+ local modname=$1
+
+ local func="${modname}_manual_checks"
+ if ! mkl_func_exists "$func" ; then
+ mkl_fail "Check function for module $modname not found: missing mkl_require $modname ?"
+ return 1
+ fi
+
+ $func
+ return $?
+}
+
+
+# Print that a check is done
+# Arguments:
+# config name
+# define name
+# action
+# status (ok|failed)
+# extra-info (optional)
+function mkl_check_done {
+ # Clean up configname to be a safe varname
+ local cname=${1//-/_}
+ mkl_var_set "MKL_STATUS_$cname" "$4" cache
+
+ mkl_dbg "Setting $1 ($cname) status to $4 (action $3)"
+
+ local extra=""
+ if [[ $4 = "failed" ]]; then
+ local clr=$MKL_YELLOW
+ extra=" ($3)"
+ case "$3" in
+ fail)
+ clr=$MKL_RED
+ ;;
+ cont)
+ extra=""
+ ;;
+ esac
+ echo -e " $clr$4$MKL_CLR_RESET${extra}"
+ else
+ [[ ! -z $2 ]] && mkl_define_set "$cname" "$2" "1"
+ [[ ! -z $2 ]] && mkl_mkvar_set "$cname" "$2" "y"
+ [ ! -z "$5" ] && extra=" ($5)"
+ echo -e " $MKL_GREEN$4${MKL_CLR_RESET}$extra"
+ fi
+}
+
+
+# Perform configure check by compiling source snippet
+# Arguments:
+# [--sub] (run checker as a sub-check, not doing begin/fail/ok)
+# [--ldflags="..." ] (appended after "compiler arguments" below)
+# config name
+# define name
+# action (fail|disable)
+# compiler (CC|CXX)
+# compiler arguments (optional "", example: "-lzookeeper")
+# source snippet
+function mkl_compile_check {
+
+ local sub=0
+ if [[ $1 == --sub ]]; then
+ sub=1
+ shift
+ fi
+
+ local ldf=
+ if [[ $1 == --ldflags=* ]]; then
+ ldf=${1#*=}
+ shift
+ fi
+
+ if [[ $sub -eq 0 ]]; then
+ mkl_check_begin "$1" "$2" "$3" "$1 (by compile)" && return $?
+ fi
+
+ local cflags=
+
+ if [[ $4 = "CXX" ]]; then
+ local ext=cpp
+ cflags="$(mkl_mkvar_get CXXFLAGS)"
+ else
+ local ext=c
+ cflags="$(mkl_mkvar_get CFLAGS)"
+ fi
+
+ local srcfile=$(mktemp _mkltmpXXXXXX)
+ mv "$srcfile" "${srcfile}.$ext"
+ srcfile="$srcfile.$ext"
+ echo "$6" > $srcfile
+ echo "
+int main () { return 0; }
+" >> $srcfile
+
+ local cmd="${!4} $cflags $(mkl_mkvar_get CPPFLAGS) -Wall -Werror $srcfile -o ${srcfile}.o $ldf $(mkl_mkvar_get LDFLAGS) $5 $(mkl_mkvar_get LIBS)";
+ mkl_dbg "Compile check $1 ($2) (sub=$sub): $cmd"
+
+ local output
+ output=$($cmd 2>&1)
+
+ if [[ $? != 0 ]] ; then
+ mkl_dbg "compile check for $1 ($2) failed: $cmd: $output"
+ [[ $sub -eq 0 ]] && mkl_check_failed "$1" "$2" "$3" "compile check failed:
+CC: $4
+flags: $5
+$cmd:
+$output
+source: $6"
+ local ret=1
+ else
+ [[ $sub -eq 0 ]] && mkl_check_done "$1" "$2" "$3" "ok"
+ local ret=0
+ fi
+
+ # OSX XCode toolchain creates dSYM directories when -g is set,
+ # delete them specifically.
+ rm -rf "$srcfile" "${srcfile}.o" "$srcfile*dSYM"
+
+ return $ret
+}
+
+
+# Low-level: Try to link with a library.
+# Arguments:
+# linker flags (e.g. "-lpthreads")
+function mkl_link_check0 {
+ local libs=$1
+ local srcfile=$(mktemp _mktmpXXXXXX)
+ echo "#include <stdio.h>
+int main () { FILE *fp = stderr; return fp ? 0 : 0; }" > ${srcfile}.c
+
+ local cmd="${CC} $(mkl_mkvar_get CFLAGS) $(mkl_mkvar_get LDFLAGS) ${srcfile}.c -o ${srcfile}_out $libs";
+ mkl_dbg "Link check for $1: $cmd"
+
+ local output
+ output=$($cmd 2>&1)
+ local retcode=$?
+
+ if [[ $retcode -ne 0 ]] ; then
+ mkl_dbg "Link check for $1 failed: $output"
+ fi
+
+ rm -f $srcfile*
+ return $retcode
+}
+
+
+# Try to link with a library.
+# Arguments:
+# config name
+# define name
+# action (fail|disable)
+# linker flags (e.g. "-lpthreads")
+function mkl_link_check {
+ mkl_check_begin "$1" "$2" "$3" "$1 (by linking)" && return $?
+
+ if mkl_link_check0 "$4" ; then
+ mkl_check_done "$1" "$2" "$3" "ok" "$4"
+ return 0
+ else
+ mkl_dbg "link check for $1 ($2) failed: $output"
+ mkl_check_failed "$1" "$2" "$3" "compile check failed:
+$output"
+ return 1
+ fi
+}
+
+
+
+# Tries to figure out if we can use a static library or not.
+#
+# WARNING: This function must not emit any stdout output other than the
+# updated list of libs. Do not use any stdout-printing checker.
+#
+# Arguments:
+# config name (e.g., zstd)
+# compiler flags (optional "", e.g: "-lzstd")
+# Returns/outputs:
+# New list of compiler flags
+function mkl_lib_check_static {
+ local configname=$1
+ local libs=$2
+ local arfile_var=STATIC_LIB_${configname}
+ local stfnames=$(mkl_lib_static_fnames $configname)
+
+ mkl_dbg "$configname: Check for static library (libs $libs, arfile variable $arfile_var=${!arfile_var}, static filenames $stfnames)"
+
+ # If STATIC_LIB_<configname> specifies .a file(s) we use that instead.
+ if [[ -n ${!arfile_var} ]]; then
+ libs="${!arfile_var}"
+
+ elif [[ $WITH_STATIC_LINKING != y ]]; then
+ # Static linking not enabled
+ echo ""
+ return
+
+ elif [[ $HAS_LDFLAGS_STATIC == y ]] && [[ -n $stfnames ]]; then
+ local libname
+ local stlibs=
+ for libname in $stfnames; do
+ # Convert the static filename to a linker flag:
+ # libzstd.a -> -lzstd
+ libname=${libname#lib}
+ libname="-l${libname%.a}"
+ stlibs="${stlibs}${libname} "
+ done
+ libs="${LDFLAGS_STATIC} $stlibs ${LDFLAGS_DYNAMIC}"
+ mkl_dbg "$configname: after replacing libs: $libs"
+
+ elif [[ $libs == *-L* ]]; then
+ # Try to resolve full static paths using any -Lpaths in $libs
+ local lpath
+ for lpath in $libs; do
+ [[ $lpath == -L* ]] || continue
+
+ lpath="${lpath#-L}"
+ [[ -d $lpath ]] || continue
+
+ if mkl_resolve_static_libs "$configname" "$lpath"; then
+ break
+ fi
+ done
+
+ libs="${!arfile_var}"
+ mkl_dbg "$configname: after -L resolve, libs is $libs"
+
+ else
+ mkl_dbg "$configname: Neither $arfile_var=/path/to/libname.a specified nor static linker flags supported: static linking probably won't work"
+ libs=""
+ fi
+
+ if [[ -z $libs ]]; then
+ echo ""
+ return
+ fi
+
+ # Attempt to link a small program with these static libraries
+ mkl_dbg "$configname: verifying that linking \"$libs\" works"
+ if ! mkl_link_check0 "$libs" ; then
+ mkl_dbg "$configname: Could not use static libray flags: $libs"
+ echo ""
+ return
+ fi
+
+ mkl_allvar_set "$configname" "${configname}_STATIC" "y"
+
+ echo $libs
+}
+
+
+# Checks that the specified lib is available through a number of methods.
+# compiler flags are automatically appended to "LIBS" mkvar on success.
+#
+# If STATIC_LIB_<libname_without_-l> is set to the path of an <libname>.a file
+# it will be used instead of -l<libname>.
+#
+# <definename>_STATIC will be automatically defined (for both Makefile.config
+# and config.h) if the library is to be linked statically, or was installed
+# with a source dependency installer.
+#
+# Arguments:
+# [--override-action=<action>] (internal use, overrides action argument)
+# [--no-static] (do not attempt to link the library statically)
+# [--libname=<lib>] (library name if different from config name, such as
+# when the libname includes a dash)
+# config name (library name (for pkg-config))
+# define name
+# action (fail|disable|cont)
+# compiler (CC|CXX)
+# compiler flags (optional "", e.g: "-lyajl")
+# source snippet
+function mkl_lib_check0 {
+
+ local override_action=
+ local nostaticopt=
+ local libnameopt=
+ local libname=
+
+ while [[ $1 == --* ]]; do
+ if [[ $1 == --override-action=* ]]; then
+ override_action=${1#*=}
+ elif [[ $1 == --no-static ]]; then
+ nostaticopt=$1
+ elif [[ $1 == --libname* ]]; then
+ libnameopt=$1
+ libname="${libnameopt#*=}"
+ else
+ mkl_err "mkl_lib_check: invalid option $1"
+ exit 1
+ fi
+ shift
+ done
+
+ if [[ -z $libname ]]; then
+ libname=$1
+ fi
+
+ local action=$3
+ if [[ -n $override_action ]]; then
+ action=$override_action
+ fi
+
+ # pkg-config result (0=ok)
+ local pkg_conf_failed=1
+ if [[ $WITH_PKGCONFIG == "y" ]]; then
+ # Let pkg-config populate CFLAGS, et.al.
+ # Return on success.
+ mkl_pkg_config_check $nostaticopt $libnameopt "$1" "$2" cont "$4" "$6" && return $?
+ fi
+
+ local libs="$5"
+ local is_static=0
+
+ if [[ -z $nostaticopt ]]; then
+ local stlibs=$(mkl_lib_check_static $1 "$libs")
+ if [[ -n $stlibs ]]; then
+ libs=$stlibs
+ is_static=1
+ fi
+ fi
+
+ if ! mkl_compile_check "$1" "$2" "$action" "$4" "$libs" "$6"; then
+ return 1
+ fi
+
+ if [[ -n $libs ]]; then
+ # Add libraries in reverse order to make sure inter-dependencies
+ # are resolved in the correct order.
+ # E.g., check for crypto and then ssl should result in -lssl -lcrypto
+ mkl_dbg "$1: from lib_check: LIBS: prepend $libs"
+ mkl_mkvar_prepend "$1" LIBS "$libs"
+ if [[ $is_static == 0 ]]; then
+ # Static libraries are automatically bundled with
+ # librdkafka-static.a so there is no need to add them as an
+ # external linkage dependency.
+ mkl_mkvar_prepend "$1" MKL_PKGCONFIG_LIBS_PRIVATE "$libs"
+ fi
+ fi
+
+ return 0
+}
+
+
+# Wrapper for mkl_lib_check0 which attempts dependency installation
+# if --install-deps is specified.
+#
+# See mkl_lib_check0 for arguments and details.
+function mkl_lib_check {
+
+ local arg=
+ local name=
+
+ # Find config name parameter (first non-option (--...))
+ for arg in $* ; do
+ if [[ $arg == --* ]]; then
+ continue
+ fi
+ name=$arg
+ break
+ done
+
+ if [[ $MKL_INSTALL_DEPS != y ]] || ! mkl_dep_has_installer "$name" ; then
+ mkl_lib_check0 "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8"
+ return $?
+ fi
+
+
+ # Automatic dependency installation mode:
+ # First pass is lib check with cont,
+ # if it fails, attempt dependency installation,
+ # and then make second with caller's fail-action.
+
+ local retcode=
+
+ # With --source-deps-only we want to make sure the dependency
+ # being used is in-fact from the dependency builder (if supported),
+ # rather than a system installed alternative, so skip the pre-check and
+ # go directly to dependency installation/build below.
+ if [[ $MKL_SOURCE_DEPS_ONLY != y ]] || ! mkl_dep_has_builder $name ; then
+ mkl_lib_check0 --override-action=cont "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8"
+ retcode=$?
+ if [[ $retcode -eq 0 ]]; then
+ # Successful on first pass
+ return $retcode
+ fi
+ else
+ mkl_dbg "$name: skipping dependency pre-check in favour of --source-deps-only"
+ fi
+
+ # Install dependency
+ if ! mkl_dep_install "$name" ; then
+ return 1
+ fi
+
+ # Second pass: check again, this time fail hard
+ mkl_lib_check0 --override-action=fail "$1" "$2" "$3" "$4" "$5" "$6" "$7" "$8"
+ return $?
+}
+
+
+
+# Check for library with pkg-config
+# Automatically sets CFLAGS and LIBS from pkg-config information.
+# Arguments:
+# [--no-static] (do not attempt to link the library statically)
+# [--libname=<lib>] (library name if different from config name, such as
+# when the libname includes a dash)
+# config name
+# define name
+# action (fail|disable|ignore|cont)
+# compiler (CC|CXX)
+# source snippet
+function mkl_pkg_config_check {
+
+ local nostaticopt=
+ if [[ $1 == --no-static ]]; then
+ nostaticopt=$1
+ shift
+ fi
+
+ local libname=$1
+ if [[ $1 == --libname* ]]; then
+ libname="${libnameopt#*=}"
+ shift
+ fi
+
+ local cname="${1}_PKGCONFIG"
+ mkl_check_begin "$cname" "$2" "no-cache" "$1 (by pkg-config)" && return $?
+
+ local cflags=
+ local cmd="${PKG_CONFIG} --short-errors --cflags $libname"
+ mkl_dbg "pkg-config check $libname for CFLAGS ($2): $cmd"
+
+ cflags=$($cmd 2>&1)
+ if [[ $? != 0 ]]; then
+ mkl_dbg "'$cmd' failed: $cflags"
+ # Clear define name ($2): caller may have additional checks
+ mkl_check_failed "$cname" "" "$3" "'$cmd' failed:
+$cflags"
+ return 1
+ fi
+
+ if [[ $(mkl_meta_get $1 installed_with) == "source" && \
+ $WITH_STATIC_LINKING == y && \
+ $MKL_SOURCE_DEPS_ONLY == y ]]; then
+ # If attempting static linking and we're using source-only
+ # dependencies, then there is no need for pkg-config since
+ # the source installer will have set the required flags.
+ mkl_check_failed "$cname" "" "ignore" "pkg-config ignored for static build"
+ return 1
+ fi
+
+ local libs=
+ cmd="${PKG_CONFIG} --short-errors --libs $libname"
+ mkl_dbg "pkg-config check $libname for LIBS ($2): $cmd"
+ libs=$($cmd 2>&1)
+ if [[ $? != 0 ]]; then
+ mkl_dbg "${PKG_CONFIG} --libs $libname failed: $libs"
+ # Clear define name ($2): caller may have additional checks
+ mkl_check_failed "$cname" "" "$3" "pkg-config --libs failed"
+ return 1
+ fi
+
+ mkl_dbg "$1: from pkg-config: CFLAGS '$CFLAGS', LIBS '$LIBS'"
+
+ local snippet="$5"
+ if [[ -n $snippet ]]; then
+ mkl_dbg "$1: performing compile check using pkg-config info"
+
+ if ! mkl_compile_check --sub "$1" "$2" "no-cache" "$4" "$cflags $libs" "$snippet"; then
+ mkl_check_failed "$cname" "" "$3" "compile check failed"
+ return 1
+ fi
+ fi
+
+ mkl_mkvar_append $1 "MKL_PKGCONFIG_REQUIRES_PRIVATE" "$libname"
+
+ mkl_mkvar_append $1 "CFLAGS" "$cflags"
+
+ if [[ -z $nostaticopt ]]; then
+ local stlibs=$(mkl_lib_check_static $1 "$libs")
+ if [[ -n $stlibs ]]; then
+ libs=$stlibs
+ else
+ # if we don't find a static library to bundle into the
+ # -static.a, we need to export a pkgconfig dependency
+ # so it can be resolved when linking downstream packages
+ mkl_mkvar_append $1 "MKL_PKGCONFIG_REQUIRES" "$libname"
+ fi
+ fi
+
+ mkl_dbg "$1: from pkg-config: LIBS: prepend $libs"
+ mkl_mkvar_prepend "$1" LIBS "$libs"
+
+ mkl_check_done "$1" "$2" "$3" "ok"
+
+ return 0
+}
+
+
+# Check that a command runs and exits succesfully.
+# Arguments:
+# config name
+# define name (optional, can be empty)
+# action
+# command
+function mkl_command_check {
+ mkl_check_begin "$1" "$2" "$3" "$1 (by command)" && return $?
+
+ local out=
+ out=$($4 2>&1)
+ if [[ $? != 0 ]]; then
+ mkl_dbg "$1: $2: $4 failed: $out"
+ mkl_check_failed "$1" "$2" "$3" "command '$4' failed:
+$out"
+ return 1
+ fi
+
+ mkl_check_done "$1" "$2" "$3" "ok"
+
+ return 0
+}
+
+
+# Check that a program is executable, but will not execute it.
+# Arguments:
+# config name
+# define name (optional, can be empty)
+# action
+# program name (e.g, objdump)
+function mkl_prog_check {
+ mkl_check_begin --verb "checking executable" "$1" "$2" "$3" "$1" && return $?
+
+ local out=
+ out=$(command -v "$4" 2>&1)
+ if [[ $? != 0 ]]; then
+ mkl_dbg "$1: $2: $4 is not executable: $out"
+ mkl_check_failed "$1" "$2" "$3" "$4 is not executable"
+ return 1
+ fi
+
+ mkl_check_done "$1" "$2" "$3" "ok"
+
+ return 0
+}
+
+
+
+
+# Checks that the check for the given config name passed.
+# This does not behave like the other checks, if the given config name passed
+# its test then nothing is printed. Else the configure will fail.
+# Arguments:
+# checked config name
+function mkl_config_check {
+ local status=$(mkl_var_get "MKL_STATUS_$1")
+ [[ $status = "ok" ]] && return 0
+ mkl_fail $1 "" "fail" "$MKL_MODULE requires $1"
+ return 1
+}
+
+
+# Checks that all provided config names are set.
+# Arguments:
+# config name
+# define name
+# action
+# check_config_name1
+# check_config_name2..
+function mkl_config_check_all {
+ local cname=
+ local res="ok"
+ echo start this now for $1
+ for cname in ${@:4}; do
+ local st=$(mkl_var_get "MKL_STATUS_$cname")
+ [[ $status = "ok" ]] && continue
+ mkl_fail $1 $2 $3 "depends on $cname"
+ res="failed"
+ done
+
+ echo "try res $res"
+ mkl_check_done "$1" "$2" "$3" $res
+}
+
+
+# Check environment variable
+# Arguments:
+# config name
+# define name
+# action
+# environment variable
+function mkl_env_check {
+ mkl_check_begin "$1" "$2" "$3" "$1 (by env $4)" && return $?
+
+ if [[ -z ${!4} ]]; then
+ mkl_check_failed "$1" "$2" "$3" "environment variable $4 not set"
+ return 1
+ fi
+
+ mkl_check_done "$1" "$2" "$3" "ok" "${!4}"
+
+ return 0
+}
+
+
+# Run all checks
+function mkl_checks_run {
+ # Set up common variables
+ mkl_allvar_set "" MKL_APP_NAME $(mkl_meta_get description name)
+ mkl_allvar_set "" MKL_APP_DESC_ONELINE "$(mkl_meta_get description oneline)"
+
+ # Call checks functions in dependency order
+ local mf
+ for mf in $MKL_CHECKS ; do
+ MKL_MODULE=${mf%:*}
+ local func=${mf#*:}
+
+ if mkl_func_exists $func ; then
+ $func
+ else
+ mkl_err "Check function $func from $MKL_MODULE disappeared ($mf)"
+ fi
+ unset MKL_MODULE
+ done
+}
+
+
+# Check for color support in terminal.
+# If the terminal supports colors, the function will alter
+# MKL_RED
+# MKL_GREEN
+# MKL_YELLOW
+# MKL_BLUE
+# MKL_CLR_RESET
+function mkl_check_terminal_color_support {
+ local use_color=false
+ local has_tput=false
+
+ if [[ -z ${TERM} ]]; then
+ # tput and dircolors require $TERM
+ mkl_dbg "\$TERM is not set! Cannot check for color support in terminal."
+ return 1
+ elif hash tput 2>/dev/null; then
+ has_tput=true
+ [[ $(tput colors 2>/dev/null) -ge 8 ]] && use_color=true
+ mkl_dbg "tput reports color support: ${use_color}"
+ elif hash dircolors 2>/dev/null; then
+ # Enable color support only on colorful terminals.
+ # dircolors --print-database uses its own built-in database
+ # instead of using /etc/DIR_COLORS. Try to use the external file
+ # first to take advantage of user additions.
+ local safe_term=${TERM//[^[:alnum:]]/?}
+ local match_lhs=""
+ [[ -f ~/.dir_colors ]] && match_lhs="${match_lhs}$(<~/.dir_colors)"
+ [[ -f /etc/DIR_COLORS ]] && match_lhs="${match_lhs}$(</etc/DIR_COLORS)"
+ [[ -z ${match_lhs} ]] && match_lhs=$(dircolors --print-database)
+ [[ $'\n'${match_lhs} == *$'\n'"TERM "${safe_term}* ]] && use_color=true
+ mkl_dbg "dircolors reports color support: ${use_color}"
+ fi
+
+ if ${use_color}; then
+ if ${has_tput}; then
+ # In theory, user could have set different escape sequences
+ # Because tput is available we can use it to query the right values ...
+ mkl_dbg "Using color escape sequences from tput"
+ MKL_RED=$(tput setaf 1)
+ MKL_GREEN=$(tput setaf 2)
+ MKL_YELLOW=$(tput setaf 3)
+ MKL_BLUE=$(tput setaf 4)
+ MKL_CLR_RESET=$(tput sgr0)
+ else
+ mkl_dbg "Using hard-code ANSI color escape sequences"
+ MKL_RED="\033[031m"
+ MKL_GREEN="\033[032m"
+ MKL_YELLOW="\033[033m"
+ MKL_BLUE="\033[034m"
+ MKL_CLR_RESET="\033[0m"
+ fi
+ else
+ mkl_dbg "Did not detect color support in \"$TERM\" terminal!"
+ fi
+
+ return 0
+}
+
+
+
+
+###########################################################################
+#
+#
+# Module functionality
+#
+#
+###########################################################################
+
+# Downloads module from repository.
+# Arguments:
+# module name
+# Returns:
+# module file name
+function mkl_module_download {
+ local modname="$1"
+ local url="$MKL_REPO_URL/modules/configure.$modname"
+ local tmpfile=""
+
+ fname="${MKLOVE_DIR}/modules/configure.$modname"
+
+ if [[ $url != http*://* ]]; then
+ # Local path, just copy file.
+ if [[ ! -f $url ]]; then
+ mkl_err "Module $modname not found at $url"
+ return 1
+ fi
+
+ if ! cp "$url" "$fname" ; then
+ mkl_err "Failed to copy $url to $fname"
+ return 1
+ fi
+
+ echo "$fname"
+ return 0
+ fi
+
+ # Download
+ mkl_info "${MKL_BLUE}downloading missing module $modname from $url${MKL_CLR_RESET}"
+
+ tmpfile=$(mktemp _mkltmpXXXXXX)
+ local out=
+ out=$(curl -fLs -o "$tmpfile" "$url" 2>&1)
+
+ if [[ $? -ne 0 ]]; then
+ rm -f "$tmpfile"
+ mkl_err "Failed to download $modname:"
+ mkl_err0 $out
+ return 1
+ fi
+
+ # Move downloaded file into place replacing the old file.
+ mv "$tmpfile" "$fname" || return 1
+
+ # "Return" filename
+ echo "$fname"
+
+ return 0
+}
+
+
+# Load module by name or filename
+# Arguments:
+# "require"|"try"
+# filename
+# [ module arguments ]
+function mkl_module_load {
+ local try=$1
+ shift
+ local fname=$1
+ shift
+ local modname=${fname#*configure.}
+ local bypath=1
+
+ # Check if already loaded
+ if mkl_in_list "$MKL_MODULES" "$modname"; then
+ return 0
+ fi
+
+ if [[ $fname = $modname ]]; then
+ # Module specified by name, find the file.
+ bypath=0
+ for fname in configure.$modname \
+ ${MKLOVE_DIR}/modules/configure.$modname ; do
+ [[ -s $fname ]] && break
+ done
+ fi
+
+ # Calling module
+ local cmod=$MKL_MODULE
+ [[ -z $cmod ]] && cmod="base"
+
+ if [[ ! -s $fname ]]; then
+ # Attempt to download module, if permitted
+ if [[ $MKL_NO_DOWNLOAD != 0 || $bypath == 1 ]]; then
+ mkl_err "Module $modname not found at $fname (required by $cmod) and downloads disabled"
+ if [[ $try = "require" ]]; then
+ mkl_fail "$modname" "none" "fail" \
+ "Module $modname not found (required by $cmod) and downloads disabled"
+ fi
+ return 1
+ fi
+
+ fname=$(mkl_module_download "$modname")
+ if [[ $? -ne 0 ]]; then
+ mkl_err "Module $modname not found (required by $cmod)"
+ if [[ $try = "require" ]]; then
+ mkl_fail "$modname" "none" "fail" \
+ "Module $modname not found (required by $cmod)"
+ return 1
+ fi
+ fi
+
+ # Now downloaded, try loading the module again.
+ mkl_module_load $try "$fname" "$@"
+ return $?
+ fi
+
+ # Set current module
+ local save_MKL_MODULE=$MKL_MODULE
+ MKL_MODULE=$modname
+
+ mkl_dbg "Loading module $modname (required by $cmod) from $fname"
+
+ # Source module file (positional arguments are available to module)
+ source $fname
+
+ # Restore current module (might be recursive)
+ MKL_MODULE=$save_MKL_MODULE
+
+ # Add module to list of modules
+ mkl_var_append MKL_MODULES $modname
+
+ # Rename module's special functions so we can call them separetely later.
+ mkl_func_rename "options" "${modname}_options"
+ mkl_func_rename "install_source" "${modname}_install_source"
+ mkl_func_rename "manual_checks" "${modname}_manual_checks"
+ mkl_func_push MKL_CHECKS "$modname" "checks"
+ mkl_func_push MKL_GENERATORS "$modname" "generate"
+ mkl_func_push MKL_CLEANERS "$modname" "clean"
+}
+
+
+# Require and load module
+# Must only be called from module file outside any function.
+# Arguments:
+# [ --try ] Dont fail if module doesn't exist
+# module1
+# [ "must" "pass" ]
+# [ module arguments ... ]
+function mkl_require {
+ local try="require"
+ if [[ $1 = "--try" ]]; then
+ local try="try"
+ shift
+ fi
+
+ local mod=$1
+ shift
+ local override_action=
+
+ # Check for cyclic dependencies
+ if mkl_in_list "$MKL_LOAD_STACK" "$mod"; then
+ mkl_err "Cyclic dependency detected while loading $mod module:"
+ local cmod=
+ local lmod=$mod
+ for cmod in $MKL_LOAD_STACK ; do
+ mkl_err " $lmod required by $cmod"
+ lmod=$cmod
+ done
+ mkl_fail base "" fail "Cyclic dependency detected while loading module $mod"
+ return 1
+ fi
+
+ mkl_var_prepend MKL_LOAD_STACK "$mod"
+
+
+ if [[ "$1 $2" == "must pass" ]]; then
+ shift
+ shift
+ override_action="fail"
+ fi
+
+ if [[ ! -z $override_action ]]; then
+ mkl_meta_set "MOD__$mod" "override_action" "$override_action"
+ fi
+
+
+ mkl_module_load $try $mod "$@"
+ local ret=$?
+
+ mkl_var_shift MKL_LOAD_STACK
+
+ return $ret
+}
+
+
+
+###########################################################################
+#
+#
+# Usage options
+#
+#
+###########################################################################
+
+
+MKL_USAGE="Usage: ./configure [OPTIONS...]
+
+ mklove configure script - mklove, not autoconf
+ Copyright (c) 2014-2019 Magnus Edenhill - https://github.com/edenhill/mklove
+"
+
+function mkl_usage {
+ echo "$MKL_USAGE"
+ local name=$(mkl_meta_get description name)
+
+ if [[ ! -z ${name} ]]; then
+ echo " $name - $(mkl_meta_get description oneline)
+ $(mkl_meta_get description copyright)
+"
+ fi
+
+ local og
+ for og in $MKL_USAGE_GROUPS ; do
+ og="MKL_USAGE_GROUP__$og"
+ echo "${!og}"
+ done
+
+ echo "Honoured environment variables:
+ CC, CPP, CXX, CFLAGS, CPPFLAGS, CXXFLAGS, LDFLAGS, LIBS,
+ LD, NM, OBJDUMP, STRIP, RANLIB, PKG_CONFIG, PKG_CONFIG_PATH,
+ STATIC_LIB_<libname>=.../libname.a
+
+"
+
+}
+
+
+
+# Add usage option informative text
+# Arguments:
+# text
+function mkl_usage_info {
+ MKL_USAGE="$MKL_USAGE
+$1"
+}
+
+
+# Add option to usage output
+# Arguments:
+# option group ("Standard", "Cross-Compilation", etc..)
+# variable name
+# option ("--foo", "--foo=*", "--foo=args_required")
+# help
+# default (optional)
+# assignvalue (optional, default:"y")
+# function block (optional)
+#
+# If option takes the form --foo=* then arguments are optional.
+function mkl_option {
+ local optgroup=$1
+ local varname=$2
+
+ # Fixed width between option name and help in usage output
+ local pad=" "
+ if [[ ${#3} -lt ${#pad} ]]; then
+ pad=${pad:0:$(expr ${#pad} - ${#3})}
+ else
+ pad=""
+ fi
+
+ # Add to usage output
+ local optgroup_safe=$(mkl_env_esc $optgroup)
+ if ! mkl_in_list "$MKL_USAGE_GROUPS" "$optgroup_safe" ; then
+ mkl_env_append MKL_USAGE_GROUPS "$optgroup_safe"
+ mkl_env_set "MKL_USAGE_GROUP__$optgroup_safe" "$optgroup options:
+"
+ fi
+
+ local defstr=""
+ [[ ! -z $5 ]] && defstr=" [$5]"
+ mkl_env_append "MKL_USAGE_GROUP__$optgroup_safe" " $3 $pad $4$defstr
+"
+
+ local optname="${3#--}"
+ local safeopt=
+ local optval=""
+ if [[ $3 == *=* ]]; then
+ optname="${optname%=*}"
+ optval="${3#*=}"
+ if [[ $optval == '*' ]]; then
+ # Avoid globbing of --foo=* optional arguments
+ optval='\*'
+ fi
+ fi
+
+ safeopt=$(mkl_env_esc $optname)
+
+ mkl_meta_set "MKL_OPT_ARGS" "$safeopt" "$optval"
+
+ #
+ # Optional variable scoping by prefix: "env:", "mk:", "def:"
+ #
+ local setallvar="mkl_allvar_set ''"
+ local setmkvar="mkl_mkvar_set ''"
+
+ if [[ $varname = env:* ]]; then
+ # Set environment variable (during configure runtime only)
+ varname=${varname#*:}
+ setallvar=mkl_env_set
+ setmkvar=mkl_env_set
+ elif [[ $varname = mk:* ]]; then
+ # Set Makefile.config variable
+ varname=${varname#*:}
+ setallvar="mkl_mkvar_append ''"
+ setmkvar="mkl_mkvar_append ''"
+ elif [[ $varname = def:* ]]; then
+ # Set config.h define
+ varname=${varname#*:}
+ setallvar="mkl_define_set ''"
+ setmkvar="mkl_define_set ''"
+ fi
+
+
+ if [[ ! -z $7 ]]; then
+ # Function block specified.
+ eval "function opt_$safeopt { $7 }"
+ else
+ # Add default implementation of function simply setting the value.
+ # Application may override this by redefining the function after calling
+ # mkl_option.
+ if [[ $optval = "PATH" ]]; then
+ # PATH argument: make it an absolute path.
+ # Only set the make variable (not config.h)
+ eval "function opt_$safeopt { $setmkvar $varname \"\$(mkl_abspath \$(mkl_render \$1))\"; }"
+ else
+ # Standard argument: simply set the value
+ if [[ -z "$6" ]]; then
+ eval "function opt_$safeopt { $setallvar $varname \"\$1\"; }"
+ else
+ eval "function opt_$safeopt { $setallvar $varname \"$6\"; }"
+ fi
+ fi
+ fi
+
+ # If default value is provided and does not start with "$" (variable ref)
+ # then set it right away.
+ # $ variable refs are set after all checks have run during the
+ # generating step.
+ if [[ ${#5} != 0 ]] ; then
+ if [[ $5 = *\$* ]]; then
+ mkl_var_append "MKL_LATE_VARS" "opt_$safeopt:$5"
+ else
+ opt_$safeopt $5
+ fi
+ fi
+
+ if [[ ! -z $varname ]]; then
+ # Add variable to list
+ MKL_CONFVARS="$MKL_CONFVARS $varname"
+ fi
+
+}
+
+
+
+# Adds a toggle (--enable-X, --disable-X) option.
+# Arguments:
+# option group ("Standard", ..)
+# variable name (WITH_FOO)
+# option (--enable-foo, --enable-foo=*, or --enable-foo=req)
+# help ("foo.." ("Enable" and "Disable" will be prepended))
+# default (y or n)
+
+function mkl_toggle_option {
+
+ # Add option argument
+ mkl_option "$1" "$2" "$3" "$4" "$5"
+
+ # Add corresponding "--disable-foo" option for "--enable-foo".
+ local disname="${3/--enable/--disable}"
+ local dishelp="${4/Enable/Disable}"
+ mkl_option "$1" "$2" "$disname" "$dishelp" "" "n"
+}
+
+# Adds a toggle (--enable-X, --disable-X) option with builtin checker.
+# This is the library version.
+# Arguments:
+# option group ("Standard", ..)
+# config name (foo, must be same as pkg-config name)
+# variable name (WITH_FOO)
+# action (fail or disable)
+# option (--enable-foo)
+# help (defaults to "Enable <config name>")
+# linker flags (-lfoo)
+# default (y or n)
+
+function mkl_toggle_option_lib {
+
+ local help="$6"
+ [[ -z "$help" ]] && help="Enable $2"
+
+ # Add option argument
+ mkl_option "$1" "$3" "$5" "$help" "$8"
+
+ # Add corresponding "--disable-foo" option for "--enable-foo".
+ local disname="${5/--enable/--disable}"
+ local dishelp="${help/Enable/Disable}"
+ mkl_option "$1" "$3" "$disname" "$dishelp" "" "n"
+
+ # Create checks
+ eval "function _tmp_func { mkl_lib_check \"$2\" \"$3\" \"$4\" CC \"$7\"; }"
+ mkl_func_push MKL_CHECKS "$MKL_MODULE" _tmp_func
+}
+
+
+
+# Downloads, verifies checksum, and extracts an archive to
+# the current directory.
+#
+# Arguments:
+# url Archive URL
+# shabits The SHA algorithm bit count used to verify the checksum. E.g., "256".
+# checksum Expected checksum of archive (use "" to not perform check)
+function mkl_download_archive {
+ local url="$1"
+ local shabits="$2"
+ local exp_checksum="$3"
+
+ local tmpfile=$(mktemp _mkltmpXXXXXX)
+
+ # Try both wget and curl
+ if ! wget -nv -O "$tmpfile" "$url" ; then
+ if ! curl -fLsS -o "$tmpfile" "$url" ; then
+ rm -f "$tmpfile"
+ echo -e "ERROR: Download of $url failed" 1>&2
+ return 1
+ fi
+ fi
+
+ if [[ -n $exp_checksum ]]; then
+ # Verify checksum
+
+ local checksum_tool=""
+
+ # OSX has shasum by default, on Linux it is typically in
+ # some Perl package that may or may not be installed.
+ if $(which shasum >/dev/null 2>&1); then
+ checksum_tool="shasum -b -a ${shabits}"
+ else
+ # shaXsum is available in Linux coreutils
+ checksum_tool="sha${shabits}sum"
+ fi
+
+ local checksum=$($checksum_tool "$tmpfile" | cut -d' ' -f1)
+ if [[ $? -ne 0 ]]; then
+ rm -f "$tmpfile"
+ echo "ERROR: Failed to verify checksum of $url with $checksum_tool" 1>&2
+ return 1
+ fi
+
+ if [[ $checksum != $exp_checksum ]]; then
+ rm -f "$tmpfile"
+ echo "ERROR: $url: $checksum_tool: Checksum mismatch: expected $exp_checksum, calculated $checksum" 1>&2
+ return 1
+ fi
+
+ echo "### Checksum of $url verified ($checksum_tool):"
+ echo "### Expected: $exp_checksum"
+ echo "### Calculated: $checksum"
+ fi
+
+ tar xzf "$tmpfile" --strip-components 1
+ if [[ $? -ne 0 ]]; then
+ rm -f "$tmpfile"
+ echo "ERROR: $url: failed to extract archive" 1>&2
+ return 1
+ fi
+
+
+ rm -f "$tmpfile"
+ return 0
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.builtin b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.builtin
new file mode 100644
index 000000000..796528008
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.builtin
@@ -0,0 +1,70 @@
+#!/bin/bash
+#
+# mklove builtin checks and options
+# Sets:
+# prefix, etc..
+
+
+mkl_option "Standard" prefix "--prefix=PATH" \
+ "Install arch-independent files in PATH" "/usr/local"
+mkl_option "Standard" exec_prefix "--exec-prefix=PATH" \
+ "Install arch-dependent files in PATH" "\$prefix"
+mkl_option "Standard" bindir "--bindir=PATH" "User executables" "\$exec_prefix/bin"
+mkl_option "Standard" sbindir "--sbindir=PATH" "System admin executables" \
+ "\$exec_prefix/sbin"
+mkl_option "Standard" libexecdir "--libexecdir=PATH" "Program executables" \
+ "\$exec_prefix/libexec"
+mkl_option "Standard" datadir "--datadir=PATH" "Read-only arch-independent data" \
+ "\$prefix/share"
+mkl_option "Standard" sysconfdir "--sysconfdir=PATH" "Configuration data" \
+ "\$prefix/etc"
+mkl_option "Standard" sharedstatedir "--sharedstatedir=PATH" \
+ "Modifiable arch-independent data" "\$prefix/com"
+mkl_option "Standard" localstatedir "--localstatedir=PATH" \
+ "Modifiable local state data" "\$prefix/var"
+mkl_option "Standard" runstatedir "--runstatedir=PATH" \
+ "Modifiable per-process data" "\$prefix/var/run"
+mkl_option "Standard" libdir "--libdir=PATH" "Libraries" "\$exec_prefix/lib"
+mkl_option "Standard" includedir "--includedir=PATH" "C/C++ header files" \
+ "\$prefix/include"
+mkl_option "Standard" infodir "--infodir=PATH" "Info documentation" "\$prefix/info"
+mkl_option "Standard" mandir "--mandir=PATH" "Manual pages" "\$prefix/man"
+
+mkl_option "Configure tool" "" "--list-modules" "List loaded mklove modules"
+mkl_option "Configure tool" "" "--list-checks" "List checks"
+mkl_option "Configure tool" env:MKL_FAILFATAL "--fail-fatal" "All failures are fatal"
+mkl_option "Configure tool" env:MKL_NOCACHE "--no-cache" "Dont use or generate config.cache"
+mkl_option "Configure tool" env:MKL_DEBUG "--debug" "Enable configure debugging"
+mkl_option "Configure tool" env:MKL_CLEAN "--clean" "Remove generated configure files"
+mkl_option "Configure tool" "" "--reconfigure" "Rerun configure with same arguments as last run"
+mkl_option "Configure tool" env:MKL_NO_DOWNLOAD "--no-download" "Disable downloads of required mklove modules"
+mkl_option "Configure tool" env:MKL_UPDATE_MODS "--update-modules" "Update modules from global repository"
+mkl_option "Configure tool" env:MKL_REPO_URL "--repo-url=URL_OR_PATH" "Override mklove modules repo URL" "$MKL_REPO_URL"
+mkl_option "Configure tool" "" "--help" "Show configure usage"
+
+
+# These autoconf compatibility options are ignored by mklove
+mkl_toggle_option "Compatibility" "mk:COMPAT_MAINT_MODE" "--enable-maintainer-mode" "Maintainer mode (no-op)"
+mkl_option "Compatibility" "mk:PROGRAM_PREFIX" "--program-prefix=PFX" "Program prefix (no-op)"
+mkl_option "Compatibility" "mk:COMPAT_DISABLE_DEP_TRACK" "--disable-dependency-tracking" "Disable dependency tracking (no-op)"
+mkl_option "Compatibility" "mk:COMPAT_DISABLE_SILENT_RULES" "--disable-silent-rules" "Verbose build output (no-op)"
+mkl_option "Compatibility" "mk:COMPAT_SILENT" "--silent" "Less verbose build output (no-op)"
+mkl_toggle_option "Compatibility" "mk:COMPAT_ENABLE_SHARED" "--enable-shared" "Build shared library (no-op)"
+mkl_toggle_option "Compatibility" "mk:COMPAT_DISABLE_OPT_CHECK" '--enable-option-checking=*' "Disable configure option checking (no-op)"
+
+
+mkl_option "Dependency" env:MKL_INSTALL_DEPS "--install-deps" "Attempt to install missing dependencies"
+mkl_option "Dependency" env:MKL_SOURCE_DEPS_ONLY "--source-deps-only" "Only perform source builds of dependencies, not using any package managers"
+
+
+function checks {
+
+ if [[ ! -z $libdir ]]; then
+ mkl_mkvar_append "libdir" LDFLAGS "-L${libdir}"
+ fi
+
+ if [[ ! -z $includedir ]]; then
+ mkl_mkvar_append "includedir" CPPFLAGS "-I${includedir}"
+ fi
+
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cc b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cc
new file mode 100644
index 000000000..d29488383
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cc
@@ -0,0 +1,186 @@
+#!/bin/bash
+#
+# Compiler detection
+# Sets:
+# CC, CXX, CFLAGS, CPPFLAGS, LDFLAGS, ARFLAGS, PKG_CONFIG, INSTALL, MBITS
+
+
+mkl_require host
+
+function checks {
+
+ # C compiler
+ mkl_meta_set "ccenv" "name" "C compiler from CC env"
+ if ! mkl_command_check "ccenv" "WITH_CC" cont "$CC --version"; then
+ if mkl_command_check "gcc" "WITH_GCC" cont "gcc --version"; then
+ CC=gcc
+ elif mkl_command_check "clang" "WITH_CLANG" cont "clang --version"; then
+ CC=clang
+ elif mkl_command_check "cc" "WITH_CC" fail "cc --version"; then
+ CC=cc
+ fi
+ fi
+ export CC="${CC}"
+ mkl_mkvar_set CC CC "$CC"
+
+ if [[ $MKL_CC_WANT_CXX == 1 ]]; then
+ # C++ compiler
+ mkl_meta_set "cxxenv" "name" "C++ compiler from CXX env"
+ if ! mkl_command_check "cxxenv" "WITH_CXX" cont "$CXX --version" ; then
+ mkl_meta_set "gxx" "name" "C++ compiler (g++)"
+ mkl_meta_set "clangxx" "name" "C++ compiler (clang++)"
+ mkl_meta_set "cxx" "name" "C++ compiler (c++)"
+ if mkl_command_check "gxx" "WITH_GXX" cont "g++ --version"; then
+ CXX=g++
+ elif mkl_command_check "clangxx" "WITH_CLANGXX" cont "clang++ --version"; then
+ CXX=clang++
+ elif mkl_command_check "cxx" "WITH_CXX" fail "c++ --version"; then
+ CXX=c++
+ fi
+ fi
+ export CXX="${CXX}"
+ mkl_mkvar_set "CXX" CXX "$CXX"
+ fi
+
+ # Handle machine bits, if specified.
+ if [[ ! -z "$MBITS" ]]; then
+ mkl_meta_set mbits_m name "mbits compiler flag (-m$MBITS)"
+ if mkl_compile_check mbits_m "" fail CC "-m$MBITS"; then
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-m$MBITS"
+ mkl_mkvar_append LDFLAGS LDFLAGS "-m$MBITS"
+ fi
+ if [[ -z "$ARFLAGS" && $MBITS == 64 && $MKL_DISTRO == "sunos" ]]; then
+ # Turn on 64-bit archives on SunOS
+ mkl_mkvar_append ARFLAGS ARFLAGS "S"
+ fi
+ fi
+
+ # Provide prefix and checks for various other build tools.
+ local t=
+ for t in LD:ld NM:nm OBJDUMP:objdump STRIP:strip LIBTOOL:libtool RANLIB:ranlib ; do
+ local tenv=${t%:*}
+ t=${t#*:}
+ local tval="${!tenv}"
+
+ [[ -z $tval ]] && tval="$t"
+
+ if mkl_prog_check "$t" "" disable "$tval" ; then
+ if [[ $tval != ${!tenv} ]]; then
+ export "$tenv"="$tval"
+ fi
+ mkl_mkvar_set $tenv $tenv "$tval"
+ fi
+ done
+
+ # Compiler and linker flags
+ [[ ! -z $CFLAGS ]] && mkl_mkvar_set "CFLAGS" "CFLAGS" "$CFLAGS"
+ [[ ! -z $CPPFLAGS ]] && mkl_mkvar_set "CPPFLAGS" "CPPFLAGS" "$CPPFLAGS"
+ [[ ! -z $CXXFLAGS ]] && mkl_mkvar_set "CXXFLAGS" "CXXFLAGS" "$CXXFLAGS"
+ [[ ! -z $LDFLAGS ]] && mkl_mkvar_set "LDFLAGS" "LDFLAGS" "$LDFLAGS"
+ [[ ! -z $ARFLAGS ]] && mkl_mkvar_set "ARFLAGS" "ARFLAGS" "$ARFLAGS"
+
+ if [[ $MKL_NO_DEBUG_SYMBOLS != "y" ]]; then
+ # Add debug symbol flag (-g)
+ # OSX 10.9 requires -gstrict-dwarf for some reason.
+ mkl_meta_set cc_g_dwarf name "debug symbols compiler flag (-g...)"
+ if [[ $MKL_DISTRO == "osx" ]]; then
+ if mkl_compile_check cc_g_dwarf "" cont CC "-gstrict-dwarf"; then
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-gstrict-dwarf"
+ else
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-g"
+ fi
+ else
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-g"
+ fi
+ fi
+
+
+ # pkg-config
+ if [ -z "$PKG_CONFIG" ]; then
+ PKG_CONFIG=pkg-config
+ fi
+
+ if mkl_command_check "pkgconfig" "WITH_PKGCONFIG" cont "$PKG_CONFIG --version"; then
+ export PKG_CONFIG
+ fi
+ mkl_mkvar_set "pkgconfig" PKG_CONFIG $PKG_CONFIG
+
+ [[ ! -z "$append_PKG_CONFIG_PATH" ]] && mkl_env_append PKG_CONFIG_PATH "$append_PKG_CONFIG_PATH" ":"
+
+ # install
+ if [ -z "$INSTALL" ]; then
+ if [[ $MKL_DISTRO == "sunos" ]]; then
+ mkl_meta_set ginstall name "GNU install"
+ if mkl_command_check ginstall "" ignore "ginstall --version"; then
+ INSTALL=$(which ginstall)
+ else
+ INSTALL=$(which install)
+ fi
+ else
+ INSTALL=$(which install)
+ fi
+ fi
+
+ if mkl_command_check "install" "WITH_INSTALL" cont "$INSTALL --version"; then
+ export INSTALL
+ fi
+ mkl_mkvar_set "install" INSTALL $INSTALL
+
+
+ # Enable profiling if desired
+ if [[ $WITH_PROFILING == y ]]; then
+ mkl_allvar_set "" "WITH_PROFILING" "y"
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-pg"
+ mkl_mkvar_append LDFLAGS LDFLAGS "-pg"
+ fi
+
+ # Optimization
+ if [[ $WITHOUT_OPTIMIZATION == n ]]; then
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-O2"
+ else
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-O0"
+ fi
+
+ # Static linking
+ if [[ $WITH_STATIC_LINKING == y ]]; then
+ # LDFLAGS_STATIC is the LDFLAGS needed to enable static linking
+ # of sub-sequent libraries, while
+ # LDFLAGS_DYNAMIC is the LDFLAGS needed to enable dynamic linking.
+ if [[ $MKL_DISTRO != "osx" ]]; then
+ mkl_mkvar_set staticlinking LDFLAGS_STATIC "-Wl,-Bstatic"
+ mkl_mkvar_set staticlinking LDFLAGS_DYNAMIC "-Wl,-Bdynamic"
+ mkl_mkvar_set staticlinking HAS_LDFLAGS_STATIC y
+ else
+ # OSX linker can't enable/disable static linking so we'll
+ # need to find the .a through STATIC_LIB_libname env var
+ mkl_mkvar_set staticlinking HAS_LDFLAGS_STATIC n
+ # libtool -static supported
+ mkl_mkvar_set staticlinking HAS_LIBTOOL_STATIC y
+ fi
+ fi
+
+ # Check for GNU ar (which has the -M option)
+ mkl_meta_set "gnuar" "name" "GNU ar"
+ mkl_command_check "gnuar" "HAS_GNU_AR" disable \
+ "ar -V 2>/dev/null | grep -q GNU"
+}
+
+
+mkl_option "Compiler" "env:CC" "--cc=CC" "Build using C compiler CC" "\$CC"
+mkl_option "Compiler" "env:CXX" "--cxx=CXX" "Build using C++ compiler CXX" "\$CXX"
+mkl_option "Compiler" "ARCH" "--arch=ARCH" "Build for architecture" "$(uname -m)"
+mkl_option "Compiler" "CPU" "--cpu=CPU" "Build and optimize for specific CPU" "generic"
+mkl_option "Compiler" "MBITS" "--mbits=BITS" "Machine bits (32 or 64)" ""
+
+for n in CFLAGS CPPFLAGS CXXFLAGS LDFLAGS ARFLAGS; do
+ mkl_option "Compiler" "mk:$n" "--$n=$n" "Add $n flags"
+done
+
+mkl_option "Compiler" "env:append_PKG_CONFIG_PATH" "--pkg-config-path=EXTRA_PATHS" "Extra paths for pkg-config"
+
+mkl_option "Compiler" "WITH_PROFILING" "--enable-profiling" "Enable profiling"
+mkl_option "Compiler" "WITH_STATIC_LINKING" "--enable-static" "Enable static linking"
+mkl_option "Compiler" "WITHOUT_OPTIMIZATION" "--disable-optimization" "Disable optimization flag to compiler" "n"
+mkl_option "Compiler" "env:MKL_NO_DEBUG_SYMBOLS" "--disable-debug-symbols" "Disable debugging symbols" "n"
+mkl_option "Compiler" "env:MKL_WANT_WERROR" "--enable-werror" "Enable compiler warnings as errors" "n"
+mkl_option "Compiler" "WITH_STRIP" "--enable-strip" "Strip libraries when installing" "n"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cxx b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cxx
new file mode 100644
index 000000000..a38ac7367
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.cxx
@@ -0,0 +1,8 @@
+#!/bin/bash
+#
+# C++ detection
+#
+# This script simply limits the checks of configure.cc
+
+
+MKL_CC_WANT_CXX=1
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.fileversion b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.fileversion
new file mode 100644
index 000000000..9bea11786
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.fileversion
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Reads version from file and sets variables accordingly
+# The first non-commented line in the file is expected to be the version string.
+# Arguments:
+# filename
+# STR_VERSION_VARIABLE_NAME
+# [ HEX_VERSION_VARIABLE_NAME ]
+#
+# Example: Set string version in variable named "MYVERSION_STR" and
+# the hex representation in "MYVERSION"
+# mkl_require VERSION.txt MYVERSION_STR MYVERSION
+
+if [[ -z "$2" ]]; then
+ mkl_fail "fileversion" "none" "fail" "Missing argument(s), expected: FILENAME STR_VER HEX_VER"
+ return 0
+fi
+
+fileversion_file="$1"
+fileversion_strvar="$2"
+fileversion_hexvar="$3"
+
+function checks {
+ mkl_check_begin "fileversion" "" "no-cache" "version from file $fileversion_file"
+
+ if [[ ! -s $fileversion_file ]]; then
+ mkl_check_failed "fileversion" "" "fail" \
+ "Version file $fileversion_file is not readable"
+ return 1
+ fi
+
+ local orig=$(grep -v ^\# "$fileversion_file" | grep -v '^$' | head -1)
+ # Strip v prefix if any
+ orig=${orig#v}
+
+ # Try to decode version string into hex
+ # Supported format is "[v]NN.NN.NN[.NN]"
+ if [[ ! -z $fileversion_hexvar ]]; then
+ local hex=""
+ local s=${orig#v} # Strip v prefix, if any.
+ local ncnt=0
+ local n=
+ for n in ${s//./ } ; do
+ if [[ ! ( "$n" =~ ^[0-9][0-9]?$ ) ]]; then
+ mkl_check_failed "fileversion" "" "fail" \
+ "$fileversion_file: Could not decode '$orig' into hex version, expecting format 'NN.NN.NN[.NN]'"
+ return 1
+ fi
+ hex="$hex$(printf %02x $n)"
+ ncnt=$(expr $ncnt + 1)
+ done
+
+ if [[ ! -z $hex ]]; then
+ # Finish all four bytess
+ for n in {$ncnt..4} ; do
+ hex="$hex$(printf %02x 0)"
+ done
+ mkl_allvar_set "fileversion" "$fileversion_hexvar" "0x$hex"
+ fi
+ fi
+
+ mkl_allvar_set "fileversion" "$fileversion_strvar" "$orig"
+
+ mkl_check_done "fileversion" "" "cont" "ok" "${!fileversion_strvar}"
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.gitversion b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.gitversion
new file mode 100644
index 000000000..ad42291c7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.gitversion
@@ -0,0 +1,29 @@
+#!/bin/bash
+#
+# Sets version variable from git information.
+# Optional arguments:
+# "as"
+# VARIABLE_NAME
+#
+# Example: Set version in variable named "MYVERSION":
+# mkl_require gitversion as MYVERSION [default DEFVERSION]
+
+if [[ $1 == "as" ]]; then
+ shift
+ __MKL_GITVERSION_VARNAME="$1"
+ shift
+else
+ __MKL_GITVERSION_VARNAME="VERSION"
+fi
+
+if [[ $1 == "default" ]]; then
+ shift
+ __MKL_GITVERSION_DEFAULT="$1"
+ shift
+fi
+
+
+function checks {
+ mkl_allvar_set "gitversion" "$__MKL_GITVERSION_VARNAME" \
+ "$(git describe --abbrev=6 --tags HEAD --always 2>/dev/null || echo $__MKL_GITVERSION_DEFAULT)"
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.good_cflags b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.good_cflags
new file mode 100644
index 000000000..c8587f2e3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.good_cflags
@@ -0,0 +1,18 @@
+#!/bin/bash
+#
+# Provides some known-good CFLAGS
+# Sets:
+# CFLAGS
+# CXXFLAGS
+# CPPFLAGS
+
+
+function checks {
+ mkl_mkvar_append CPPFLAGS CPPFLAGS \
+ "-Wall -Wsign-compare -Wfloat-equal -Wpointer-arith -Wcast-align"
+
+ if [[ $MKL_WANT_WERROR = "y" ]]; then
+ mkl_mkvar_append CPPFLAGS CPPFLAGS \
+ "-Werror"
+ fi
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.host b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.host
new file mode 100644
index 000000000..155fecc08
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.host
@@ -0,0 +1,132 @@
+#!/bin/bash
+#
+# Host OS support
+# Sets:
+# HOST
+# BUILD
+# TARGET
+
+# FIXME: No need for this right now
+#mkl_require host_linux
+#mkl_require host_osx
+#mkl_require host_cygwin
+
+#mkl_option "Cross-compilation" "mk:HOST_OS" "--host-os=osname" "Host OS (linux,osx,cygwin,..)" "auto"
+
+
+# autoconf compatibility - does nothing at this point
+mkl_option "Cross-compilation" "mk:HOST" "--host=HOST" "Configure to build programs to run on HOST (no-op)"
+mkl_option "Cross-compilation" "mk:BUILD" "--build=BUILD" "Configure for building on BUILD (no-op)"
+mkl_option "Cross-compilation" "mk:TARGET" "--target=TARGET" "Configure for building cross-toolkits for platform TARGET (no-op)"
+
+
+# Resolve the OS/distro at import time, rather than as a check,
+# so that MKL_DISTRO is available to other modules at import time.
+function resolve_distro {
+ solib_ext=.so
+
+ # Try lsb_release
+ local sys
+ sys=$(lsb_release -is 2>/dev/null)
+ if [[ $? -gt 0 ]]; then
+ # That didnt work, try uname.
+ local kn=$(uname -s)
+ case $kn in
+ Linux)
+ sys=Linux
+ solib_ext=.so
+
+ if [[ -f /etc/os-release ]]; then
+ eval $(grep ^ID= /etc/os-release)
+ if [[ -n $ID ]]; then
+ sys="$ID"
+ fi
+ elif [[ -f /etc/centos-release ]]; then
+ sys=centos
+ elif [[ -f /etc/alpine-release ]]; then
+ sys=alpine
+ fi
+ ;;
+ Darwin)
+ sys=osx
+ solib_ext=.dylib
+ ;;
+ CYGWIN*)
+ sys=Cygwin
+ solib_ext=.dll
+ ;;
+ *)
+ sys="$kn"
+ solib_ext=.so
+ ;;
+ esac
+ fi
+
+ # Convert to lower case
+ sys=$(echo $sys | tr '[:upper:]' '[:lower:]')
+ mkl_mkvar_set "distro" "MKL_DISTRO" "$sys"
+ mkl_allvar_set "distro" "SOLIB_EXT" "$solib_ext"
+}
+
+resolve_distro
+
+
+function checks {
+ # Try to figure out what OS/distro we are running on.
+ mkl_check_begin "distro" "" "no-cache" "OS or distribution"
+
+ if [[ -z $MKL_DISTRO ]]; then
+ mkl_check_failed "distro" "" "ignore" ""
+ else
+ mkl_check_done "distro" "" "ignore" "ok" "$MKL_DISTRO"
+ fi
+}
+
+#function checks {
+# mkl_check_begin "host" "HOST_OS" "no-cache" "host OS"
+#
+# #
+# # If --host-os=.. was not specified then this is most likely not a
+# # a cross-compilation and we can base the host-os on the native OS.
+# #
+# if [[ $HOST_OS != "auto" ]]; then
+# mkl_check_done "host" "HOST_OS" "cont" "ok" "$HOST_OS"
+# return 0
+# fi
+#
+# kn=$(uname -s)
+# case $kn in
+# Linux)
+# hostos=linux
+# ;;
+# Darwin)
+# hostos=osx
+# ;;
+# CYGWIN*)
+# hostos=cygwin
+# ;;
+# *)
+# hostos="$(mkl_lower $kn)"
+# mkl_err "Unknown host OS kernel name: $kn"
+# mkl_err0 " Will attempt to load module host_$hostos anyway."
+# mkl_err0 " Please consider writing a configure.host_$hostos"
+# ;;
+# esac
+#
+# if ! mkl_require --try "host_$hostos"; then
+# # Module not found
+# mkl_check_done "host" "HOST_OS" "cont" "failed" "$kn?"
+# else
+# # Module loaded
+#
+# if mkl_func_exists "host_${hostos}_setup" ; then
+# "host_${hostos}_setup"
+# fi
+#
+# mkl_check_done "host" "HOST_OS" "cont" "ok" "$hostos"
+# fi
+#
+# # Set HOST_OS var even if probing failed.
+# mkl_mkvar_set "host" "HOST_OS" "$hostos"
+#}
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.lib b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.lib
new file mode 100644
index 000000000..49ed29368
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.lib
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# Module for building shared libraries
+# Sets:
+# WITH_GNULD | WITH_OSXLD
+# WITH_LDS - linker script support
+mkl_require pic
+
+function checks {
+
+ mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-shared'
+
+ # Check what arguments to pass to CC or LD for shared libraries
+ mkl_meta_set gnulib name "GNU-compatible linker options"
+ mkl_meta_set osxlib name "OSX linker options"
+
+ if mkl_compile_check gnulib WITH_GNULD cont CC \
+ "-shared -Wl,-soname,mkltest.0" "" ; then
+ # GNU linker
+ mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-Wl,-soname,$(LIBFILENAME)'
+
+ elif mkl_compile_check osxlib WITH_OSXLD cont CC \
+ "-dynamiclib -Wl,-install_name,/tmp/mkltest.so.0" ; then
+ # OSX linker
+ mkl_mkvar_append LIB_LDFLAGS LIB_LDFLAGS '-dynamiclib -Wl,-install_name,$(DESTDIR)$(libdir)/$(LIBFILENAME)'
+ fi
+
+ # Check what argument is needed for passing linker script.
+ local ldsfile=$(mktemp _mkltmpXXXXXX)
+ echo "{
+ global:
+ *;
+};
+" > $ldsfile
+
+ mkl_meta_set ldsflagvs name "GNU linker-script ld flag"
+ mkl_meta_set ldsflagm name "Solaris linker-script ld flag"
+ if mkl_compile_check ldsflagvs "" cont CC \
+ "-shared -Wl,--version-script=$ldsfile"; then
+ mkl_mkvar_set ldsflagvs LDFLAG_LINKERSCRIPT "-Wl,--version-script="
+ mkl_mkvar_set lib_lds WITH_LDS y
+ elif mkl_compile_check ldsflagm "" ignore CC \
+ "-shared -Wl,-M$ldsfile"; then
+ mkl_mkvar_set ldsflagm LDFLAG_LINKERSCRIPT "-Wl,-M"
+ mkl_mkvar_set lib_lds WITH_LDS y
+ fi
+
+ rm -f "$ldsfile"
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libcurl b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libcurl
new file mode 100644
index 000000000..05048745f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libcurl
@@ -0,0 +1,99 @@
+#!/bin/bash
+#
+# libcurl support, with installer
+#
+# Usage:
+# mkl_require libcurl
+#
+# And then call the following function from the correct place/order in checks:
+# mkl_check libcurl
+#
+
+mkl_toggle_option "Feature" ENABLE_CURL "--enable-curl" "Enable HTTP client (using libcurl)" "try"
+
+function manual_checks {
+ case "$ENABLE_CURL" in
+ n) return 0 ;;
+ y) local action=fail ;;
+ try) local action=disable ;;
+ *) mkl_err "mklove internal error: invalid value for ENABLE_CURL: $ENABLE_CURL"; exit 1 ;;
+ esac
+
+ mkl_meta_set "libcurl" "apk" "curl-dev curl-static"
+ mkl_meta_set "libcurl" "deb" "libcurl4-openssl-dev"
+ mkl_meta_set "libcurl" "static" "libcurl.a"
+ if [[ $MKL_DISTRO == "osx" && $WITH_STATIC_LINKING ]]; then
+ mkl_env_append LDFLAGS "-framework CoreFoundation -framework SystemConfiguration"
+ mkl_mkvar_append "libcurl" MKL_PKGCONFIG_LIBS_PRIVATE "-framework CoreFoundation -framework SystemConfiguration"
+ fi
+ mkl_lib_check "libcurl" "WITH_CURL" $action CC "-lcurl" \
+ "
+#include <curl/curl.h>
+
+void foo (void) {
+ curl_global_init(CURL_GLOBAL_DEFAULT);
+}
+"
+}
+
+
+# Install curl from source tarball
+#
+# Param 1: name (libcurl)
+# Param 2: install-dir-prefix (e.g., DESTDIR)
+# Param 2: version (optional)
+function install_source {
+ local name=$1
+ local destdir=$2
+ local ver=7.86.0
+ local checksum="3dfdd39ba95e18847965cd3051ea6d22586609d9011d91df7bc5521288987a82"
+
+ echo "### Installing $name $ver from source to $destdir"
+ if [[ ! -f Makefile ]]; then
+ mkl_download_archive \
+ "https://curl.se/download/curl-${ver}.tar.gz" \
+ 256 \
+ $checksum || return 1
+ fi
+
+ # curl's configure has a runtime check where a program is built
+ # with all libs linked and then executed, since mklove's destdir
+ # is outside the standard ld.so search path this runtime check will
+ # fail due to missing libraries.
+ # We patch curl's configure file to skip this check altogether.
+ if ! mkl_patch libcurl 0000 ; then
+ return 1
+ fi
+
+ # Clear out LIBS to not interfer with lib detection process.
+ LIBS="" ./configure \
+ --with-openssl \
+ --enable-static \
+ --disable-shared \
+ --disable-ntlm{,-wb} \
+ --disable-dict \
+ --disable-ftp \
+ --disable-file \
+ --disable-gopher \
+ --disable-imap \
+ --disable-mqtt \
+ --disable-pop3 \
+ --disable-rtsp \
+ --disable-smb \
+ --disable-smtp \
+ --disable-telnet \
+ --disable-tftp \
+ --disable-manual \
+ --disable-ldap{,s} \
+ --disable-libcurl-option \
+ --without-{librtmp,libidn2,winidn,nghttp2,nghttp3,ngtcp2,quiche,brotli} &&
+ time make -j &&
+ make DESTDIR="${destdir}" prefix=/usr install
+ local ret=$?
+
+ if [[ $MKL_DISTRO == osx ]]; then
+ mkl_mkvar_append "libcurl" LIBS "-framework CoreFoundation -framework SystemConfiguration"
+ fi
+
+ return $ret
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libsasl2 b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libsasl2
new file mode 100644
index 000000000..e148e03da
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libsasl2
@@ -0,0 +1,36 @@
+#!/bin/bash
+#
+# libsasl2 support (for GSSAPI/Kerberos), without source installer.
+#
+# Usage:
+# mkl_require libsasl2
+#
+#
+# And then call the following function from the correct place/order in checks:
+# mkl_check libsasl2
+#
+
+mkl_toggle_option "Feature" ENABLE_GSSAPI "--enable-gssapi" "Enable SASL GSSAPI support with Cyrus libsasl2" "try"
+mkl_toggle_option "Feature" ENABLE_GSSAPI "--enable-sasl" "Deprecated: Alias for --enable-gssapi"
+
+function manual_checks {
+ case "$ENABLE_GSSAPI" in
+ n) return 0 ;;
+ y) local action=fail ;;
+ try) local action=disable ;;
+ *) mkl_err "mklove internal error: invalid value for ENABLE_GSSAPI: $ENABLE_GSSAPI"; exit 1 ;;
+ esac
+
+ mkl_meta_set "libsasl2" "deb" "libsasl2-dev"
+ mkl_meta_set "libsasl2" "rpm" "cyrus-sasl"
+ mkl_meta_set "libsasl2" "apk" "cyrus-sasl-dev"
+
+ local sasl_includes="
+#include <stddef.h>
+#include <sasl/sasl.h>
+"
+
+ if ! mkl_lib_check "libsasl2" "WITH_SASL_CYRUS" $action CC "-lsasl2" "$sasl_includes" ; then
+ mkl_lib_check "libsasl" "WITH_SASL_CYRUS" $action CC "-lsasl" "$sasl_includes"
+ fi
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libssl b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libssl
new file mode 100644
index 000000000..8ce586422
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libssl
@@ -0,0 +1,147 @@
+#!/bin/bash
+#
+# libssl and libcrypto (OpenSSL or derivate) support, with installer.
+# Requires OpenSSL version v1.0.1 or later.
+#
+# Usage:
+# mkl_require libssl
+#
+
+# And then call the following function from the correct place/order in checks:
+# mkl_check libssl
+#
+#
+# This module is a bit hacky since OpenSSL provides both libcrypto and libssl,
+# the latter depending on the former, but from a user perspective it is
+# SSL that is the feature, not crypto.
+
+mkl_toggle_option "Feature" ENABLE_SSL "--enable-ssl" "Enable SSL support" "try"
+
+
+function manual_checks {
+ case "$ENABLE_SSL" in
+ n) return 0 ;;
+ y) local action=fail ;;
+ try) local action=disable ;;
+ *) mkl_err "mklove internal error: invalid value for ENABLE_SSL: $ENABLE_SSL"; exit 1 ;;
+ esac
+
+ if [[ $MKL_SOURCE_DEPS_ONLY != y && $MKL_DISTRO == "osx" ]]; then
+ # Add brew's OpenSSL pkg-config path on OSX
+ # to avoid picking up the outdated system-provided openssl/libcrypto.
+ mkl_env_append PKG_CONFIG_PATH "/usr/local/opt/openssl/lib/pkgconfig" ":"
+ # and similar path for M1 brew location
+ mkl_env_append PKG_CONFIG_PATH "/opt/homebrew/opt/openssl/lib/pkgconfig" ":"
+ fi
+
+ # OpenSSL provides both libcrypto and libssl
+ if [[ $WITH_STATIC_LINKING != y ]]; then
+ # Debian's OpenSSL static libraries are broken.
+ mkl_meta_set "libcrypto" "deb" "libssl-dev"
+ fi
+ mkl_meta_set "libcrypto" "rpm" "openssl-devel"
+ mkl_meta_set "libcrypto" "brew" "openssl"
+ mkl_meta_set "libcrypto" "apk" "openssl-dev"
+ mkl_meta_set "libcrypto" "static" "libcrypto.a"
+
+ if ! mkl_lib_check "libcrypto" "" $action CC "-lcrypto" "
+#include <openssl/ssl.h>
+#include <openssl/evp.h>
+#if OPENSSL_VERSION_NUMBER < 0x1000100fL
+#error \"Requires OpenSSL version >= v1.0.1\"
+#endif"; then
+ return
+ fi
+
+
+ #
+ # libssl
+ #
+ mkl_meta_set "libssl" "static" "libssl.a"
+
+ if [[ $(mkl_meta_get "libcrypto" "installed_with") == "source" ]]; then
+ # Try to resolve the libssl.a static library path based on the
+ # libcrypto (openssl) install path.
+ mkl_resolve_static_libs "libssl" "$(mkl_dep_destdir libcrypto)"
+ fi
+
+ mkl_lib_check "libssl" "WITH_SSL" $action CC "-lssl -lcrypto" \
+ "#include <openssl/ssl.h>
+#if OPENSSL_VERSION_NUMBER < 0x1000100fL
+#error \"Requires OpenSSL version >= v1.0.1\"
+#endif"
+
+ # Silence OpenSSL 3.0.0 deprecation warnings since they'll make
+ # -Werror fail.
+ if ! mkl_compile_check --sub "libcrypto" "" "" CC "-lcrypto" "
+#include <openssl/ssl.h>
+#if OPENSSL_VERSION_NUMBER >= 0x30000000L
+#error \"OpenSSL version >= v3.0.0 needs OPENSSL_SUPPRESS_DEPRECATED\"
+#endif"; then
+ mkl_define_set "libcrypto" OPENSSL_SUPPRESS_DEPRECATED
+ fi
+}
+
+
+ # Install libcrypto/libssl from source tarball on linux.
+ #
+ # Param 1: name (libcrypto)
+ # Param 2: install-dir-prefix (e.g., DESTDIR)
+ # Param 2: version (optional)
+function libcrypto_install_source {
+ local name=$1
+ local destdir=$2
+ local ver=3.0.8
+ local checksum="6c13d2bf38fdf31eac3ce2a347073673f5d63263398f1f69d0df4a41253e4b3e"
+ local url=https://www.openssl.org/source/openssl-${ver}.tar.gz
+
+ local conf_args="--prefix=/usr --openssldir=/usr/lib/ssl no-shared no-zlib"
+
+ if [[ $ver == 1.0.* ]]; then
+ conf_args="${conf_args} no-krb5"
+ fi
+
+ if [[ $ver != 3.* ]]; then
+ # OpenSSL 3 deprecates ENGINE support, but we still need it, so only
+ # add no-deprecated to non-3.x builds.
+ conf_args="${conf_args} no-deprecated"
+ fi
+
+ # 1.1.1q tests fail to build on OSX/M1, so disable them.
+ if [[ $MKL_DISTRO == osx && $ver == 1.1.1q ]]; then
+ conf_args="${conf_args} no-tests"
+ fi
+
+ echo "### Installing $name $ver from source ($url) to $destdir"
+ if [[ ! -f config ]]; then
+ echo "### Downloading"
+ mkl_download_archive "$url" "256" "$checksum" || return 1
+ fi
+
+ if [[ $MKL_DISTRO == "osx" ]]; then
+ # Workaround build issue in 1.1.1l on OSX with older toolchains.
+ if [[ $ver == 1.1.1l ]]; then
+ if ! mkl_patch libssl 0000 ; then
+ return 1
+ fi
+ fi
+
+ # Silence a load of warnings on OSX
+ conf_args="${conf_args} -Wno-nullability-completeness"
+ fi
+
+ echo "### Configuring with args $conf_args"
+ ./config $conf_args || return $?
+
+ echo "### Building"
+ make
+
+ echo "### Installing to $destdir"
+ if [[ $ver == 1.0.* ]]; then
+ make INSTALL_PREFIX="$destdir" install_sw
+ else
+ make DESTDIR="$destdir" install
+ fi
+
+ return $?
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libzstd b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libzstd
new file mode 100644
index 000000000..e32378f78
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.libzstd
@@ -0,0 +1,58 @@
+#!/bin/bash
+#
+# libzstd support, with installer
+#
+# Usage:
+# mkl_require libzstd
+#
+# And then call the following function from the correct place/order in checks:
+# mkl_check libzstd
+#
+
+mkl_toggle_option "Feature" ENABLE_ZSTD "--enable-zstd" "Enable support for ZSTD compression" "try"
+
+function manual_checks {
+ case "$ENABLE_ZSTD" in
+ n) return 0 ;;
+ y) local action=fail ;;
+ try) local action=disable ;;
+ *) mkl_err "mklove internal error: invalid value for ENABLE_ZSTD: $ENABLE_ZSTD"; exit 1 ;;
+ esac
+
+ mkl_meta_set "libzstd" "brew" "zstd"
+ mkl_meta_set "libzstd" "apk" "zstd-dev zstd-static"
+ mkl_meta_set "libzstd" "static" "libzstd.a"
+ mkl_lib_check "libzstd" "WITH_ZSTD" $action CC "-lzstd" \
+ "
+#include <zstd.h>
+#include <zstd_errors.h>
+
+void foo (void) {
+ ZSTD_getFrameContentSize(NULL, 0);
+}
+"
+}
+
+
+# Install zstd from source tarball
+#
+# Param 1: name (libzstd)
+# Param 2: install-dir-prefix (e.g., DESTDIR)
+# Param 2: version (optional)
+function install_source {
+ local name=$1
+ local destdir=$2
+ local ver=1.5.2
+ local checksum="7c42d56fac126929a6a85dbc73ff1db2411d04f104fae9bdea51305663a83fd0"
+
+ echo "### Installing $name $ver from source to $destdir"
+ if [[ ! -f Makefile ]]; then
+ mkl_download_archive \
+ "https://github.com/facebook/zstd/releases/download/v${ver}/zstd-${ver}.tar.gz" \
+ "256" \
+ $checksum || return 1
+ fi
+
+ time make -j DESTDIR="${destdir}" prefix=/usr install
+ return $?
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.parseversion b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.parseversion
new file mode 100644
index 000000000..0ee0f577e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.parseversion
@@ -0,0 +1,95 @@
+#!/bin/bash
+#
+# Parses the provided version string and creates variables accordingly.
+# [ "hex2str" <fmt> ] -- version-string is in hex (e.g., 0x00080300)
+# version-string
+# STR_VERSION_VARIABLE_NAME
+# [ HEX_VERSION_VARIABLE_NAME ]
+#
+# Note: The version will also be set in MKL_APP_VERSION
+#
+# Example: Set string version in variable named "MYVERSION_STR" and
+# the hex representation in "MYVERSION"
+# mkl_require parseversion "$(head -1 VERSION.txt)" MYVERSION_STR MYVERSION
+
+if [[ $1 == "hex2str" ]]; then
+ parseversion_type="hex"
+ parseversion_fmt="${2}:END:%d%d%d%d"
+ shift
+ shift
+else
+ parseversion_type=""
+ parseversion_fmt="%d.%d.%d.%d"
+fi
+
+if [[ -z "$2" ]]; then
+ mkl_fail "parseversion" "none" "fail" "Missing argument(s)"
+ return 0
+fi
+
+parseversion_orig="$1"
+parseversion_strvar="$2"
+parseversion_hexvar="$3"
+
+function checks {
+ mkl_check_begin --verb "parsing" "parseversion" "" "no-cache" \
+ "version '$parseversion_orig'"
+
+ # Strip v prefix if any
+ orig=${parseversion_orig#v}
+
+ if [[ $orig == 0x* ]]; then
+ parseversion_type="hex"
+ orig=${orig#0x}
+ fi
+
+ if [[ -z $orig ]]; then
+ mkl_check_failed "parseversion" "" "fail" "Version string is empty"
+ return 1
+ fi
+
+ # If orig is in hex we construct a string format instead.
+ if [[ $parseversion_type == "hex" ]]; then
+ local s=$orig
+ local str=""
+ local vals=""
+ while [[ ! -z $s ]]; do
+ local n=${s:0:2}
+ s=${s:${#n}}
+ vals="${vals}$(printf %d 0x$n) "
+ done
+ str=$(printf "$parseversion_fmt" $vals)
+ orig=${str%:END:*}
+ fi
+
+
+ # Try to decode version string into hex
+ # Supported format is "[v]NN.NN.NN[.NN]"
+ if [[ ! -z $parseversion_hexvar ]]; then
+ local hex=""
+ local s=$orig
+ local ncnt=0
+ local n=
+ for n in ${s//./ } ; do
+ if [[ ! ( "$n" =~ ^[0-9][0-9]?$ ) ]]; then
+ mkl_check_failed "parseversion" "" "fail" \
+ "Could not decode '$parseversion_orig' into hex version, expecting format 'NN.NN.NN[.NN]'"
+ return 1
+ fi
+ hex="$hex$(printf %02x $n)"
+ ncnt=$(expr $ncnt + 1)
+ done
+
+ if [[ ! -z $hex ]]; then
+ # Finish all four bytess
+ while [[ ${#hex} -lt 8 ]]; do
+ hex="$hex$(printf %02x 0)"
+ done
+ mkl_allvar_set "parseversion" "$parseversion_hexvar" "0x$hex"
+ fi
+ fi
+
+ mkl_allvar_set "parseversion" "$parseversion_strvar" "$orig"
+ mkl_allvar_set "parseversion" MKL_APP_VERSION "$orig"
+ mkl_check_done "parseversion" "" "cont" "ok" "${!parseversion_strvar}"
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.pic b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.pic
new file mode 100644
index 000000000..8f138f8d5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.pic
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+# Checks if -fPIC is supported, and if so turns it on.
+#
+# Sets:
+# HAVE_PIC
+# CPPFLAGS
+#
+
+function checks {
+
+ if mkl_compile_check PIC HAVE_PIC disable CC "-fPIC" "" ; then
+ mkl_mkvar_append CPPFLAGS CPPFLAGS "-fPIC"
+ fi
+}
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.socket b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.socket
new file mode 100644
index 000000000..f0777ab3b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.socket
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+# Provides proper compiler flags for socket support, e.g. socket(3).
+
+function checks {
+
+ local src="
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <unistd.h>
+void foo (void) {
+ int s = socket(0, 0, 0);
+ close(s);
+}"
+ if ! mkl_compile_check socket "" cont CC "" "$src"; then
+ if mkl_compile_check --ldflags="-lsocket -lnsl" socket_nsl "" fail CC "" "$src"; then
+ mkl_mkvar_append socket_nsl LIBS "-lsocket -lnsl"
+ fi
+ fi
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.zlib b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.zlib
new file mode 100644
index 000000000..08333e947
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/configure.zlib
@@ -0,0 +1,61 @@
+#!/bin/bash
+#
+# zlib support, with installer
+#
+# Usage:
+# mkl_require zlib
+#
+# And then call the following function from the correct place/order in checks:
+# mkl_check zlib
+#
+
+mkl_toggle_option "Feature" ENABLE_ZLIB "--enable-zlib" "Enable support for zlib compression" "try"
+
+function manual_checks {
+ case "$ENABLE_ZLIB" in
+ n) return 0 ;;
+ y) local action=fail ;;
+ try) local action=disable ;;
+ *) mkl_err "mklove internal error: invalid value for ENABLE_ZLIB: $ENABLE_ZLIB"; exit 1 ;;
+ esac
+
+ mkl_meta_set "zlib" "apk" "zlib-dev"
+ mkl_meta_set "zlib" "static" "libz.a"
+ mkl_lib_check "zlib" "WITH_ZLIB" $action CC "-lz" \
+ "
+#include <stddef.h>
+#include <zlib.h>
+
+void foo (void) {
+ z_stream *p = NULL;
+ inflate(p, 0);
+}
+"
+}
+
+
+# Install zlib from source tarball
+#
+# Param 1: name (zlib)
+# Param 2: install-dir-prefix (e.g., DESTDIR)
+# Param 2: version (optional)
+function install_source {
+ local name=$1
+ local destdir=$2
+ local ver=1.2.13
+ local checksum="b3a24de97a8fdbc835b9833169501030b8977031bcb54b3b3ac13740f846ab30"
+
+ echo "### Installing $name $ver from source to $destdir"
+ if [[ ! -f Makefile ]]; then
+ mkl_download_archive \
+ "https://zlib.net/fossils/zlib-${ver}.tar.gz" \
+ "256" \
+ "$checksum" || return 1
+ fi
+
+ CFLAGS=-fPIC ./configure --static --prefix=/usr
+ make -j
+ make test
+ make DESTDIR="${destdir}" install
+ return $?
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/README.md b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/README.md
new file mode 100644
index 000000000..1208dc86d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/README.md
@@ -0,0 +1,8 @@
+This directory contains patches to dependencies used by the source installers in configure.*
+
+
+Patch filename format is:
+<module>.NNNN-description_of_patch.patch
+
+Where module is the configure.<module> name, NNNN is the patch apply order, e.g. 0000.
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch
new file mode 100644
index 000000000..6623b22fb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libcurl.0000-no-runtime-linking-check.patch
@@ -0,0 +1,11 @@
+--- a/configure 2022-06-27 12:15:45.000000000 +0200
++++ b/configure 2022-06-27 12:17:20.000000000 +0200
+@@ -33432,7 +33432,7 @@
+
+
+
+- if test "x$cross_compiling" != xyes; then
++ if false; then
+
+ { printf "%s\n" "$as_me:${as_lineno-$LINENO}: checking run-time libs availability" >&5
+ printf %s "checking run-time libs availability... " >&6; }
diff --git a/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch
new file mode 100644
index 000000000..b0e37e325
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/mklove/modules/patches/libssl.0000-osx-rand-include-fix-OpenSSL-PR16409.patch
@@ -0,0 +1,56 @@
+From cef404f1e7a598166cbc2fd2e0048f7e2d752ad5 Mon Sep 17 00:00:00 2001
+From: David Carlier <devnexen@gmail.com>
+Date: Tue, 24 Aug 2021 22:40:14 +0100
+Subject: [PATCH] Darwin platform allows to build on releases before
+ Yosemite/ios 8.
+
+issue #16407 #16408
+---
+ crypto/rand/rand_unix.c | 5 +----
+ include/crypto/rand.h | 10 ++++++++++
+ 2 files changed, 11 insertions(+), 4 deletions(-)
+
+diff --git a/crypto/rand/rand_unix.c b/crypto/rand/rand_unix.c
+index 43f1069d151d..0f4525106af7 100644
+--- a/crypto/rand/rand_unix.c
++++ b/crypto/rand/rand_unix.c
+@@ -34,9 +34,6 @@
+ #if defined(__OpenBSD__)
+ # include <sys/param.h>
+ #endif
+-#if defined(__APPLE__)
+-# include <CommonCrypto/CommonRandom.h>
+-#endif
+
+ #if defined(OPENSSL_SYS_UNIX) || defined(__DJGPP__)
+ # include <sys/types.h>
+@@ -381,7 +378,7 @@ static ssize_t syscall_random(void *buf, size_t buflen)
+ if (errno != ENOSYS)
+ return -1;
+ }
+-# elif defined(__APPLE__)
++# elif defined(OPENSSL_APPLE_CRYPTO_RANDOM)
+ if (CCRandomGenerateBytes(buf, buflen) == kCCSuccess)
+ return (ssize_t)buflen;
+
+diff --git a/include/crypto/rand.h b/include/crypto/rand.h
+index 5350d3a93119..674f840fd13c 100644
+--- a/include/crypto/rand.h
++++ b/include/crypto/rand.h
+@@ -20,6 +20,16 @@
+
+ # include <openssl/rand.h>
+
++# if defined(__APPLE__) && !defined(OPENSSL_NO_APPLE_CRYPTO_RANDOM)
++# include <Availability.h>
++# if (defined(__MAC_OS_X_VERSION_MIN_REQUIRED) && __MAC_OS_X_VERSION_MIN_REQUIRED >= 101000) || \
++ (defined(__IPHONE_OS_VERSION_MIN_REQUIRED) && __IPHONE_OS_VERSION_MIN_REQUIRED >= 80000)
++# define OPENSSL_APPLE_CRYPTO_RANDOM 1
++# include <CommonCrypto/CommonCryptoError.h>
++# include <CommonCrypto/CommonRandom.h>
++# endif
++# endif
++
+ /* forward declaration */
+ typedef struct rand_pool_st RAND_POOL;
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/RELEASE.md b/fluent-bit/lib/librdkafka-2.1.0/packaging/RELEASE.md
new file mode 100644
index 000000000..930636db4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/RELEASE.md
@@ -0,0 +1,311 @@
+# librdkafka release process
+
+This guide outlines the steps needed to release a new version of librdkafka
+and publish packages to channels (NuGet, Homebrew, etc,..).
+
+Releases are done in two phases:
+ * release-candidate(s) - RC1 will be the first release candidate, and any
+ changes to the repository will require a new RC.
+ * final release - the final release is based directly on the last RC tag
+ followed by a single version-bump commit (see below).
+
+Release tag and version format:
+ * tagged release builds to verify CI release builders: vA.B.C-PREn
+ * release-candidate: vA.B.C-RCn
+ * final release: vA.B.C
+
+
+## Update protocol requests and error codes
+
+Check out the latest version of Apache Kafka (not trunk, needs to be a released
+version since protocol may change on trunk).
+
+### Protocol request types
+
+Generate protocol request type codes with:
+
+ $ src/generate_proto.sh ~/src/your-kafka-dir
+
+Cut'n'paste the new defines and strings to `rdkafka_protocol.h` and
+`rdkafka_proto.h`.
+
+### Error codes
+
+Error codes must currently be parsed manually, open
+`clients/src/main/java/org/apache/kafka/common/protocol/Errors.java`
+in the Kafka source directory and update the `rd_kafka_resp_err_t` and
+`RdKafka::ErrorCode` enums in `rdkafka.h` and `rdkafkacpp.h`
+respectively.
+Add the error strings to `rdkafka.c`.
+The Kafka error strings are sometimes a bit too verbose for our taste,
+so feel free to rewrite them (usually removing a couple of 'the's).
+Error strings must not contain a trailing period.
+
+**NOTE**: Only add **new** error codes, do not alter existing ones since that
+ will be a breaking API change.
+
+
+## Run regression tests
+
+**Build tests:**
+
+ $ cd tests
+ $ make -j build
+
+**Run the full regression test suite:** (requires Linux and the trivup python package)
+
+ $ make full
+
+
+If all tests pass, carry on, otherwise identify and fix bug and start over.
+
+
+
+## Write release notes / changelog
+
+All relevant PRs should also include an update to [CHANGELOG.md](../CHANGELOG.md)
+that in a user-centric fashion outlines what changed.
+It might not be practical for all contributors to write meaningful changelog
+entries, so it is okay to add them separately later after the PR has been
+merged (make sure to credit community contributors for their work).
+
+The changelog should include:
+ * What type of release (maintenance or feature release)
+ * A short intro to the release, describing the type of release: maintenance
+ or feature release, as well as fix or feature high-lights.
+ * A section of **New features**, if any.
+ * A section of **Upgrade considerations**, if any, to outline important changes
+ that require user attention.
+ * A section of **Enhancements**, if any.
+ * A section of **Fixes**, if any, preferably with Consumer, Producer, and
+ Generic sub-sections.
+
+
+## Pre-release code tasks
+
+**Switch to the release branch which is of the format `A.B.C.x` or `A.B.x`.**
+
+ $ git checkout -b 0.11.1.x
+
+
+**Update in-code versions.**
+
+The last octet in the version hex number is the pre-build/release-candidate
+number, where 0xAABBCCff is the final release for version 0xAABBCC.
+Release candidates start at 200, thus 0xAABBCCc9 is RC1, 0xAABBCCca is RC2, etc.
+
+Change the `RD_KAFKA_VERSION` defines in both `src/rdkafka.h` and
+`src-cpp/rdkafkacpp.h` to the version to build, such as 0x000b01c9
+for v0.11.1-RC1, or 0x000b01ff for the final v0.11.1 release.
+Update the librdkafka version in `vcpkg.json`.
+
+ # Update defines
+ $ $EDITOR src/rdkafka.h src-cpp/rdkafkacpp.h vcpkg.json
+
+ # Reconfigure and build
+ $ ./configure
+ $ make
+
+ # Check git diff for correctness
+ $ git diff
+
+ # Commit
+ $ git commit -m "Version v0.11.1-RC1" src/rdkafka.h src-cpp/rdkafkacpp.h
+
+
+**Create tag.**
+
+ $ git tag v0.11.1-RC1 # for an RC
+ # or for the final release:
+ $ git tag v0.11.1 # for the final release
+
+
+**Push branch and commit to github**
+
+ # Dry-run first to make sure things look correct
+ $ git push --dry-run origin 0.11.1.x
+
+ # Live
+ $ git push origin 0.11.1.x
+**Push tags and commit to github**
+
+ # Dry-run first to make sure things look correct.
+ $ git push --dry-run --tags origin v0.11.1-RC1
+
+ # Live
+ $ git push --tags origin v0.11.1-RC1
+
+
+## Creating packages
+
+As soon as a tag is pushed the CI system (SemaphoreCI) will start its
+build pipeline and eventually upload packaging artifacts to the SemaphoreCI
+project artifact store.
+
+Monitor the Semaphore CI project page to know when the build pipeline
+is finished, then download the relevant artifacts for further use, see
+*The artifact pipeline* chapter below.
+
+
+## Publish release on github
+
+Create a release on github by going to https://github.com/edenhill/librdkafka/releases
+and Draft a new release.
+Name the release the same as the final release tag (e.g., `v1.9.0`) and set
+the tag to the same.
+Paste the CHANGELOG.md section for this release into the release description,
+look at the preview and fix any formatting issues.
+
+Run the following command to get checksums of the github release assets:
+
+ $ packaging/tools/gh-release-checksums.py <the-tag>
+
+It will take some time for the script to download the files, when done
+paste the output to the end of the release page.
+
+Make sure the release page looks okay, is still correct (check for new commits),
+and has the correct tag, then click Publish release.
+
+
+
+### Homebrew recipe update
+
+**Note**: This is typically not needed since homebrew seems to pick up new
+ release versions quickly enough. Recommend you skip this step.
+
+The brew-update-pr.sh script automatically pushes a PR to homebrew-core
+with a patch to update the librdkafka version of the formula.
+This should only be done for final releases and not release candidates.
+
+On a MacOSX host with homebrew installed:
+
+ $ cd package/homebrew
+ # Dry-run first to see that things are okay.
+ $ ./brew-update-pr.sh v0.11.1
+ # If everything looks good, do the live push:
+ $ ./brew-update-pr.sh --upload v0.11.1
+
+
+### Deb and RPM packaging
+
+Debian and RPM packages are generated by Confluent packaging, called
+Independent client releases, which is a separate non-public process and the
+resulting packages are made available on Confluent's client deb and rpm
+repositories.
+
+That process is outside the scope of this document.
+
+See the Confluent docs for instructions how to access these packages:
+https://docs.confluent.io/current/installation.html
+
+
+
+
+## Build and release artifacts
+
+The following chapter explains what, how, and where artifacts are built.
+It also outlines where these artifacts are used.
+
+### So what is an artifact?
+
+An artifact is a build of the librdkafka library, dynamic/shared and/or static,
+with a certain set of external or built-in dependencies, for a specific
+architecture and operating system (and sometimes even operating system version).
+
+If you build librdkafka from source with no special `./configure` arguments
+you will end up with:
+
+ * a dynamically linked library (e.g., `librdkafka.so.1`)
+ with a set of dynamically linked external dependencies (OpenSSL, zlib, etc),
+ all depending on what dependencies are available on the build host.
+
+ * a static library (`librdkafka.a`) that will have external dependencies
+ that needs to be linked dynamically. There is no way for a static library
+ to express link dependencies, so there will also be `rdkafka-static.pc`
+ pkg-config file generated that contains linker flags for the external
+ dependencies.
+ Those external dependencies are however most likely only available on the
+ build host, so this static library is not particularily useful for
+ repackaging purposes (such as for high-level clients using librdkafka).
+
+ * a self-contained static-library (`librdkafka-static.a`) which attempts
+ to contain static versions of all external dependencies, effectively making
+ it possible to link just with `librdkafka-static.a` to get all
+ dependencies needed.
+ Since the state of static libraries in the various distro and OS packaging
+ systems is of varying quality and availability, it is usually not possible
+ for the librdkafka build system (mklove) to generate this completely
+ self-contained static library simply using dependencies available on the
+ build system, and the make phase of the build will emit warnings when it
+ can't bundle all external dependencies due to this.
+ To circumvent this problem it is possible for the build system (mklove)
+ to download and build static libraries of all needed external dependencies,
+ which in turn allows it to create a complete bundle of all dependencies.
+ This results in a `librdkafka-static.a` that has no external dependecies
+ other than the system libraries (libc, pthreads, rt, etc).
+ To achieve this you will need to pass
+ `--install-deps --source-deps-only --enable-static` to
+ librdkafka's `./configure`.
+
+ * `rdkafka.pc` and `rdkafka-static.pc` pkg-config files that tells
+ applications and libraries that depend on librdkafka what external
+ dependencies are needed to successfully link with librdkafka.
+ This is mainly useful for the dynamic librdkafka librdkafka
+ (`librdkafka.so.1` or `librdkafka.1.dylib` on OSX).
+
+
+**NOTE**: Due to libsasl2/cyrus-sasl's dynamically loaded plugins, it is
+not possible for us to provide a self-contained static library with
+GSSAPI/Kerberos support.
+
+
+
+### The artifact pipeline
+
+We rely solely on CI systems to build our artifacts; no artifacts must be built
+on a non-CI system (e.g., someones work laptop, some random ec2 instance, etc).
+
+The reasons for this are:
+
+ 1. Reproducible builds: we want a well-defined environment that doesn't change
+ (too much) without notice and that we can rebuild artifacts on at a later
+ time if required.
+ 2. Security; these CI systems provide at least some degree of security
+ guarantees, and they're managed by people who knows what they're doing
+ most of the time. This minimizes the risk for an artifact to be silently
+ compromised due to the developer's laptop being hacked.
+ 3. Logs; we have build logs for all artifacts, which contains checksums.
+ This way we can know how an artifact was built, what features were enabled
+ and what versions of dependencies were used, as well as know that an
+ artifact has not been tampered with after leaving the CI system.
+
+
+By default the CI jobs are triggered by branch pushes and pull requests
+and contain a set of jobs to validate that the changes that were pushed does
+not break compilation or functionality (by running parts of the test suite).
+These jobs do not produce any artifacts.
+
+
+For the artifact pipeline there's tag builds, which are triggered by pushing a
+tag to the git repository.
+These tag builds will generate artifacts which are used by the same pipeline
+to create NuGet and static library packages, which are then uploaded to
+SemaphoreCI's project artifact store.
+
+Once a tag build pipeline is done, you can download the relevant packages
+from the Semaphore CI project artifact store.
+
+The NuGet package, `librdkafka.redist.<version>.nupkg`, needs to be
+manually uploaded to NuGet.
+
+The `librdkafka-static-bundle-<version>.tgz` static library bundle
+needs to be manually imported into the confluent-kafka-go client using the
+import script that resides in the Go client repository.
+
+
+**Note**: You will need a NuGet API key to upload nuget packages.
+
+
+See [nuget/nugetpackaging.py] and [nuget/staticpackaging.py] to see how
+packages are assembled from build artifacts.
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/alpine/build-alpine.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/alpine/build-alpine.sh
new file mode 100755
index 000000000..e6d2471c9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/alpine/build-alpine.sh
@@ -0,0 +1,38 @@
+#!/bin/sh
+#
+# Build librdkafka on Alpine.
+#
+
+set -x
+
+if [ "$1" = "--in-docker" ]; then
+ # Runs in docker, performs the actual build.
+ shift
+
+ apk add bash curl gcc g++ make musl-dev linux-headers bsd-compat-headers git python3 perl patch
+
+ git clone /v /librdkafka
+
+ cd /librdkafka
+ ./configure --install-deps --disable-gssapi --disable-lz4-ext --enable-static $*
+ make -j
+ examples/rdkafka_example -X builtin.features
+ CI=true make -C tests run_local_quick
+
+ # Create a tarball in artifacts/
+ cd src
+ ldd librdkafka.so.1
+ tar cvzf /v/artifacts/alpine-librdkafka.tgz librdkafka.so.1 librdkafka*.a rdkafka-static.pc
+ cd ../..
+
+else
+ # Runs on the host, simply spins up the in-docker build.
+ if [ ! -f configure.self ]; then
+ echo "Must be run from the top-level librdkafka dir"
+ exit 1
+ fi
+
+ mkdir -p artifacts
+
+ exec docker run -v $PWD:/v alpine:3.12 /v/packaging/alpine/$(basename $0) --in-docker $*
+fi
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/archlinux/PKGBUILD b/fluent-bit/lib/librdkafka-2.1.0/packaging/archlinux/PKGBUILD
new file mode 100644
index 000000000..7063d5cef
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/archlinux/PKGBUILD
@@ -0,0 +1,30 @@
+pkgname=librdkafka
+pkgver=1.0.0.RC5.r11.g3cf68480
+pkgrel=1
+pkgdesc='The Apache Kafka C/C++ client library'
+url='https://github.com/edenhill/librdkafka'
+license=('BSD')
+arch=('x86_64')
+source=('git+https://github.com/edenhill/librdkafka#branch=master')
+sha256sums=('SKIP')
+depends=(glibc libsasl lz4 openssl zlib zstd)
+makedepends=(bash git python3)
+
+pkgver() {
+ cd "$pkgname"
+ git describe --long --tags --match "v[0-9]*" | sed 's/^v//;s/\([^-]*-g\)/r\1/;s/-/./g'
+}
+
+build() {
+ cd "$pkgname"
+ ./configure --prefix=/usr
+ make
+}
+
+package() {
+ cd "$pkgname"
+ make install DESTDIR="$pkgdir"
+ for f in $(find -type f -name 'LICENSE*'); do
+ install -D -m0644 "$f" "$pkgdir/usr/share/licenses/$pkgname/$f"
+ done
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Config.cmake.in b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Config.cmake.in
new file mode 100644
index 000000000..8a6522b06
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Config.cmake.in
@@ -0,0 +1,37 @@
+@PACKAGE_INIT@
+
+include(CMakeFindDependencyMacro)
+
+if(@WITH_ZLIB@)
+ find_dependency(ZLIB)
+endif()
+
+if(@WITH_CURL@)
+ find_dependency(CURL)
+endif()
+
+if(@WITH_ZSTD@)
+ find_library(ZSTD zstd)
+ if(NOT ZSTD)
+ message(ERROR "ZSTD library not found!")
+ else()
+ message(STATUS "Found ZSTD: " ${ZSTD})
+ endif()
+endif()
+
+if(@WITH_SSL@)
+ if(@WITH_BUNDLED_SSL@)
+ # TODO: custom SSL library should be installed
+ else()
+ find_dependency(OpenSSL)
+ endif()
+endif()
+
+if(@WITH_LZ4_EXT@)
+ find_dependency(LZ4)
+endif()
+
+find_dependency(Threads)
+
+include("${CMAKE_CURRENT_LIST_DIR}/@targets_export_name@.cmake")
+check_required_components("@PROJECT_NAME@")
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindLZ4.cmake b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindLZ4.cmake
new file mode 100644
index 000000000..594c4290c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindLZ4.cmake
@@ -0,0 +1,38 @@
+find_path(LZ4_INCLUDE_DIR
+ NAMES lz4.h
+ DOC "lz4 include directory")
+mark_as_advanced(LZ4_INCLUDE_DIR)
+find_library(LZ4_LIBRARY
+ NAMES lz4
+ DOC "lz4 library")
+mark_as_advanced(LZ4_LIBRARY)
+
+if (LZ4_INCLUDE_DIR)
+ file(STRINGS "${LZ4_INCLUDE_DIR}/lz4.h" _lz4_version_lines
+ REGEX "#define[ \t]+LZ4_VERSION_(MAJOR|MINOR|RELEASE)")
+ string(REGEX REPLACE ".*LZ4_VERSION_MAJOR *\([0-9]*\).*" "\\1" _lz4_version_major "${_lz4_version_lines}")
+ string(REGEX REPLACE ".*LZ4_VERSION_MINOR *\([0-9]*\).*" "\\1" _lz4_version_minor "${_lz4_version_lines}")
+ string(REGEX REPLACE ".*LZ4_VERSION_RELEASE *\([0-9]*\).*" "\\1" _lz4_version_release "${_lz4_version_lines}")
+ set(LZ4_VERSION "${_lz4_version_major}.${_lz4_version_minor}.${_lz4_version_release}")
+ unset(_lz4_version_major)
+ unset(_lz4_version_minor)
+ unset(_lz4_version_release)
+ unset(_lz4_version_lines)
+endif ()
+
+include(FindPackageHandleStandardArgs)
+find_package_handle_standard_args(LZ4
+ REQUIRED_VARS LZ4_LIBRARY LZ4_INCLUDE_DIR
+ VERSION_VAR LZ4_VERSION)
+
+if (LZ4_FOUND)
+ set(LZ4_INCLUDE_DIRS "${LZ4_INCLUDE_DIR}")
+ set(LZ4_LIBRARIES "${LZ4_LIBRARY}")
+
+ if (NOT TARGET LZ4::LZ4)
+ add_library(LZ4::LZ4 UNKNOWN IMPORTED)
+ set_target_properties(LZ4::LZ4 PROPERTIES
+ IMPORTED_LOCATION "${LZ4_LIBRARY}"
+ INTERFACE_INCLUDE_DIRECTORIES "${LZ4_INCLUDE_DIR}")
+ endif ()
+endif ()
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindZSTD.cmake b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindZSTD.cmake
new file mode 100644
index 000000000..7de137e0f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/FindZSTD.cmake
@@ -0,0 +1,27 @@
+#
+# - Try to find Facebook zstd library
+# This will define
+# ZSTD_FOUND
+# ZSTD_INCLUDE_DIR
+# ZSTD_LIBRARY
+#
+
+find_path(ZSTD_INCLUDE_DIR NAMES zstd.h)
+
+find_library(ZSTD_LIBRARY_DEBUG NAMES zstdd zstd_staticd)
+find_library(ZSTD_LIBRARY_RELEASE NAMES zstd zstd_static)
+
+include(SelectLibraryConfigurations)
+SELECT_LIBRARY_CONFIGURATIONS(ZSTD)
+
+include(FindPackageHandleStandardArgs)
+FIND_PACKAGE_HANDLE_STANDARD_ARGS(
+ ZSTD DEFAULT_MSG
+ ZSTD_LIBRARY ZSTD_INCLUDE_DIR
+)
+
+if (ZSTD_FOUND)
+ message(STATUS "Found Zstd: ${ZSTD_LIBRARY}")
+endif()
+
+mark_as_advanced(ZSTD_INCLUDE_DIR ZSTD_LIBRARY)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/LICENSE.FindZstd b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/LICENSE.FindZstd
new file mode 100644
index 000000000..9561f469b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/Modules/LICENSE.FindZstd
@@ -0,0 +1,178 @@
+FindZstd.cmake: git@github.com:facebook/folly.git 87f1a403b49552dae75ae94c8610dd5979913477
+
+ Apache License
+ Version 2.0, January 2004
+ http://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/README.md b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/README.md
new file mode 100644
index 000000000..47ad2cb63
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/README.md
@@ -0,0 +1,38 @@
+# Build librdkafka with cmake
+
+The cmake build mode is experimental and not officially supported,
+the community is asked to maintain and support this mode through PRs.
+
+Set up build environment (from top-level librdkafka directory):
+
+ $ cmake -H. -B_cmake_build
+
+On MacOSX and OpenSSL from Homebrew you might need to do:
+
+ $ cmake -H. -B_cmake_build -DOPENSSL_ROOT_DIR=/usr/local/opt/openssl
+
+
+Build the library:
+
+ $ cmake --build _cmake_build
+
+If you want to build static library:
+
+ $ cmake --build _cmake_build -DRDKAFKA_BUILD_STATIC=1
+
+
+Run (local) tests:
+
+ $ (cd _cmake_build && ctest -VV -R RdKafkaTestBrokerLess)
+
+
+Install library:
+
+ $ cmake --build _cmake_build --target install
+
+
+If you use librdkafka as submodule in cmake project and want static link of librdkafka:
+
+ set(RDKAFKA_BUILD_STATIC ON CACHE BOOL "")
+ add_subdirectory(librdkafka)
+ target_link_libraries(your_library_or_executable rdkafka)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/config.h.in b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/config.h.in
new file mode 100644
index 000000000..9e356c5f9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/config.h.in
@@ -0,0 +1,52 @@
+#cmakedefine01 WITHOUT_OPTIMIZATION
+#cmakedefine01 ENABLE_DEVEL
+#cmakedefine01 ENABLE_REFCNT_DEBUG
+
+#cmakedefine01 HAVE_ATOMICS_32
+#cmakedefine01 HAVE_ATOMICS_32_SYNC
+
+#if (HAVE_ATOMICS_32)
+# if (HAVE_ATOMICS_32_SYNC)
+# define ATOMIC_OP32(OP1,OP2,PTR,VAL) __sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)
+# else
+# define ATOMIC_OP32(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
+# endif
+#endif
+
+#cmakedefine01 HAVE_ATOMICS_64
+#cmakedefine01 HAVE_ATOMICS_64_SYNC
+
+#if (HAVE_ATOMICS_64)
+# if (HAVE_ATOMICS_64_SYNC)
+# define ATOMIC_OP64(OP1,OP2,PTR,VAL) __sync_ ## OP1 ## _and_ ## OP2(PTR, VAL)
+# else
+# define ATOMIC_OP64(OP1,OP2,PTR,VAL) __atomic_ ## OP1 ## _ ## OP2(PTR, VAL, __ATOMIC_SEQ_CST)
+# endif
+#endif
+
+#cmakedefine01 WITH_PKGCONFIG
+#cmakedefine01 WITH_HDRHISTOGRAM
+#cmakedefine01 WITH_ZLIB
+#cmakedefine01 WITH_CURL
+#cmakedefine01 WITH_OAUTHBEARER_OIDC
+#cmakedefine01 WITH_ZSTD
+#cmakedefine01 WITH_LIBDL
+#cmakedefine01 WITH_PLUGINS
+#define WITH_SNAPPY 1
+#define WITH_SOCKEM 1
+#cmakedefine01 WITH_SSL
+#cmakedefine01 WITH_SASL
+#cmakedefine01 WITH_SASL_SCRAM
+#cmakedefine01 WITH_SASL_OAUTHBEARER
+#cmakedefine01 WITH_SASL_CYRUS
+#cmakedefine01 WITH_LZ4_EXT
+#cmakedefine01 HAVE_REGEX
+#cmakedefine01 HAVE_STRNDUP
+#cmakedefine01 HAVE_RAND_R
+#cmakedefine01 HAVE_PTHREAD_SETNAME_GNU
+#cmakedefine01 HAVE_PTHREAD_SETNAME_DARWIN
+#cmakedefine01 HAVE_PTHREAD_SETNAME_FREEBSD
+#cmakedefine01 WITH_C11THREADS
+#cmakedefine01 WITH_CRC32C_HW
+#define SOLIB_EXT "${CMAKE_SHARED_LIBRARY_SUFFIX}"
+#define BUILT_WITH "${BUILT_WITH}"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/parseversion.cmake b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/parseversion.cmake
new file mode 100644
index 000000000..592e8df54
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/parseversion.cmake
@@ -0,0 +1,60 @@
+# hex2dec(<out-var> <input>):
+# Convert a hexadecimal value <input> to decimal and write the result
+# to <out-var>.
+macro(hex2dec var val)
+ set(${var} 0)
+
+ set(hex2dec_idx 0)
+ string(LENGTH "${val}" hex2dec_len)
+
+ while(hex2dec_idx LESS hex2dec_len)
+ string(SUBSTRING ${val} ${hex2dec_idx} 1 hex2dec_char)
+
+ if(hex2dec_char MATCHES "[0-9]")
+ set(hex2dec_char ${hex2dec_char})
+ elseif(hex2dec_char MATCHES "[aA]")
+ set(hex2dec_char 10)
+ elseif(hex2dec_char MATCHES "[bB]")
+ set(hex2dec_char 11)
+ elseif(hex2dec_char MATCHES "[cC]")
+ set(hex2dec_char 12)
+ elseif(hex2dec_char MATCHES "[dD]")
+ set(hex2dec_char 13)
+ elseif(hex2dec_char MATCHES "[eE]")
+ set(hex2dec_char 14)
+ elseif(hex2dec_char MATCHES "[fF]")
+ set(hex2dec_char 15)
+ else()
+ message(FATAL_ERROR "Invalid format for hexidecimal character: " ${hex2dec_char})
+ endif()
+
+ math(EXPR hex2dec_char "${hex2dec_char} << ((${hex2dec_len}-${hex2dec_idx}-1)*4)")
+ math(EXPR ${var} "${${var}}+${hex2dec_char}")
+ math(EXPR hex2dec_idx "${hex2dec_idx}+1")
+ endwhile()
+endmacro(hex2dec)
+
+# parseversion(<filepath>):
+# Parse the file given by <filepath> for the RD_KAFKA_VERSION constant
+# and convert the hex value to decimal version numbers.
+# Creates the following CMake variables:
+# * RDKAFKA_VERSION
+# * RDKAFKA_VERSION_MAJOR
+# * RDKAFKA_VERSION_MINOR
+# * RDKAFKA_VERSION_REVISION
+# * RDKAFKA_VERSION_PRERELEASE
+macro(parseversion path)
+ file(STRINGS ${path} rdkafka_version_def REGEX "#define *RD_KAFKA_VERSION *\(0x[a-f0-9]*\)\.*")
+ string(REGEX REPLACE "#define *RD_KAFKA_VERSION *0x" "" rdkafka_version_hex ${rdkafka_version_def})
+
+ string(SUBSTRING ${rdkafka_version_hex} 0 2 rdkafka_version_major_hex)
+ string(SUBSTRING ${rdkafka_version_hex} 2 2 rdkafka_version_minor_hex)
+ string(SUBSTRING ${rdkafka_version_hex} 4 2 rdkafka_version_revision_hex)
+ string(SUBSTRING ${rdkafka_version_hex} 6 2 rdkafka_version_prerelease_hex)
+
+ hex2dec(RDKAFKA_VERSION_MAJOR ${rdkafka_version_major_hex})
+ hex2dec(RDKAFKA_VERSION_MINOR ${rdkafka_version_minor_hex})
+ hex2dec(RDKAFKA_VERSION_REVISION ${rdkafka_version_revision_hex})
+ hex2dec(RDKAFKA_VERSION_PRERELEASE ${rdkafka_version_prerelease_hex})
+ set(RDKAFKA_VERSION "${RDKAFKA_VERSION_MAJOR}.${RDKAFKA_VERSION_MINOR}.${RDKAFKA_VERSION_REVISION}")
+endmacro(parseversion)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/rdkafka.pc.in b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/rdkafka.pc.in
new file mode 100644
index 000000000..0eb17e856
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/rdkafka.pc.in
@@ -0,0 +1,12 @@
+prefix=@CMAKE_INSTALL_PREFIX@
+exec_prefix=${prefix}
+includedir=${prefix}/include
+libdir=${prefix}/lib
+
+Name: @PKG_CONFIG_NAME@
+Description: @PKG_CONFIG_DESCRIPTION@
+Version: @PKG_CONFIG_VERSION@
+Requires: @PKG_CONFIG_REQUIRES@
+Cflags: @PKG_CONFIG_CFLAGS@
+Libs: @PKG_CONFIG_LIBS@
+Libs.private: @PKG_CONFIG_LIBS_PRIVATE@
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_32_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_32_test.c
new file mode 100644
index 000000000..b3373bb8b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_32_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int32_t foo(int32_t i) {
+ return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}
+
+int main() {
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_64_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_64_test.c
new file mode 100644
index 000000000..31922b85c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/atomic_64_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int64_t foo(int64_t i) {
+ return __atomic_add_fetch(&i, 1, __ATOMIC_SEQ_CST);
+}
+
+int main() {
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/c11threads_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/c11threads_test.c
new file mode 100644
index 000000000..31681ae61
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/c11threads_test.c
@@ -0,0 +1,14 @@
+#include <threads.h>
+
+static int start_func(void *arg) {
+ int iarg = *(int *)arg;
+ return iarg;
+}
+
+void main(void) {
+ thrd_t thr;
+ int arg = 1;
+ if (thrd_create(&thr, start_func, (void *)&arg) != thrd_success) {
+ ;
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/crc32c_hw_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/crc32c_hw_test.c
new file mode 100644
index 000000000..e80097803
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/crc32c_hw_test.c
@@ -0,0 +1,27 @@
+#include <inttypes.h>
+#include <stdio.h>
+#define LONGx1 "8192"
+#define LONGx2 "16384"
+void main(void) {
+ const char *n = "abcdefghijklmnopqrstuvwxyz0123456789";
+ uint64_t c0 = 0, c1 = 1, c2 = 2;
+ uint64_t s;
+ uint32_t eax = 1, ecx;
+ __asm__("cpuid" : "=c"(ecx) : "a"(eax) : "%ebx", "%edx");
+ __asm__(
+ "crc32b\t"
+ "(%1), %0"
+ : "=r"(c0)
+ : "r"(n), "0"(c0));
+ __asm__(
+ "crc32q\t"
+ "(%3), %0\n\t"
+ "crc32q\t" LONGx1
+ "(%3), %1\n\t"
+ "crc32q\t" LONGx2 "(%3), %2"
+ : "=r"(c0), "=r"(c1), "=r"(c2)
+ : "r"(n), "0"(c0), "1"(c1), "2"(c2));
+ s = c0 + c1 + c2;
+ printf("avoiding unused code removal by printing %d, %d, %d\n", (int)s,
+ (int)eax, (int)ecx);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/dlopen_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/dlopen_test.c
new file mode 100644
index 000000000..ecb478994
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/dlopen_test.c
@@ -0,0 +1,11 @@
+#include <string.h>
+#include <dlfcn.h>
+
+int main() {
+ void *h;
+ /* Try loading anything, we don't care if it works */
+ h = dlopen("__nothing_rdkafka.so", RTLD_NOW | RTLD_LOCAL);
+ if (h)
+ dlclose(h);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/libsasl2_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/libsasl2_test.c
new file mode 100644
index 000000000..3f3ab3409
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/libsasl2_test.c
@@ -0,0 +1,7 @@
+#include <string.h>
+#include <sasl/sasl.h>
+
+int main() {
+ sasl_done();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c
new file mode 100644
index 000000000..73e31e069
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_darwin_test.c
@@ -0,0 +1,6 @@
+#include <pthread.h>
+
+int main() {
+ pthread_setname_np("abc");
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c
new file mode 100644
index 000000000..329ace08e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_freebsd_test.c
@@ -0,0 +1,7 @@
+#include <pthread.h>
+#include <pthread_np.h>
+
+int main() {
+ pthread_set_name_np(pthread_self(), "abc");
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c
new file mode 100644
index 000000000..3be1b21bc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/pthread_setname_gnu_test.c
@@ -0,0 +1,5 @@
+#include <pthread.h>
+
+int main() {
+ return pthread_setname_np(pthread_self(), "abc");
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rand_r_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rand_r_test.c
new file mode 100644
index 000000000..be722d0a0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rand_r_test.c
@@ -0,0 +1,7 @@
+#include <stdlib.h>
+
+int main() {
+ unsigned int seed = 0xbeaf;
+ (void)rand_r(&seed);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rdkafka_setup.cmake b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rdkafka_setup.cmake
new file mode 100644
index 000000000..5ea7f7dc6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/rdkafka_setup.cmake
@@ -0,0 +1,122 @@
+try_compile(
+ HAVE_REGEX
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/regex_test.c"
+)
+
+try_compile(
+ HAVE_STRNDUP
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/strndup_test.c"
+)
+
+try_compile(
+ HAVE_RAND_R
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/rand_r_test.c"
+)
+
+try_compile(
+ HAVE_PTHREAD_SETNAME_GNU
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/pthread_setname_gnu_test.c"
+ COMPILE_DEFINITIONS "-D_GNU_SOURCE"
+ LINK_LIBRARIES "-lpthread"
+)
+
+try_compile(
+ HAVE_PTHREAD_SETNAME_DARWIN
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/pthread_setname_darwin_test.c"
+ COMPILE_DEFINITIONS "-D_DARWIN_C_SOURCE"
+ LINK_LIBRARIES "-lpthread"
+)
+
+try_compile(
+ HAVE_PTHREAD_SETNAME_FREEBSD
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/pthread_setname_freebsd_test.c"
+ LINK_LIBRARIES "-lpthread"
+)
+
+# Atomic 32 tests {
+set(LINK_ATOMIC NO)
+set(HAVE_ATOMICS_32 NO)
+set(HAVE_ATOMICS_32_SYNC NO)
+
+try_compile(
+ _atomics_32
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/atomic_32_test.c"
+)
+
+if(_atomics_32)
+ set(HAVE_ATOMICS_32 YES)
+else()
+ try_compile(
+ _atomics_32_lib
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/atomic_32_test.c"
+ LINK_LIBRARIES "-latomic"
+ )
+ if(_atomics_32_lib)
+ set(HAVE_ATOMICS_32 YES)
+ set(LINK_ATOMIC YES)
+ else()
+ try_compile(
+ HAVE_ATOMICS_32_SYNC
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/sync_32_test.c"
+ )
+ endif()
+endif()
+# }
+
+# Atomic 64 tests {
+set(HAVE_ATOMICS_64 NO)
+set(HAVE_ATOMICS_64_SYNC NO)
+
+try_compile(
+ _atomics_64
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/atomic_64_test.c"
+)
+
+if(_atomics_64)
+ set(HAVE_ATOMICS_64 YES)
+else()
+ try_compile(
+ _atomics_64_lib
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/atomic_64_test.c"
+ LINK_LIBRARIES "-latomic"
+ )
+ if(_atomics_64_lib)
+ set(HAVE_ATOMICS_64 YES)
+ set(LINK_ATOMIC YES)
+ else()
+ try_compile(
+ HAVE_ATOMICS_64_SYNC
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/sync_64_test.c"
+ )
+ endif()
+endif()
+# }
+
+# C11 threads
+try_compile(
+ WITH_C11THREADS
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/c11threads_test.c"
+ LINK_LIBRARIES "-pthread"
+)
+# }
+
+# CRC32C {
+try_compile(
+ WITH_CRC32C_HW
+ "${CMAKE_CURRENT_BINARY_DIR}/try_compile"
+ "${TRYCOMPILE_SRC_DIR}/crc32c_hw_test.c"
+)
+# }
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/regex_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/regex_test.c
new file mode 100644
index 000000000..329098d20
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/regex_test.c
@@ -0,0 +1,10 @@
+#include <stddef.h>
+#include <regex.h>
+
+int main() {
+ regcomp(NULL, NULL, 0);
+ regexec(NULL, NULL, 0, NULL, 0);
+ regerror(0, NULL, NULL, 0);
+ regfree(NULL);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/strndup_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/strndup_test.c
new file mode 100644
index 000000000..a10b74526
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/strndup_test.c
@@ -0,0 +1,5 @@
+#include <string.h>
+
+int main() {
+ return strndup("hi", 2) ? 0 : 1;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_32_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_32_test.c
new file mode 100644
index 000000000..2bc80ab4c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_32_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int32_t foo(int32_t i) {
+ return __sync_add_and_fetch(&i, 1);
+}
+
+int main() {
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_64_test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_64_test.c
new file mode 100644
index 000000000..4b6ad6d38
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cmake/try_compile/sync_64_test.c
@@ -0,0 +1,8 @@
+#include <inttypes.h>
+
+int64_t foo(int64_t i) {
+ return __sync_add_and_fetch(&i, 1);
+}
+
+int main() {
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/README.md b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/README.md
new file mode 100644
index 000000000..24a82f142
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/README.md
@@ -0,0 +1,14 @@
+# Confluent Platform package verification
+
+This small set of scripts verifies the librdkafka packages that
+are part of the Confluent Platform.
+
+The base_url is the http S3 bucket path to the a PR job, or similar.
+
+## How to use
+
+ $ ./verify-packages.sh 5.3 https://thes3bucketpath/X/Y
+
+
+Requires docker and patience.
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/check_features.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/check_features.c
new file mode 100644
index 000000000..4229402fd
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/check_features.c
@@ -0,0 +1,64 @@
+#include <stdio.h>
+#include <string.h>
+#include <librdkafka/rdkafka.h>
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ char buf[512];
+ size_t sz = sizeof(buf);
+ rd_kafka_conf_res_t res;
+ static const char *expected_features = "ssl,sasl_gssapi,lz4,zstd";
+ char errstr[512];
+ int i;
+ int failures = 0;
+
+ printf("librdkafka %s (0x%x, define: 0x%x)\n", rd_kafka_version_str(),
+ rd_kafka_version(), RD_KAFKA_VERSION);
+
+ if (argc > 1 && !(argc & 1)) {
+ printf("Usage: %s [config.property config-value ..]\n",
+ argv[0]);
+ return 1;
+ }
+
+ conf = rd_kafka_conf_new();
+ res = rd_kafka_conf_get(conf, "builtin.features", buf, &sz);
+
+ if (res != RD_KAFKA_CONF_OK) {
+ printf("ERROR: conf_get failed: %d\n", res);
+ return 1;
+ }
+
+ printf("builtin.features: %s\n", buf);
+
+ /* librdkafka allows checking for expected features
+ * by setting the corresponding feature flags in builtin.features,
+ * which will return an error if one or more flags are not enabled. */
+ if (rd_kafka_conf_set(conf, "builtin.features", expected_features,
+ errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ printf(
+ "ERROR: expected at least features: %s\n"
+ "got error: %s\n",
+ expected_features, errstr);
+ failures++;
+ }
+
+ printf("all expected features matched: %s\n", expected_features);
+
+ /* Apply config from argv key value pairs */
+ for (i = 1; i + 1 < argc; i += 2) {
+ printf("verifying config %s=%s\n", argv[i], argv[i + 1]);
+ if (rd_kafka_conf_set(conf, argv[i], argv[i + 1], errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ printf("ERROR: failed to set %s=%s: %s\n", argv[i],
+ argv[i + 1], errstr);
+ failures++;
+ }
+ }
+
+ rd_kafka_conf_destroy(conf);
+
+ printf("%d failures\n", failures);
+
+ return !!failures;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-deb.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-deb.sh
new file mode 100755
index 000000000..1350d0655
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-deb.sh
@@ -0,0 +1,34 @@
+#!/bin/bash
+#
+
+set -e
+
+cpver=$1
+base_url=$2
+
+if [[ -z $base_url ]]; then
+ echo "Usage: $0 <cp-base-ver> <base_url>"
+ exit 1
+fi
+
+apt-get update
+apt-get install -y apt-transport-https wget
+
+wget -qO - ${base_url}/deb/${cpver}/archive.key | apt-key add -
+
+
+cat >/etc/apt/sources.list.d/Confluent.list <<EOF
+deb [arch=amd64] $base_url/deb/${cpver} stable main
+EOF
+
+apt-get update
+apt-get install -y librdkafka-dev gcc
+
+gcc /v/check_features.c -o /tmp/check_features -lrdkafka
+
+/tmp/check_features
+
+# Verify plugins
+apt-get install -y confluent-librdkafka-plugins
+
+/tmp/check_features plugin.library.paths monitoring-interceptor
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-packages.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-packages.sh
new file mode 100755
index 000000000..ecddbd558
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-packages.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+# Verifies RPM and DEB packages from Confluent Platform
+#
+
+cpver=$1
+base_url=$2
+
+if [[ -z $base_url ]]; then
+ echo "Usage: $0 <CP-M.m-version> <base-url>"
+ echo ""
+ echo " <CP-M.m-version> is the Major.minor version of CP, e.g., 5.3"
+ echo " <base-url> is the release base bucket URL"
+ exit 1
+fi
+
+thisdir="$( cd "$(dirname "$0")" ; pwd -P )"
+
+echo "#### Verifying RPM packages ####"
+docker run -v $thisdir:/v centos:7 /v/verify-rpm.sh $cpver $base_url
+rpm_status=$?
+
+echo "#### Verifying Debian packages ####"
+docker run -v $thisdir:/v ubuntu:16.04 /v/verify-deb.sh $cpver $base_url
+deb_status=$?
+
+
+if [[ $rpm_status == 0 ]]; then
+ echo "SUCCESS: RPM packages verified"
+else
+ echo "ERROR: RPM package verification failed"
+fi
+
+if [[ $deb_status == 0 ]]; then
+ echo "SUCCESS: Debian packages verified"
+else
+ echo "ERROR: Debian package verification failed"
+fi
+
+if [[ $deb_status != 0 || $rpm_status != 0 ]]; then
+ exit 1
+fi
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-rpm.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-rpm.sh
new file mode 100755
index 000000000..d7b3b1a14
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/cp/verify-rpm.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+
+set -e
+
+cpver=$1
+base_url=$2
+
+if [[ -z $base_url ]]; then
+ echo "Usage: $0 <cp-base-ver> <base_url>"
+ exit 1
+fi
+
+cat >/etc/yum.repos.d/Confluent.repo <<EOF
+[Confluent.dist]
+name=Confluent repository (dist)
+baseurl=$base_url/rpm/${cpver}/7
+gpgcheck=0
+gpgkey=$base_url/rpm/${cpver}/archive.key
+enabled=1
+[Confluent]
+name=Confluent repository
+baseurl=$base_url/rpm/${cpver}
+gpgcheck=1
+gpgkey=$base_url/rpm/${cpver}/archive.key
+enabled=1
+EOF
+
+yum install -y librdkafka-devel gcc
+
+gcc /v/check_features.c -o /tmp/check_features -lrdkafka
+
+/tmp/check_features
+
+# Verify plugins
+yum install -y confluent-librdkafka-plugins
+
+/tmp/check_features plugin.library.paths monitoring-interceptor
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/.gitignore
new file mode 100644
index 000000000..eb66d4d31
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/.gitignore
@@ -0,0 +1,6 @@
+*.log
+files
+librdkafka-dev
+librdkafka1-dbg
+librdkafka1
+tmp
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/changelog b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/changelog
new file mode 100644
index 000000000..c50cb5aa8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/changelog
@@ -0,0 +1,66 @@
+librdkafka (0.8.6-1) unstable; urgency=medium
+
+ * New upstream release.
+ * Backport upstream commit f6fd0da, adding --disable-silent-rules
+ compatibility support to mklove. (Closes: #788742)
+
+ -- Faidon Liambotis <paravoid@debian.org> Sun, 19 Jul 2015 01:36:18 +0300
+
+librdkafka (0.8.5-2) unstable; urgency=medium
+
+ * Install rdkafka.pc in the right, multiarch location. (Closes: #766759)
+
+ -- Faidon Liambotis <paravoid@debian.org> Sun, 26 Oct 2014 06:47:07 +0200
+
+librdkafka (0.8.5-1) unstable; urgency=medium
+
+ * New upstream release.
+ - Fixes kFreeBSD FTBFS.
+ * Ship rdkafka.pc pkg-config in librdkafka-dev.
+
+ -- Faidon Liambotis <paravoid@debian.org> Fri, 24 Oct 2014 18:03:22 +0300
+
+librdkafka (0.8.4-1) unstable; urgency=medium
+
+ * New upstream release, including a new build system.
+ - Add Build-Depends on perl, required by configure.
+ - Support multiarch library paths.
+ - Better detection of architecture atomic builtins, supporting more
+ architectures. (Closes: #739930)
+ - Various portability bugs fixed. (Closes: #730506)
+ - Update debian/librdkafka1.symbols.
+ * Convert to a multiarch package.
+ * Switch to Architecture: any, because of renewed upstream portability.
+ * Update debian/copyright to add src/ before Files: paths.
+ * Update Standards-Version to 3.9.6, no changes needed.
+ * Ship only the C library for now, not the new C++ library; the latter is
+ still in flux in some ways and will probably be shipped in a separate
+ package in a future release.
+
+ -- Faidon Liambotis <paravoid@debian.org> Wed, 22 Oct 2014 23:57:24 +0300
+
+librdkafka (0.8.3-1) unstable; urgency=medium
+
+ * New upstream release.
+ - Multiple internal symbols hidden; breaks ABI without a SONAME bump, but
+ these were internal and should not break any applications, packaged or
+ not.
+ * Update Standards-Version to 3.9.5, no changes needed.
+
+ -- Faidon Liambotis <paravoid@debian.org> Tue, 18 Feb 2014 02:21:43 +0200
+
+librdkafka (0.8.1-1) unstable; urgency=medium
+
+ * New upstream release.
+ - Multiple fixes to FTBFS on various architectures. (Closes: #730506)
+ - Remove dh_auto_clean override, fixed upstream.
+ * Limit the set of architectures: upstream currently relies on 64-bit atomic
+ operations that several Debian architectures do not support.
+
+ -- Faidon Liambotis <paravoid@debian.org> Thu, 05 Dec 2013 16:53:28 +0200
+
+librdkafka (0.8.0-1) unstable; urgency=low
+
+ * Initial release. (Closes: #710271)
+
+ -- Faidon Liambotis <paravoid@debian.org> Mon, 04 Nov 2013 16:50:07 +0200
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/compat b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/compat
new file mode 100644
index 000000000..ec635144f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/compat
@@ -0,0 +1 @@
+9
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/control b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/control
new file mode 100644
index 000000000..510db8f23
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/control
@@ -0,0 +1,49 @@
+Source: librdkafka
+Priority: optional
+Maintainer: Faidon Liambotis <paravoid@debian.org>
+Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python3
+Standards-Version: 3.9.6
+Section: libs
+Homepage: https://github.com/edenhill/librdkafka
+Vcs-Git: git://github.com/edenhill/librdkafka.git -b debian
+Vcs-Browser: https://github.com/edenhill/librdkafka/tree/debian
+
+Package: librdkafka1
+Architecture: any
+Multi-Arch: same
+Depends: ${shlibs:Depends}, ${misc:Depends}
+Description: library implementing the Apache Kafka protocol
+ librdkafka is a C implementation of the Apache Kafka protocol. It currently
+ implements the 0.8 version of the protocol and can be used to develop both
+ Producers and Consumers.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+
+Package: librdkafka-dev
+Section: libdevel
+Architecture: any
+Multi-Arch: same
+Depends: librdkafka1 (= ${binary:Version}), ${misc:Depends}
+Description: library implementing the Apache Kafka protocol (development headers)
+ librdkafka is a C implementation of the Apache Kafka protocol. It currently
+ implements the 0.8 version of the protocol and can be used to develop both
+ Producers and Consumers.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the development headers.
+
+Package: librdkafka1-dbg
+Section: debug
+Priority: extra
+Architecture: any
+Multi-Arch: same
+Depends: librdkafka1 (= ${binary:Version}), ${misc:Depends}
+Description: library implementing the Apache Kafka protocol (debugging symbols)
+ librdkafka is a C implementation of the Apache Kafka protocol. It currently
+ implements the 0.8 version of the protocol and can be used to develop both
+ Producers and Consumers.
+ .
+ More information about Apache Kafka can be found at http://kafka.apache.org/
+ .
+ This package contains the debugging symbols.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/copyright b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/copyright
new file mode 100644
index 000000000..20885d9f3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/copyright
@@ -0,0 +1,84 @@
+Format: http://www.debian.org/doc/packaging-manuals/copyright-format/1.0/
+Upstream-Name: librdkafka
+Source: https://github.com/edenhill/librdkafka
+
+License: BSD-2-clause
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are met:
+ .
+ 1. Redistributions of source code must retain the above copyright notice,
+ this list of conditions and the following disclaimer.
+ 2. Redistributions in binary form must reproduce the above copyright notice,
+ this list of conditions and the following disclaimer in the documentation
+ and/or other materials provided with the distribution.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ POSSIBILITY OF SUCH DAMAGE.
+
+Files: *
+Copyright: 2012-2015, Magnus Edenhill
+License: BSD-2-clause
+
+Files: src/rdcrc32.c src/rdcrc32.h
+Copyright: 2006-2012, Thomas Pircher <tehpeh@gmx.net>
+License: MIT
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+ .
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+ .
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+
+Files: src/snappy.c src/snappy.h src/snappy_compat.h
+Copyright: 2005, Google Inc.
+ 2011, Intel Corporation
+License: BSD-3-clause
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+ .
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+ * Neither the name of Google Inc. nor the names of its
+ contributors may be used to endorse or promote products derived from
+ this software without specific prior written permission.
+ .
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+Files: debian/*
+Copyright: 2013 Faidon Liambotis <paravoid@debian.org>
+License: BSD-2-clause
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/docs b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/docs
new file mode 100644
index 000000000..0b76c34c4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/docs
@@ -0,0 +1,5 @@
+README.md
+INTRODUCTION.md
+CONFIGURATION.md
+STATISTICS.md
+CHANGELOG.md \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/gbp.conf b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/gbp.conf
new file mode 100644
index 000000000..b2a0f02e3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/gbp.conf
@@ -0,0 +1,9 @@
+[buildpackage]
+upstream-tree=tag
+upstream-branch=master
+debian-branch=debian
+upstream-tag=%(version)s
+debian-tag=debian/%(version)s
+no-create-orig = True
+tarball-dir = ../tarballs
+export-dir = ../build-area
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.dirs b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.dirs
new file mode 100644
index 000000000..44188162e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.dirs
@@ -0,0 +1,2 @@
+usr/lib
+usr/include
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.examples b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.examples
new file mode 100644
index 000000000..b45032efe
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.examples
@@ -0,0 +1,2 @@
+examples/rdkafka_example.c
+examples/rdkafka_performance.c
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.install b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.install
new file mode 100644
index 000000000..478f660f5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.install
@@ -0,0 +1,6 @@
+usr/include/*/rdkafka.h
+usr/include/*/rdkafkacpp.h
+usr/lib/*/librdkafka.a
+usr/lib/*/librdkafka.so
+usr/lib/*/librdkafka++.a
+usr/lib/*/librdkafka++.so
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.substvars b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.substvars
new file mode 100644
index 000000000..abd3ebebc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka-dev.substvars
@@ -0,0 +1 @@
+misc:Depends=
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka.dsc b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka.dsc
new file mode 100644
index 000000000..447b9e656
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka.dsc
@@ -0,0 +1,16 @@
+Format: 3.0 (quilt)
+Source: librdkafka
+Binary: librdkafka1, librdkafka-dev, librdkafka1-dbg
+Architecture: any
+Version: 0.9.1-1pre1
+Maintainer: Magnus Edenhill <librdkafka@edenhill.se>
+Homepage: https://github.com/edenhill/librdkafka
+Standards-Version: 3.9.6
+Vcs-Browser: https://github.com/edenhill/librdkafka/tree/master
+Vcs-Git: git://github.com/edenhill/librdkafka.git -b master
+Build-Depends: debhelper (>= 9), zlib1g-dev, libssl-dev, libsasl2-dev, python3
+Package-List:
+ librdkafka-dev deb libdevel optional arch=any
+ librdkafka1 deb libs optional arch=any
+ librdkafka1-dbg deb debug extra arch=any
+Original-Maintainer: Faidon Liambotis <paravoid@debian.org>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1-dbg.substvars b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1-dbg.substvars
new file mode 100644
index 000000000..abd3ebebc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1-dbg.substvars
@@ -0,0 +1 @@
+misc:Depends=
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.dirs b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.dirs
new file mode 100644
index 000000000..68457717b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.dirs
@@ -0,0 +1 @@
+usr/lib
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.install b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.install
new file mode 100644
index 000000000..7e86e5f18
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.install
@@ -0,0 +1,2 @@
+usr/lib/*/librdkafka.so.*
+usr/lib/*/librdkafka++.so.*
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postinst.debhelper b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postinst.debhelper
new file mode 100644
index 000000000..3d89d3ef6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postinst.debhelper
@@ -0,0 +1,5 @@
+# Automatically added by dh_makeshlibs
+if [ "$1" = "configure" ]; then
+ ldconfig
+fi
+# End automatically added section
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postrm.debhelper b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postrm.debhelper
new file mode 100644
index 000000000..7f4404727
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.postrm.debhelper
@@ -0,0 +1,5 @@
+# Automatically added by dh_makeshlibs
+if [ "$1" = "remove" ]; then
+ ldconfig
+fi
+# End automatically added section
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.symbols b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.symbols
new file mode 100644
index 000000000..0ef576eb1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/librdkafka1.symbols
@@ -0,0 +1,64 @@
+librdkafka.so.1 librdkafka1 #MINVER#
+* Build-Depends-Package: librdkafka-dev
+ rd_kafka_brokers_add@Base 0.8.0
+ rd_kafka_conf_destroy@Base 0.8.0
+ rd_kafka_conf_dump@Base 0.8.3
+ rd_kafka_conf_dump_free@Base 0.8.3
+ rd_kafka_conf_dup@Base 0.8.3
+ rd_kafka_conf_new@Base 0.8.0
+ rd_kafka_conf_properties_show@Base 0.8.0
+ rd_kafka_conf_set@Base 0.8.0
+ rd_kafka_conf_set_dr_cb@Base 0.8.0
+ rd_kafka_conf_set_dr_msg_cb@Base 0.8.4
+ rd_kafka_conf_set_error_cb@Base 0.8.0
+ rd_kafka_conf_set_log_cb@Base 0.8.4
+ rd_kafka_conf_set_opaque@Base 0.8.0
+ rd_kafka_conf_set_open_cb@Base 0.8.4
+ rd_kafka_conf_set_socket_cb@Base 0.8.4
+ rd_kafka_conf_set_stats_cb@Base 0.8.0
+ rd_kafka_consume@Base 0.8.0
+ rd_kafka_consume_batch@Base 0.8.0
+ rd_kafka_consume_batch_queue@Base 0.8.4
+ rd_kafka_consume_callback@Base 0.8.0
+ rd_kafka_consume_callback_queue@Base 0.8.4
+ rd_kafka_consume_queue@Base 0.8.4
+ rd_kafka_consume_start@Base 0.8.0
+ rd_kafka_consume_start_queue@Base 0.8.4
+ rd_kafka_consume_stop@Base 0.8.0
+ rd_kafka_destroy@Base 0.8.0
+ rd_kafka_dump@Base 0.8.0
+ rd_kafka_err2str@Base 0.8.0
+ rd_kafka_errno2err@Base 0.8.3
+ rd_kafka_log_print@Base 0.8.0
+ rd_kafka_log_syslog@Base 0.8.0
+ rd_kafka_message_destroy@Base 0.8.0
+ rd_kafka_metadata@Base 0.8.4
+ rd_kafka_metadata_destroy@Base 0.8.4
+ rd_kafka_msg_partitioner_random@Base 0.8.0
+ rd_kafka_name@Base 0.8.0
+ rd_kafka_new@Base 0.8.0
+ rd_kafka_offset_store@Base 0.8.3
+ rd_kafka_opaque@Base 0.8.4
+ rd_kafka_outq_len@Base 0.8.0
+ rd_kafka_poll@Base 0.8.0
+ rd_kafka_produce@Base 0.8.0
+ rd_kafka_produce_batch@Base 0.8.4
+ rd_kafka_queue_destroy@Base 0.8.4
+ rd_kafka_queue_new@Base 0.8.4
+ rd_kafka_set_log_level@Base 0.8.0
+ rd_kafka_set_logger@Base 0.8.0
+ rd_kafka_thread_cnt@Base 0.8.0
+ rd_kafka_topic_conf_destroy@Base 0.8.0
+ rd_kafka_topic_conf_dump@Base 0.8.3
+ rd_kafka_topic_conf_dup@Base 0.8.3
+ rd_kafka_topic_conf_new@Base 0.8.0
+ rd_kafka_topic_conf_set@Base 0.8.0
+ rd_kafka_topic_conf_set_opaque@Base 0.8.0
+ rd_kafka_topic_conf_set_partitioner_cb@Base 0.8.0
+ rd_kafka_topic_destroy@Base 0.8.0
+ rd_kafka_topic_name@Base 0.8.0
+ rd_kafka_topic_new@Base 0.8.0
+ rd_kafka_topic_partition_available@Base 0.8.0
+ rd_kafka_version@Base 0.8.1
+ rd_kafka_version_str@Base 0.8.1
+ rd_kafka_wait_destroyed@Base 0.8.0
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/rules b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/rules
new file mode 100755
index 000000000..a18c40d98
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/rules
@@ -0,0 +1,19 @@
+#!/usr/bin/make -f
+
+# Uncomment this to turn on verbose mode.
+#export DH_VERBOSE=1
+
+%:
+ dh $@
+
+override_dh_strip:
+ dh_strip --dbg-package=librdkafka1-dbg
+
+override_dh_auto_install:
+ dh_auto_install
+ install -D -m 0644 rdkafka.pc \
+ debian/librdkafka-dev/usr/lib/${DEB_HOST_MULTIARCH}/pkgconfig/rdkafka.pc
+ install -D -m 0644 rdkafka-static.pc \
+ debian/librdkafka-dev/usr/lib/${DEB_HOST_MULTIARCH}/pkgconfig/rdkafka-static.pc
+
+.PHONY: override_dh_strip override_dh_auth_install
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/source/format b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/source/format
new file mode 100644
index 000000000..163aaf8d8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/source/format
@@ -0,0 +1 @@
+3.0 (quilt)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/watch b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/watch
new file mode 100644
index 000000000..fc9aec86f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/debian/watch
@@ -0,0 +1,2 @@
+version=3
+http://github.com/edenhill/librdkafka/tags .*/(\d[\d\.]*)\.tar\.gz
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/get_version.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/get_version.py
new file mode 100755
index 000000000..fad1d9718
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/get_version.py
@@ -0,0 +1,21 @@
+#!/usr/bin/env python3
+
+import sys
+
+if len(sys.argv) != 2:
+ raise Exception('Usage: %s path/to/rdkafka.h' % sys.argv[0])
+
+kafka_h_file = sys.argv[1]
+f = open(kafka_h_file)
+for line in f:
+ if '#define RD_KAFKA_VERSION' in line:
+ version = line.split()[-1]
+ break
+f.close()
+
+major = int(version[2:4], 16)
+minor = int(version[4:6], 16)
+patch = int(version[6:8], 16)
+version = '.'.join(str(item) for item in (major, minor, patch))
+
+print(version)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/README.md b/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/README.md
new file mode 100644
index 000000000..a23a08537
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/README.md
@@ -0,0 +1,15 @@
+# Update the Homebrew librdkafka package version
+
+The `./brew-update-pr.sh` script in this directory updates the
+brew formula for librdkafka and pushes a PR to the homebrew-core repository.
+
+You should run it in two steps, first an implicit dry-run mode
+to check that things seem correct, and if that checks out a
+live upload mode which actually pushes the PR.
+
+ # Do a dry-run first, v0.11.0 is the librdkafka tag:
+ $ ./brew-update-pr.sh v0.11.0
+
+ # If everything looks okay, run the live upload mode:
+ $ ./brew-update-pr.sh --upload v0.11.0
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/brew-update-pr.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/brew-update-pr.sh
new file mode 100755
index 000000000..f756159cd
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/homebrew/brew-update-pr.sh
@@ -0,0 +1,31 @@
+#!/bin/bash
+#
+# Automatically pushes a PR to homebrew-core to update
+# the librdkafka version.
+#
+# Usage:
+# # Dry-run:
+# ./brew-update-pr.sh v0.11.0
+# # if everything looks good:
+# ./brew-update-pr.sh --upload v0.11.0
+#
+
+
+DRY_RUN="--dry-run"
+if [[ $1 == "--upload" ]]; then
+ DRY_RUN=
+ shift
+fi
+
+TAG=$1
+
+if [[ -z $TAG ]]; then
+ echo "Usage: $0 [--upload] <librdkafka-tag>"
+ exit 1
+fi
+
+set -eu
+
+brew bump-formula-pr $DRY_RUN --strict \
+ --url=https://github.com/edenhill/librdkafka/archive/${TAG}.tar.gz \
+ librdkafka
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh
new file mode 100644
index 000000000..a5162caad
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw-static.sh
@@ -0,0 +1,52 @@
+#!/bin/bash
+
+set -e
+
+cmake \
+ -G "MinGW Makefiles" \
+ -D CMAKE_INSTALL_PREFIX="$PWD/dest/" \
+ -D RDKAFKA_BUILD_STATIC=ON \
+ .
+
+$mingw64 mingw32-make
+$mingw64 mingw32-make install
+
+# Bundle all the static dependencies with the static lib we just built
+mkdir mergescratch
+pushd mergescratch
+cp /C/msys64/mingw64/lib/libzstd.a ./
+cp /C/msys64/mingw64/lib/libcrypto.a ./
+cp /C/msys64/mingw64/lib/liblz4.a ./
+cp /C/msys64/mingw64/lib/libssl.a ./
+cp /C/msys64/mingw64/lib/libz.a ./
+cp ../src/librdkafka.a ./
+
+# Have to rename because ar won't work with + in the name
+cp ../src-cpp/librdkafka++.a ./librdkafkacpp.a
+ar -M << EOF
+create librdkafka-static.a
+addlib librdkafka.a
+addlib libzstd.a
+addlib libcrypto.a
+addlib liblz4.a
+addlib libssl.a
+addlib libz.a
+save
+end
+EOF
+
+ar -M << EOF
+create librdkafkacpp-static.a
+addlib librdkafka-static.a
+addlib librdkafkacpp.a
+save
+end
+EOF
+
+strip -g ./librdkafka-static.a
+strip -g ./librdkafkacpp-static.a
+cp ./librdkafka-static.a ../dest/lib/
+cp ./librdkafkacpp-static.a ../dest/lib/librdkafka++-static.a
+popd
+rm -rf ./mergescratch
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw.sh
new file mode 100644
index 000000000..b0b81fe0a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/configure-build-msys2-mingw.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+
+set -e
+
+cmake \
+ -G "MinGW Makefiles" \
+ -D CMAKE_INSTALL_PREFIX="$PWD/dest/" \
+ -D WITHOUT_WIN32_CONFIG=ON \
+ -D RDKAFKA_BUILD_EXAMPLES=ON \
+ -D RDKAFKA_BUILD_TESTS=ON \
+ -D RDKAFKA_BUILD_STATIC=OFF \
+ -D CMAKE_WINDOWS_EXPORT_ALL_SYMBOLS=TRUE .
+
+$mingw64 mingw32-make
+$mingw64 mingw32-make install
+
+cd tests
+cp ../dest/bin/librdkafka.dll ./
+cp ../dest/bin/librdkafka++.dll ./
+CI=true ./test-runner.exe -l -Q
+cd ..
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/run-tests.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/run-tests.sh
new file mode 100644
index 000000000..6749add5d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/run-tests.sh
@@ -0,0 +1,6 @@
+#!/bin/bash
+
+set -e
+
+cd tests
+./test-runner.exe -l -Q -p1 0000
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/semaphoreci-build.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/semaphoreci-build.sh
new file mode 100644
index 000000000..378545b44
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/semaphoreci-build.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+
+set -ex
+
+if [[ $1 == "--static" ]]; then
+ linkage="static"
+ shift
+else
+linkage="dynamic"
+fi
+
+if [[ -z $1 ]]; then
+ echo "Usage: $0 [--static] <relative-path-to-output-librdkafka.tgz>"
+ exit 1
+fi
+
+archive="${PWD}/$1"
+
+source ./packaging/mingw-w64/travis-before-install.sh
+
+if [[ $linkage == "static" ]]; then
+ ./packaging/mingw-w64/configure-build-msys2-mingw-static.sh
+else
+ ./packaging/mingw-w64/configure-build-msys2-mingw.sh
+fi
+
+
+./packaging/mingw-w64/run-tests.sh
+
+pushd dest
+tar cvzf $archive .
+sha256sum $archive
+popd
+
+
+
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/travis-before-install.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/travis-before-install.sh
new file mode 100644
index 000000000..e75507f93
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/mingw-w64/travis-before-install.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+
+set -e
+
+export msys2='cmd //C RefreshEnv.cmd '
+export msys2+='& set MSYS=winsymlinks:nativestrict '
+export msys2+='& C:\\msys64\\msys2_shell.cmd -defterm -no-start'
+export mingw64="$msys2 -mingw64 -full-path -here -c "\"\$@"\" --"
+export msys2+=" -msys2 -c "\"\$@"\" --"
+
+# Have to update pacman first or choco upgrade will failure due to migration
+# to zstd instead of xz compression
+$msys2 pacman -Sy --noconfirm pacman
+
+## Install more MSYS2 packages from https://packages.msys2.org/base here
+$msys2 pacman --sync --noconfirm --needed mingw-w64-x86_64-gcc mingw-w64-x86_64-make mingw-w64-x86_64-cmake mingw-w64-x86_64-openssl mingw-w64-x86_64-lz4 mingw-w64-x86_64-zstd
+
+taskkill //IM gpg-agent.exe //F || true # https://travis-ci.community/t/4967
+export PATH=/C/msys64/mingw64/bin:$PATH
+export MAKE=mingw32-make # so that Autotools can find it
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/.gitignore
new file mode 100644
index 000000000..56919a155
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/.gitignore
@@ -0,0 +1,7 @@
+dl-*
+out-*
+*.nupkg
+*.tgz
+*.key
+*.pyc
+__pycache__
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/README.md b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/README.md
new file mode 100644
index 000000000..87b176930
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/README.md
@@ -0,0 +1,78 @@
+# Package assembly
+
+This set of scripts collect CI artifacts from a local directory or S3, and
+assembles them into a package structure defined by a packaging class in a
+staging directory.
+For the NugetPackage class the NuGet tool is then run (from within docker) on
+this staging directory to create a proper NuGet package (with all the metadata).
+While the StaticPackage class creates a tarball.
+
+The finalized nuget package maybe uploaded manually to NuGet.org
+
+## Requirements
+
+ * Requires Python 3
+ * Requires Docker
+ * (if --s3) Requires private S3 access keys for the librdkafka-ci-packages bucket.
+
+
+
+## Usage
+
+1. Trigger CI builds by creating and pushing a new release (candidate) tag
+ in the librdkafka repo. Make sure the tag is created on the correct branch.
+
+ $ git tag v0.11.0-RC3
+ $ git push origin v0.11.0-RC3
+
+2. Wait for CI builds to finish, monitor the builds here:
+
+ * https://travis-ci.org/edenhill/librdkafka
+ * https://ci.appveyor.com/project/edenhill/librdkafka
+
+Or if using SemaphoreCI, just have the packaging job depend on prior build jobs
+in the same pipeline.
+
+3. On a Linux host, run the release.py script to assemble the NuGet package
+
+ $ cd packaging/nuget
+ # Specify the tag
+ $ ./release.py v0.11.0-RC3
+ # Optionally, if the tag was moved and an exact sha is also required:
+ # $ ./release.py --sha <the-full-git-sha> v0.11.0-RC3
+
+4. If all artifacts were available the NuGet package will be built
+ and reside in the current directory as librdkafka.redist.<v-less-tag>.nupkg
+
+5. Test the package manually
+
+6. Upload the package to NuGet
+
+ * https://www.nuget.org/packages/manage/upload
+
+7. If you trust this process you can have release.py upload the package
+ automatically to NuGet after building it:
+
+ $ ./release.py --retries 100 --upload your-nuget-api.key v0.11.0-RC3
+
+
+
+## Other uses
+
+### Create static library bundles
+
+To create a bundle (tarball) of librdkafka self-contained static library
+builds, use the following command:
+
+ $ ./release.py --class StaticPackage v1.1.0
+
+
+### Clean up S3 bucket
+
+To clean up old non-release/non-RC builds from the S3 bucket, first check with:
+
+ $ AWS_PROFILE=.. ./cleanup-s3.py --age 360
+
+Verify that the listed objects should really be deleted, then delete:
+
+ $ AWS_PROFILE=.. ./cleanup-s3.py --age 360 --delete
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/artifact.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/artifact.py
new file mode 100755
index 000000000..c58e0c9c7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/artifact.py
@@ -0,0 +1,177 @@
+#!/usr/bin/env python3
+#
+#
+# Collects CI artifacts from S3 storage, downloading them
+# to a local directory.
+#
+# The artifacts' folder in the S3 bucket must have the following token
+# format:
+# <token>-[<value>]__ (repeat)
+#
+# Recognized tokens (unrecognized tokens are ignored):
+# p - project (e.g., "confluent-kafka-python")
+# bld - builder (e.g., "travis")
+# plat - platform ("osx", "linux", ..)
+# arch - arch ("x64", ..)
+# tag - git tag
+# sha - git sha
+# bid - builder's build-id
+# bldtype - Release, Debug (appveyor)
+#
+# Example:
+# p-confluent-kafka-python__bld-travis__plat-linux__tag-__sha-112130ce297656ea1c39e7c94c99286f95133a24__bid-271588764__/confluent_kafka-0.11.0-cp35-cp35m-manylinux1_x86_64.whl
+
+
+import re
+import os
+import boto3
+
+import packaging
+
+s3_bucket = 'librdkafka-ci-packages'
+dry_run = False
+
+
+class Artifact (object):
+ def __init__(self, arts, path, info=None):
+ self.path = path
+ # Remove unexpanded AppVeyor $(..) tokens from filename
+ self.fname = re.sub(r'\$\([^\)]+\)', '', os.path.basename(path))
+ slpath = os.path.join(os.path.dirname(path), self.fname)
+ if os.path.isfile(slpath):
+ # Already points to local file in correct location
+ self.lpath = slpath
+ else:
+ # Prepare download location in dlpath
+ self.lpath = os.path.join(arts.dlpath, slpath)
+
+ if info is None:
+ self.info = dict()
+ else:
+ # Assign the map and convert all keys to lower case
+ self.info = {k.lower(): v for k, v in info.items()}
+ # Rename values, e.g., 'plat':'linux' to 'plat':'debian'
+ for k, v in self.info.items():
+ rdict = packaging.rename_vals.get(k, None)
+ if rdict is not None:
+ self.info[k] = rdict.get(v, v)
+
+ # Score value for sorting
+ self.score = 0
+
+ # AppVeyor symbol builds are of less value
+ if self.fname.find('.symbols.') != -1:
+ self.score -= 10
+
+ self.arts = arts
+ arts.artifacts.append(self)
+
+ def __repr__(self):
+ return self.path
+
+ def __lt__(self, other):
+ return self.score < other.score
+
+ def download(self):
+ """ Download artifact from S3 and store in local directory .lpath.
+ If the artifact is already downloaded nothing is done. """
+ if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
+ return
+ print('Downloading %s -> %s' % (self.path, self.lpath))
+ if dry_run:
+ return
+ ldir = os.path.dirname(self.lpath)
+ if not os.path.isdir(ldir):
+ os.makedirs(ldir, 0o755)
+ self.arts.s3_bucket.download_file(self.path, self.lpath)
+
+
+class Artifacts (object):
+ def __init__(self, match, dlpath):
+ super(Artifacts, self).__init__()
+ self.match = match
+ self.artifacts = list()
+ # Download directory (make sure it ends with a path separator)
+ if not dlpath.endswith(os.path.sep):
+ dlpath = os.path.join(dlpath, '')
+ self.dlpath = dlpath
+ if not os.path.isdir(self.dlpath):
+ if not dry_run:
+ os.makedirs(self.dlpath, 0o755)
+
+ def collect_single(self, path, req_tag=True):
+ """ Collect single artifact, be it in S3 or locally.
+ :param: path string: S3 or local (relative) path
+ :param: req_tag bool: Require tag to match.
+ """
+
+ print('? %s' % path)
+
+ # For local files, strip download path.
+ # Also ignore any parent directories.
+ if path.startswith(self.dlpath):
+ folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))
+ else:
+ folder = os.path.basename(os.path.dirname(path))
+
+ # The folder contains the tokens needed to perform
+ # matching of project, gitref, etc.
+ rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)__', folder)
+ if rinfo is None or len(rinfo) == 0:
+ print('Incorrect folder/file name format for %s' % folder)
+ return None
+
+ info = dict(rinfo)
+
+ # Ignore AppVeyor Debug builds
+ if info.get('bldtype', '').lower() == 'debug':
+ print('Ignoring debug artifact %s' % folder)
+ return None
+
+ tag = info.get('tag', None)
+ if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
+ # AppVeyor doesn't substite $(APPVEYOR_REPO_TAG_NAME)
+ # with an empty value when not set, it leaves that token
+ # in the string - so translate that to no tag.
+ del info['tag']
+
+ # Match tag or sha to gitref
+ unmatched = list()
+ for m, v in self.match.items():
+ if m not in info or info[m] != v:
+ unmatched.append(m)
+
+ # Make sure all matches were satisfied, unless this is a
+ # common artifact.
+ if info.get('p', '') != 'common' and len(unmatched) > 0:
+ print(info)
+ print('%s: %s did not match %s' %
+ (info.get('p', None), folder, unmatched))
+ return None
+
+ return Artifact(self, path, info)
+
+ def collect_s3(self):
+ """ Collect and download build-artifacts from S3 based on
+ git reference """
+ print(
+ 'Collecting artifacts matching %s from S3 bucket %s' %
+ (self.match, s3_bucket))
+ self.s3 = boto3.resource('s3')
+ self.s3_bucket = self.s3.Bucket(s3_bucket)
+ self.s3_client = boto3.client('s3')
+ for item in self.s3_client.list_objects(
+ Bucket=s3_bucket, Prefix='librdkafka/').get('Contents'):
+ self.collect_single(item.get('Key'))
+
+ for a in self.artifacts:
+ a.download()
+
+ def collect_local(self, path, req_tag=True):
+ """ Collect artifacts from a local directory possibly previously
+ collected from s3 """
+ for f in [os.path.join(dp, f) for dp, dn,
+ filenames in os.walk(path) for f in filenames]:
+ if not os.path.isfile(f):
+ continue
+ self.collect_single(f, req_tag)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/cleanup-s3.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/cleanup-s3.py
new file mode 100755
index 000000000..2093af0c1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/cleanup-s3.py
@@ -0,0 +1,143 @@
+#!/usr/bin/env python3
+#
+# Clean up test builds from librdkafka's S3 bucket.
+# This also covers python builds.
+
+import re
+from datetime import datetime, timezone
+import boto3
+import argparse
+
+# Collects CI artifacts from S3 storage, downloading them
+# to a local directory, or collecting already downloaded artifacts from
+# local directory.
+#
+# The artifacts' folder in the S3 bucket must have the following token
+# format:
+# <token>-[<value>]__ (repeat)
+#
+# Recognized tokens (unrecognized tokens are ignored):
+# p - project (e.g., "confluent-kafka-python")
+# bld - builder (e.g., "travis")
+# plat - platform ("osx", "linux", ..)
+# arch - arch ("x64", ..)
+# tag - git tag
+# sha - git sha
+# bid - builder's build-id
+# bldtype - Release, Debug (appveyor)
+# lnk - std, static
+#
+# Example:
+# librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz
+
+
+s3_bucket = 'librdkafka-ci-packages'
+
+
+def may_delete(path):
+ """ Returns true if S3 object path is eligible for deletion, e.g.
+ has a non-release/rc tag. """
+
+ # The path contains the tokens needed to perform
+ # matching of project, gitref, etc.
+ rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)(?:__|$)', path)
+ if rinfo is None or len(rinfo) == 0:
+ print(f"Incorrect folder/file name format for {path}")
+ return False
+
+ info = dict(rinfo)
+
+ tag = info.get('tag', None)
+ if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
+ # AppVeyor doesn't substite $(APPVEYOR_REPO_TAG_NAME)
+ # with an empty value when not set, it leaves that token
+ # in the string - so translate that to no tag.
+ del info['tag']
+ tag = None
+
+ if tag is None:
+ return True
+
+ if re.match(r'^v?\d+\.\d+\.\d+(-?RC\d+)?$', tag,
+ flags=re.IGNORECASE) is None:
+ return True
+
+ return False
+
+
+def collect_s3(s3, min_age_days=60):
+ """ Collect artifacts from S3 """
+ now = datetime.now(timezone.utc)
+ eligible = []
+ totcnt = 0
+ # note: list_objects will return at most 1000 objects per call,
+ # use continuation token to read full list.
+ cont_token = None
+ more = True
+ while more:
+ if cont_token is not None:
+ res = s3.list_objects_v2(Bucket=s3_bucket,
+ ContinuationToken=cont_token)
+ else:
+ res = s3.list_objects_v2(Bucket=s3_bucket)
+
+ if res.get('IsTruncated') is True:
+ cont_token = res.get('NextContinuationToken')
+ else:
+ more = False
+
+ for item in res.get('Contents'):
+ totcnt += 1
+ age = (now - item.get('LastModified')).days
+ path = item.get('Key')
+ if age >= min_age_days and may_delete(path):
+ eligible.append(path)
+
+ return (eligible, totcnt)
+
+
+def chunk_list(lst, cnt):
+ """ Split list into lists of cnt """
+ for i in range(0, len(lst), cnt):
+ yield lst[i:i + cnt]
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--delete",
+ help="WARNING! Don't just check, actually delete "
+ "S3 objects.",
+ action="store_true")
+ parser.add_argument("--age", help="Minimum object age in days.",
+ type=int, default=360)
+
+ args = parser.parse_args()
+ dry_run = args.delete is not True
+ min_age_days = args.age
+
+ if dry_run:
+ op = "Eligible for deletion"
+ else:
+ op = "Deleting"
+
+ s3 = boto3.client('s3')
+
+ # Collect eligible artifacts
+ eligible, totcnt = collect_s3(s3, min_age_days=min_age_days)
+ print(f"{len(eligible)}/{totcnt} eligible artifacts to delete")
+
+ # Delete in chunks of 1000 (max what the S3 API can do)
+ for chunk in chunk_list(eligible, 1000):
+ print(op + ":\n" + '\n'.join(chunk))
+ if dry_run:
+ continue
+
+ res = s3.delete_objects(Bucket=s3_bucket,
+ Delete={
+ 'Objects': [{'Key': x} for x in chunk],
+ 'Quiet': True
+ })
+ errors = res.get('Errors', [])
+ if len(errors) > 0:
+ raise Exception(f"Delete failed: {errors}")
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip
new file mode 100644
index 000000000..9bc5e9fbc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr120.zip
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip
new file mode 100644
index 000000000..152938138
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-win32__bldtype-Release/msvcr140.zip
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip
new file mode 100644
index 000000000..3609c0385
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr120.zip
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip
new file mode 100644
index 000000000..b99e5ae5b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/common/p-common__plat-windows__arch-x64__bldtype-Release/msvcr140.zip
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nuget.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nuget.sh
new file mode 100755
index 000000000..032371231
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nuget.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+#
+# Front-end for nuget that runs nuget in a docker image.
+
+set -ex
+
+if [[ -f /.dockerenv ]]; then
+ echo "Inside docker"
+
+ pushd $(dirname $0)
+
+ nuget $*
+
+ popd
+
+else
+ echo "Running docker image"
+ docker run -v $(pwd):/io mono:latest /io/$0 $*
+fi
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nugetpackage.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nugetpackage.py
new file mode 100644
index 000000000..aea05ade0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/nugetpackage.py
@@ -0,0 +1,286 @@
+#!/usr/bin/env python3
+#
+# Create NuGet package
+#
+
+import os
+import tempfile
+import shutil
+import subprocess
+from packaging import Package, Mapping
+
+
+class NugetPackage (Package):
+ """ All platforms, archs, et.al, are bundled into one set of
+ NuGet output packages: "main", redist and symbols """
+
+ # See .semamphore/semaphore.yml for where these are built.
+ mappings = [
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'lnk': 'std'},
+ 'librdkafka.tgz',
+ './usr/local/include/librdkafka/rdkafka.h',
+ 'build/native/include/librdkafka/rdkafka.h'),
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'lnk': 'std'},
+ 'librdkafka.tgz',
+ './usr/local/include/librdkafka/rdkafkacpp.h',
+ 'build/native/include/librdkafka/rdkafkacpp.h'),
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'lnk': 'std'},
+ 'librdkafka.tgz',
+ './usr/local/include/librdkafka/rdkafka_mock.h',
+ 'build/native/include/librdkafka/rdkafka_mock.h'),
+
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'lnk': 'std'},
+ 'librdkafka.tgz',
+ './usr/local/share/doc/librdkafka/README.md',
+ 'README.md'),
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'lnk': 'std'},
+ 'librdkafka.tgz',
+ './usr/local/share/doc/librdkafka/CONFIGURATION.md',
+ 'CONFIGURATION.md'),
+ Mapping({'arch': 'x64',
+ 'plat': 'osx',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/share/doc/librdkafka/LICENSES.txt',
+ 'LICENSES.txt'),
+
+ # OSX x64
+ Mapping({'arch': 'x64',
+ 'plat': 'osx'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka.dylib',
+ 'runtimes/osx-x64/native/librdkafka.dylib'),
+ # OSX arm64
+ Mapping({'arch': 'arm64',
+ 'plat': 'osx'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka.1.dylib',
+ 'runtimes/osx-arm64/native/librdkafka.dylib'),
+
+ # Linux glibc centos6 x64 with GSSAPI
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'centos6',
+ 'lnk': 'std'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka.so.1',
+ 'runtimes/linux-x64/native/librdkafka.so'),
+ # Linux glibc centos6 x64 without GSSAPI (no external deps)
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'centos6',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka.so.1',
+ 'runtimes/linux-x64/native/centos6-librdkafka.so'),
+ # Linux glibc centos7 x64 with GSSAPI
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'centos7',
+ 'lnk': 'std'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka.so.1',
+ 'runtimes/linux-x64/native/centos7-librdkafka.so'),
+ # Linux glibc centos7 arm64 without GSSAPI (no external deps)
+ Mapping({'arch': 'arm64',
+ 'plat': 'linux',
+ 'dist': 'centos7',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka.so.1',
+ 'runtimes/linux-arm64/native/librdkafka.so'),
+
+ # Linux musl alpine x64 without GSSAPI (no external deps)
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'alpine',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka.so.1',
+ 'runtimes/linux-x64/native/alpine-librdkafka.so'),
+
+ # Common Win runtime
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'msvcr140.zip',
+ 'vcruntime140.dll',
+ 'runtimes/win-x64/native/vcruntime140.dll'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'msvcr140.zip',
+ 'msvcp140.dll', 'runtimes/win-x64/native/msvcp140.dll'),
+
+ # matches x64 librdkafka.redist.zip
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/x64/Release/librdkafka.dll',
+ 'runtimes/win-x64/native/librdkafka.dll'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/x64/Release/librdkafkacpp.dll',
+ 'runtimes/win-x64/native/librdkafkacpp.dll'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/x64/Release/libcrypto-3-x64.dll',
+ 'runtimes/win-x64/native/libcrypto-3-x64.dll'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/x64/Release/libssl-3-x64.dll',
+ 'runtimes/win-x64/native/libssl-3-x64.dll'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/x64/Release/zlib1.dll',
+ 'runtimes/win-x64/native/zlib1.dll'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/x64/Release/zstd.dll',
+ 'runtimes/win-x64/native/zstd.dll'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/x64/Release/libcurl.dll',
+ 'runtimes/win-x64/native/libcurl.dll'),
+ # matches x64 librdkafka.redist.zip, lib files
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/lib/v142/x64/Release/librdkafka.lib',
+ 'build/native/lib/win/x64/win-x64-Release/v142/librdkafka.lib' # noqa: E501
+ ),
+ Mapping({'arch': 'x64',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/lib/v142/x64/Release/librdkafkacpp.lib',
+ 'build/native/lib/win/x64/win-x64-Release/v142/librdkafkacpp.lib' # noqa: E501
+ ),
+
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'msvcr140.zip',
+ 'vcruntime140.dll',
+ 'runtimes/win-x86/native/vcruntime140.dll'),
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'msvcr140.zip',
+ 'msvcp140.dll', 'runtimes/win-x86/native/msvcp140.dll'),
+
+ # matches Win32 librdkafka.redist.zip
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/Win32/Release/librdkafka.dll',
+ 'runtimes/win-x86/native/librdkafka.dll'),
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/Win32/Release/librdkafkacpp.dll',
+ 'runtimes/win-x86/native/librdkafkacpp.dll'),
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/Win32/Release/libcrypto-3.dll',
+ 'runtimes/win-x86/native/libcrypto-3.dll'),
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/Win32/Release/libssl-3.dll',
+ 'runtimes/win-x86/native/libssl-3.dll'),
+
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/Win32/Release/zlib1.dll',
+ 'runtimes/win-x86/native/zlib1.dll'),
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/Win32/Release/zstd.dll',
+ 'runtimes/win-x86/native/zstd.dll'),
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/bin/v142/Win32/Release/libcurl.dll',
+ 'runtimes/win-x86/native/libcurl.dll'),
+
+ # matches Win32 librdkafka.redist.zip, lib files
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/lib/v142/Win32/Release/librdkafka.lib',
+ 'build/native/lib/win/x86/win-x86-Release/v142/librdkafka.lib' # noqa: E501
+ ),
+ Mapping({'arch': 'x86',
+ 'plat': 'win'},
+ 'librdkafka.redist*',
+ 'build/native/lib/v142/Win32/Release/librdkafkacpp.lib',
+ 'build/native/lib/win/x86/win-x86-Release/v142/librdkafkacpp.lib' # noqa: E501
+ )
+ ]
+
+ def __init__(self, version, arts):
+ if version.startswith('v'):
+ version = version[1:] # Strip v prefix
+ super(NugetPackage, self).__init__(version, arts)
+
+ def cleanup(self):
+ if os.path.isdir(self.stpath):
+ shutil.rmtree(self.stpath)
+
+ def build(self, buildtype):
+ """ Build single NuGet package for all its artifacts. """
+
+ # NuGet removes the prefixing v from the version.
+ vless_version = self.kv['version']
+ if vless_version[0] == 'v':
+ vless_version = vless_version[1:]
+
+ self.stpath = tempfile.mkdtemp(prefix="out-", suffix="-%s" % buildtype,
+ dir=".")
+
+ self.render('librdkafka.redist.nuspec')
+ self.copy_template('librdkafka.redist.targets',
+ destpath=os.path.join('build', 'native'))
+ self.copy_template('librdkafka.redist.props',
+ destpath='build')
+
+ # Generate template tokens for artifacts
+ for a in self.arts.artifacts:
+ if 'bldtype' not in a.info:
+ a.info['bldtype'] = 'release'
+
+ a.info['variant'] = '%s-%s-%s' % (a.info.get('plat'),
+ a.info.get('arch'),
+ a.info.get('bldtype'))
+ if 'toolset' not in a.info:
+ a.info['toolset'] = 'v142'
+
+ # Apply mappings and extract files
+ self.apply_mappings()
+
+ print('Tree extracted to %s' % self.stpath)
+
+ # After creating a bare-bone nupkg layout containing the artifacts
+ # and some spec and props files, call the 'nuget' utility to
+ # make a proper nupkg of it (with all the metadata files).
+ subprocess.check_call("./nuget.sh pack %s -BasePath '%s' -NonInteractive" % # noqa: E501
+ (os.path.join(self.stpath,
+ 'librdkafka.redist.nuspec'),
+ self.stpath), shell=True)
+
+ return 'librdkafka.redist.%s.nupkg' % vless_version
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/packaging.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/packaging.py
new file mode 100755
index 000000000..c4dab806d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/packaging.py
@@ -0,0 +1,448 @@
+#!/usr/bin/env python3
+#
+# Packaging script.
+# Assembles packages using CI artifacts.
+#
+
+import sys
+import re
+import os
+import shutil
+from fnmatch import fnmatch
+from string import Template
+from zfile import zfile
+import boto3
+import magic
+
+if sys.version_info[0] < 3:
+ from urllib import unquote as _unquote
+else:
+ from urllib.parse import unquote as _unquote
+
+
+def unquote(path):
+ # Removes URL escapes, and normalizes the path by removing ./.
+ path = _unquote(path)
+ if path[:2] == './':
+ return path[2:]
+ return path
+
+
+# Rename token values
+rename_vals = {'plat': {'windows': 'win'},
+ 'arch': {'x86_64': 'x64',
+ 'amd64': 'x64',
+ 'i386': 'x86',
+ 'win32': 'x86'}}
+
+# Filemagic arch mapping.
+# key is (plat, arch, file_extension), value is a compiled filemagic regex.
+# This is used to verify that an artifact has the expected file type.
+magic_patterns = {
+ ('win', 'x64', '.dll'): re.compile('PE32.*DLL.* x86-64, for MS Windows'),
+ ('win', 'x86', '.dll'):
+ re.compile('PE32.*DLL.* Intel 80386, for MS Windows'),
+ ('win', 'x64', '.lib'): re.compile('current ar archive'),
+ ('win', 'x86', '.lib'): re.compile('current ar archive'),
+ ('linux', 'x64', '.so'): re.compile('ELF 64.* x86-64'),
+ ('linux', 'arm64', '.so'): re.compile('ELF 64.* ARM aarch64'),
+ ('osx', 'x64', '.dylib'): re.compile('Mach-O 64.* x86_64'),
+ ('osx', 'arm64', '.dylib'): re.compile('Mach-O 64.*arm64')}
+
+magic = magic.Magic()
+
+
+def magic_mismatch(path, a):
+ """ Verify that the filemagic for \\p path matches for artifact \\p a.
+ Returns True if the magic file info does NOT match.
+ Returns False if no matching is needed or the magic matches. """
+ k = (a.info.get('plat', None), a.info.get('arch', None),
+ os.path.splitext(path)[1])
+ pattern = magic_patterns.get(k, None)
+ if pattern is None:
+ return False
+
+ minfo = magic.id_filename(path)
+ if not pattern.match(minfo):
+ print(
+ f"Warning: {path} magic \"{minfo}\" "
+ f"does not match expected {pattern} for key {k}")
+ return True
+
+ return False
+
+
+# Collects CI artifacts from S3 storage, downloading them
+# to a local directory, or collecting already downloaded artifacts from
+# local directory.
+#
+# The artifacts' folder in the S3 bucket must have the following token
+# format:
+# <token>-[<value>]__ (repeat)
+#
+# Recognized tokens (unrecognized tokens are ignored):
+# p - project (e.g., "confluent-kafka-python")
+# bld - builder (e.g., "travis")
+# plat - platform ("osx", "linux", ..)
+# dist - distro or runtime ("centos6", "mingw", "msvcr", "alpine", ..).
+# arch - arch ("x64", ..)
+# tag - git tag
+# sha - git sha
+# bid - builder's build-id
+# bldtype - Release, Debug (appveyor)
+# lnk - Linkage ("std", "static", "all" (both std and static))
+# extra - Extra build options, typically "gssapi" (for cyrus-sasl linking).
+
+#
+# Example:
+# librdkafka/p-librdkafka__bld-travis__plat-linux__arch-x64__tag-v0.0.62__sha-d051b2c19eb0c118991cd8bc5cf86d8e5e446cde__bid-1562.1/librdkafka.tar.gz
+
+
+class MissingArtifactError(Exception):
+ pass
+
+
+s3_bucket = 'librdkafka-ci-packages'
+dry_run = False
+
+
+class Artifact (object):
+ def __init__(self, arts, path, info=None):
+ self.path = path
+ # Remove unexpanded AppVeyor $(..) tokens from filename
+ self.fname = re.sub(r'\$\([^\)]+\)', '', os.path.basename(path))
+ slpath = os.path.join(os.path.dirname(path), self.fname)
+ if os.path.isfile(slpath):
+ # Already points to local file in correct location
+ self.lpath = slpath
+ else:
+ # Prepare download location in dlpath
+ self.lpath = os.path.join(arts.dlpath, slpath)
+
+ if info is None:
+ self.info = dict()
+ else:
+ # Assign the map and convert all keys to lower case
+ self.info = {k.lower(): v for k, v in info.items()}
+ # Rename values, e.g., 'plat':'windows' to 'plat':'win'
+ for k, v in self.info.items():
+ rdict = rename_vals.get(k, None)
+ if rdict is not None:
+ self.info[k] = rdict.get(v, v)
+
+ # Score value for sorting
+ self.score = 0
+
+ # AppVeyor symbol builds are of less value
+ if self.fname.find('.symbols.') != -1:
+ self.score -= 10
+
+ self.arts = arts
+ arts.artifacts.append(self)
+
+ def __repr__(self):
+ return self.path
+
+ def __lt__(self, other):
+ return self.score < other.score
+
+ def download(self):
+ """ Download artifact from S3 and store in local directory .lpath.
+ If the artifact is already downloaded nothing is done. """
+ if os.path.isfile(self.lpath) and os.path.getsize(self.lpath) > 0:
+ return
+ print('Downloading %s' % self.path)
+ if dry_run:
+ return
+ ldir = os.path.dirname(self.lpath)
+ if not os.path.isdir(ldir):
+ os.makedirs(ldir, 0o755)
+ self.arts.s3_bucket.download_file(self.path, self.lpath)
+
+
+class Artifacts (object):
+ def __init__(self, match, dlpath):
+ super(Artifacts, self).__init__()
+ self.match = match
+ self.artifacts = list()
+ # Download directory (make sure it ends with a path separator)
+ if not dlpath.endswith(os.path.sep):
+ dlpath = os.path.join(dlpath, '')
+ self.dlpath = dlpath
+ if not os.path.isdir(self.dlpath):
+ if not dry_run:
+ os.makedirs(self.dlpath, 0o755)
+
+ def collect_single(self, path, req_tag=True):
+ """ Collect single artifact, be it in S3 or locally.
+ :param: path string: S3 or local (relative) path
+ :param: req_tag bool: Require tag to match.
+ """
+
+ # For local files, strip download path.
+ # Also ignore any parent directories.
+ if path.startswith(self.dlpath):
+ folder = os.path.basename(os.path.dirname(path[len(self.dlpath):]))
+ else:
+ folder = os.path.basename(os.path.dirname(path))
+
+ # The folder contains the tokens needed to perform
+ # matching of project, gitref, etc.
+ rinfo = re.findall(r'(?P<tag>[^-]+)-(?P<val>.*?)(?:__|$)', folder)
+ if rinfo is None or len(rinfo) == 0:
+ print('Incorrect folder/file name format for %s' % folder)
+ return None
+
+ info = dict(rinfo)
+
+ # Ignore AppVeyor Debug builds
+ if info.get('bldtype', '').lower() == 'debug':
+ print('Ignoring debug artifact %s' % folder)
+ return None
+
+ tag = info.get('tag', None)
+ if tag is not None and (len(tag) == 0 or tag.startswith('$(')):
+ # AppVeyor doesn't substite $(APPVEYOR_REPO_TAG_NAME)
+ # with an empty value when not set, it leaves that token
+ # in the string - so translate that to no tag.
+ del info['tag']
+
+ # Perform matching
+ unmatched = list()
+ for m, v in self.match.items():
+ if m not in info or info[m] != v:
+ unmatched.append(f"{m} = {v}")
+
+ # Make sure all matches were satisfied, unless this is a
+ # common artifact.
+ if info.get('p', '') != 'common' and len(unmatched) > 0:
+ return None
+
+ return Artifact(self, path, info)
+
+ def collect_s3(self):
+ """ Collect and download build-artifacts from S3 based on
+ git reference """
+ print(
+ 'Collecting artifacts matching %s from S3 bucket %s' %
+ (self.match, s3_bucket))
+ self.s3 = boto3.resource('s3')
+ self.s3_bucket = self.s3.Bucket(s3_bucket)
+ self.s3_client = boto3.client('s3')
+
+ # note: list_objects will return at most 1000 objects per call,
+ # use continuation token to read full list.
+ cont_token = None
+ more = True
+ while more:
+ if cont_token is not None:
+ res = self.s3_client.list_objects_v2(
+ Bucket=s3_bucket,
+ Prefix='librdkafka/',
+ ContinuationToken=cont_token)
+ else:
+ res = self.s3_client.list_objects_v2(Bucket=s3_bucket,
+ Prefix='librdkafka/')
+
+ if res.get('IsTruncated') is True:
+ cont_token = res.get('NextContinuationToken')
+ else:
+ more = False
+
+ for item in res.get('Contents'):
+ self.collect_single(item.get('Key'))
+
+ for a in self.artifacts:
+ a.download()
+
+ def collect_local(self, path, req_tag=True):
+ """ Collect artifacts from a local directory possibly previously
+ collected from s3 """
+ for f in [os.path.join(dp, f) for dp, dn,
+ filenames in os.walk(path) for f in filenames]:
+ if not os.path.isfile(f):
+ continue
+ self.collect_single(f, req_tag)
+
+
+class Mapping (object):
+ """ Maps/matches a file in an input release artifact to
+ the output location of the package, based on attributes and paths. """
+
+ def __init__(self, attributes, artifact_fname_glob, path_in_artifact,
+ output_pkg_path=None, artifact_fname_excludes=[]):
+ """
+ @param attributes A dict of artifact attributes that must match.
+ If an attribute name (dict key) is prefixed
+ with "!" (e.g., "!plat") then the attribute
+ must not match.
+ @param artifact_fname_glob Match artifacts with this filename glob.
+ @param path_in_artifact On match, extract this file in the artifact,..
+ @param output_pkg_path ..and write it to this location in the package.
+ Defaults to path_in_artifact.
+ @param artifact_fname_excludes Exclude artifacts matching these
+ filenames.
+
+ Pass a list of Mapping objects to FIXME to perform all mappings.
+ """
+ super(Mapping, self).__init__()
+ self.attributes = attributes
+ self.fname_glob = artifact_fname_glob
+ self.input_path = path_in_artifact
+ if output_pkg_path is None:
+ self.output_path = self.input_path
+ else:
+ self.output_path = output_pkg_path
+ self.name = self.output_path
+ self.fname_excludes = artifact_fname_excludes
+
+ def __str__(self):
+ return self.name
+
+
+class Package (object):
+ """ Generic Package class
+ A Package is a working container for one or more output
+ packages for a specific package type (e.g., nuget) """
+
+ def __init__(self, version, arts):
+ super(Package, self).__init__()
+ self.version = version
+ self.arts = arts
+ # These may be overwritten by specific sub-classes:
+ self.artifacts = arts.artifacts
+ # Staging path, filled in later.
+ self.stpath = None
+ self.kv = {'version': version}
+ self.files = dict()
+
+ def add_file(self, file):
+ self.files[file] = True
+
+ def build(self):
+ """ Build package output(s), return a list of paths "
+ to built packages """
+ raise NotImplementedError
+
+ def cleanup(self):
+ """ Optional cleanup routine for removing temporary files, etc. """
+ pass
+
+ def render(self, fname, destpath='.'):
+ """ Render template in file fname and save to destpath/fname,
+ where destpath is relative to stpath """
+
+ outf = os.path.join(self.stpath, destpath, fname)
+
+ if not os.path.isdir(os.path.dirname(outf)):
+ os.makedirs(os.path.dirname(outf), 0o0755)
+
+ with open(os.path.join('templates', fname), 'r') as tf:
+ tmpl = Template(tf.read())
+ with open(outf, 'w') as of:
+ of.write(tmpl.substitute(self.kv))
+
+ self.add_file(outf)
+
+ def copy_template(self, fname, target_fname=None, destpath='.'):
+ """ Copy template file to destpath/fname
+ where destpath is relative to stpath """
+
+ if target_fname is None:
+ target_fname = fname
+ outf = os.path.join(self.stpath, destpath, target_fname)
+
+ if not os.path.isdir(os.path.dirname(outf)):
+ os.makedirs(os.path.dirname(outf), 0o0755)
+
+ shutil.copy(os.path.join('templates', fname), outf)
+
+ self.add_file(outf)
+
+ def apply_mappings(self):
+ """ Applies a list of Mapping to match and extract files from
+ matching artifacts. If any of the listed Mappings can not be
+ fulfilled an exception is raised. """
+
+ assert self.mappings
+ assert len(self.mappings) > 0
+
+ for m in self.mappings:
+
+ artifact = None
+ for a in self.arts.artifacts:
+ found = True
+
+ for attr in m.attributes:
+ if attr[0] == '!':
+ # Require attribute NOT to match
+ origattr = attr
+ attr = attr[1:]
+
+ if attr in a.info and \
+ a.info[attr] != m.attributes[origattr]:
+ found = False
+ break
+ else:
+ # Require attribute to match
+ if attr not in a.info or \
+ a.info[attr] != m.attributes[attr]:
+ found = False
+ break
+
+ if not fnmatch(a.fname, m.fname_glob):
+ found = False
+
+ for exclude in m.fname_excludes:
+ if exclude in a.fname:
+ found = False
+ break
+
+ if found:
+ artifact = a
+ break
+
+ if artifact is None:
+ raise MissingArtifactError(
+ '%s: unable to find artifact with tags %s matching "%s"' %
+ (m, str(m.attributes), m.fname_glob))
+
+ output_path = os.path.join(self.stpath, m.output_path)
+
+ try:
+ zfile.ZFile.extract(artifact.lpath, m.input_path, output_path)
+# except KeyError:
+# continue
+ except Exception as e:
+ raise Exception(
+ '%s: file not found in archive %s: %s. Files in archive are:\n%s' % # noqa: E501
+ (m, artifact.lpath, e, '\n'.join(zfile.ZFile(
+ artifact.lpath).getnames())))
+
+ # Check that the file type matches.
+ if magic_mismatch(output_path, a):
+ os.unlink(output_path)
+ continue
+
+ # All mappings found and extracted.
+
+ def verify(self, path):
+ """ Verify package content based on the previously defined mappings """
+
+ missing = list()
+ with zfile.ZFile(path, 'r') as zf:
+ print('Verifying %s:' % path)
+
+ # Zipfiles may url-encode filenames, unquote them before matching.
+ pkgd = [unquote(x) for x in zf.getnames()]
+ missing = [x for x in self.mappings if x.output_path not in pkgd]
+
+ if len(missing) > 0:
+ print(
+ 'Missing files in package %s:\n%s' %
+ (path, '\n'.join([str(x) for x in missing])))
+ print('Actual: %s' % '\n'.join(pkgd))
+ return False
+
+ print('OK - %d expected files found' % len(self.mappings))
+ return True
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/push-to-nuget.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/push-to-nuget.sh
new file mode 100755
index 000000000..598dd4cd7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/push-to-nuget.sh
@@ -0,0 +1,21 @@
+#!/bin/bash
+#
+# Upload NuGet package to NuGet.org using provided NuGet API key
+#
+
+set -e
+
+key=$1
+pkg=$2
+
+if [[ -z $pkg ]]; then
+ echo "Usage: $0 <nuget.org-api-key> <nuget-package>"
+ exit 1
+fi
+
+set -u
+
+docker run -t -v $PWD/$pkg:/$pkg mcr.microsoft.com/dotnet/sdk:3.1 \
+ dotnet nuget push /$pkg -n -s https://api.nuget.org/v3/index.json \
+ -k $key --source https://api.nuget.org/v3/index.json
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/release.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/release.py
new file mode 100755
index 000000000..f230a580c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/release.py
@@ -0,0 +1,167 @@
+#!/usr/bin/env python3
+#
+#
+# NuGet release packaging tool.
+# Creates a NuGet package from CI artifacts on S3.
+#
+
+
+import os
+import sys
+import argparse
+import time
+import packaging
+import nugetpackage
+import staticpackage
+
+
+dry_run = False
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument(
+ "--s3",
+ help="Collect artifacts from S3 bucket",
+ action="store_true")
+ parser.add_argument("--dry-run",
+ help="Locate artifacts but don't actually "
+ "download or do anything",
+ action="store_true")
+ parser.add_argument(
+ "--directory",
+ help="Download directory (default: dl-<tag>)",
+ default=None)
+ parser.add_argument(
+ "--no-cleanup",
+ help="Don't clean up temporary folders",
+ action="store_true")
+ parser.add_argument(
+ "--sha",
+ help="Also match on this git sha1",
+ default=None)
+ parser.add_argument(
+ "--ignore-tag",
+ help="Ignore the artifacts' tag attribute (for devel use only)",
+ action="store_true",
+ default=False)
+ parser.add_argument(
+ "--nuget-version",
+ help="The nuget package version (defaults to same as tag)",
+ default=None)
+ parser.add_argument("--upload", help="Upload package to after building, "
+ "using provided NuGet API key "
+ "(either file or the key itself)",
+ default=None,
+ type=str)
+ parser.add_argument(
+ "--class",
+ help="Packaging class (either NugetPackage or StaticPackage)",
+ default="NugetPackage",
+ dest="pkgclass")
+ parser.add_argument(
+ "--retries",
+ help="Number of retries to collect artifacts",
+ default=0,
+ type=int)
+ parser.add_argument("tag", help="Git tag to collect")
+
+ args = parser.parse_args()
+ dry_run = args.dry_run
+ retries = args.retries
+ if not args.directory:
+ args.directory = 'dl-%s' % args.tag
+
+ match = {}
+ if not args.ignore_tag:
+ match['tag'] = args.tag
+
+ if args.sha is not None:
+ match['sha'] = args.sha
+
+ if args.pkgclass == "NugetPackage":
+ pkgclass = nugetpackage.NugetPackage
+ elif args.pkgclass == "StaticPackage":
+ pkgclass = staticpackage.StaticPackage
+ else:
+ raise ValueError(f'Unknown packaging class {args.pkgclass}: '
+ 'should be one of NugetPackage or StaticPackage')
+
+ try:
+ match.update(getattr(pkgclass, 'match'))
+ except BaseException:
+ pass
+
+ arts = packaging.Artifacts(match, args.directory)
+
+ # Collect common local artifacts, such as support files.
+ arts.collect_local('common', req_tag=False)
+
+ while True:
+ if args.s3:
+ arts.collect_s3()
+
+ arts.collect_local(arts.dlpath)
+
+ if len(arts.artifacts) == 0:
+ raise ValueError('No artifacts found for %s' % match)
+
+ print('Collected artifacts (%s):' % (arts.dlpath))
+ for a in arts.artifacts:
+ print(' %s' % a.lpath)
+ print('')
+
+ if args.nuget_version is not None:
+ package_version = args.nuget_version
+ else:
+ package_version = args.tag
+
+ print('')
+
+ if dry_run:
+ sys.exit(0)
+
+ print('Building packages:')
+
+ try:
+ p = pkgclass(package_version, arts)
+ pkgfile = p.build(buildtype='release')
+ break
+ except packaging.MissingArtifactError as e:
+ if retries <= 0 or not args.s3:
+ if not args.no_cleanup:
+ p.cleanup()
+ raise e
+
+ p.cleanup()
+ retries -= 1
+ print(e)
+ print('Retrying in 30 seconds')
+ time.sleep(30)
+
+ if not args.no_cleanup:
+ p.cleanup()
+ else:
+ print(' --no-cleanup: leaving %s' % p.stpath)
+
+ print('')
+
+ if not p.verify(pkgfile):
+ print('Package failed verification.')
+ sys.exit(1)
+
+ print('Created package: %s' % pkgfile)
+
+ if args.upload is not None:
+ if os.path.isfile(args.upload):
+ with open(args.upload, 'r') as f:
+ nuget_key = f.read().replace('\n', '')
+ else:
+ nuget_key = args.upload
+
+ print('Uploading %s to NuGet' % pkgfile)
+ r = os.system("./push-to-nuget.sh '%s' %s" % (nuget_key, pkgfile))
+ assert int(r) == 0, \
+ f"NuGet upload failed with exit code {r}, see previous errors"
+ print('%s successfully uploaded to NuGet' % pkgfile)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/requirements.txt b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/requirements.txt
new file mode 100644
index 000000000..0fa2fd19c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/requirements.txt
@@ -0,0 +1,3 @@
+boto3==1.18.45
+rpmfile==1.0.8
+filemagic==1.6
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/staticpackage.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/staticpackage.py
new file mode 100644
index 000000000..38567bb60
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/staticpackage.py
@@ -0,0 +1,178 @@
+#!/usr/bin/env python3
+#
+# Create self-contained static-library tar-ball package
+#
+
+import os
+import tempfile
+import shutil
+import subprocess
+from packaging import Package, Mapping
+
+
+class StaticPackage (Package):
+ """ Create a tar-ball with self-contained static libraries.
+ These are later imported into confluent-kafka-go. """
+
+ # Make sure gssapi (cyrus-sasl) is not linked, since that is a
+ # dynamic linkage, by specifying negative match '!extra': 'gssapi'.
+ # Except for on OSX where cyrus-sasl is always available, and
+ # Windows where it is never linked.
+ #
+ # Match statically linked artifacts (which are included in 'all' builds)
+ mappings = [
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'centos6',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/include/librdkafka/rdkafka.h',
+ 'rdkafka.h'),
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'centos6',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/share/doc/librdkafka/LICENSES.txt',
+ 'LICENSES.txt'),
+
+ # glibc linux static lib and pkg-config file
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'centos6',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka-static.a',
+ 'librdkafka_glibc_linux_amd64.a'),
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'centos6',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/pkgconfig/rdkafka-static.pc',
+ 'librdkafka_glibc_linux_amd64.pc'),
+
+ # glibc linux arm64 static lib and pkg-config file
+ Mapping({'arch': 'arm64',
+ 'plat': 'linux',
+ 'dist': 'centos7',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka-static.a',
+ 'librdkafka_glibc_linux_arm64.a'),
+ Mapping({'arch': 'arm64',
+ 'plat': 'linux',
+ 'dist': 'centos7',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/pkgconfig/rdkafka-static.pc',
+ 'librdkafka_glibc_linux_arm64.pc'),
+
+ # musl linux static lib and pkg-config file
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'alpine',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka-static.a',
+ 'librdkafka_musl_linux_amd64.a'),
+ Mapping({'arch': 'x64',
+ 'plat': 'linux',
+ 'dist': 'alpine',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/pkgconfig/rdkafka-static.pc',
+ 'librdkafka_musl_linux_amd64.pc'),
+
+ # musl linux arm64 static lib and pkg-config file
+ Mapping({'arch': 'arm64',
+ 'plat': 'linux',
+ 'dist': 'alpine',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka-static.a',
+ 'librdkafka_musl_linux_arm64.a'),
+ Mapping({'arch': 'arm64',
+ 'plat': 'linux',
+ 'dist': 'alpine',
+ 'lnk': 'all',
+ '!extra': 'gssapi'},
+ 'librdkafka.tgz',
+ './usr/local/lib/pkgconfig/rdkafka-static.pc',
+ 'librdkafka_musl_linux_arm64.pc'),
+
+ # osx x64 static lib and pkg-config file
+ Mapping({'arch': 'x64',
+ 'plat': 'osx',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka-static.a',
+ 'librdkafka_darwin_amd64.a'),
+ Mapping({'arch': 'x64',
+ 'plat': 'osx',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/lib/pkgconfig/rdkafka-static.pc',
+ 'librdkafka_darwin_amd64.pc'),
+
+ # osx arm64 static lib and pkg-config file
+ Mapping({'arch': 'arm64',
+ 'plat': 'osx',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/lib/librdkafka-static.a',
+ 'librdkafka_darwin_arm64.a'),
+ Mapping({'arch': 'arm64',
+ 'plat': 'osx',
+ 'lnk': 'all'},
+ 'librdkafka.tgz',
+ './usr/local/lib/pkgconfig/rdkafka-static.pc',
+ 'librdkafka_darwin_arm64.pc'),
+
+ # win static lib and pkg-config file (mingw)
+ Mapping({'arch': 'x64',
+ 'plat': 'win',
+ 'dist': 'mingw',
+ 'lnk': 'static'},
+ 'librdkafka.tgz',
+ './lib/librdkafka-static.a', 'librdkafka_windows.a'),
+ Mapping({'arch': 'x64',
+ 'plat': 'win',
+ 'dist': 'mingw',
+ 'lnk': 'static'},
+ 'librdkafka.tgz',
+ './lib/pkgconfig/rdkafka-static.pc',
+ 'librdkafka_windows.pc'),
+ ]
+
+ def __init__(self, version, arts):
+ super(StaticPackage, self).__init__(version, arts)
+
+ def cleanup(self):
+ if os.path.isdir(self.stpath):
+ shutil.rmtree(self.stpath)
+
+ def build(self, buildtype):
+ """ Build single package for all artifacts. """
+
+ self.stpath = tempfile.mkdtemp(prefix="out-", dir=".")
+
+ self.apply_mappings()
+
+ print('Tree extracted to %s' % self.stpath)
+
+ # After creating a bare-bone layout, create a tarball.
+ outname = "librdkafka-static-bundle-%s.tgz" % self.version
+ print('Writing to %s in %s' % (outname, self.stpath))
+ subprocess.check_call("(cd %s && tar cvzf ../%s .)" %
+ (self.stpath, outname),
+ shell=True)
+
+ return outname
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.nuspec b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.nuspec
new file mode 100644
index 000000000..dbfd7b1aa
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.nuspec
@@ -0,0 +1,21 @@
+<?xml version="1.0" encoding="utf-8"?>
+<package xmlns="http://schemas.microsoft.com/packaging/2011/10/nuspec.xsd">
+ <metadata>
+ <id>librdkafka.redist</id>
+ <version>${version}</version>
+ <title>librdkafka - redistributable</title>
+ <authors>Magnus Edenhill, edenhill</authors>
+ <owners>Confluent Inc.</owners>
+ <requireLicenseAcceptance>false</requireLicenseAcceptance>
+ <licenseUrl>https://github.com/confluentinc/librdkafka/blob/master/LICENSES.txt</licenseUrl>
+ <projectUrl>https://github.com/confluentinc/librdkafka</projectUrl>
+ <description>The Apache Kafka C/C++ client library - redistributable</description>
+ <summary>The Apache Kafka C/C++ client library</summary>
+ <releaseNotes>Release of librdkafka</releaseNotes>
+ <copyright>Copyright 2012-2023</copyright>
+ <tags>native apache kafka librdkafka C C++ nativepackage</tags>
+ </metadata>
+ <files>
+ <file src="**" />
+ </files>
+</package>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.props b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.props
new file mode 100644
index 000000000..c1615c61c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.props
@@ -0,0 +1,18 @@
+<?xml version="1.0" encoding="utf-8" ?>
+<Project ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup>
+ <Content Include="$(MSBuildThisFileDirectory)..\runtimes\win-x86\native\*">
+ <Link>librdkafka\x86\%(Filename)%(Extension)</Link>
+ <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+ </Content>
+ <Content Include="$(MSBuildThisFileDirectory)..\runtimes\win-x64\native\*">
+ <Link>librdkafka\x64\%(Filename)%(Extension)</Link>
+ <CopyToOutputDirectory>PreserveNewest</CopyToOutputDirectory>
+ </Content>
+ </ItemGroup>
+ <ItemDefinitionGroup>
+ <ClCompile>
+ <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.targets b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.targets
new file mode 100644
index 000000000..d174cda11
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/templates/librdkafka.redist.targets
@@ -0,0 +1,19 @@
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemDefinitionGroup>
+ <Link>
+ <AdditionalDependencies Condition="'$(Platform)' == 'x64'">$(MSBuildThisFileDirectory)lib\win\x64\win-x64-Release\v142\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalDependencies Condition="'$(Platform)' != 'x64'">$(MSBuildThisFileDirectory)lib\win\x86\win-x86-Release\v142\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories Condition="'$(Platform)' == 'x64'">$(MSBuildThisFileDirectory)lib\win\x64\win-x64-Release\v142;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ <AdditionalLibraryDirectories Condition="'$(Platform)' != 'x64'">$(MSBuildThisFileDirectory)lib\win\x86\win-x86-Release\v142;%(AdditionalLibraryDirectories)</AdditionalLibraryDirectories>
+ </Link>
+ <ClCompile>
+ <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemGroup Condition="'$(Platform)' == 'x64'">
+ <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\runtimes\win-x64\native\*.dll" />
+ </ItemGroup>
+ <ItemGroup Condition="'$(Platform)' != 'x64'">
+ <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\runtimes\win-x86\native\*.dll" />
+ </ItemGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/__init__.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/__init__.py
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/zfile.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/zfile.py
new file mode 100644
index 000000000..51f2df25f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/nuget/zfile/zfile.py
@@ -0,0 +1,98 @@
+#!/usr/bin/env python3
+
+import os
+import tarfile
+import zipfile
+import rpmfile
+
+
+class ZFile (object):
+ def __init__(self, path, mode='r', ext=None):
+ super(ZFile, self).__init__()
+
+ if ext is not None:
+ _ext = ext
+ else:
+ _ext = os.path.splitext(path)[-1]
+ if _ext.startswith('.'):
+ _ext = _ext[1:]
+
+ if zipfile.is_zipfile(path) or _ext == 'zip':
+ self.f = zipfile.ZipFile(path, mode)
+ elif tarfile.is_tarfile(path) or _ext in ('tar', 'tgz', 'gz'):
+ self.f = tarfile.open(path, mode)
+ elif _ext == 'rpm':
+ self.f = rpmfile.open(path, mode + 'b')
+ else:
+ raise ValueError('Unsupported file extension: %s' % path)
+
+ def __enter__(self):
+ return self
+
+ def __exit__(self, *args):
+ if callable(getattr(self.f, 'close', None)):
+ self.f.close()
+
+ def getnames(self):
+ if isinstance(self.f, zipfile.ZipFile):
+ return self.f.namelist()
+ elif isinstance(self.f, tarfile.TarFile):
+ return self.f.getnames()
+ elif isinstance(self.f, rpmfile.RPMFile):
+ return [x.name for x in self.f.getmembers()]
+ else:
+ raise NotImplementedError
+
+ def headers(self):
+ if isinstance(self.f, rpmfile.RPMFile):
+ return self.f.headers
+ else:
+ return dict()
+
+ def extract_to(self, member, path):
+ """ Extract compress file's \\p member to \\p path
+ If \\p path is a directory the member's basename will used as
+ filename, otherwise path is considered the full file path name. """
+
+ if not os.path.isdir(os.path.dirname(path)):
+ os.makedirs(os.path.dirname(path))
+
+ if os.path.isdir(path):
+ path = os.path.join(path, os.path.basename(member))
+
+ with open(path, 'wb') as of:
+ if isinstance(self.f, zipfile.ZipFile):
+ zf = self.f.open(member)
+ else:
+ zf = self.f.extractfile(member)
+
+ while True:
+ b = zf.read(1024 * 100)
+ if b:
+ of.write(b)
+ else:
+ break
+
+ zf.close()
+
+ @classmethod
+ def extract(cls, zpath, member, outpath):
+ """
+ Extract file member (full internal path) to output from
+ archive zpath.
+ """
+
+ with ZFile(zpath) as zf:
+ zf.extract_to(member, outpath)
+
+ @classmethod
+ def compress(cls, zpath, paths, stripcnt=0, ext=None):
+ """
+ Create new compressed file \\p zpath containing files in \\p paths
+ """
+
+ with ZFile(zpath, 'w', ext=ext) as zf:
+ for p in paths:
+ outp = os.path.sep.join(p.split(os.path.sep)[stripcnt:])
+ print('zip %s to %s (stripcnt %d)' % (p, outp, stripcnt))
+ zf.f.write(p, outp)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/.gitignore
new file mode 100644
index 000000000..4bfdf21ed
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/.gitignore
@@ -0,0 +1,7 @@
+*.log
+available_pkgs
+installed_pkgs
+pkgs-*
+arts-*
+cache
+output
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/Makefile b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/Makefile
new file mode 100644
index 000000000..c5c8f8c10
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/Makefile
@@ -0,0 +1,92 @@
+PACKAGE_NAME?= librdkafka
+VERSION?= $(shell ../get_version.py ../../src/rdkafka.h)
+
+# Jenkins CI integration
+BUILD_NUMBER?= 1
+
+MOCK_CONFIG?=default
+
+RESULT_DIR?=pkgs-$(VERSION)-$(BUILD_NUMBER)-$(MOCK_CONFIG)
+
+# Where built packages are copied with `make copy-artifacts`
+ARTIFACTS_DIR?=../../artifacts
+
+all: rpm
+
+
+SOURCES:
+ mkdir -p SOURCES
+
+archive: SOURCES
+ cd ../../ && \
+ git archive --prefix=$(PACKAGE_NAME)-$(VERSION)/ \
+ -o packaging/rpm/SOURCES/$(PACKAGE_NAME)-$(VERSION).tar.gz HEAD
+
+
+build_prepare: archive
+ mkdir -p $(RESULT_DIR)
+ rm -f $(RESULT_DIR)/$(PACKAGE_NAME)*.rpm
+
+
+srpm: build_prepare
+ /usr/bin/mock \
+ -r $(MOCK_CONFIG) \
+ $(MOCK_OPTIONS) \
+ --define "__version $(VERSION)" \
+ --define "__release $(BUILD_NUMBER)" \
+ --enable-network \
+ --resultdir=$(RESULT_DIR) \
+ --no-clean --no-cleanup-after \
+ --install epel-release \
+ --buildsrpm \
+ --spec=librdkafka.spec \
+ --sources=SOURCES || \
+ (tail -n 100 pkgs-$(VERSION)*/*log ; false)
+ @echo "======= Source RPM now available in $(RESULT_DIR) ======="
+
+rpm: srpm
+ /usr/bin/mock \
+ -r $(MOCK_CONFIG) \
+ $(MOCK_OPTIONS) \
+ --define "__version $(VERSION)"\
+ --define "__release $(BUILD_NUMBER)"\
+ --enable-network \
+ --resultdir=$(RESULT_DIR) \
+ --no-clean --no-cleanup-after \
+ --rebuild $(RESULT_DIR)/$(PACKAGE_NAME)*.src.rpm || \
+ (tail -n 100 pkgs-$(VERSION)*/*log ; false)
+ @echo "======= Binary RPMs now available in $(RESULT_DIR) ======="
+
+copy-artifacts:
+ cp $(RESULT_DIR)/*rpm $(ARTIFACTS_DIR)
+
+clean:
+ rm -rf SOURCES
+ /usr/bin/mock -r $(MOCK_CONFIG) --clean
+
+distclean: clean
+ rm -f build.log root.log state.log available_pkgs installed_pkgs \
+ *.rpm *.tar.gz
+
+# Prepare ubuntu 14.04 for building RPMs with mock.
+# - older versions of mock needs the config file to reside in /etc/mock,
+# so we copy it there.
+# - add a mock system group (if not already exists)
+# - add the current user to the mock group.
+# - prepare mock environment with some needed packages.
+# NOTE: This target should be run with sudo.
+prepare_ubuntu:
+ apt-get -qq update
+ apt-get install -y -qq mock make git python-lzma
+ cp *.cfg /etc/mock/
+ addgroup --system mock || true
+ adduser $$(whoami) mock
+ /usr/bin/mock -r $(MOCK_CONFIG) --init
+ /usr/bin/mock -r $(MOCK_CONFIG) \
+ --enable-network \
+ --no-cleanup-after \
+ --install epel-release shadow-utils
+
+prepare_centos:
+ yum install -y -q mock make git
+ cp *.cfg /etc/mock/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/README.md b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/README.md
new file mode 100644
index 000000000..92a6eca95
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/README.md
@@ -0,0 +1,23 @@
+# RPM packages for librdkafka
+
+On a system with RPM mock installed, simply run make to create RPM packages:
+
+ $ make
+
+Additional mock options may be specified using MOCK_OPTIONS:
+
+ $ make MOCK_OPTIONS='--bootstrap-chroot'
+
+
+## Build with Mock on docker
+
+From the librdkafka top-level directory:
+
+ $ packaging/rpm/mock-on-docker.sh
+
+Wait for packages to build, they will be copied to top-level dir artifacts/
+
+Test the packages:
+
+ $ packaging/rpm/tests/test-on-docker.sh
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/el7-x86_64.cfg b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/el7-x86_64.cfg
new file mode 100644
index 000000000..502282749
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/el7-x86_64.cfg
@@ -0,0 +1,40 @@
+config_opts['root'] = 'el7-x86_64'
+config_opts['target_arch'] = 'x86_64'
+config_opts['legal_host_arches'] = ('x86_64',)
+config_opts['chroot_setup_cmd'] = 'install @buildsys-build'
+config_opts['dist'] = 'el7' # only useful for --resultdir variable subst
+config_opts['releasever'] = '7'
+config_opts['docker_unshare_warning'] = False
+config_opts['nosync'] = True
+
+config_opts['yum.conf'] = """
+[main]
+keepcache=1
+debuglevel=2
+reposdir=/dev/null
+logfile=/var/log/yum.log
+retries=15
+obsoletes=1
+gpgcheck=0
+assumeyes=1
+syslog_ident=mock
+syslog_device=
+mdpolicy=group:primary
+
+# repos
+[base]
+name=BaseOS
+mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=os
+failovermethod=priority
+
+[updates]
+name=updates
+enabled=1
+mirrorlist=http://mirrorlist.centos.org/?release=7&arch=x86_64&repo=updates
+failovermethod=priority
+
+[epel]
+name=epel
+mirrorlist=http://mirrors.fedoraproject.org/mirrorlist?repo=epel-7&arch=x86_64
+failovermethod=priority
+"""
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/librdkafka.spec b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/librdkafka.spec
new file mode 100644
index 000000000..4f9e8c0d0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/librdkafka.spec
@@ -0,0 +1,118 @@
+Name: librdkafka
+Version: %{__version}
+Release: %{__release}%{?dist}
+%define soname 1
+
+Summary: The Apache Kafka C library
+Group: Development/Libraries/C and C++
+License: BSD-2-Clause
+URL: https://github.com/edenhill/librdkafka
+Source: librdkafka-%{version}.tar.gz
+
+BuildRequires: zlib-devel libstdc++-devel gcc >= 4.1 gcc-c++ cyrus-sasl-devel
+BuildRoot: %(mktemp -ud %{_tmppath}/%{name}-%{version}-%{release}-XXXXXX)
+
+%define _source_payload w9.gzdio
+%define _binary_payload w9.gzdio
+
+%description
+librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
+
+
+%package -n %{name}%{soname}
+Summary: The Apache Kafka C library
+Group: Development/Libraries/C and C++
+Requires: zlib libstdc++ cyrus-sasl
+# openssl libraries were extract to openssl-libs in RHEL7
+%if 0%{?rhel} >= 7
+Requires: openssl-libs >= 1.0.2
+BuildRequires: openssl-devel >= 1.0.2 python3
+%else
+Requires: openssl
+# python34 is provided from epel-release, but that package needs to be installed
+# prior to rpmbuild working out these dependencies (such as from mock).
+BuildRequires: openssl-devel python34
+%endif
+
+%description -n %{name}%{soname}
+librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
+
+
+%package -n %{name}-devel
+Summary: The Apache Kafka C library (Development Environment)
+Group: Development/Libraries/C and C++
+Requires: %{name}%{soname} = %{version}
+
+%description -n %{name}-devel
+librdkafka is the C/C++ client library implementation of the Apache Kafka protocol, containing both Producer and Consumer support.
+
+This package contains headers and libraries required to build applications
+using librdkafka.
+
+
+%prep
+%setup -q -n %{name}-%{version}
+
+# --install-deps will install missing dependencies that are not available
+# through BuildRequires, such as libzstd, which will be linked statically.
+%configure --install-deps --disable-lz4-ext
+
+%build
+cat config.log
+make
+examples/rdkafka_example -X builtin.features
+
+%install
+rm -rf %{buildroot}
+DESTDIR=%{buildroot} make install
+
+%clean
+rm -rf %{buildroot}
+
+%post -n %{name}%{soname} -p /sbin/ldconfig
+%postun -n %{name}%{soname} -p /sbin/ldconfig
+
+%files -n %{name}%{soname}
+%defattr(444,root,root)
+%{_libdir}/librdkafka.so.%{soname}
+%{_libdir}/librdkafka++.so.%{soname}
+%defattr(-,root,root)
+%doc %{_docdir}/librdkafka/README.md
+%doc %{_docdir}/librdkafka/LICENSE
+%doc %{_docdir}/librdkafka/CONFIGURATION.md
+%doc %{_docdir}/librdkafka/INTRODUCTION.md
+%doc %{_docdir}/librdkafka/STATISTICS.md
+%doc %{_docdir}/librdkafka/CHANGELOG.md
+%doc %{_docdir}/librdkafka/LICENSES.txt
+
+%defattr(-,root,root)
+#%{_bindir}/rdkafka_example
+#%{_bindir}/rdkafka_performance
+
+
+%files -n %{name}-devel
+%defattr(-,root,root)
+%{_includedir}/librdkafka
+%defattr(444,root,root)
+%{_libdir}/librdkafka.a
+%{_libdir}/librdkafka-static.a
+%{_libdir}/librdkafka.so
+%{_libdir}/librdkafka++.a
+%{_libdir}/librdkafka++.so
+%{_libdir}/pkgconfig/rdkafka++.pc
+%{_libdir}/pkgconfig/rdkafka.pc
+%{_libdir}/pkgconfig/rdkafka-static.pc
+%{_libdir}/pkgconfig/rdkafka++-static.pc
+
+%changelog
+* Thu Apr 09 2015 Eduard Iskandarov <e.iskandarov@corp.mail.ru> 0.8.6-0
+- 0.8.6 simplify build process
+
+* Fri Oct 24 2014 Magnus Edenhill <rdkafka@edenhill.se> 0.8.5-0
+- 0.8.5 release
+
+* Mon Aug 18 2014 Magnus Edenhill <rdkafka@edenhill.se> 0.8.4-0
+- 0.8.4 release
+
+* Mon Mar 17 2014 Magnus Edenhill <vk@edenhill.se> 0.8.3-0
+- Initial RPM package
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/mock-on-docker.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/mock-on-docker.sh
new file mode 100755
index 000000000..eec3d54a7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/mock-on-docker.sh
@@ -0,0 +1,97 @@
+#!/bin/bash
+#
+#
+#
+# Run mock in docker to create RPM packages of librdkafka.
+#
+# Usage:
+# packaging/rpm/mock-on-docker.sh [<mock configs ..>]
+#
+
+set -ex
+
+_DOCKER_IMAGE=fedora:35
+_MOCK_CONFIGS="centos+epel-7-x86_64 centos-stream+epel-8-x86_64"
+
+if [[ $1 == "--build" ]]; then
+ on_builder=1
+ shift
+else
+ on_builder=0
+fi
+
+
+if [[ -n $* ]]; then
+ _MOCK_CONFIGS="$*"
+fi
+
+
+if [[ $on_builder == 0 ]]; then
+ #
+ # Running on host, fire up a docker container and run the latter
+ # part of this script in docker.
+ #
+
+ if [[ ! -f configure.self ]]; then
+ echo "$0 must be run from librdkafka top directory"
+ exit 1
+ fi
+
+ mkdir -p ${PWD}/packaging/rpm/cache/mock
+
+ docker run \
+ --privileged \
+ -t \
+ -v ${PWD}/packaging/rpm/cache/mock:/var/cache/mock \
+ -v ${PWD}:/io \
+ $_DOCKER_IMAGE \
+ /io/packaging/rpm/mock-on-docker.sh --build $_MOCK_CONFIGS
+
+ mkdir -p artifacts
+ for MOCK_CONFIG in $_MOCK_CONFIGS ; do
+ cp -vr --no-preserve=ownership packaging/rpm/arts-${MOCK_CONFIG}/*rpm artifacts/
+ done
+
+ echo "All Done"
+
+else
+ #
+ # Running in docker container.
+ #
+
+ dnf install -y -q mock mock-core-configs make git
+
+ echo "%_netsharedpath /sys:/proc" >> /etc/rpm/macros.netshared
+
+ pushd /io/packaging/rpm
+
+ for MOCK_CONFIG in $_MOCK_CONFIGS ; do
+ cfg_file=/etc/mock/${MOCK_CONFIG}.cfg
+ if [[ ! -f $cfg_file ]]; then
+ echo "Error: Mock config $cfg_file does not exist"
+ exit 1
+ fi
+
+ echo "config_opts['plugin_conf']['bind_mount_enable'] = False" >> $cfg_file
+ echo "config_opts['docker_unshare_warning'] = False" >> $cfg_file
+ echo "Building $MOCK_CONFIG in $PWD"
+ cat $cfg_file
+
+ echo "Setting git safe.directory"
+ git config --global --add safe.directory /io
+
+ export MOCK_CONFIG=$MOCK_CONFIG
+ make all
+
+ echo "Done building $MOCK_CONFIG: copying artifacts"
+ artdir="arts-$MOCK_CONFIG"
+ mkdir -p "$artdir"
+ make ARTIFACTS_DIR="$artdir" copy-artifacts
+
+ done
+
+ popd
+ echo "Done"
+fi
+
+exit 0
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/.gitignore
new file mode 100644
index 000000000..333a2b7ac
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/.gitignore
@@ -0,0 +1,2 @@
+test
+testcpp
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/Makefile b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/Makefile
new file mode 100644
index 000000000..edd457997
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/Makefile
@@ -0,0 +1,25 @@
+
+PROGS?=test test-static testcpp testcpp-static
+
+all: $(PROGS)
+
+test: test.c
+ $(CC) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs rdkafka)
+
+test-static: test.c
+ $(CC) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs --static rdkafka-static)
+
+testcpp: test.cpp
+ $(CXX) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs rdkafka++)
+
+testcpp-static: test.cpp
+ $(CXX) -O2 -Werror -Wall $^ -o $@ $$(pkg-config --libs rdkafka++-static)
+
+run:
+ @(for p in $(PROGS); do \
+ echo "# Running $$p" ; \
+ ./$$p || (echo $$p failed ; exit 1) ; \
+ done)
+
+clean:
+ rm -f $(PROGS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/README.md b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/README.md
new file mode 100644
index 000000000..8d1107b66
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/README.md
@@ -0,0 +1,8 @@
+# Test librdkafka RPMs using docker
+
+After building the RPMs (see README.md in parent directory) test
+the RPMs on the supported CentOS/RHEL versions using:
+
+ $ packaging/rpm/tests/test-on-docker.sh
+
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/run-test.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/run-test.sh
new file mode 100755
index 000000000..c1234a945
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/run-test.sh
@@ -0,0 +1,49 @@
+#!/bin/bash
+#
+# This script runs in the docker container, performing:
+# * install build toolchain
+# * install librdkafka rpms
+# * builds test apps
+# * runs test apps
+#
+# Usage: $0 <docker-image-name>
+
+set -ex
+
+pushd /v
+
+_IMG=$1
+
+echo "Testing on $_IMG"
+
+if [[ $_IMG == "centos:6" ]]; then
+ _EL=6
+ _INST="yum install -y -q"
+elif [[ $_IMG == "centos:7" ]]; then
+ _EL=7
+ _INST="yum install -y -q"
+ # centos:7 ships with openssl-libs 1.0.1 which is outdated and not
+ # ABI-compatible with 1.0.2 (which we build with).
+ # Upgrade openssl-libs, as users would, to prevent missing symbols.
+ _UPG="yum upgrade -y openssl-libs"
+else
+ _EL=8
+ _INST="dnf install -y -q"
+fi
+
+$_INST gcc gcc-c++ make pkg-config
+
+if [[ -n $_UPG ]]; then
+ $_UPG
+fi
+
+$_INST /rpms/librdkafka1-*el${_EL}.x86_64.rpm /rpms/librdkafka-devel-*el${_EL}.x86_64.rpm
+
+make clean all
+
+make run
+
+make clean
+
+echo "$_IMG is all good!"
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test-on-docker.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test-on-docker.sh
new file mode 100755
index 000000000..2c12ff792
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test-on-docker.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+#
+# Test librdkafka packages in <rpmdirectory> using docker.
+# Must be executed from the librdkafka top-level directory.
+#
+# Usage:
+# packaging/rpm/test-on-docker.sh [<rpm-dir>]
+
+set -ex
+
+if [[ ! -f configure.self ]]; then
+ echo "Must be executed from the librdkafka top-level directory"
+ exit 1
+fi
+
+_DOCKER_IMAGES="centos:7 redhat/ubi8:8.5-226"
+_RPMDIR=artifacts
+
+if [[ -n $1 ]]; then
+ _RPMDIR="$1"
+fi
+
+_RPMDIR=$(readlink -f $_RPMDIR)
+
+if [[ ! -d $_RPMDIR ]]; then
+ echo "$_RPMDIR does not exist"
+ exit 1
+fi
+
+
+fails=""
+for _IMG in $_DOCKER_IMAGES ; do
+ if ! docker run \
+ -t \
+ -v $_RPMDIR:/rpms \
+ -v $(readlink -f packaging/rpm/tests):/v \
+ $_IMG \
+ /v/run-test.sh $_IMG ; then
+ echo "ERROR: $_IMG FAILED"
+ fails="${fails}$_IMG "
+ fi
+done
+
+if [[ -n $fails ]]; then
+ echo "##################################################"
+ echo "# Package verification failed for:"
+ echo "# $fails"
+ echo "# See previous errors"
+ echo "##################################################"
+ exit 1
+fi
+
+exit 0
+
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.c b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.c
new file mode 100644
index 000000000..cf39b6bcd
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.c
@@ -0,0 +1,77 @@
+#include <stdio.h>
+#include <string.h>
+#include <librdkafka/rdkafka.h>
+
+int main(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ char features[256];
+ size_t fsize = sizeof(features);
+ char errstr[512];
+ const char *exp_features[] = {
+ "gzip", "snappy", "ssl", "sasl", "regex",
+ "lz4", "sasl_gssapi", "sasl_plain", "sasl_scram", "plugins",
+ "zstd", "sasl_oauthbearer", NULL,
+ };
+ const char **exp;
+ int missing = 0;
+
+
+ printf("librdkafka %s\n", rd_kafka_version_str());
+
+ conf = rd_kafka_conf_new();
+ if (rd_kafka_conf_get(conf, "builtin.features", features, &fsize) !=
+ RD_KAFKA_CONF_OK) {
+ fprintf(stderr, "conf_get failed\n");
+ return 1;
+ }
+
+ printf("builtin.features %s\n", features);
+
+ /* Verify that expected features are enabled. */
+ for (exp = exp_features; *exp; exp++) {
+ const char *t = features;
+ size_t elen = strlen(*exp);
+ int match = 0;
+
+ while ((t = strstr(t, *exp))) {
+ if (t[elen] == ',' || t[elen] == '\0') {
+ match = 1;
+ break;
+ }
+ t += elen;
+ }
+
+ if (match)
+ continue;
+
+ fprintf(stderr, "ERROR: feature %s not found\n", *exp);
+ missing++;
+ }
+
+ if (rd_kafka_conf_set(conf, "security.protocol", "SASL_SSL", errstr,
+ sizeof(errstr)) ||
+ rd_kafka_conf_set(conf, "sasl.mechanism", "PLAIN", errstr,
+ sizeof(errstr)) ||
+ rd_kafka_conf_set(conf, "sasl.username", "username", errstr,
+ sizeof(errstr)) ||
+ rd_kafka_conf_set(conf, "sasl.password", "password", errstr,
+ sizeof(errstr)) ||
+ rd_kafka_conf_set(conf, "debug", "security", errstr,
+ sizeof(errstr))) {
+ fprintf(stderr, "conf_set failed: %s\n", errstr);
+ return 1;
+ }
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if (!rk) {
+ fprintf(stderr, "rd_kafka_new failed: %s\n", errstr);
+ return 1;
+ }
+
+ printf("client name %s\n", rd_kafka_name(rk));
+
+ rd_kafka_destroy(rk);
+
+ return missing ? 1 : 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.cpp b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.cpp
new file mode 100644
index 000000000..d78a76710
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/rpm/tests/test.cpp
@@ -0,0 +1,34 @@
+#include <iostream>
+#include <librdkafka/rdkafkacpp.h>
+
+
+int main() {
+ std::cout << "librdkafka++ " << RdKafka::version_str() << std::endl;
+
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ std::string features;
+
+ if (conf->get("builtin.features", features) != RdKafka::Conf::CONF_OK) {
+ std::cerr << "conf_get failed" << std::endl;
+ return 1;
+ }
+
+ std::cout << "builtin.features " << features << std::endl;
+
+ std::string errstr;
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::cerr << "Producer::create failed: " << errstr << std::endl;
+ return 1;
+ }
+
+ delete conf;
+
+ std::cout << "client name " << producer->name() << std::endl;
+
+
+ delete producer;
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-deb-package.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-deb-package.sh
new file mode 100755
index 000000000..d9cad6d25
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-deb-package.sh
@@ -0,0 +1,64 @@
+#!/bin/bash
+#
+# Build librdkafka Debian package on a bare-bone Debian host, such as ubuntu:16.04 (docker).
+#
+# Usage (from top-level librdkafka dir):
+# docker run -it -v $PWD:/v ubuntu:16.04 /v/packaging/tools/build-deb-package.sh 1.0.0 master
+#
+
+set -exu
+
+if [[ $# -ne 2 ]]; then
+ echo "Usage: $0 <package-version> <librdkafka-branch-or-tag>"
+ exit 1
+fi
+
+export VERSION=$1
+LRK_BRANCH=$2
+
+apt-get update
+
+# Install debian packaging tools and librdkafka build dependencies
+apt-get install -y git-buildpackage debhelper \
+ zlib1g-dev libssl-dev libsasl2-dev liblz4-dev
+
+
+# Clone the librdkafka git repo to a new location to avoid messing
+# up the librdkafka working directory.
+
+
+BUILD_DIR=$(mktemp -d)
+
+pushd $BUILD_DIR
+
+git clone /v librdkafka
+
+pushd librdkafka
+
+export DEBEMAIL="librdkafka packaging <rdkafka@edenhill.se>"
+git config user.email "rdkafka@edenhill.se"
+git config user.name "librdkafka packaging"
+
+DEB_BRANCH=origin/confluent-debian
+TMP_BRANCH=tmp-debian
+git checkout -b $TMP_BRANCH $LRK_BRANCH
+git merge --no-edit $DEB_BRANCH
+
+dch --newversion ${VERSION/-/\~}-1 "Release version $VERSION" --urgency low && dch --release --distribution unstable ""
+
+git commit -a -m "Tag Debian release $VERSION."
+
+make archive
+mkdir -p ../tarballs || true
+mv librdkafka-${VERSION}.tar.gz ../tarballs/librdkafka_${VERSION}.orig.tar.gz
+
+gbp buildpackage -us -uc --git-debian-branch=$TMP_BRANCH \
+ --git-upstream-tree=$LRK_BRANCH \
+ --git-verbose \
+ --git-builder="debuild --set-envvar=VERSION=$VERSION --set-envvar=SKIP_TESTS=y -i -I"
+
+
+popd # librdkafka
+
+popd # $BUILD_DIR
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-debian.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-debian.sh
new file mode 100755
index 000000000..e62ee5f67
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-debian.sh
@@ -0,0 +1,65 @@
+#!/bin/bash
+#
+# Build librdkafka on a bare-bone Debian host, such as the
+# mcr.microsoft.com/dotnet/sdk Docker image.
+#
+# Statically linked
+# WITH openssl 1.0, zlib
+# WITHOUT libsasl2, lz4(ext, using builtin instead)
+#
+# Usage (from top-level librdkafka dir):
+# docker run -it -v $PWD:/v mcr.microsoft.com/dotnet/sdk /v/packaging/tools/build-debian.sh /v /v/librdkafka-debian9.tgz
+#
+
+
+set -ex
+
+LRK_DIR=$1
+shift
+OUT_TGZ=$1
+shift
+CONFIG_ARGS=$*
+
+if [[ ! -f $LRK_DIR/configure.self || -z $OUT_TGZ ]]; then
+ echo "Usage: $0 <librdkafka-root-direcotry> <output-tgz> [<configure-args..>]"
+ exit 1
+fi
+
+set -u
+
+apt-get update
+apt-get install -y gcc g++ zlib1g-dev python3 git-core make patch
+
+
+# Copy the librdkafka git archive to a new location to avoid messing
+# up the librdkafka working directory.
+
+BUILD_DIR=$(mktemp -d)
+
+pushd $BUILD_DIR
+
+DEST_DIR=$PWD/dest
+mkdir -p $DEST_DIR
+
+# Workaround for newer Git not allowing clone directory to be owned by
+# another user (which is a questionable limitation for the read-only archive
+# command..)
+git config --global --add safe.directory /v
+
+(cd $LRK_DIR ; git archive --format tar HEAD) | tar xf -
+
+./configure --install-deps --disable-gssapi --disable-lz4-ext --enable-static --prefix=$DEST_DIR $CONFIG_ARGS
+make -j
+examples/rdkafka_example -X builtin.features
+CI=true make -C tests run_local_quick
+make install
+
+# Tar up the output directory
+pushd $DEST_DIR
+ldd lib/*.so.1
+tar cvzf $OUT_TGZ .
+popd # $DEST_DIR
+
+popd # $BUILD_DIR
+
+rm -rf "$BUILD_DIR"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-manylinux.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-manylinux.sh
new file mode 100755
index 000000000..4aeaa9622
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-manylinux.sh
@@ -0,0 +1,68 @@
+#!/bin/bash
+#
+# Build on a manylinux (https://github.com/pypa/manylinux) docker container.
+#
+# This will provide a self-contained librdkafka shared library that works
+# on most glibc-based Linuxes.
+#
+# Statically linked
+# WITH openssl 1.1.1, zlib, lz4(bundled)
+# WITHOUT libsasl2
+#
+#
+# Run:
+# docker run -t -v "$PWD:/v quay.io/pypa/manylinux2010_x86_64 /v/packaging/tools/build-manylinux.sh /v /v/artifacts/librdkafka-manylinux2010_x86_64.tgz $config_args"
+
+set -ex
+
+LRK_DIR=$1
+shift
+OUT_TGZ=$1
+shift
+CONFIG_ARGS=$*
+
+if [[ ! -f $LRK_DIR/configure.self || -z $OUT_TGZ ]]; then
+ echo "Usage: $0 <librdkafka-root-direcotry> <output-tgz> [<configure-args..>]"
+ exit 1
+fi
+
+set -u
+
+yum install -y libstdc++-devel gcc gcc-c++ python34
+
+# Copy the librdkafka git archive to a new location to avoid messing
+# up the librdkafka working directory.
+
+BUILD_DIR=$(mktemp -d)
+
+pushd $BUILD_DIR
+
+DEST_DIR=$PWD/dest
+mkdir -p $DEST_DIR
+
+# Workaround for newer Git not allowing clone directory to be owned by
+# another user (which is a questionable limitation for the read-only archive
+# command..)
+git config --global --add safe.directory /v
+
+(cd $LRK_DIR ; git archive --format tar HEAD) | tar xf -
+
+./configure --install-deps --source-deps-only --disable-gssapi --disable-lz4-ext --enable-static --prefix=$DEST_DIR $CONFIG_ARGS
+
+make -j
+
+examples/rdkafka_example -X builtin.features
+
+CI=true make -C tests run_local_quick
+
+make install
+
+# Tar up the output directory
+pushd $DEST_DIR
+ldd lib/*.so.1
+tar cvzf $OUT_TGZ .
+popd # $DEST_DIR
+
+popd # $BUILD_DIR
+
+rm -rf "$BUILD_DIR"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-release-artifacts.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-release-artifacts.sh
new file mode 100755
index 000000000..ea09aaf96
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/build-release-artifacts.sh
@@ -0,0 +1,138 @@
+#!/bin/sh
+#
+# ^ NOTE: This needs to be sh, not bash, for alpine compatibility.
+#
+#
+# Build dynamic and statically linked librdkafka libraries useful for
+# release artifacts in high-level clients.
+#
+# Requires docker.
+# Supported docker images:
+# alpine:3.16
+# quay.io/pypa/manylinux2014_aarch64 (centos7)
+# quay.io/pypa/manylinux2014_x86_64 (centos7)
+# quay.io/pypa/manylinux2010_x86_64 (centos6)
+#
+# Usage:
+# packaging/tools/build-release-artifacts.sh [--disable-gssapi] <docker-image> <relative-output-tarball-path.tgz>
+#
+# The output path must be a relative path and inside the librdkafka directory
+# structure.
+#
+
+set -e
+
+docker_image=""
+extra_pkgs_rpm=""
+extra_pkgs_apk=""
+extra_config_args=""
+expected_features="gzip snappy ssl sasl regex lz4 sasl_plain sasl_scram plugins zstd sasl_oauthbearer http oidc"
+
+# Since cyrus-sasl is the only non-statically-linkable dependency,
+# we provide a --disable-gssapi option so that two different libraries
+# can be built: one with GSSAPI/Kerberos support, and one without, depending
+# on this option.
+if [ "$1" = "--disable-gssapi" ]; then
+ extra_config_args="${extra_config_args} --disable-gssapi"
+ disable_gssapi="$1"
+ shift
+else
+ extra_pkgs_rpm="${extra_pkgs_rpm} cyrus-sasl cyrus-sasl-devel"
+ extra_pkgs_apk="${extra_pkgs_apk} cyrus-sasl cyrus-sasl-dev"
+ expected_features="${expected_features} sasl_gssapi"
+ disable_gssapi=""
+fi
+
+# Check if we're running on the host or the (docker) build target.
+if [ "$1" = "--in-docker" -a $# -eq 2 ]; then
+ output="$2"
+elif [ $# -eq 2 ]; then
+ docker_image="$1"
+ output="$2"
+else
+ echo "Usage: $0 [--disable-gssapi] <manylinux-docker-image> <output-path.tgz>"
+ exit 1
+fi
+
+if [ -n "$docker_image" ]; then
+ # Running on the host, spin up the docker builder.
+ exec docker run -v "$PWD:/v" $docker_image /v/packaging/tools/build-release-artifacts.sh $disable_gssapi --in-docker "/v/$output"
+ # Only reached on exec error
+ exit $?
+fi
+
+
+########################################################################
+# Running in the docker instance, this is where we perform the build. #
+########################################################################
+
+
+# Packages required for building librdkafka (perl is for openssl).
+
+if grep -q alpine /etc/os-release 2>/dev/null ; then
+ # Alpine
+ apk add \
+ bash curl gcc g++ make musl-dev linux-headers bsd-compat-headers git \
+ python3 perl patch $extra_pkgs_apk
+
+else
+ # CentOS
+ yum install -y libstdc++-devel gcc gcc-c++ python3 git perl-IPC-Cmd $extra_pkgs_rpm
+fi
+
+
+# Clone the repo so other builds are unaffected of what we're doing
+# and we get a pristine build tree.
+git clone /v /librdkafka
+
+cd /librdkafka
+
+# Build librdkafka
+./configure \
+ --install-deps --source-deps-only --disable-lz4-ext \
+ --enable-static --enable-strip $extra_config_args
+
+make -j
+
+# Show library linkage (for troubleshooting) and checksums (for verification)
+for lib in src/librdkafka.so.1 src-cpp/librdkafka++.so.1; do
+ echo "$0: LINKAGE ${lib}:"
+ ldd src/librdkafka.so.1
+ echo "$0: SHA256 ${lib}:"
+ sha256sum "$lib"
+done
+
+# Verify that expected features are indeed built.
+features=$(examples/rdkafka_example -X builtin.features)
+echo "$0: FEATURES: $features"
+
+missing=""
+for f in $expected_features; do
+ if ! echo "$features" | grep -q "$f" ; then
+ echo "$0: BUILD IS MISSING FEATURE $f"
+ missing="${missing} $f"
+ fi
+done
+
+if [ -n "$missing" ]; then
+ exit 1
+fi
+
+
+# Run quick test suite, mark it as CI to avoid time/resource sensitive
+# tests to fail in case the worker is under-powered.
+CI=true make -C tests run_local_quick
+
+
+# Install librdkafka and then make a tar ball of the installed files.
+mkdir -p /destdir
+
+DESTDIR=/destdir make install
+
+cd /destdir
+tar cvzf "$output" .
+
+# Emit output hash so that build logs can be used to verify artifacts later.
+echo "$0: SHA256 $output:"
+sha256sum "$output"
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/distro-build.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/distro-build.sh
new file mode 100755
index 000000000..a4b5bfa61
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/distro-build.sh
@@ -0,0 +1,38 @@
+#!/bin/bash
+#
+# Build librdkafka for different distros to produce distro-specific artifacts.
+# Requires docker.
+#
+
+set -e
+
+distro=$1
+shift
+config_args=$*
+
+case $distro in
+ manylinux*)
+ # Any pypa/manylinux docker image build.
+ docker run -t -v "$PWD:/v" quay.io/pypa/$distro /v/packaging/tools/build-manylinux.sh /v /v/artifacts/librdkafka-${distro}.tgz $config_args
+ ;;
+ centos)
+ if [[ -n $config_args ]]; then
+ echo "Warning: configure arguments ignored for centos RPM build"
+ fi
+ packaging/rpm/mock-on-docker.sh
+ packaging/rpm/tests/test-on-docker.sh
+ ;;
+ debian)
+ docker run -it -v "$PWD:/v" mcr.microsoft.com/dotnet/sdk:3.1 /v/packaging/tools/build-debian.sh /v /v/artifacts/librdkafka-debian9.tgz $config_args
+ ;;
+ alpine)
+ packaging/alpine/build-alpine.sh $config_args
+ ;;
+ alpine-static)
+ packaging/alpine/build-alpine.sh --enable-static --source-deps-only $config_args
+ ;;
+ *)
+ echo "Usage: $0 <centos|debian|alpine|alpine-static>"
+ exit 1
+ ;;
+esac
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/gh-release-checksums.py b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/gh-release-checksums.py
new file mode 100755
index 000000000..e7259dc20
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/gh-release-checksums.py
@@ -0,0 +1,39 @@
+#!/usr/bin/env python3
+#
+# Calculate checksums for GitHub release artifacts/assets.
+#
+# Use the direct links rather than getting the tarball URLs from
+# the GitHub API since the latter uses the git-sha1 rather than the tag
+# in its zipped up content, causing checksum mismatches.
+#
+
+import sys
+import requests
+import hashlib
+
+
+if __name__ == '__main__':
+
+ if len(sys.argv) != 2:
+ print("Usage: {} <tag>".format(sys.argv[0]))
+ sys.exit(1)
+
+ tag = sys.argv[1]
+
+ print("## Checksums")
+ print("Release asset checksums:")
+
+ for ftype in ["zip", "tar.gz"]:
+ url = "https://github.com/edenhill/librdkafka/archive/{}.{}".format(
+ tag, ftype)
+
+ h = hashlib.sha256()
+
+ r = requests.get(url, stream=True)
+ while True:
+ buf = r.raw.read(100 * 1000)
+ if len(buf) == 0:
+ break
+ h.update(buf)
+
+ print(" * {}.{} SHA256 `{}`".format(tag, ftype, h.hexdigest()))
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/rdutcoverage.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/rdutcoverage.sh
new file mode 100755
index 000000000..e99c51bdc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/rdutcoverage.sh
@@ -0,0 +1,25 @@
+#!/bin/bash
+#
+# Verify that code coverage numbers are not reused in multiple places.
+#
+
+set -e
+
+echo "Checking for duplicate coverage numbers:"
+cnt=0
+for d in $(egrep -Rsoh 'RD_UT_COVERAGE\([[:digit:]]+\)' src \
+ | sort | uniq -c | \
+ egrep -v '^[[:space:]]*1 ' | awk '{print $2}'); do
+ grep -RsnF "$d" src
+ cnt=$(expr $cnt + 1)
+done
+
+echo ""
+
+if [[ $cnt -gt 0 ]]; then
+ echo "$cnt duplicates found: please use unique numbers"
+ exit 1
+else
+ echo "No duplicate(s) found"
+ exit 0
+fi
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/requirements.txt b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/requirements.txt
new file mode 100644
index 000000000..43603098a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/requirements.txt
@@ -0,0 +1,2 @@
+flake8
+autopep8
diff --git a/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/style-format.sh b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/style-format.sh
new file mode 100755
index 000000000..c59ecbe6a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/packaging/tools/style-format.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+#
+# Check or apply/fix the project coding style to all files passed as arguments.
+# Uses clang-format for C/C++ and flake8 for Python.
+#
+# Requires clang-format version 10 (apt install clang-format-10).
+#
+
+
+CLANG_FORMAT=${CLANG_FORMAT:-clang-format}
+
+set -e
+
+ret=0
+
+if [[ -z $1 ]]; then
+ echo "Usage: $0 [--fix] srcfile1.c srcfile2.h srcfile3.c ..."
+ echo ""
+ exit 0
+fi
+
+if [[ $1 == "--fix" ]]; then
+ fix=1
+ shift
+else
+ fix=0
+fi
+
+clang_format_version=$(${CLANG_FORMAT} --version | sed -Ee 's/.*version ([[:digit:]]+)\.[[:digit:]]+\.[[:digit:]]+.*/\1/')
+if [[ $clang_format_version != "10" ]] ; then
+ echo "$0: clang-format version 10, '$clang_format_version' detected"
+ exit 1
+fi
+
+# Get list of files from .formatignore to ignore formatting for.
+ignore_files=( $(grep '^[^#]..' .formatignore) )
+
+function ignore {
+ local file=$1
+
+ local f
+ for f in "${ignore_files[@]}" ; do
+ [[ $file == $f ]] && return 0
+ done
+
+ return 1
+}
+
+# Read the C++ style from src-cpp/.clang-format and store it
+# in a json-like string which is passed to --style.
+# (It would be great if clang-format could take a file path for the
+# format file..).
+cpp_style="{ $(grep -v '^...$' .clang-format-cpp | grep -v '^$' | tr '\n' ',' | sed -e 's/,$//') }"
+if [[ -z $cpp_style ]]; then
+ echo "$0: Unable to read .clang-format-cpp"
+ exit 1
+fi
+
+extra_info=""
+
+for f in $*; do
+
+ if ignore $f ; then
+ echo "$f is ignored by .formatignore" 1>&2
+ continue
+ fi
+
+ lang="c"
+ if [[ $f == *.cpp ]]; then
+ style="$cpp_style"
+ stylename="C++"
+ elif [[ $f == *.h && $(basename $f) == *cpp* ]]; then
+ style="$cpp_style"
+ stylename="C++ (header)"
+ elif [[ $f == *.py ]]; then
+ lang="py"
+ style="pep8"
+ stylename="pep8"
+ else
+ style="file" # Use .clang-format
+ stylename="C"
+ fi
+
+ check=0
+
+ if [[ $fix == 1 ]]; then
+ # Convert tabs to 8 spaces first.
+ if grep -ql $'\t' "$f"; then
+ sed -i -e 's/\t/ /g' "$f"
+ echo "$f: tabs converted to spaces"
+ fi
+
+ if [[ $lang == c ]]; then
+ # Run clang-format to reformat the file
+ ${CLANG_FORMAT} --style="$style" "$f" > _styletmp
+
+ else
+ # Run autopep8 to reformat the file.
+ python3 -m autopep8 -a "$f" > _styletmp
+ # autopep8 can't fix all errors, so we also perform a flake8 check.
+ check=1
+ fi
+
+ if ! cmp -s "$f" _styletmp; then
+ echo "$f: style fixed ($stylename)"
+ # Use cp to preserve target file mode/attrs.
+ cp _styletmp "$f"
+ rm _styletmp
+ fi
+ fi
+
+ if [[ $fix == 0 || $check == 1 ]]; then
+ # Check for tabs
+ if grep -q $'\t' "$f" ; then
+ echo "$f: contains tabs: convert to 8 spaces instead"
+ ret=1
+ fi
+
+ # Check style
+ if [[ $lang == c ]]; then
+ if ! ${CLANG_FORMAT} --style="$style" --Werror --dry-run "$f" ; then
+ echo "$f: had style errors ($stylename): see clang-format output above"
+ ret=1
+ fi
+ elif [[ $lang == py ]]; then
+ if ! python3 -m flake8 "$f"; then
+ echo "$f: had style errors ($stylename): see flake8 output above"
+ if [[ $fix == 1 ]]; then
+ # autopep8 couldn't fix all errors. Let the user know.
+ extra_info="Error: autopep8 could not fix all errors, fix the flake8 errors manually and run again."
+ fi
+ ret=1
+ fi
+ fi
+ fi
+
+done
+
+rm -f _styletmp
+
+if [[ $ret != 0 ]]; then
+ echo ""
+ echo "You can run the following command to automatically fix the style:"
+ echo " $ make style-fix"
+ [[ -n $extra_info ]] && echo "$extra_info"
+fi
+
+exit $ret
diff --git a/fluent-bit/lib/librdkafka-2.1.0/service.yml b/fluent-bit/lib/librdkafka-2.1.0/service.yml
new file mode 100644
index 000000000..b15226a30
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/service.yml
@@ -0,0 +1,18 @@
+name: librdkafka
+lang: unknown
+lang_version: unknown
+git:
+ enable: true
+github:
+ enable: true
+semaphore:
+ enable: true
+ pipeline_enable: false
+ triggers:
+ - tags
+ - branches
+ branches:
+ - master
+ - /semaphore.*/
+ - /dev_.*/
+ - /feature\/.*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/CMakeLists.txt
new file mode 100644
index 000000000..b0a6d51e4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/CMakeLists.txt
@@ -0,0 +1,90 @@
+set(LIBVER 1)
+
+set(
+ sources
+ ConfImpl.cpp
+ ConsumerImpl.cpp
+ HandleImpl.cpp
+ HeadersImpl.cpp
+ KafkaConsumerImpl.cpp
+ MessageImpl.cpp
+ MetadataImpl.cpp
+ ProducerImpl.cpp
+ QueueImpl.cpp
+ RdKafka.cpp
+ TopicImpl.cpp
+ TopicPartitionImpl.cpp
+)
+
+if(RDKAFKA_BUILD_STATIC)
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+ set(RDKAFKA_BUILD_MODE STATIC)
+else()
+ set(RDKAFKA_BUILD_MODE SHARED)
+endif()
+
+add_library(rdkafka++ ${RDKAFKA_BUILD_MODE} ${sources})
+if(NOT RDKAFKA_BUILD_STATIC)
+ set_property(TARGET rdkafka++ PROPERTY SOVERSION ${LIBVER})
+endif()
+
+target_link_libraries(rdkafka++ PUBLIC rdkafka)
+
+# Support '#include <rdkafcpp.h>'
+target_include_directories(rdkafka++ PUBLIC "$<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}>")
+if(NOT RDKAFKA_BUILD_STATIC)
+ target_compile_definitions(rdkafka++ PRIVATE LIBRDKAFKACPP_EXPORTS)
+endif()
+
+# Generate pkg-config file
+set(PKG_CONFIG_VERSION "${PROJECT_VERSION}")
+if(NOT RDKAFKA_BUILD_STATIC)
+ set(PKG_CONFIG_NAME "librdkafka++")
+ set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library")
+ set(PKG_CONFIG_REQUIRES "rdkafka")
+ set(PKG_CONFIG_CFLAGS "-I\${includedir}")
+ set(PKG_CONFIG_LIBS "-L\${libdir} -lrdkafka++")
+ set(PKG_CONFIG_LIBS_PRIVATE "-lrdkafka")
+ configure_file(
+ "../packaging/cmake/rdkafka.pc.in"
+ "${GENERATED_DIR}/rdkafka++.pc"
+ @ONLY
+ )
+ install(
+ FILES ${GENERATED_DIR}/rdkafka++.pc
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
+ )
+else()
+ set(PKG_CONFIG_NAME "librdkafka++-static")
+ set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library (static)")
+ set(PKG_CONFIG_REQUIRES "")
+ set(PKG_CONFIG_CFLAGS "-I\${includedir} -DLIBRDKAFKA_STATICLIB")
+ set(PKG_CONFIG_LIBS "-L\${libdir} \${libdir}/librdkafka++.a")
+ if(WIN32)
+ string(APPEND PKG_CONFIG_LIBS " -lws2_32 -lsecur32 -lcrypt32")
+ endif()
+
+ configure_file(
+ "../packaging/cmake/rdkafka.pc.in"
+ "${GENERATED_DIR}/rdkafka++-static.pc"
+ @ONLY
+ )
+ install(
+ FILES ${GENERATED_DIR}/rdkafka++-static.pc
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
+ )
+endif()
+
+install(
+ TARGETS rdkafka++
+ EXPORT "${targets_export_name}"
+ LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+ INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+)
+
+install(
+ FILES "rdkafkacpp.h"
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/librdkafka"
+)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConfImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConfImpl.cpp
new file mode 100644
index 000000000..53d7b30c5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConfImpl.cpp
@@ -0,0 +1,84 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+
+#include "rdkafkacpp_int.h"
+
+
+
+RdKafka::ConfImpl::ConfResult RdKafka::ConfImpl::set(const std::string &name,
+ const std::string &value,
+ std::string &errstr) {
+ rd_kafka_conf_res_t res;
+ char errbuf[512];
+
+ if (this->conf_type_ == CONF_GLOBAL)
+ res = rd_kafka_conf_set(this->rk_conf_, name.c_str(), value.c_str(), errbuf,
+ sizeof(errbuf));
+ else
+ res = rd_kafka_topic_conf_set(this->rkt_conf_, name.c_str(), value.c_str(),
+ errbuf, sizeof(errbuf));
+
+ if (res != RD_KAFKA_CONF_OK)
+ errstr = errbuf;
+
+ return static_cast<Conf::ConfResult>(res);
+}
+
+
+std::list<std::string> *RdKafka::ConfImpl::dump() {
+ const char **arrc;
+ size_t cnt;
+ std::list<std::string> *arr;
+
+ if (rk_conf_)
+ arrc = rd_kafka_conf_dump(rk_conf_, &cnt);
+ else
+ arrc = rd_kafka_topic_conf_dump(rkt_conf_, &cnt);
+
+ arr = new std::list<std::string>();
+ for (int i = 0; i < static_cast<int>(cnt); i++)
+ arr->push_back(std::string(arrc[i]));
+
+ rd_kafka_conf_dump_free(arrc, cnt);
+ return arr;
+}
+
+RdKafka::Conf *RdKafka::Conf::create(ConfType type) {
+ ConfImpl *conf = new ConfImpl(type);
+
+ if (type == CONF_GLOBAL)
+ conf->rk_conf_ = rd_kafka_conf_new();
+ else
+ conf->rkt_conf_ = rd_kafka_topic_conf_new();
+
+ return conf;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConsumerImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConsumerImpl.cpp
new file mode 100644
index 000000000..b7f5e3b22
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ConsumerImpl.cpp
@@ -0,0 +1,244 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::Consumer::~Consumer() {
+}
+
+RdKafka::Consumer *RdKafka::Consumer::create(const RdKafka::Conf *conf,
+ std::string &errstr) {
+ char errbuf[512];
+ const RdKafka::ConfImpl *confimpl =
+ dynamic_cast<const RdKafka::ConfImpl *>(conf);
+ RdKafka::ConsumerImpl *rkc = new RdKafka::ConsumerImpl();
+ rd_kafka_conf_t *rk_conf = NULL;
+
+ if (confimpl) {
+ if (!confimpl->rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ delete rkc;
+ return NULL;
+ }
+
+ rkc->set_common_config(confimpl);
+
+ rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
+ }
+
+ rd_kafka_t *rk;
+ if (!(rk =
+ rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) {
+ errstr = errbuf;
+ // rd_kafka_new() takes ownership only if succeeds
+ if (rk_conf)
+ rd_kafka_conf_destroy(rk_conf);
+ delete rkc;
+ return NULL;
+ }
+
+ rkc->rk_ = rk;
+
+
+ return rkc;
+}
+
+int64_t RdKafka::Consumer::OffsetTail(int64_t offset) {
+ return RD_KAFKA_OFFSET_TAIL(offset);
+}
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic,
+ int32_t partition,
+ int64_t offset) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+ if (rd_kafka_consume_start(topicimpl->rkt_, partition, offset) == -1)
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::start(Topic *topic,
+ int32_t partition,
+ int64_t offset,
+ Queue *queue) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+ RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
+
+ if (rd_kafka_consume_start_queue(topicimpl->rkt_, partition, offset,
+ queueimpl->queue_) == -1)
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::stop(Topic *topic,
+ int32_t partition) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+ if (rd_kafka_consume_stop(topicimpl->rkt_, partition) == -1)
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+RdKafka::ErrorCode RdKafka::ConsumerImpl::seek(Topic *topic,
+ int32_t partition,
+ int64_t offset,
+ int timeout_ms) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+ if (rd_kafka_seek(topicimpl->rkt_, partition, offset, timeout_ms) == -1)
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+RdKafka::Message *RdKafka::ConsumerImpl::consume(Topic *topic,
+ int32_t partition,
+ int timeout_ms) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consume(topicimpl->rkt_, partition, timeout_ms);
+ if (!rkmessage)
+ return new RdKafka::MessageImpl(
+ RD_KAFKA_CONSUMER, topic,
+ static_cast<RdKafka::ErrorCode>(rd_kafka_last_error()));
+
+ return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic, rkmessage);
+}
+
+namespace {
+/* Helper struct for `consume_callback'.
+ * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
+ * and keep track of the C++ callback function and `opaque' value.
+ */
+struct ConsumerImplCallback {
+ ConsumerImplCallback(RdKafka::Topic *topic,
+ RdKafka::ConsumeCb *cb,
+ void *data) :
+ topic(topic), cb_cls(cb), cb_data(data) {
+ }
+ /* This function is the one we give to `rd_kafka_consume_callback', with
+ * the `opaque' pointer pointing to an instance of this struct, in which
+ * we can find the C++ callback and `cb_data'.
+ */
+ static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
+ ConsumerImplCallback *instance =
+ static_cast<ConsumerImplCallback *>(opaque);
+ RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, instance->topic, msg,
+ false /*don't free*/);
+ instance->cb_cls->consume_cb(message, instance->cb_data);
+ }
+ RdKafka::Topic *topic;
+ RdKafka::ConsumeCb *cb_cls;
+ void *cb_data;
+};
+} // namespace
+
+int RdKafka::ConsumerImpl::consume_callback(RdKafka::Topic *topic,
+ int32_t partition,
+ int timeout_ms,
+ RdKafka::ConsumeCb *consume_cb,
+ void *opaque) {
+ RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(topic);
+ ConsumerImplCallback context(topic, consume_cb, opaque);
+ return rd_kafka_consume_callback(topicimpl->rkt_, partition, timeout_ms,
+ &ConsumerImplCallback::consume_cb_trampoline,
+ &context);
+}
+
+
+RdKafka::Message *RdKafka::ConsumerImpl::consume(Queue *queue, int timeout_ms) {
+ RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consume_queue(queueimpl->queue_, timeout_ms);
+ if (!rkmessage)
+ return new RdKafka::MessageImpl(
+ RD_KAFKA_CONSUMER, NULL,
+ static_cast<RdKafka::ErrorCode>(rd_kafka_last_error()));
+ /*
+ * Recover our Topic * from the topic conf's opaque field, which we
+ * set in RdKafka::Topic::create() for just this kind of situation.
+ */
+ void *opaque = rd_kafka_topic_opaque(rkmessage->rkt);
+ Topic *topic = static_cast<Topic *>(opaque);
+
+ return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, topic, rkmessage);
+}
+
+namespace {
+/* Helper struct for `consume_callback' with a Queue.
+ * Encapsulates the values we need in order to call `rd_kafka_consume_callback'
+ * and keep track of the C++ callback function and `opaque' value.
+ */
+struct ConsumerImplQueueCallback {
+ ConsumerImplQueueCallback(RdKafka::ConsumeCb *cb, void *data) :
+ cb_cls(cb), cb_data(data) {
+ }
+ /* This function is the one we give to `rd_kafka_consume_callback', with
+ * the `opaque' pointer pointing to an instance of this struct, in which
+ * we can find the C++ callback and `cb_data'.
+ */
+ static void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
+ ConsumerImplQueueCallback *instance =
+ static_cast<ConsumerImplQueueCallback *>(opaque);
+ /*
+ * Recover our Topic * from the topic conf's opaque field, which we
+ * set in RdKafka::Topic::create() for just this kind of situation.
+ */
+ void *topic_opaque = rd_kafka_topic_opaque(msg->rkt);
+ RdKafka::Topic *topic = static_cast<RdKafka::Topic *>(topic_opaque);
+ RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg,
+ false /*don't free*/);
+ instance->cb_cls->consume_cb(message, instance->cb_data);
+ }
+ RdKafka::ConsumeCb *cb_cls;
+ void *cb_data;
+};
+} // namespace
+
+int RdKafka::ConsumerImpl::consume_callback(Queue *queue,
+ int timeout_ms,
+ RdKafka::ConsumeCb *consume_cb,
+ void *opaque) {
+ RdKafka::QueueImpl *queueimpl = dynamic_cast<RdKafka::QueueImpl *>(queue);
+ ConsumerImplQueueCallback context(consume_cb, opaque);
+ return rd_kafka_consume_callback_queue(
+ queueimpl->queue_, timeout_ms,
+ &ConsumerImplQueueCallback::consume_cb_trampoline, &context);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HandleImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HandleImpl.cpp
new file mode 100644
index 000000000..7aa2f2939
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HandleImpl.cpp
@@ -0,0 +1,425 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+
+#include "rdkafkacpp_int.h"
+
+void RdKafka::consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+ RdKafka::Topic *topic = static_cast<Topic *>(rd_kafka_topic_opaque(msg->rkt));
+
+ RdKafka::MessageImpl message(RD_KAFKA_CONSUMER, topic, msg,
+ false /*don't free*/);
+
+ handle->consume_cb_->consume_cb(message, opaque);
+}
+
+void RdKafka::log_cb_trampoline(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf) {
+ if (!rk) {
+ rd_kafka_log_print(rk, level, fac, buf);
+ return;
+ }
+
+ void *opaque = rd_kafka_opaque(rk);
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+ if (!handle->event_cb_) {
+ rd_kafka_log_print(rk, level, fac, buf);
+ return;
+ }
+
+ RdKafka::EventImpl event(RdKafka::Event::EVENT_LOG, RdKafka::ERR_NO_ERROR,
+ static_cast<RdKafka::Event::Severity>(level), fac,
+ buf);
+
+ handle->event_cb_->event_cb(event);
+}
+
+
+void RdKafka::error_cb_trampoline(rd_kafka_t *rk,
+ int err,
+ const char *reason,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+ char errstr[512];
+ bool is_fatal = false;
+
+ if (err == RD_KAFKA_RESP_ERR__FATAL) {
+ /* Translate to underlying fatal error code and string */
+ err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ if (err)
+ reason = errstr;
+ is_fatal = true;
+ }
+ RdKafka::EventImpl event(RdKafka::Event::EVENT_ERROR,
+ static_cast<RdKafka::ErrorCode>(err),
+ RdKafka::Event::EVENT_SEVERITY_ERROR, NULL, reason);
+ event.fatal_ = is_fatal;
+ handle->event_cb_->event_cb(event);
+}
+
+
+void RdKafka::throttle_cb_trampoline(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int throttle_time_ms,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+ RdKafka::EventImpl event(RdKafka::Event::EVENT_THROTTLE);
+ event.str_ = broker_name;
+ event.id_ = broker_id;
+ event.throttle_time_ = throttle_time_ms;
+
+ handle->event_cb_->event_cb(event);
+}
+
+
+int RdKafka::stats_cb_trampoline(rd_kafka_t *rk,
+ char *json,
+ size_t json_len,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+ RdKafka::EventImpl event(RdKafka::Event::EVENT_STATS, RdKafka::ERR_NO_ERROR,
+ RdKafka::Event::EVENT_SEVERITY_INFO, NULL, json);
+
+ handle->event_cb_->event_cb(event);
+
+ return 0;
+}
+
+
+int RdKafka::socket_cb_trampoline(int domain,
+ int type,
+ int protocol,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+ return handle->socket_cb_->socket_cb(domain, type, protocol);
+}
+
+int RdKafka::open_cb_trampoline(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+ return handle->open_cb_->open_cb(pathname, flags, static_cast<int>(mode));
+}
+
+void RdKafka::oauthbearer_token_refresh_cb_trampoline(
+ rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+
+ handle->oauthbearer_token_refresh_cb_->oauthbearer_token_refresh_cb(
+ handle, std::string(oauthbearer_config ? oauthbearer_config : ""));
+}
+
+
+int RdKafka::ssl_cert_verify_cb_trampoline(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+ std::string errbuf;
+
+ bool res = 0 != handle->ssl_cert_verify_cb_->ssl_cert_verify_cb(
+ std::string(broker_name), broker_id, x509_error, depth,
+ buf, size, errbuf);
+
+ if (res)
+ return (int)res;
+
+ size_t errlen =
+ errbuf.size() > errstr_size - 1 ? errstr_size - 1 : errbuf.size();
+
+ memcpy(errstr, errbuf.c_str(), errlen);
+ if (errstr_size > 0)
+ errstr[errlen] = '\0';
+
+ return (int)res;
+}
+
+
+RdKafka::ErrorCode RdKafka::HandleImpl::metadata(bool all_topics,
+ const Topic *only_rkt,
+ Metadata **metadatap,
+ int timeout_ms) {
+ const rd_kafka_metadata_t *cmetadatap = NULL;
+
+ rd_kafka_topic_t *topic =
+ only_rkt ? static_cast<const TopicImpl *>(only_rkt)->rkt_ : NULL;
+
+ const rd_kafka_resp_err_t rc =
+ rd_kafka_metadata(rk_, all_topics, topic, &cmetadatap, timeout_ms);
+
+ *metadatap = (rc == RD_KAFKA_RESP_ERR_NO_ERROR)
+ ? new RdKafka::MetadataImpl(cmetadatap)
+ : NULL;
+
+ return static_cast<RdKafka::ErrorCode>(rc);
+}
+
+/**
+ * Convert a list of C partitions to C++ partitions
+ */
+static void c_parts_to_partitions(
+ const rd_kafka_topic_partition_list_t *c_parts,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ partitions.resize(c_parts->cnt);
+ for (int i = 0; i < c_parts->cnt; i++)
+ partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);
+}
+
+static void free_partition_vector(std::vector<RdKafka::TopicPartition *> &v) {
+ for (unsigned int i = 0; i < v.size(); i++)
+ delete v[i];
+ v.clear();
+}
+
+void RdKafka::rebalance_cb_trampoline(
+ rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *c_partitions,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+ std::vector<RdKafka::TopicPartition *> partitions;
+
+ c_parts_to_partitions(c_partitions, partitions);
+
+ handle->rebalance_cb_->rebalance_cb(
+ dynamic_cast<RdKafka::KafkaConsumer *>(handle),
+ static_cast<RdKafka::ErrorCode>(err), partitions);
+
+ free_partition_vector(partitions);
+}
+
+
+void RdKafka::offset_commit_cb_trampoline0(
+ rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *c_offsets,
+ void *opaque) {
+ OffsetCommitCb *cb = static_cast<RdKafka::OffsetCommitCb *>(opaque);
+ std::vector<RdKafka::TopicPartition *> offsets;
+
+ if (c_offsets)
+ c_parts_to_partitions(c_offsets, offsets);
+
+ cb->offset_commit_cb(static_cast<RdKafka::ErrorCode>(err), offsets);
+
+ free_partition_vector(offsets);
+}
+
+static void offset_commit_cb_trampoline(
+ rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *c_offsets,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+ RdKafka::offset_commit_cb_trampoline0(rk, err, c_offsets,
+ handle->offset_commit_cb_);
+}
+
+
+void RdKafka::HandleImpl::set_common_config(const RdKafka::ConfImpl *confimpl) {
+ rd_kafka_conf_set_opaque(confimpl->rk_conf_, this);
+
+ if (confimpl->event_cb_) {
+ rd_kafka_conf_set_log_cb(confimpl->rk_conf_, RdKafka::log_cb_trampoline);
+ rd_kafka_conf_set_error_cb(confimpl->rk_conf_,
+ RdKafka::error_cb_trampoline);
+ rd_kafka_conf_set_throttle_cb(confimpl->rk_conf_,
+ RdKafka::throttle_cb_trampoline);
+ rd_kafka_conf_set_stats_cb(confimpl->rk_conf_,
+ RdKafka::stats_cb_trampoline);
+ event_cb_ = confimpl->event_cb_;
+ }
+
+ if (confimpl->oauthbearer_token_refresh_cb_) {
+ rd_kafka_conf_set_oauthbearer_token_refresh_cb(
+ confimpl->rk_conf_, RdKafka::oauthbearer_token_refresh_cb_trampoline);
+ oauthbearer_token_refresh_cb_ = confimpl->oauthbearer_token_refresh_cb_;
+ }
+
+ if (confimpl->socket_cb_) {
+ rd_kafka_conf_set_socket_cb(confimpl->rk_conf_,
+ RdKafka::socket_cb_trampoline);
+ socket_cb_ = confimpl->socket_cb_;
+ }
+
+ if (confimpl->ssl_cert_verify_cb_) {
+ rd_kafka_conf_set_ssl_cert_verify_cb(
+ confimpl->rk_conf_, RdKafka::ssl_cert_verify_cb_trampoline);
+ ssl_cert_verify_cb_ = confimpl->ssl_cert_verify_cb_;
+ }
+
+ if (confimpl->open_cb_) {
+#ifndef _WIN32
+ rd_kafka_conf_set_open_cb(confimpl->rk_conf_, RdKafka::open_cb_trampoline);
+ open_cb_ = confimpl->open_cb_;
+#endif
+ }
+
+ if (confimpl->rebalance_cb_) {
+ rd_kafka_conf_set_rebalance_cb(confimpl->rk_conf_,
+ RdKafka::rebalance_cb_trampoline);
+ rebalance_cb_ = confimpl->rebalance_cb_;
+ }
+
+ if (confimpl->offset_commit_cb_) {
+ rd_kafka_conf_set_offset_commit_cb(confimpl->rk_conf_,
+ offset_commit_cb_trampoline);
+ offset_commit_cb_ = confimpl->offset_commit_cb_;
+ }
+
+ if (confimpl->consume_cb_) {
+ rd_kafka_conf_set_consume_cb(confimpl->rk_conf_,
+ RdKafka::consume_cb_trampoline);
+ consume_cb_ = confimpl->consume_cb_;
+ }
+}
+
+
+RdKafka::ErrorCode RdKafka::HandleImpl::pause(
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_resp_err_t err;
+
+ c_parts = partitions_to_c_parts(partitions);
+
+ err = rd_kafka_pause_partitions(rk_, c_parts);
+
+ if (!err)
+ update_partitions_from_c_parts(partitions, c_parts);
+
+ rd_kafka_topic_partition_list_destroy(c_parts);
+
+ return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode RdKafka::HandleImpl::resume(
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_resp_err_t err;
+
+ c_parts = partitions_to_c_parts(partitions);
+
+ err = rd_kafka_resume_partitions(rk_, c_parts);
+
+ if (!err)
+ update_partitions_from_c_parts(partitions, c_parts);
+
+ rd_kafka_topic_partition_list_destroy(c_parts);
+
+ return static_cast<RdKafka::ErrorCode>(err);
+}
+
+RdKafka::Queue *RdKafka::HandleImpl::get_partition_queue(
+ const TopicPartition *part) {
+ rd_kafka_queue_t *rkqu;
+ rkqu = rd_kafka_queue_get_partition(rk_, part->topic().c_str(),
+ part->partition());
+
+ if (rkqu == NULL)
+ return NULL;
+
+ return new QueueImpl(rkqu);
+}
+
+RdKafka::ErrorCode RdKafka::HandleImpl::set_log_queue(RdKafka::Queue *queue) {
+ rd_kafka_queue_t *rkqu = NULL;
+ if (queue) {
+ QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
+ rkqu = queueimpl->queue_;
+ }
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_set_log_queue(rk_, rkqu));
+}
+
+namespace RdKafka {
+
+rd_kafka_topic_partition_list_t *partitions_to_c_parts(
+ const std::vector<RdKafka::TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+
+ c_parts = rd_kafka_topic_partition_list_new((int)partitions.size());
+
+ for (unsigned int i = 0; i < partitions.size(); i++) {
+ const RdKafka::TopicPartitionImpl *tpi =
+ dynamic_cast<const RdKafka::TopicPartitionImpl *>(partitions[i]);
+ rd_kafka_topic_partition_t *rktpar = rd_kafka_topic_partition_list_add(
+ c_parts, tpi->topic_.c_str(), tpi->partition_);
+ rktpar->offset = tpi->offset_;
+ if (tpi->leader_epoch_ != -1)
+ rd_kafka_topic_partition_set_leader_epoch(rktpar, tpi->leader_epoch_);
+ }
+
+ return c_parts;
+}
+
+
+/**
+ * @brief Update the application provided 'partitions' with info from 'c_parts'
+ */
+void update_partitions_from_c_parts(
+ std::vector<RdKafka::TopicPartition *> &partitions,
+ const rd_kafka_topic_partition_list_t *c_parts) {
+ for (int i = 0; i < c_parts->cnt; i++) {
+ rd_kafka_topic_partition_t *p = &c_parts->elems[i];
+
+ /* Find corresponding C++ entry */
+ for (unsigned int j = 0; j < partitions.size(); j++) {
+ RdKafka::TopicPartitionImpl *pp =
+ dynamic_cast<RdKafka::TopicPartitionImpl *>(partitions[j]);
+ if (!strcmp(p->topic, pp->topic_.c_str()) &&
+ p->partition == pp->partition_) {
+ pp->offset_ = p->offset;
+ pp->err_ = static_cast<RdKafka::ErrorCode>(p->err);
+ pp->leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(p);
+ }
+ }
+ }
+}
+
+} // namespace RdKafka
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HeadersImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HeadersImpl.cpp
new file mode 100644
index 000000000..b567ef36c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/HeadersImpl.cpp
@@ -0,0 +1,48 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::Headers *RdKafka::Headers::create() {
+ return new RdKafka::HeadersImpl();
+}
+
+RdKafka::Headers *RdKafka::Headers::create(const std::vector<Header> &headers) {
+ if (headers.size() > 0)
+ return new RdKafka::HeadersImpl(headers);
+ else
+ return new RdKafka::HeadersImpl();
+}
+
+RdKafka::Headers::~Headers() {
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/KafkaConsumerImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/KafkaConsumerImpl.cpp
new file mode 100644
index 000000000..6f3b81c72
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/KafkaConsumerImpl.cpp
@@ -0,0 +1,296 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+#include <vector>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::KafkaConsumer::~KafkaConsumer() {
+}
+
+RdKafka::KafkaConsumer *RdKafka::KafkaConsumer::create(
+ const RdKafka::Conf *conf,
+ std::string &errstr) {
+ char errbuf[512];
+ const RdKafka::ConfImpl *confimpl =
+ dynamic_cast<const RdKafka::ConfImpl *>(conf);
+ RdKafka::KafkaConsumerImpl *rkc = new RdKafka::KafkaConsumerImpl();
+ rd_kafka_conf_t *rk_conf = NULL;
+ size_t grlen;
+
+ if (!confimpl || !confimpl->rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ delete rkc;
+ return NULL;
+ }
+
+ if (rd_kafka_conf_get(confimpl->rk_conf_, "group.id", NULL, &grlen) !=
+ RD_KAFKA_CONF_OK ||
+ grlen <= 1 /* terminating null only */) {
+ errstr = "\"group.id\" must be configured";
+ delete rkc;
+ return NULL;
+ }
+
+ rkc->set_common_config(confimpl);
+
+ rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
+
+ rd_kafka_t *rk;
+ if (!(rk =
+ rd_kafka_new(RD_KAFKA_CONSUMER, rk_conf, errbuf, sizeof(errbuf)))) {
+ errstr = errbuf;
+ // rd_kafka_new() takes ownership only if succeeds
+ rd_kafka_conf_destroy(rk_conf);
+ delete rkc;
+ return NULL;
+ }
+
+ rkc->rk_ = rk;
+
+ /* Redirect handle queue to cgrp's queue to provide a single queue point */
+ rd_kafka_poll_set_consumer(rk);
+
+ return rkc;
+}
+
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscribe(
+ const std::vector<std::string> &topics) {
+ rd_kafka_topic_partition_list_t *c_topics;
+ rd_kafka_resp_err_t err;
+
+ c_topics = rd_kafka_topic_partition_list_new((int)topics.size());
+
+ for (unsigned int i = 0; i < topics.size(); i++)
+ rd_kafka_topic_partition_list_add(c_topics, topics[i].c_str(),
+ RD_KAFKA_PARTITION_UA);
+
+ err = rd_kafka_subscribe(rk_, c_topics);
+
+ rd_kafka_topic_partition_list_destroy(c_topics);
+
+ return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unsubscribe() {
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_unsubscribe(this->rk_));
+}
+
+RdKafka::Message *RdKafka::KafkaConsumerImpl::consume(int timeout_ms) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consumer_poll(this->rk_, timeout_ms);
+
+ if (!rkmessage)
+ return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, NULL,
+ RdKafka::ERR__TIMED_OUT);
+
+ return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage);
+}
+
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assignment(
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_resp_err_t err;
+
+ if ((err = rd_kafka_assignment(rk_, &c_parts)))
+ return static_cast<RdKafka::ErrorCode>(err);
+
+ partitions.resize(c_parts->cnt);
+
+ for (int i = 0; i < c_parts->cnt; i++)
+ partitions[i] = new RdKafka::TopicPartitionImpl(&c_parts->elems[i]);
+
+ rd_kafka_topic_partition_list_destroy(c_parts);
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+
+
+bool RdKafka::KafkaConsumerImpl::assignment_lost() {
+ return rd_kafka_assignment_lost(rk_) ? true : false;
+}
+
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::subscription(
+ std::vector<std::string> &topics) {
+ rd_kafka_topic_partition_list_t *c_topics;
+ rd_kafka_resp_err_t err;
+
+ if ((err = rd_kafka_subscription(rk_, &c_topics)))
+ return static_cast<RdKafka::ErrorCode>(err);
+
+ topics.resize(c_topics->cnt);
+ for (int i = 0; i < c_topics->cnt; i++)
+ topics[i] = std::string(c_topics->elems[i].topic);
+
+ rd_kafka_topic_partition_list_destroy(c_topics);
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::assign(
+ const std::vector<TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_resp_err_t err;
+
+ c_parts = partitions_to_c_parts(partitions);
+
+ err = rd_kafka_assign(rk_, c_parts);
+
+ rd_kafka_topic_partition_list_destroy(c_parts);
+ return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::unassign() {
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_assign(rk_, NULL));
+}
+
+
+RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_assign(
+ const std::vector<TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_error_t *c_error;
+
+ c_parts = partitions_to_c_parts(partitions);
+ c_error = rd_kafka_incremental_assign(rk_, c_parts);
+ rd_kafka_topic_partition_list_destroy(c_parts);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+
+ return NULL;
+}
+
+
+RdKafka::Error *RdKafka::KafkaConsumerImpl::incremental_unassign(
+ const std::vector<TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_error_t *c_error;
+
+ c_parts = partitions_to_c_parts(partitions);
+ c_error = rd_kafka_incremental_unassign(rk_, c_parts);
+ rd_kafka_topic_partition_list_destroy(c_parts);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+
+ return NULL;
+}
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::committed(
+ std::vector<RdKafka::TopicPartition *> &partitions,
+ int timeout_ms) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_resp_err_t err;
+
+ c_parts = partitions_to_c_parts(partitions);
+
+ err = rd_kafka_committed(rk_, c_parts, timeout_ms);
+
+ if (!err) {
+ update_partitions_from_c_parts(partitions, c_parts);
+ }
+
+ rd_kafka_topic_partition_list_destroy(c_parts);
+
+ return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::position(
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ rd_kafka_topic_partition_list_t *c_parts;
+ rd_kafka_resp_err_t err;
+
+ c_parts = partitions_to_c_parts(partitions);
+
+ err = rd_kafka_position(rk_, c_parts);
+
+ if (!err) {
+ update_partitions_from_c_parts(partitions, c_parts);
+ }
+
+ rd_kafka_topic_partition_list_destroy(c_parts);
+
+ return static_cast<RdKafka::ErrorCode>(err);
+}
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::seek(
+ const RdKafka::TopicPartition &partition,
+ int timeout_ms) {
+ const RdKafka::TopicPartitionImpl *p =
+ dynamic_cast<const RdKafka::TopicPartitionImpl *>(&partition);
+ rd_kafka_topic_t *rkt;
+
+ if (!(rkt = rd_kafka_topic_new(rk_, p->topic_.c_str(), NULL)))
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ /* FIXME: Use a C API that takes a topic_partition_list_t instead */
+ RdKafka::ErrorCode err = static_cast<RdKafka::ErrorCode>(
+ rd_kafka_seek(rkt, p->partition_, p->offset_, timeout_ms));
+
+ rd_kafka_topic_destroy(rkt);
+
+ return err;
+}
+
+
+
+RdKafka::ErrorCode RdKafka::KafkaConsumerImpl::close() {
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_consumer_close(rk_));
+}
+
+
+RdKafka::Error *RdKafka::KafkaConsumerImpl::close(Queue *queue) {
+ QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
+ rd_kafka_error_t *c_error;
+
+ c_error = rd_kafka_consumer_close_queue(rk_, queueimpl->queue_);
+ if (c_error)
+ return new ErrorImpl(c_error);
+
+ return NULL;
+}
+
+
+RdKafka::ConsumerGroupMetadata::~ConsumerGroupMetadata() {
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/Makefile b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/Makefile
new file mode 100644
index 000000000..78ecb31f2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/Makefile
@@ -0,0 +1,55 @@
+PKGNAME= librdkafka
+LIBNAME= librdkafka++
+LIBVER= 1
+
+CXXSRCS= RdKafka.cpp ConfImpl.cpp HandleImpl.cpp \
+ ConsumerImpl.cpp ProducerImpl.cpp KafkaConsumerImpl.cpp \
+ TopicImpl.cpp TopicPartitionImpl.cpp MessageImpl.cpp \
+ HeadersImpl.cpp QueueImpl.cpp MetadataImpl.cpp
+
+HDRS= rdkafkacpp.h
+
+OBJS= $(CXXSRCS:%.cpp=%.o)
+
+
+
+all: lib check
+
+# No linker script/symbol hiding for C++ library
+DISABLE_LDS=y
+
+MKL_NO_SELFCONTAINED_STATIC_LIB=y
+include ../mklove/Makefile.base
+
+# Use C++ compiler as linker rather than the default C compiler
+CC_LD=$(CXX)
+
+# OSX and Cygwin requires linking required libraries
+ifeq ($(_UNAME_S),Darwin)
+ FWD_LINKING_REQ=y
+endif
+ifeq ($(_UNAME_S),AIX)
+ FWD_LINKING_REQ=y
+endif
+ifeq ($(shell uname -o 2>/dev/null),Cygwin)
+ FWD_LINKING_REQ=y
+endif
+
+# Ignore previously defined library dependencies for the C library,
+# we'll get those dependencies through the C library linkage.
+LIBS := -L../src -lrdkafka
+MKL_PKGCONFIG_REQUIRES_PRIVATE := rdkafka
+MKL_PKGCONFIG_REQUIRES := rdkafka
+
+CHECK_FILES+= $(LIBFILENAME) $(LIBNAME).a
+
+
+file-check: lib
+check: file-check
+
+install: lib-install
+uninstall: lib-uninstall
+
+clean: lib-clean
+
+-include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MessageImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MessageImpl.cpp
new file mode 100644
index 000000000..c6d83150f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MessageImpl.cpp
@@ -0,0 +1,38 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+
+RdKafka::Message::~Message() {
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MetadataImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MetadataImpl.cpp
new file mode 100644
index 000000000..62cbf9042
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/MetadataImpl.cpp
@@ -0,0 +1,170 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafkacpp_int.h"
+
+using namespace RdKafka;
+
+BrokerMetadata::~BrokerMetadata() {
+}
+PartitionMetadata::~PartitionMetadata() {
+}
+TopicMetadata::~TopicMetadata() {
+}
+Metadata::~Metadata() {
+}
+
+
+/**
+ * Metadata: Broker information handler implementation
+ */
+class BrokerMetadataImpl : public BrokerMetadata {
+ public:
+ BrokerMetadataImpl(const rd_kafka_metadata_broker_t *broker_metadata) :
+ broker_metadata_(broker_metadata), host_(broker_metadata->host) {
+ }
+
+ int32_t id() const {
+ return broker_metadata_->id;
+ }
+
+ std::string host() const {
+ return host_;
+ }
+ int port() const {
+ return broker_metadata_->port;
+ }
+
+ virtual ~BrokerMetadataImpl() {
+ }
+
+ private:
+ const rd_kafka_metadata_broker_t *broker_metadata_;
+ const std::string host_;
+};
+
+/**
+ * Metadata: Partition information handler
+ */
+class PartitionMetadataImpl : public PartitionMetadata {
+ public:
+ // @TODO too much memory copy? maybe we should create a new vector class that
+ // read directly from C arrays?
+ // @TODO use auto_ptr?
+ PartitionMetadataImpl(
+ const rd_kafka_metadata_partition_t *partition_metadata) :
+ partition_metadata_(partition_metadata) {
+ replicas_.reserve(partition_metadata->replica_cnt);
+ for (int i = 0; i < partition_metadata->replica_cnt; ++i)
+ replicas_.push_back(partition_metadata->replicas[i]);
+
+ isrs_.reserve(partition_metadata->isr_cnt);
+ for (int i = 0; i < partition_metadata->isr_cnt; ++i)
+ isrs_.push_back(partition_metadata->isrs[i]);
+ }
+
+ int32_t id() const {
+ return partition_metadata_->id;
+ }
+ int32_t leader() const {
+ return partition_metadata_->leader;
+ }
+ ErrorCode err() const {
+ return static_cast<ErrorCode>(partition_metadata_->err);
+ }
+
+ const std::vector<int32_t> *replicas() const {
+ return &replicas_;
+ }
+ const std::vector<int32_t> *isrs() const {
+ return &isrs_;
+ }
+
+ ~PartitionMetadataImpl() {
+ }
+
+ private:
+ const rd_kafka_metadata_partition_t *partition_metadata_;
+ std::vector<int32_t> replicas_, isrs_;
+};
+
+/**
+ * Metadata: Topic information handler
+ */
+class TopicMetadataImpl : public TopicMetadata {
+ public:
+ TopicMetadataImpl(const rd_kafka_metadata_topic_t *topic_metadata) :
+ topic_metadata_(topic_metadata), topic_(topic_metadata->topic) {
+ partitions_.reserve(topic_metadata->partition_cnt);
+ for (int i = 0; i < topic_metadata->partition_cnt; ++i)
+ partitions_.push_back(
+ new PartitionMetadataImpl(&topic_metadata->partitions[i]));
+ }
+
+ ~TopicMetadataImpl() {
+ for (size_t i = 0; i < partitions_.size(); ++i)
+ delete partitions_[i];
+ }
+
+ std::string topic() const {
+ return topic_;
+ }
+ const std::vector<const PartitionMetadata *> *partitions() const {
+ return &partitions_;
+ }
+ ErrorCode err() const {
+ return static_cast<ErrorCode>(topic_metadata_->err);
+ }
+
+ private:
+ const rd_kafka_metadata_topic_t *topic_metadata_;
+ const std::string topic_;
+ std::vector<const PartitionMetadata *> partitions_;
+};
+
+MetadataImpl::MetadataImpl(const rd_kafka_metadata_t *metadata) :
+ metadata_(metadata) {
+ brokers_.reserve(metadata->broker_cnt);
+ for (int i = 0; i < metadata->broker_cnt; ++i)
+ brokers_.push_back(new BrokerMetadataImpl(&metadata->brokers[i]));
+
+ topics_.reserve(metadata->topic_cnt);
+ for (int i = 0; i < metadata->topic_cnt; ++i)
+ topics_.push_back(new TopicMetadataImpl(&metadata->topics[i]));
+}
+
+MetadataImpl::~MetadataImpl() {
+ for (size_t i = 0; i < brokers_.size(); ++i)
+ delete brokers_[i];
+ for (size_t i = 0; i < topics_.size(); ++i)
+ delete topics_[i];
+
+
+ if (metadata_)
+ rd_kafka_metadata_destroy(metadata_);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ProducerImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ProducerImpl.cpp
new file mode 100644
index 000000000..8300dfb3b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/ProducerImpl.cpp
@@ -0,0 +1,197 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+
+RdKafka::Producer::~Producer() {
+}
+
+static void dr_msg_cb_trampoline(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ RdKafka::HandleImpl *handle = static_cast<RdKafka::HandleImpl *>(opaque);
+ RdKafka::MessageImpl message(RD_KAFKA_PRODUCER, NULL,
+ (rd_kafka_message_t *)rkmessage, false);
+ handle->dr_cb_->dr_cb(message);
+}
+
+
+
+RdKafka::Producer *RdKafka::Producer::create(const RdKafka::Conf *conf,
+ std::string &errstr) {
+ char errbuf[512];
+ const RdKafka::ConfImpl *confimpl =
+ dynamic_cast<const RdKafka::ConfImpl *>(conf);
+ RdKafka::ProducerImpl *rkp = new RdKafka::ProducerImpl();
+ rd_kafka_conf_t *rk_conf = NULL;
+
+ if (confimpl) {
+ if (!confimpl->rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ delete rkp;
+ return NULL;
+ }
+
+ rkp->set_common_config(confimpl);
+
+ rk_conf = rd_kafka_conf_dup(confimpl->rk_conf_);
+
+ if (confimpl->dr_cb_) {
+ rd_kafka_conf_set_dr_msg_cb(rk_conf, dr_msg_cb_trampoline);
+ rkp->dr_cb_ = confimpl->dr_cb_;
+ }
+ }
+
+
+ rd_kafka_t *rk;
+ if (!(rk =
+ rd_kafka_new(RD_KAFKA_PRODUCER, rk_conf, errbuf, sizeof(errbuf)))) {
+ errstr = errbuf;
+ // rd_kafka_new() takes ownership only if succeeds
+ if (rk_conf)
+ rd_kafka_conf_destroy(rk_conf);
+ delete rkp;
+ return NULL;
+ }
+
+ rkp->rk_ = rk;
+
+ return rkp;
+}
+
+
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const std::string *key,
+ void *msg_opaque) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+ if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len,
+ key ? key->c_str() : NULL, key ? key->size() : 0,
+ msg_opaque) == -1)
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(RdKafka::Topic *topic,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ void *msg_opaque) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+ if (rd_kafka_produce(topicimpl->rkt_, partition, msgflags, payload, len, key,
+ key_len, msg_opaque) == -1)
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(
+ RdKafka::Topic *topic,
+ int32_t partition,
+ const std::vector<char> *payload,
+ const std::vector<char> *key,
+ void *msg_opaque) {
+ RdKafka::TopicImpl *topicimpl = dynamic_cast<RdKafka::TopicImpl *>(topic);
+
+ if (rd_kafka_produce(topicimpl->rkt_, partition, RD_KAFKA_MSG_F_COPY,
+ payload ? (void *)&(*payload)[0] : NULL,
+ payload ? payload->size() : 0, key ? &(*key)[0] : NULL,
+ key ? key->size() : 0, msg_opaque) == -1)
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_last_error());
+
+ return RdKafka::ERR_NO_ERROR;
+}
+
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ int64_t timestamp,
+ void *msg_opaque) {
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_producev(
+ rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()),
+ RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags),
+ RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len),
+ RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque),
+ RD_KAFKA_V_END));
+}
+
+RdKafka::ErrorCode RdKafka::ProducerImpl::produce(const std::string topic_name,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ int64_t timestamp,
+ RdKafka::Headers *headers,
+ void *msg_opaque) {
+ rd_kafka_headers_t *hdrs = NULL;
+ RdKafka::HeadersImpl *headersimpl = NULL;
+ rd_kafka_resp_err_t err;
+
+ if (headers) {
+ headersimpl = static_cast<RdKafka::HeadersImpl *>(headers);
+ hdrs = headersimpl->c_ptr();
+ }
+
+ err = rd_kafka_producev(
+ rk_, RD_KAFKA_V_TOPIC(topic_name.c_str()),
+ RD_KAFKA_V_PARTITION(partition), RD_KAFKA_V_MSGFLAGS(msgflags),
+ RD_KAFKA_V_VALUE(payload, len), RD_KAFKA_V_KEY(key, key_len),
+ RD_KAFKA_V_TIMESTAMP(timestamp), RD_KAFKA_V_OPAQUE(msg_opaque),
+ RD_KAFKA_V_HEADERS(hdrs), RD_KAFKA_V_END);
+
+ if (!err && headersimpl) {
+ /* A successful producev() call will destroy the C headers. */
+ headersimpl->c_headers_destroyed();
+ delete headers;
+ }
+
+ return static_cast<RdKafka::ErrorCode>(err);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/QueueImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/QueueImpl.cpp
new file mode 100644
index 000000000..19ebce9d6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/QueueImpl.cpp
@@ -0,0 +1,70 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::Queue::~Queue() {
+}
+
+RdKafka::Queue *RdKafka::Queue::create(Handle *base) {
+ return new RdKafka::QueueImpl(
+ rd_kafka_queue_new(dynamic_cast<HandleImpl *>(base)->rk_));
+}
+
+RdKafka::ErrorCode RdKafka::QueueImpl::forward(Queue *queue) {
+ if (!queue) {
+ rd_kafka_queue_forward(queue_, NULL);
+ } else {
+ QueueImpl *queueimpl = dynamic_cast<QueueImpl *>(queue);
+ rd_kafka_queue_forward(queue_, queueimpl->queue_);
+ }
+ return RdKafka::ERR_NO_ERROR;
+}
+
+RdKafka::Message *RdKafka::QueueImpl::consume(int timeout_ms) {
+ rd_kafka_message_t *rkmessage;
+ rkmessage = rd_kafka_consume_queue(queue_, timeout_ms);
+
+ if (!rkmessage)
+ return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, NULL,
+ RdKafka::ERR__TIMED_OUT);
+
+ return new RdKafka::MessageImpl(RD_KAFKA_CONSUMER, rkmessage);
+}
+
+int RdKafka::QueueImpl::poll(int timeout_ms) {
+ return rd_kafka_queue_poll_callback(queue_, timeout_ms);
+}
+
+void RdKafka::QueueImpl::io_event_enable(int fd,
+ const void *payload,
+ size_t size) {
+ rd_kafka_queue_io_event_enable(queue_, fd, payload, size);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/README.md b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/README.md
new file mode 100644
index 000000000..a4845894f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/README.md
@@ -0,0 +1,16 @@
+librdkafka C++ interface
+========================
+
+**See rdkafkacpp.h for the public C++ API**
+
+
+
+Maintainer notes for the C++ interface:
+
+ * The public C++ interface (rdkafkacpp.h) does not include the
+ public C interface (rdkafka.h) in any way, this means that all
+ constants, flags, etc, must be kept in sync manually between the two
+ header files.
+ A regression test should be implemented that checks this is true.
+
+ * The public C++ interface is provided using pure virtual abstract classes.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/RdKafka.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/RdKafka.cpp
new file mode 100644
index 000000000..b6cb33c28
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/RdKafka.cpp
@@ -0,0 +1,59 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <string>
+
+#include "rdkafkacpp_int.h"
+
+int RdKafka::version() {
+ return rd_kafka_version();
+}
+
+std::string RdKafka::version_str() {
+ return std::string(rd_kafka_version_str());
+}
+
+std::string RdKafka::get_debug_contexts() {
+ return std::string(RD_KAFKA_DEBUG_CONTEXTS);
+}
+
+std::string RdKafka::err2str(RdKafka::ErrorCode err) {
+ return std::string(rd_kafka_err2str(static_cast<rd_kafka_resp_err_t>(err)));
+}
+
+int RdKafka::wait_destroyed(int timeout_ms) {
+ return rd_kafka_wait_destroyed(timeout_ms);
+}
+
+void *RdKafka::mem_malloc(size_t size) {
+ return rd_kafka_mem_malloc(NULL, size);
+}
+
+void RdKafka::mem_free(void *ptr) {
+ rd_kafka_mem_free(NULL, ptr);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicImpl.cpp
new file mode 100644
index 000000000..bf9734df9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicImpl.cpp
@@ -0,0 +1,124 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <list>
+#include <cerrno>
+
+#include "rdkafkacpp_int.h"
+
+const int32_t RdKafka::Topic::PARTITION_UA = RD_KAFKA_PARTITION_UA;
+
+const int64_t RdKafka::Topic::OFFSET_BEGINNING = RD_KAFKA_OFFSET_BEGINNING;
+
+const int64_t RdKafka::Topic::OFFSET_END = RD_KAFKA_OFFSET_END;
+
+const int64_t RdKafka::Topic::OFFSET_STORED = RD_KAFKA_OFFSET_STORED;
+
+const int64_t RdKafka::Topic::OFFSET_INVALID = RD_KAFKA_OFFSET_INVALID;
+
+RdKafka::Topic::~Topic() {
+}
+
+static int32_t partitioner_cb_trampoline(const rd_kafka_topic_t *rkt,
+ const void *keydata,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
+ std::string key(static_cast<const char *>(keydata), keylen);
+ return topicimpl->partitioner_cb_->partitioner_cb(topicimpl, &key,
+ partition_cnt, msg_opaque);
+}
+
+static int32_t partitioner_kp_cb_trampoline(const rd_kafka_topic_t *rkt,
+ const void *keydata,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ RdKafka::TopicImpl *topicimpl = static_cast<RdKafka::TopicImpl *>(rkt_opaque);
+ return topicimpl->partitioner_kp_cb_->partitioner_cb(
+ topicimpl, keydata, keylen, partition_cnt, msg_opaque);
+}
+
+
+
+RdKafka::Topic *RdKafka::Topic::create(Handle *base,
+ const std::string &topic_str,
+ const Conf *conf,
+ std::string &errstr) {
+ const RdKafka::ConfImpl *confimpl =
+ static_cast<const RdKafka::ConfImpl *>(conf);
+ rd_kafka_topic_t *rkt;
+ rd_kafka_topic_conf_t *rkt_conf;
+ rd_kafka_t *rk = dynamic_cast<HandleImpl *>(base)->rk_;
+
+ RdKafka::TopicImpl *topic = new RdKafka::TopicImpl();
+
+ if (!confimpl) {
+ /* Reuse default topic config, but we need our own copy to
+ * set the topic opaque. */
+ rkt_conf = rd_kafka_default_topic_conf_dup(rk);
+ } else {
+ /* Make a copy of conf struct to allow Conf reuse. */
+ rkt_conf = rd_kafka_topic_conf_dup(confimpl->rkt_conf_);
+ }
+
+ /* Set topic opaque to the topic so that we can reach our topic object
+ * from whatever callbacks get registered.
+ * The application itself will not need these opaques since their
+ * callbacks are class based. */
+ rd_kafka_topic_conf_set_opaque(rkt_conf, static_cast<void *>(topic));
+
+ if (confimpl) {
+ if (confimpl->partitioner_cb_) {
+ rd_kafka_topic_conf_set_partitioner_cb(rkt_conf,
+ partitioner_cb_trampoline);
+ topic->partitioner_cb_ = confimpl->partitioner_cb_;
+ } else if (confimpl->partitioner_kp_cb_) {
+ rd_kafka_topic_conf_set_partitioner_cb(rkt_conf,
+ partitioner_kp_cb_trampoline);
+ topic->partitioner_kp_cb_ = confimpl->partitioner_kp_cb_;
+ }
+ }
+
+
+ if (!(rkt = rd_kafka_topic_new(rk, topic_str.c_str(), rkt_conf))) {
+ errstr = rd_kafka_err2str(rd_kafka_last_error());
+ delete topic;
+ rd_kafka_topic_conf_destroy(rkt_conf);
+ return NULL;
+ }
+
+ topic->rkt_ = rkt;
+
+ return topic;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicPartitionImpl.cpp b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicPartitionImpl.cpp
new file mode 100644
index 000000000..90ef820bf
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/TopicPartitionImpl.cpp
@@ -0,0 +1,57 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <string>
+#include <vector>
+
+#include "rdkafkacpp_int.h"
+
+RdKafka::TopicPartition::~TopicPartition() {
+}
+
+RdKafka::TopicPartition *RdKafka::TopicPartition::create(
+ const std::string &topic,
+ int partition) {
+ return new TopicPartitionImpl(topic, partition);
+}
+
+RdKafka::TopicPartition *RdKafka::TopicPartition::create(
+ const std::string &topic,
+ int partition,
+ int64_t offset) {
+ return new TopicPartitionImpl(topic, partition, offset);
+}
+
+void RdKafka::TopicPartition::destroy(
+ std::vector<TopicPartition *> &partitions) {
+ for (std::vector<TopicPartition *>::iterator it = partitions.begin();
+ it != partitions.end(); ++it)
+ delete (*it);
+ partitions.clear();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp.h b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp.h
new file mode 100644
index 000000000..1df1043c0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp.h
@@ -0,0 +1,3764 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014-2022 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKACPP_H_
+#define _RDKAFKACPP_H_
+
+/**
+ * @file rdkafkacpp.h
+ * @brief Apache Kafka C/C++ consumer and producer client library.
+ *
+ * rdkafkacpp.h contains the public C++ API for librdkafka.
+ * The API is documented in this file as comments prefixing the class,
+ * function, type, enum, define, etc.
+ * For more information, see the C interface in rdkafka.h and read the
+ * manual in INTRODUCTION.md.
+ * The C++ interface is STD C++ '03 compliant and adheres to the
+ * Google C++ Style Guide.
+
+ * @sa For the C interface see rdkafka.h
+ *
+ * @tableofcontents
+ */
+
+/**@cond NO_DOC*/
+#include <string>
+#include <list>
+#include <vector>
+#include <cstdlib>
+#include <cstring>
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifdef _WIN32
+#ifndef ssize_t
+#ifndef _BASETSD_H_
+#include <basetsd.h>
+#endif
+#ifndef _SSIZE_T_DEFINED
+#define _SSIZE_T_DEFINED
+typedef SSIZE_T ssize_t;
+#endif
+#endif
+#undef RD_EXPORT
+#ifdef LIBRDKAFKA_STATICLIB
+#define RD_EXPORT
+#else
+#ifdef LIBRDKAFKACPP_EXPORTS
+#define RD_EXPORT __declspec(dllexport)
+#else
+#define RD_EXPORT __declspec(dllimport)
+#endif
+#endif
+#else
+#define RD_EXPORT
+#endif
+
+/**@endcond*/
+
+extern "C" {
+/* Forward declarations */
+struct rd_kafka_s;
+struct rd_kafka_topic_s;
+struct rd_kafka_message_s;
+struct rd_kafka_conf_s;
+struct rd_kafka_topic_conf_s;
+}
+
+namespace RdKafka {
+
+/**
+ * @name Miscellaneous APIs
+ * @{
+ */
+
+/**
+ * @brief librdkafka version
+ *
+ * Interpreted as hex \c MM.mm.rr.xx:
+ * - MM = Major
+ * - mm = minor
+ * - rr = revision
+ * - xx = pre-release id (0xff is the final release)
+ *
+ * E.g.: \c 0x000801ff = 0.8.1
+ *
+ * @remark This value should only be used during compile time,
+ * for runtime checks of version use RdKafka::version()
+ */
+#define RD_KAFKA_VERSION 0x020100ff
+
+/**
+ * @brief Returns the librdkafka version as integer.
+ *
+ * @sa See RD_KAFKA_VERSION for how to parse the integer format.
+ */
+RD_EXPORT
+int version();
+
+/**
+ * @brief Returns the librdkafka version as string.
+ */
+RD_EXPORT
+std::string version_str();
+
+/**
+ * @brief Returns a CSV list of the supported debug contexts
+ * for use with Conf::Set("debug", ..).
+ */
+RD_EXPORT
+std::string get_debug_contexts();
+
+/**
+ * @brief Wait for all rd_kafka_t objects to be destroyed.
+ *
+ * @returns 0 if all kafka objects are now destroyed, or -1 if the
+ * timeout was reached.
+ * Since RdKafka handle deletion is an asynch operation the
+ * \p wait_destroyed() function can be used for applications where
+ * a clean shutdown is required.
+ */
+RD_EXPORT
+int wait_destroyed(int timeout_ms);
+
+/**
+ * @brief Allocate memory using the same allocator librdkafka uses.
+ *
+ * This is typically an abstraction for the malloc(3) call and makes sure
+ * the application can use the same memory allocator as librdkafka for
+ * allocating pointers that are used by librdkafka.
+ *
+ * @remark Memory allocated by mem_malloc() must be freed using
+ * mem_free().
+ */
+RD_EXPORT
+void *mem_malloc(size_t size);
+
+/**
+ * @brief Free pointer returned by librdkafka
+ *
+ * This is typically an abstraction for the free(3) call and makes sure
+ * the application can use the same memory allocator as librdkafka for
+ * freeing pointers returned by librdkafka.
+ *
+ * In standard setups it is usually not necessary to use this interface
+ * rather than the free(3) function.
+ *
+ * @remark mem_free() must only be used for pointers returned by APIs
+ * that explicitly mention using this function for freeing.
+ */
+RD_EXPORT
+void mem_free(void *ptr);
+
+/**@}*/
+
+
+
+/**
+ * @name Constants, errors, types
+ * @{
+ *
+ *
+ */
+
+/**
+ * @brief Error codes.
+ *
+ * The negative error codes delimited by two underscores
+ * (\c _ERR__..) denotes errors internal to librdkafka and are
+ * displayed as \c \"Local: \<error string..\>\", while the error codes
+ * delimited by a single underscore (\c ERR_..) denote broker
+ * errors and are displayed as \c \"Broker: \<error string..\>\".
+ *
+ * @sa Use RdKafka::err2str() to translate an error code a human readable string
+ */
+enum ErrorCode {
+ /* Internal errors to rdkafka: */
+ /** Begin internal error codes */
+ ERR__BEGIN = -200,
+ /** Received message is incorrect */
+ ERR__BAD_MSG = -199,
+ /** Bad/unknown compression */
+ ERR__BAD_COMPRESSION = -198,
+ /** Broker is going away */
+ ERR__DESTROY = -197,
+ /** Generic failure */
+ ERR__FAIL = -196,
+ /** Broker transport failure */
+ ERR__TRANSPORT = -195,
+ /** Critical system resource */
+ ERR__CRIT_SYS_RESOURCE = -194,
+ /** Failed to resolve broker */
+ ERR__RESOLVE = -193,
+ /** Produced message timed out*/
+ ERR__MSG_TIMED_OUT = -192,
+ /** Reached the end of the topic+partition queue on
+ * the broker. Not really an error.
+ * This event is disabled by default,
+ * see the `enable.partition.eof` configuration property. */
+ ERR__PARTITION_EOF = -191,
+ /** Permanent: Partition does not exist in cluster. */
+ ERR__UNKNOWN_PARTITION = -190,
+ /** File or filesystem error */
+ ERR__FS = -189,
+ /** Permanent: Topic does not exist in cluster. */
+ ERR__UNKNOWN_TOPIC = -188,
+ /** All broker connections are down. */
+ ERR__ALL_BROKERS_DOWN = -187,
+ /** Invalid argument, or invalid configuration */
+ ERR__INVALID_ARG = -186,
+ /** Operation timed out */
+ ERR__TIMED_OUT = -185,
+ /** Queue is full */
+ ERR__QUEUE_FULL = -184,
+ /** ISR count < required.acks */
+ ERR__ISR_INSUFF = -183,
+ /** Broker node update */
+ ERR__NODE_UPDATE = -182,
+ /** SSL error */
+ ERR__SSL = -181,
+ /** Waiting for coordinator to become available. */
+ ERR__WAIT_COORD = -180,
+ /** Unknown client group */
+ ERR__UNKNOWN_GROUP = -179,
+ /** Operation in progress */
+ ERR__IN_PROGRESS = -178,
+ /** Previous operation in progress, wait for it to finish. */
+ ERR__PREV_IN_PROGRESS = -177,
+ /** This operation would interfere with an existing subscription */
+ ERR__EXISTING_SUBSCRIPTION = -176,
+ /** Assigned partitions (rebalance_cb) */
+ ERR__ASSIGN_PARTITIONS = -175,
+ /** Revoked partitions (rebalance_cb) */
+ ERR__REVOKE_PARTITIONS = -174,
+ /** Conflicting use */
+ ERR__CONFLICT = -173,
+ /** Wrong state */
+ ERR__STATE = -172,
+ /** Unknown protocol */
+ ERR__UNKNOWN_PROTOCOL = -171,
+ /** Not implemented */
+ ERR__NOT_IMPLEMENTED = -170,
+ /** Authentication failure*/
+ ERR__AUTHENTICATION = -169,
+ /** No stored offset */
+ ERR__NO_OFFSET = -168,
+ /** Outdated */
+ ERR__OUTDATED = -167,
+ /** Timed out in queue */
+ ERR__TIMED_OUT_QUEUE = -166,
+ /** Feature not supported by broker */
+ ERR__UNSUPPORTED_FEATURE = -165,
+ /** Awaiting cache update */
+ ERR__WAIT_CACHE = -164,
+ /** Operation interrupted */
+ ERR__INTR = -163,
+ /** Key serialization error */
+ ERR__KEY_SERIALIZATION = -162,
+ /** Value serialization error */
+ ERR__VALUE_SERIALIZATION = -161,
+ /** Key deserialization error */
+ ERR__KEY_DESERIALIZATION = -160,
+ /** Value deserialization error */
+ ERR__VALUE_DESERIALIZATION = -159,
+ /** Partial response */
+ ERR__PARTIAL = -158,
+ /** Modification attempted on read-only object */
+ ERR__READ_ONLY = -157,
+ /** No such entry / item not found */
+ ERR__NOENT = -156,
+ /** Read underflow */
+ ERR__UNDERFLOW = -155,
+ /** Invalid type */
+ ERR__INVALID_TYPE = -154,
+ /** Retry operation */
+ ERR__RETRY = -153,
+ /** Purged in queue */
+ ERR__PURGE_QUEUE = -152,
+ /** Purged in flight */
+ ERR__PURGE_INFLIGHT = -151,
+ /** Fatal error: see RdKafka::Handle::fatal_error() */
+ ERR__FATAL = -150,
+ /** Inconsistent state */
+ ERR__INCONSISTENT = -149,
+ /** Gap-less ordering would not be guaranteed if proceeding */
+ ERR__GAPLESS_GUARANTEE = -148,
+ /** Maximum poll interval exceeded */
+ ERR__MAX_POLL_EXCEEDED = -147,
+ /** Unknown broker */
+ ERR__UNKNOWN_BROKER = -146,
+ /** Functionality not configured */
+ ERR__NOT_CONFIGURED = -145,
+ /** Instance has been fenced */
+ ERR__FENCED = -144,
+ /** Application generated error */
+ ERR__APPLICATION = -143,
+ /** Assignment lost */
+ ERR__ASSIGNMENT_LOST = -142,
+ /** No operation performed */
+ ERR__NOOP = -141,
+ /** No offset to automatically reset to */
+ ERR__AUTO_OFFSET_RESET = -140,
+ /** Partition log truncation detected */
+ ERR__LOG_TRUNCATION = -139,
+
+ /** End internal error codes */
+ ERR__END = -100,
+
+ /* Kafka broker errors: */
+ /** Unknown broker error */
+ ERR_UNKNOWN = -1,
+ /** Success */
+ ERR_NO_ERROR = 0,
+ /** Offset out of range */
+ ERR_OFFSET_OUT_OF_RANGE = 1,
+ /** Invalid message */
+ ERR_INVALID_MSG = 2,
+ /** Unknown topic or partition */
+ ERR_UNKNOWN_TOPIC_OR_PART = 3,
+ /** Invalid message size */
+ ERR_INVALID_MSG_SIZE = 4,
+ /** Leader not available */
+ ERR_LEADER_NOT_AVAILABLE = 5,
+ /** Not leader for partition */
+ ERR_NOT_LEADER_FOR_PARTITION = 6,
+ /** Request timed out */
+ ERR_REQUEST_TIMED_OUT = 7,
+ /** Broker not available */
+ ERR_BROKER_NOT_AVAILABLE = 8,
+ /** Replica not available */
+ ERR_REPLICA_NOT_AVAILABLE = 9,
+ /** Message size too large */
+ ERR_MSG_SIZE_TOO_LARGE = 10,
+ /** StaleControllerEpochCode */
+ ERR_STALE_CTRL_EPOCH = 11,
+ /** Offset metadata string too large */
+ ERR_OFFSET_METADATA_TOO_LARGE = 12,
+ /** Broker disconnected before response received */
+ ERR_NETWORK_EXCEPTION = 13,
+ /** Coordinator load in progress */
+ ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
+/** Group coordinator load in progress */
+#define ERR_GROUP_LOAD_IN_PROGRESS ERR_COORDINATOR_LOAD_IN_PROGRESS
+ /** Coordinator not available */
+ ERR_COORDINATOR_NOT_AVAILABLE = 15,
+/** Group coordinator not available */
+#define ERR_GROUP_COORDINATOR_NOT_AVAILABLE ERR_COORDINATOR_NOT_AVAILABLE
+ /** Not coordinator */
+ ERR_NOT_COORDINATOR = 16,
+/** Not coordinator for group */
+#define ERR_NOT_COORDINATOR_FOR_GROUP ERR_NOT_COORDINATOR
+ /** Invalid topic */
+ ERR_TOPIC_EXCEPTION = 17,
+ /** Message batch larger than configured server segment size */
+ ERR_RECORD_LIST_TOO_LARGE = 18,
+ /** Not enough in-sync replicas */
+ ERR_NOT_ENOUGH_REPLICAS = 19,
+ /** Message(s) written to insufficient number of in-sync replicas */
+ ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
+ /** Invalid required acks value */
+ ERR_INVALID_REQUIRED_ACKS = 21,
+ /** Specified group generation id is not valid */
+ ERR_ILLEGAL_GENERATION = 22,
+ /** Inconsistent group protocol */
+ ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
+ /** Invalid group.id */
+ ERR_INVALID_GROUP_ID = 24,
+ /** Unknown member */
+ ERR_UNKNOWN_MEMBER_ID = 25,
+ /** Invalid session timeout */
+ ERR_INVALID_SESSION_TIMEOUT = 26,
+ /** Group rebalance in progress */
+ ERR_REBALANCE_IN_PROGRESS = 27,
+ /** Commit offset data size is not valid */
+ ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
+ /** Topic authorization failed */
+ ERR_TOPIC_AUTHORIZATION_FAILED = 29,
+ /** Group authorization failed */
+ ERR_GROUP_AUTHORIZATION_FAILED = 30,
+ /** Cluster authorization failed */
+ ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
+ /** Invalid timestamp */
+ ERR_INVALID_TIMESTAMP = 32,
+ /** Unsupported SASL mechanism */
+ ERR_UNSUPPORTED_SASL_MECHANISM = 33,
+ /** Illegal SASL state */
+ ERR_ILLEGAL_SASL_STATE = 34,
+ /** Unuspported version */
+ ERR_UNSUPPORTED_VERSION = 35,
+ /** Topic already exists */
+ ERR_TOPIC_ALREADY_EXISTS = 36,
+ /** Invalid number of partitions */
+ ERR_INVALID_PARTITIONS = 37,
+ /** Invalid replication factor */
+ ERR_INVALID_REPLICATION_FACTOR = 38,
+ /** Invalid replica assignment */
+ ERR_INVALID_REPLICA_ASSIGNMENT = 39,
+ /** Invalid config */
+ ERR_INVALID_CONFIG = 40,
+ /** Not controller for cluster */
+ ERR_NOT_CONTROLLER = 41,
+ /** Invalid request */
+ ERR_INVALID_REQUEST = 42,
+ /** Message format on broker does not support request */
+ ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
+ /** Policy violation */
+ ERR_POLICY_VIOLATION = 44,
+ /** Broker received an out of order sequence number */
+ ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
+ /** Broker received a duplicate sequence number */
+ ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
+ /** Producer attempted an operation with an old epoch */
+ ERR_INVALID_PRODUCER_EPOCH = 47,
+ /** Producer attempted a transactional operation in an invalid state */
+ ERR_INVALID_TXN_STATE = 48,
+ /** Producer attempted to use a producer id which is not
+ * currently assigned to its transactional id */
+ ERR_INVALID_PRODUCER_ID_MAPPING = 49,
+ /** Transaction timeout is larger than the maximum
+ * value allowed by the broker's max.transaction.timeout.ms */
+ ERR_INVALID_TRANSACTION_TIMEOUT = 50,
+ /** Producer attempted to update a transaction while another
+ * concurrent operation on the same transaction was ongoing */
+ ERR_CONCURRENT_TRANSACTIONS = 51,
+ /** Indicates that the transaction coordinator sending a
+ * WriteTxnMarker is no longer the current coordinator for a
+ * given producer */
+ ERR_TRANSACTION_COORDINATOR_FENCED = 52,
+ /** Transactional Id authorization failed */
+ ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
+ /** Security features are disabled */
+ ERR_SECURITY_DISABLED = 54,
+ /** Operation not attempted */
+ ERR_OPERATION_NOT_ATTEMPTED = 55,
+ /** Disk error when trying to access log file on the disk */
+ ERR_KAFKA_STORAGE_ERROR = 56,
+ /** The user-specified log directory is not found in the broker config */
+ ERR_LOG_DIR_NOT_FOUND = 57,
+ /** SASL Authentication failed */
+ ERR_SASL_AUTHENTICATION_FAILED = 58,
+ /** Unknown Producer Id */
+ ERR_UNKNOWN_PRODUCER_ID = 59,
+ /** Partition reassignment is in progress */
+ ERR_REASSIGNMENT_IN_PROGRESS = 60,
+ /** Delegation Token feature is not enabled */
+ ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
+ /** Delegation Token is not found on server */
+ ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
+ /** Specified Principal is not valid Owner/Renewer */
+ ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
+ /** Delegation Token requests are not allowed on this connection */
+ ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
+ /** Delegation Token authorization failed */
+ ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
+ /** Delegation Token is expired */
+ ERR_DELEGATION_TOKEN_EXPIRED = 66,
+ /** Supplied principalType is not supported */
+ ERR_INVALID_PRINCIPAL_TYPE = 67,
+ /** The group is not empty */
+ ERR_NON_EMPTY_GROUP = 68,
+ /** The group id does not exist */
+ ERR_GROUP_ID_NOT_FOUND = 69,
+ /** The fetch session ID was not found */
+ ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
+ /** The fetch session epoch is invalid */
+ ERR_INVALID_FETCH_SESSION_EPOCH = 71,
+ /** No matching listener */
+ ERR_LISTENER_NOT_FOUND = 72,
+ /** Topic deletion is disabled */
+ ERR_TOPIC_DELETION_DISABLED = 73,
+ /** Leader epoch is older than broker epoch */
+ ERR_FENCED_LEADER_EPOCH = 74,
+ /** Leader epoch is newer than broker epoch */
+ ERR_UNKNOWN_LEADER_EPOCH = 75,
+ /** Unsupported compression type */
+ ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
+ /** Broker epoch has changed */
+ ERR_STALE_BROKER_EPOCH = 77,
+ /** Leader high watermark is not caught up */
+ ERR_OFFSET_NOT_AVAILABLE = 78,
+ /** Group member needs a valid member ID */
+ ERR_MEMBER_ID_REQUIRED = 79,
+ /** Preferred leader was not available */
+ ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
+ /** Consumer group has reached maximum size */
+ ERR_GROUP_MAX_SIZE_REACHED = 81,
+ /** Static consumer fenced by other consumer with same
+ * group.instance.id. */
+ ERR_FENCED_INSTANCE_ID = 82,
+ /** Eligible partition leaders are not available */
+ ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
+ /** Leader election not needed for topic partition */
+ ERR_ELECTION_NOT_NEEDED = 84,
+ /** No partition reassignment is in progress */
+ ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
+ /** Deleting offsets of a topic while the consumer group is
+ * subscribed to it */
+ ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
+ /** Broker failed to validate record */
+ ERR_INVALID_RECORD = 87,
+ /** There are unstable offsets that need to be cleared */
+ ERR_UNSTABLE_OFFSET_COMMIT = 88,
+ /** Throttling quota has been exceeded */
+ ERR_THROTTLING_QUOTA_EXCEEDED = 89,
+ /** There is a newer producer with the same transactionalId
+ * which fences the current one */
+ ERR_PRODUCER_FENCED = 90,
+ /** Request illegally referred to resource that does not exist */
+ ERR_RESOURCE_NOT_FOUND = 91,
+ /** Request illegally referred to the same resource twice */
+ ERR_DUPLICATE_RESOURCE = 92,
+ /** Requested credential would not meet criteria for acceptability */
+ ERR_UNACCEPTABLE_CREDENTIAL = 93,
+ /** Indicates that the either the sender or recipient of a
+ * voter-only request is not one of the expected voters */
+ ERR_INCONSISTENT_VOTER_SET = 94,
+ /** Invalid update version */
+ ERR_INVALID_UPDATE_VERSION = 95,
+ /** Unable to update finalized features due to server error */
+ ERR_FEATURE_UPDATE_FAILED = 96,
+ /** Request principal deserialization failed during forwarding */
+ ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97
+};
+
+
+/**
+ * @brief Returns a human readable representation of a kafka error.
+ */
+RD_EXPORT
+std::string err2str(RdKafka::ErrorCode err);
+
+
+
+/**
+ * @enum CertificateType
+ * @brief SSL certificate types
+ */
+enum CertificateType {
+ CERT_PUBLIC_KEY, /**< Client's public key */
+ CERT_PRIVATE_KEY, /**< Client's private key */
+ CERT_CA, /**< CA certificate */
+ CERT__CNT
+};
+
+/**
+ * @enum CertificateEncoding
+ * @brief SSL certificate encoding
+ */
+enum CertificateEncoding {
+ CERT_ENC_PKCS12, /**< PKCS#12 */
+ CERT_ENC_DER, /**< DER / binary X.509 ASN1 */
+ CERT_ENC_PEM, /**< PEM */
+ CERT_ENC__CNT
+};
+
+/**@} */
+
+
+
+/**@cond NO_DOC*/
+/* Forward declarations */
+class Handle;
+class Producer;
+class Message;
+class Headers;
+class Queue;
+class Event;
+class Topic;
+class TopicPartition;
+class Metadata;
+class KafkaConsumer;
+/**@endcond*/
+
+
+/**
+ * @name Error class
+ * @{
+ *
+ */
+
+/**
+ * @brief The Error class is used as a return value from APIs to propagate
+ * an error. The error consists of an error code which is to be used
+ * programatically, an error string for showing to the user,
+ * and various error flags that can be used programmatically to decide
+ * how to handle the error; e.g., should the operation be retried,
+ * was it a fatal error, etc.
+ *
+ * Error objects must be deleted explicitly to free its resources.
+ */
+class RD_EXPORT Error {
+ public:
+ /**
+ * @brief Create error object.
+ */
+ static Error *create(ErrorCode code, const std::string *errstr);
+
+ virtual ~Error() {
+ }
+
+ /*
+ * Error accessor methods
+ */
+
+ /**
+ * @returns the error code, e.g., RdKafka::ERR_UNKNOWN_MEMBER_ID.
+ */
+ virtual ErrorCode code() const = 0;
+
+ /**
+ * @returns the error code name, e.g, "ERR_UNKNOWN_MEMBER_ID".
+ */
+ virtual std::string name() const = 0;
+
+ /**
+ * @returns a human readable error string.
+ */
+ virtual std::string str() const = 0;
+
+ /**
+ * @returns true if the error is a fatal error, indicating that the client
+ * instance is no longer usable, else false.
+ */
+ virtual bool is_fatal() const = 0;
+
+ /**
+ * @returns true if the operation may be retried, else false.
+ */
+ virtual bool is_retriable() const = 0;
+
+ /**
+ * @returns true if the error is an abortable transaction error in which case
+ * the application must call RdKafka::Producer::abort_transaction()
+ * and start a new transaction with
+ * RdKafka::Producer::begin_transaction() if it wishes to proceed
+ * with transactions.
+ * Else returns false.
+ *
+ * @remark The return value of this method is only valid for errors returned
+ * by the transactional API.
+ */
+ virtual bool txn_requires_abort() const = 0;
+};
+
+/**@}*/
+
+
+/**
+ * @name Callback classes
+ * @{
+ *
+ *
+ * librdkafka uses (optional) callbacks to propagate information and
+ * delegate decisions to the application logic.
+ *
+ * An application must call RdKafka::poll() at regular intervals to
+ * serve queued callbacks.
+ */
+
+
+/**
+ * @brief Delivery Report callback class
+ *
+ * The delivery report callback will be called once for each message
+ * accepted by RdKafka::Producer::produce() (et.al) with
+ * RdKafka::Message::err() set to indicate the result of the produce request.
+ *
+ * The callback is called when a message is succesfully produced or
+ * if librdkafka encountered a permanent failure, or the retry counter for
+ * temporary errors has been exhausted.
+ *
+ * An application must call RdKafka::poll() at regular intervals to
+ * serve queued delivery report callbacks.
+
+ */
+class RD_EXPORT DeliveryReportCb {
+ public:
+ /**
+ * @brief Delivery report callback.
+ */
+ virtual void dr_cb(Message &message) = 0;
+
+ virtual ~DeliveryReportCb() {
+ }
+};
+
+
+/**
+ * @brief SASL/OAUTHBEARER token refresh callback class
+ *
+ * The SASL/OAUTHBEARER token refresh callback is triggered via RdKafka::poll()
+ * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved,
+ * typically based on the configuration defined in \c sasl.oauthbearer.config.
+ *
+ * The \c oauthbearer_config argument is the value of the
+ * \c sasl.oauthbearer.config configuration property.
+ *
+ * The callback should invoke RdKafka::Handle::oauthbearer_set_token() or
+ * RdKafka::Handle::oauthbearer_set_token_failure() to indicate success or
+ * failure, respectively.
+ *
+ * The refresh operation is eventable and may be received when an event
+ * callback handler is set with an event type of
+ * \c RdKafka::Event::EVENT_OAUTHBEARER_TOKEN_REFRESH.
+ *
+ * Note that before any SASL/OAUTHBEARER broker connection can succeed the
+ * application must call RdKafka::Handle::oauthbearer_set_token() once -- either
+ * directly or, more typically, by invoking RdKafka::poll() -- in order to
+ * cause retrieval of an initial token to occur.
+ *
+ * An application must call RdKafka::poll() at regular intervals to
+ * serve queued SASL/OAUTHBEARER token refresh callbacks (when
+ * OAUTHBEARER is the SASL mechanism).
+ */
+class RD_EXPORT OAuthBearerTokenRefreshCb {
+ public:
+ /**
+ * @brief SASL/OAUTHBEARER token refresh callback class.
+ *
+ * @param handle The RdKafka::Handle which requires a refreshed token.
+ * @param oauthbearer_config The value of the
+ * \p sasl.oauthbearer.config configuration property for \p handle.
+ */
+ virtual void oauthbearer_token_refresh_cb(
+ RdKafka::Handle *handle,
+ const std::string &oauthbearer_config) = 0;
+
+ virtual ~OAuthBearerTokenRefreshCb() {
+ }
+};
+
+
+/**
+ * @brief Partitioner callback class
+ *
+ * Generic partitioner callback class for implementing custom partitioners.
+ *
+ * @sa RdKafka::Conf::set() \c "partitioner_cb"
+ */
+class RD_EXPORT PartitionerCb {
+ public:
+ /**
+ * @brief Partitioner callback
+ *
+ * Return the partition to use for \p key in \p topic.
+ *
+ * The \p msg_opaque is the same \p msg_opaque provided in the
+ * RdKafka::Producer::produce() call.
+ *
+ * @remark \p key may be NULL or the empty.
+ *
+ * @returns Must return a value between 0 and \p partition_cnt
+ * (non-inclusive). May return RD_KAFKA_PARTITION_UA (-1) if partitioning
+ * failed.
+ *
+ * @sa The callback may use RdKafka::Topic::partition_available() to check
+ * if a partition has an active leader broker.
+ */
+ virtual int32_t partitioner_cb(const Topic *topic,
+ const std::string *key,
+ int32_t partition_cnt,
+ void *msg_opaque) = 0;
+
+ virtual ~PartitionerCb() {
+ }
+};
+
+/**
+ * @brief Variant partitioner with key pointer
+ *
+ */
+class PartitionerKeyPointerCb {
+ public:
+ /**
+ * @brief Variant partitioner callback that gets \p key as pointer and length
+ * instead of as a const std::string *.
+ *
+ * @remark \p key may be NULL or have \p key_len 0.
+ *
+ * @sa See RdKafka::PartitionerCb::partitioner_cb() for exact semantics
+ */
+ virtual int32_t partitioner_cb(const Topic *topic,
+ const void *key,
+ size_t key_len,
+ int32_t partition_cnt,
+ void *msg_opaque) = 0;
+
+ virtual ~PartitionerKeyPointerCb() {
+ }
+};
+
+
+
+/**
+ * @brief Event callback class
+ *
+ * Events are a generic interface for propagating errors, statistics, logs, etc
+ * from librdkafka to the application.
+ *
+ * @sa RdKafka::Event
+ */
+class RD_EXPORT EventCb {
+ public:
+ /**
+ * @brief Event callback
+ *
+ * @sa RdKafka::Event
+ */
+ virtual void event_cb(Event &event) = 0;
+
+ virtual ~EventCb() {
+ }
+};
+
+
+/**
+ * @brief Event object class as passed to the EventCb callback.
+ */
+class RD_EXPORT Event {
+ public:
+ /** @brief Event type */
+ enum Type {
+ EVENT_ERROR, /**< Event is an error condition */
+ EVENT_STATS, /**< Event is a statistics JSON document */
+ EVENT_LOG, /**< Event is a log message */
+ EVENT_THROTTLE /**< Event is a throttle level signaling from the broker */
+ };
+
+ /** @brief EVENT_LOG severities (conforms to syslog(3) severities) */
+ enum Severity {
+ EVENT_SEVERITY_EMERG = 0,
+ EVENT_SEVERITY_ALERT = 1,
+ EVENT_SEVERITY_CRITICAL = 2,
+ EVENT_SEVERITY_ERROR = 3,
+ EVENT_SEVERITY_WARNING = 4,
+ EVENT_SEVERITY_NOTICE = 5,
+ EVENT_SEVERITY_INFO = 6,
+ EVENT_SEVERITY_DEBUG = 7
+ };
+
+ virtual ~Event() {
+ }
+
+ /*
+ * Event Accessor methods
+ */
+
+ /**
+ * @returns The event type
+ * @remark Applies to all event types
+ */
+ virtual Type type() const = 0;
+
+ /**
+ * @returns Event error, if any.
+ * @remark Applies to all event types except THROTTLE
+ */
+ virtual ErrorCode err() const = 0;
+
+ /**
+ * @returns Log severity level.
+ * @remark Applies to LOG event type.
+ */
+ virtual Severity severity() const = 0;
+
+ /**
+ * @returns Log facility string.
+ * @remark Applies to LOG event type.
+ */
+ virtual std::string fac() const = 0;
+
+ /**
+ * @returns Log message string.
+ *
+ * \c EVENT_LOG: Log message string.
+ * \c EVENT_STATS: JSON object (as string).
+ *
+ * @remark Applies to LOG event type.
+ */
+ virtual std::string str() const = 0;
+
+ /**
+ * @returns Throttle time in milliseconds.
+ * @remark Applies to THROTTLE event type.
+ */
+ virtual int throttle_time() const = 0;
+
+ /**
+ * @returns Throttling broker's name.
+ * @remark Applies to THROTTLE event type.
+ */
+ virtual std::string broker_name() const = 0;
+
+ /**
+ * @returns Throttling broker's id.
+ * @remark Applies to THROTTLE event type.
+ */
+ virtual int broker_id() const = 0;
+
+
+ /**
+ * @returns true if this is a fatal error.
+ * @remark Applies to ERROR event type.
+ * @sa RdKafka::Handle::fatal_error()
+ */
+ virtual bool fatal() const = 0;
+};
+
+
+
+/**
+ * @brief Consume callback class
+ */
+class RD_EXPORT ConsumeCb {
+ public:
+ /**
+ * @brief The consume callback is used with
+ * RdKafka::Consumer::consume_callback()
+ * methods and will be called for each consumed \p message.
+ *
+ * The callback interface is optional but provides increased performance.
+ */
+ virtual void consume_cb(Message &message, void *opaque) = 0;
+
+ virtual ~ConsumeCb() {
+ }
+};
+
+
+/**
+ * @brief \b KafkaConsumer: Rebalance callback class
+ */
+class RD_EXPORT RebalanceCb {
+ public:
+ /**
+ * @brief Group rebalance callback for use with RdKafka::KafkaConsumer
+ *
+ * Registering a \p rebalance_cb turns off librdkafka's automatic
+ * partition assignment/revocation and instead delegates that responsibility
+ * to the application's \p rebalance_cb.
+ *
+ * The rebalance callback is responsible for updating librdkafka's
+ * assignment set based on the two events: RdKafka::ERR__ASSIGN_PARTITIONS
+ * and RdKafka::ERR__REVOKE_PARTITIONS but should also be able to handle
+ * arbitrary rebalancing failures where \p err is neither of those.
+ * @remark In this latter case (arbitrary error), the application must
+ * call unassign() to synchronize state.
+ *
+ * For eager/non-cooperative `partition.assignment.strategy` assignors,
+ * such as `range` and `roundrobin`, the application must use
+ * assign assign() to set and unassign() to clear the entire assignment.
+ * For the cooperative assignors, such as `cooperative-sticky`, the
+ * application must use incremental_assign() for ERR__ASSIGN_PARTITIONS and
+ * incremental_unassign() for ERR__REVOKE_PARTITIONS.
+ *
+ * Without a rebalance callback this is done automatically by librdkafka
+ * but registering a rebalance callback gives the application flexibility
+ * in performing other operations along with the assinging/revocation,
+ * such as fetching offsets from an alternate location (on assign)
+ * or manually committing offsets (on revoke).
+ *
+ * @sa RdKafka::KafkaConsumer::assign()
+ * @sa RdKafka::KafkaConsumer::incremental_assign()
+ * @sa RdKafka::KafkaConsumer::incremental_unassign()
+ * @sa RdKafka::KafkaConsumer::assignment_lost()
+ * @sa RdKafka::KafkaConsumer::rebalance_protocol()
+ *
+ * The following example show's the application's responsibilities:
+ * @code
+ * class MyRebalanceCb : public RdKafka::RebalanceCb {
+ * public:
+ * void rebalance_cb (RdKafka::KafkaConsumer *consumer,
+ * RdKafka::ErrorCode err,
+ * std::vector<RdKafka::TopicPartition*> &partitions) {
+ * if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+ * // application may load offets from arbitrary external
+ * // storage here and update \p partitions
+ * if (consumer->rebalance_protocol() == "COOPERATIVE")
+ * consumer->incremental_assign(partitions);
+ * else
+ * consumer->assign(partitions);
+ *
+ * } else if (err == RdKafka::ERR__REVOKE_PARTITIONS) {
+ * // Application may commit offsets manually here
+ * // if auto.commit.enable=false
+ * if (consumer->rebalance_protocol() == "COOPERATIVE")
+ * consumer->incremental_unassign(partitions);
+ * else
+ * consumer->unassign();
+ *
+ * } else {
+ * std::cerr << "Rebalancing error: " <<
+ * RdKafka::err2str(err) << std::endl;
+ * consumer->unassign();
+ * }
+ * }
+ * }
+ * @endcode
+ *
+ * @remark The above example lacks error handling for assign calls, see
+ * the examples/ directory.
+ */
+ virtual void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<TopicPartition *> &partitions) = 0;
+
+ virtual ~RebalanceCb() {
+ }
+};
+
+
+/**
+ * @brief Offset Commit callback class
+ */
+class RD_EXPORT OffsetCommitCb {
+ public:
+ /**
+ * @brief Set offset commit callback for use with consumer groups
+ *
+ * The results of automatic or manual offset commits will be scheduled
+ * for this callback and is served by RdKafka::KafkaConsumer::consume().
+ *
+ * If no partitions had valid offsets to commit this callback will be called
+ * with \p err == ERR__NO_OFFSET which is not to be considered an error.
+ *
+ * The \p offsets list contains per-partition information:
+ * - \c topic The topic committed
+ * - \c partition The partition committed
+ * - \c offset: Committed offset (attempted)
+ * - \c err: Commit error
+ */
+ virtual void offset_commit_cb(RdKafka::ErrorCode err,
+ std::vector<TopicPartition *> &offsets) = 0;
+
+ virtual ~OffsetCommitCb() {
+ }
+};
+
+
+
+/**
+ * @brief SSL broker certificate verification class.
+ *
+ * @remark Class instance must outlive the RdKafka client instance.
+ */
+class RD_EXPORT SslCertificateVerifyCb {
+ public:
+ /**
+ * @brief SSL broker certificate verification callback.
+ *
+ * The verification callback is triggered from internal librdkafka threads
+ * upon connecting to a broker. On each connection attempt the callback
+ * will be called for each certificate in the broker's certificate chain,
+ * starting at the root certification, as long as the application callback
+ * returns 1 (valid certificate).
+ *
+ * \p broker_name and \p broker_id correspond to the broker the connection
+ * is being made to.
+ * The \c x509_error argument indicates if OpenSSL's verification of
+ * the certificate succeed (0) or failed (an OpenSSL error code).
+ * The application may set the SSL context error code by returning 0
+ * from the verify callback and providing a non-zero SSL context error code
+ * in \p x509_error.
+ * If the verify callback sets \p x509_error to 0, returns 1, and the
+ * original \p x509_error was non-zero, the error on the SSL context will
+ * be cleared.
+ * \p x509_error is always a valid pointer to an int.
+ *
+ * \p depth is the depth of the current certificate in the chain, starting
+ * at the root certificate.
+ *
+ * The certificate itself is passed in binary DER format in \p buf of
+ * size \p size.
+ *
+ * The callback must 1 if verification succeeds, or 0 if verification fails
+ * and write a human-readable error message
+ * to \p errstr.
+ *
+ * @warning This callback will be called from internal librdkafka threads.
+ *
+ * @remark See <openssl/x509_vfy.h> in the OpenSSL source distribution
+ * for a list of \p x509_error codes.
+ */
+ virtual bool ssl_cert_verify_cb(const std::string &broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ std::string &errstr) = 0;
+
+ virtual ~SslCertificateVerifyCb() {
+ }
+};
+
+
+/**
+ * @brief \b Portability: SocketCb callback class
+ *
+ */
+class RD_EXPORT SocketCb {
+ public:
+ /**
+ * @brief Socket callback
+ *
+ * The socket callback is responsible for opening a socket
+ * according to the supplied \p domain, \p type and \p protocol.
+ * The socket shall be created with \c CLOEXEC set in a racefree fashion, if
+ * possible.
+ *
+ * It is typically not required to register an alternative socket
+ * implementation
+ *
+ * @returns The socket file descriptor or -1 on error (\c errno must be set)
+ */
+ virtual int socket_cb(int domain, int type, int protocol) = 0;
+
+ virtual ~SocketCb() {
+ }
+};
+
+
+/**
+ * @brief \b Portability: OpenCb callback class
+ *
+ */
+class RD_EXPORT OpenCb {
+ public:
+ /**
+ * @brief Open callback
+ * The open callback is responsible for opening the file specified by
+ * \p pathname, using \p flags and \p mode.
+ * The file shall be opened with \c CLOEXEC set in a racefree fashion, if
+ * possible.
+ *
+ * It is typically not required to register an alternative open implementation
+ *
+ * @remark Not currently available on native Win32
+ */
+ virtual int open_cb(const std::string &path, int flags, int mode) = 0;
+
+ virtual ~OpenCb() {
+ }
+};
+
+
+/**@}*/
+
+
+
+/**
+ * @name Configuration interface
+ * @{
+ *
+ */
+
+/**
+ * @brief Configuration interface
+ *
+ * Holds either global or topic configuration that are passed to
+ * RdKafka::Consumer::create(), RdKafka::Producer::create(),
+ * RdKafka::KafkaConsumer::create(), etc.
+ *
+ * @sa CONFIGURATION.md for the full list of supported properties.
+ */
+class RD_EXPORT Conf {
+ public:
+ /**
+ * @brief Configuration object type
+ */
+ enum ConfType {
+ CONF_GLOBAL, /**< Global configuration */
+ CONF_TOPIC /**< Topic specific configuration */
+ };
+
+ /**
+ * @brief RdKafka::Conf::Set() result code
+ */
+ enum ConfResult {
+ CONF_UNKNOWN = -2, /**< Unknown configuration property */
+ CONF_INVALID = -1, /**< Invalid configuration value */
+ CONF_OK = 0 /**< Configuration property was succesfully set */
+ };
+
+
+ /**
+ * @brief Create configuration object
+ */
+ static Conf *create(ConfType type);
+
+ virtual ~Conf() {
+ }
+
+ /**
+ * @brief Set configuration property \p name to value \p value.
+ *
+ * Fallthrough:
+ * Topic-level configuration properties may be set using this interface
+ * in which case they are applied on the \c default_topic_conf.
+ * If no \c default_topic_conf has been set one will be created.
+ * Any sub-sequent set("default_topic_conf", ..) calls will
+ * replace the current default topic configuration.
+
+ * @returns CONF_OK on success, else writes a human readable error
+ * description to \p errstr on error.
+ */
+ virtual Conf::ConfResult set(const std::string &name,
+ const std::string &value,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"dr_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ DeliveryReportCb *dr_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"oauthbearer_token_refresh_cb\" */
+ virtual Conf::ConfResult set(
+ const std::string &name,
+ OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"event_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ EventCb *event_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"default_topic_conf\"
+ *
+ * Sets the default topic configuration to use for for automatically
+ * subscribed topics.
+ *
+ * @sa RdKafka::KafkaConsumer::subscribe()
+ */
+ virtual Conf::ConfResult set(const std::string &name,
+ const Conf *topic_conf,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"partitioner_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ PartitionerCb *partitioner_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"partitioner_key_pointer_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ PartitionerKeyPointerCb *partitioner_kp_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"socket_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ SocketCb *socket_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"open_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ OpenCb *open_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"rebalance_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ RebalanceCb *rebalance_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"offset_commit_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ OffsetCommitCb *offset_commit_cb,
+ std::string &errstr) = 0;
+
+ /** @brief Use with \p name = \c \"ssl_cert_verify_cb\".
+ * @returns CONF_OK on success or CONF_INVALID if SSL is
+ * not supported in this build.
+ */
+ virtual Conf::ConfResult set(const std::string &name,
+ SslCertificateVerifyCb *ssl_cert_verify_cb,
+ std::string &errstr) = 0;
+
+ /**
+ * @brief Set certificate/key \p cert_type from the \p cert_enc encoded
+ * memory at \p buffer of \p size bytes.
+ *
+ * @param cert_type Certificate or key type to configure.
+ * @param cert_enc Buffer \p encoding type.
+ * @param buffer Memory pointer to encoded certificate or key.
+ * The memory is not referenced after this function returns.
+ * @param size Size of memory at \p buffer.
+ * @param errstr A human-readable error string will be written to this string
+ * on failure.
+ *
+ * @returns CONF_OK on success or CONF_INVALID if the memory in
+ * \p buffer is of incorrect encoding, or if librdkafka
+ * was not built with SSL support.
+ *
+ * @remark Calling this method multiple times with the same \p cert_type
+ * will replace the previous value.
+ *
+ * @remark Calling this method with \p buffer set to NULL will clear the
+ * configuration for \p cert_type.
+ *
+ * @remark The private key may require a password, which must be specified
+ * with the `ssl.key.password` configuration property prior to
+ * calling this function.
+ *
+ * @remark Private and public keys in PEM format may also be set with the
+ * `ssl.key.pem` and `ssl.certificate.pem` configuration properties.
+ *
+ * @remark CA certificate in PEM format may also be set with the
+ * `ssl.ca.pem` configuration property.
+ *
+ * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is
+ * encoded using an obsolete cipher, it might be necessary to set up
+ * an OpenSSL configuration file to load the "legacy" provider and
+ * set the OPENSSL_CONF environment variable.
+ * See
+ * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more
+ * information.
+ */
+ virtual Conf::ConfResult set_ssl_cert(RdKafka::CertificateType cert_type,
+ RdKafka::CertificateEncoding cert_enc,
+ const void *buffer,
+ size_t size,
+ std::string &errstr) = 0;
+
+ /** @brief Query single configuration value
+ *
+ * Do not use this method to get callbacks registered by the configuration
+ * file. Instead use the specific get() methods with the specific callback
+ * parameter in the signature.
+ *
+ * Fallthrough:
+ * Topic-level configuration properties from the \c default_topic_conf
+ * may be retrieved using this interface.
+ *
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p value. */
+ virtual Conf::ConfResult get(const std::string &name,
+ std::string &value) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p dr_cb. */
+ virtual Conf::ConfResult get(DeliveryReportCb *&dr_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p oauthbearer_token_refresh_cb. */
+ virtual Conf::ConfResult get(
+ OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p event_cb. */
+ virtual Conf::ConfResult get(EventCb *&event_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p partitioner_cb. */
+ virtual Conf::ConfResult get(PartitionerCb *&partitioner_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p partitioner_kp_cb. */
+ virtual Conf::ConfResult get(
+ PartitionerKeyPointerCb *&partitioner_kp_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p socket_cb. */
+ virtual Conf::ConfResult get(SocketCb *&socket_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p open_cb. */
+ virtual Conf::ConfResult get(OpenCb *&open_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p rebalance_cb. */
+ virtual Conf::ConfResult get(RebalanceCb *&rebalance_cb) const = 0;
+
+ /** @brief Query single configuration value
+ * @returns CONF_OK if the property was set previously set and
+ * returns the value in \p offset_commit_cb. */
+ virtual Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const = 0;
+
+ /** @brief Use with \p name = \c \"ssl_cert_verify_cb\" */
+ virtual Conf::ConfResult get(
+ SslCertificateVerifyCb *&ssl_cert_verify_cb) const = 0;
+
+ /** @brief Dump configuration names and values to list containing
+ * name,value tuples */
+ virtual std::list<std::string> *dump() = 0;
+
+ /** @brief Use with \p name = \c \"consume_cb\" */
+ virtual Conf::ConfResult set(const std::string &name,
+ ConsumeCb *consume_cb,
+ std::string &errstr) = 0;
+
+ /**
+ * @brief Returns the underlying librdkafka C rd_kafka_conf_t handle.
+ *
+ * @warning Calling the C API on this handle is not recommended and there
+ * is no official support for it, but for cases where the C++
+ * does not provide the proper functionality this C handle can be
+ * used to interact directly with the core librdkafka API.
+ *
+ * @remark The lifetime of the returned pointer is the same as the Conf
+ * object this method is called on.
+ *
+ * @remark Include <rdkafka/rdkafka.h> prior to including
+ * <rdkafka/rdkafkacpp.h>
+ *
+ * @returns \c rd_kafka_conf_t* if this is a CONF_GLOBAL object, else NULL.
+ */
+ virtual struct rd_kafka_conf_s *c_ptr_global() = 0;
+
+ /**
+ * @brief Returns the underlying librdkafka C rd_kafka_topic_conf_t handle.
+ *
+ * @warning Calling the C API on this handle is not recommended and there
+ * is no official support for it, but for cases where the C++
+ * does not provide the proper functionality this C handle can be
+ * used to interact directly with the core librdkafka API.
+ *
+ * @remark The lifetime of the returned pointer is the same as the Conf
+ * object this method is called on.
+ *
+ * @remark Include <rdkafka/rdkafka.h> prior to including
+ * <rdkafka/rdkafkacpp.h>
+ *
+ * @returns \c rd_kafka_topic_conf_t* if this is a CONF_TOPIC object,
+ * else NULL.
+ */
+ virtual struct rd_kafka_topic_conf_s *c_ptr_topic() = 0;
+
+ /**
+ * @brief Set callback_data for ssl engine.
+ *
+ * @remark The \c ssl.engine.location configuration must be set for this
+ * to have affect.
+ *
+ * @remark The memory pointed to by \p value must remain valid for the
+ * lifetime of the configuration object and any Kafka clients that
+ * use it.
+ *
+ * @returns CONF_OK on success, else CONF_INVALID.
+ */
+ virtual Conf::ConfResult set_engine_callback_data(void *value,
+ std::string &errstr) = 0;
+
+
+ /** @brief Enable/disable creation of a queue specific to SASL events
+ * and callbacks.
+ *
+ * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this
+ * configuration API allows an application to get a dedicated
+ * queue for the SASL events/callbacks. After enabling the queue with this API
+ * the application can retrieve the queue by calling
+ * RdKafka::Handle::get_sasl_queue() on the client instance.
+ * This queue may then be served directly by the application
+ * (RdKafka::Queue::poll()) or forwarded to another queue, such as
+ * the background queue.
+ *
+ * A convenience function is available to automatically forward the SASL queue
+ * to librdkafka's background thread, see
+ * RdKafka::Handle::sasl_background_callbacks_enable().
+ *
+ * By default (\p enable = false) the main queue (as served by
+ * RdKafka::Handle::poll(), et.al.) is used for SASL callbacks.
+ *
+ * @remark The SASL queue is currently only used by the SASL OAUTHBEARER "
+ * mechanism's token refresh callback.
+ */
+ virtual Conf::ConfResult enable_sasl_queue(bool enable,
+ std::string &errstr) = 0;
+};
+
+/**@}*/
+
+
+/**
+ * @name Kafka base client handle
+ * @{
+ *
+ */
+
+/**
+ * @brief Base handle, super class for specific clients.
+ */
+class RD_EXPORT Handle {
+ public:
+ virtual ~Handle() {
+ }
+
+ /** @returns the name of the handle */
+ virtual std::string name() const = 0;
+
+ /**
+ * @brief Returns the client's broker-assigned group member id
+ *
+ * @remark This currently requires the high-level KafkaConsumer
+ *
+ * @returns Last assigned member id, or empty string if not currently
+ * a group member.
+ */
+ virtual std::string memberid() const = 0;
+
+
+ /**
+ * @brief Polls the provided kafka handle for events.
+ *
+ * Events will trigger application provided callbacks to be called.
+ *
+ * The \p timeout_ms argument specifies the maximum amount of time
+ * (in milliseconds) that the call will block waiting for events.
+ * For non-blocking calls, provide 0 as \p timeout_ms.
+ * To wait indefinately for events, provide -1.
+ *
+ * Events:
+ * - delivery report callbacks (if an RdKafka::DeliveryCb is configured)
+ * [producer]
+ * - event callbacks (if an RdKafka::EventCb is configured) [producer &
+ * consumer]
+ *
+ * @remark An application should make sure to call poll() at regular
+ * intervals to serve any queued callbacks waiting to be called.
+ *
+ * @warning This method MUST NOT be used with the RdKafka::KafkaConsumer,
+ * use its RdKafka::KafkaConsumer::consume() instead.
+ *
+ * @returns the number of events served.
+ */
+ virtual int poll(int timeout_ms) = 0;
+
+ /**
+ * @brief Returns the current out queue length
+ *
+ * The out queue contains messages and requests waiting to be sent to,
+ * or acknowledged by, the broker.
+ */
+ virtual int outq_len() = 0;
+
+ /**
+ * @brief Request Metadata from broker.
+ *
+ * Parameters:
+ * \p all_topics - if non-zero: request info about all topics in cluster,
+ * if zero: only request info about locally known topics.
+ * \p only_rkt - only request info about this topic
+ * \p metadatap - pointer to hold metadata result.
+ * The \p *metadatap pointer must be released with \c
+ * delete. \p timeout_ms - maximum response time before failing.
+ *
+ * @returns RdKafka::ERR_NO_ERROR on success (in which case \p *metadatap
+ * will be set), else RdKafka::ERR__TIMED_OUT on timeout or
+ * other error code on error.
+ */
+ virtual ErrorCode metadata(bool all_topics,
+ const Topic *only_rkt,
+ Metadata **metadatap,
+ int timeout_ms) = 0;
+
+
+ /**
+ * @brief Pause producing or consumption for the provided list of partitions.
+ *
+ * Success or error is returned per-partition in the \p partitions list.
+ *
+ * @returns ErrorCode::NO_ERROR
+ *
+ * @sa resume()
+ */
+ virtual ErrorCode pause(std::vector<TopicPartition *> &partitions) = 0;
+
+
+ /**
+ * @brief Resume producing or consumption for the provided list of partitions.
+ *
+ * Success or error is returned per-partition in the \p partitions list.
+ *
+ * @returns ErrorCode::NO_ERROR
+ *
+ * @sa pause()
+ */
+ virtual ErrorCode resume(std::vector<TopicPartition *> &partitions) = 0;
+
+
+ /**
+ * @brief Query broker for low (oldest/beginning)
+ * and high (newest/end) offsets for partition.
+ *
+ * Offsets are returned in \p *low and \p *high respectively.
+ *
+ * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure.
+ */
+ virtual ErrorCode query_watermark_offsets(const std::string &topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high,
+ int timeout_ms) = 0;
+
+ /**
+ * @brief Get last known low (oldest/beginning)
+ * and high (newest/end) offsets for partition.
+ *
+ * The low offset is updated periodically (if statistics.interval.ms is set)
+ * while the high offset is updated on each fetched message set from the
+ * broker.
+ *
+ * If there is no cached offset (either low or high, or both) then
+ * OFFSET_INVALID will be returned for the respective offset.
+ *
+ * Offsets are returned in \p *low and \p *high respectively.
+ *
+ * @returns RdKafka::ERR_NO_ERROR on success or an error code on failure.
+ *
+ * @remark Shall only be used with an active consumer instance.
+ */
+ virtual ErrorCode get_watermark_offsets(const std::string &topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high) = 0;
+
+
+ /**
+ * @brief Look up the offsets for the given partitions by timestamp.
+ *
+ * The returned offset for each partition is the earliest offset whose
+ * timestamp is greater than or equal to the given timestamp in the
+ * corresponding partition.
+ *
+ * The timestamps to query are represented as \c offset in \p offsets
+ * on input, and \c offset() will return the closest earlier offset
+ * for the timestamp on output.
+ *
+ * Timestamps are expressed as milliseconds since epoch (UTC).
+ *
+ * The function will block for at most \p timeout_ms milliseconds.
+ *
+ * @remark Duplicate Topic+Partitions are not supported.
+ * @remark Errors are also returned per TopicPartition, see \c err()
+ *
+ * @returns an error code for general errors, else RdKafka::ERR_NO_ERROR
+ * in which case per-partition errors might be set.
+ */
+ virtual ErrorCode offsetsForTimes(std::vector<TopicPartition *> &offsets,
+ int timeout_ms) = 0;
+
+
+ /**
+ * @brief Retrieve queue for a given partition.
+ *
+ * @returns The fetch queue for the given partition if successful. Else,
+ * NULL is returned.
+ *
+ * @remark This function only works on consumers.
+ */
+ virtual Queue *get_partition_queue(const TopicPartition *partition) = 0;
+
+ /**
+ * @brief Forward librdkafka logs (and debug) to the specified queue
+ * for serving with one of the ..poll() calls.
+ *
+ * This allows an application to serve log callbacks (\c log_cb)
+ * in its thread of choice.
+ *
+ * @param queue Queue to forward logs to. If the value is NULL the logs
+ * are forwarded to the main queue.
+ *
+ * @remark The configuration property \c log.queue MUST also be set to true.
+ *
+ * @remark librdkafka maintains its own reference to the provided queue.
+ *
+ * @returns ERR_NO_ERROR on success or an error code on error.
+ */
+ virtual ErrorCode set_log_queue(Queue *queue) = 0;
+
+ /**
+ * @brief Cancels the current callback dispatcher (Handle::poll(),
+ * KafkaConsumer::consume(), etc).
+ *
+ * A callback may use this to force an immediate return to the calling
+ * code (caller of e.g. Handle::poll()) without processing any further
+ * events.
+ *
+ * @remark This function MUST ONLY be called from within a
+ * librdkafka callback.
+ */
+ virtual void yield() = 0;
+
+ /**
+ * @brief Returns the ClusterId as reported in broker metadata.
+ *
+ * @param timeout_ms If there is no cached value from metadata retrieval
+ * then this specifies the maximum amount of time
+ * (in milliseconds) the call will block waiting
+ * for metadata to be retrieved.
+ * Use 0 for non-blocking calls.
+ *
+ * @remark Requires broker version >=0.10.0 and api.version.request=true.
+ *
+ * @returns Last cached ClusterId, or empty string if no ClusterId could be
+ * retrieved in the allotted timespan.
+ */
+ virtual std::string clusterid(int timeout_ms) = 0;
+
+ /**
+ * @brief Returns the underlying librdkafka C rd_kafka_t handle.
+ *
+ * @warning Calling the C API on this handle is not recommended and there
+ * is no official support for it, but for cases where the C++
+ * does not provide the proper functionality this C handle can be
+ * used to interact directly with the core librdkafka API.
+ *
+ * @remark The lifetime of the returned pointer is the same as the Topic
+ * object this method is called on.
+ *
+ * @remark Include <rdkafka/rdkafka.h> prior to including
+ * <rdkafka/rdkafkacpp.h>
+ *
+ * @returns \c rd_kafka_t*
+ */
+ virtual struct rd_kafka_s *c_ptr() = 0;
+
+ /**
+ * @brief Returns the current ControllerId (controller broker id)
+ * as reported in broker metadata.
+ *
+ * @param timeout_ms If there is no cached value from metadata retrieval
+ * then this specifies the maximum amount of time
+ * (in milliseconds) the call will block waiting
+ * for metadata to be retrieved.
+ * Use 0 for non-blocking calls.
+ *
+ * @remark Requires broker version >=0.10.0 and api.version.request=true.
+ *
+ * @returns Last cached ControllerId, or -1 if no ControllerId could be
+ * retrieved in the allotted timespan.
+ */
+ virtual int32_t controllerid(int timeout_ms) = 0;
+
+
+ /**
+ * @brief Returns the first fatal error set on this client instance,
+ * or ERR_NO_ERROR if no fatal error has occurred.
+ *
+ * This function is to be used with the Idempotent Producer and
+ * the Event class for \c EVENT_ERROR events to detect fatal errors.
+ *
+ * Generally all errors raised by the error event are to be considered
+ * informational and temporary, the client will try to recover from all
+ * errors in a graceful fashion (by retrying, etc).
+ *
+ * However, some errors should logically be considered fatal to retain
+ * consistency; in particular a set of errors that may occur when using the
+ * Idempotent Producer and the in-order or exactly-once producer guarantees
+ * can't be satisfied.
+ *
+ * @param errstr A human readable error string if a fatal error was set.
+ *
+ * @returns ERR_NO_ERROR if no fatal error has been raised, else
+ * any other error code.
+ */
+ virtual ErrorCode fatal_error(std::string &errstr) const = 0;
+
+ /**
+ * @brief Set SASL/OAUTHBEARER token and metadata
+ *
+ * @param token_value the mandatory token value to set, often (but not
+ * necessarily) a JWS compact serialization as per
+ * https://tools.ietf.org/html/rfc7515#section-3.1.
+ * @param md_lifetime_ms when the token expires, in terms of the number of
+ * milliseconds since the epoch.
+ * @param md_principal_name the Kafka principal name associated with the
+ * token.
+ * @param extensions potentially empty SASL extension keys and values where
+ * element [i] is the key and [i+1] is the key's value, to be communicated
+ * to the broker as additional key-value pairs during the initial client
+ * response as per https://tools.ietf.org/html/rfc7628#section-3.1. The
+ * number of SASL extension keys plus values must be a non-negative multiple
+ * of 2. Any provided keys and values are copied.
+ * @param errstr A human readable error string is written here, only if
+ * there is an error.
+ *
+ * The SASL/OAUTHBEARER token refresh callback should invoke
+ * this method upon success. The extension keys must not include the reserved
+ * key "`auth`", and all extension keys and values must conform to the
+ * required format as per https://tools.ietf.org/html/rfc7628#section-3.1:
+ *
+ * key = 1*(ALPHA)
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ *
+ * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise \p errstr set
+ * and:<br>
+ * \c RdKafka::ERR__INVALID_ARG if any of the arguments are
+ * invalid;<br>
+ * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
+ * supported by this build;<br>
+ * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is
+ * not configured as the client's authentication mechanism.<br>
+ *
+ * @sa RdKafka::oauthbearer_set_token_failure
+ * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb"
+ */
+ virtual ErrorCode oauthbearer_set_token(
+ const std::string &token_value,
+ int64_t md_lifetime_ms,
+ const std::string &md_principal_name,
+ const std::list<std::string> &extensions,
+ std::string &errstr) = 0;
+
+ /**
+ * @brief SASL/OAUTHBEARER token refresh failure indicator.
+ *
+ * @param errstr human readable error reason for failing to acquire a token.
+ *
+ * The SASL/OAUTHBEARER token refresh callback should
+ * invoke this method upon failure to refresh the token.
+ *
+ * @returns \c RdKafka::ERR_NO_ERROR on success, otherwise:<br>
+ * \c RdKafka::ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
+ * supported by this build;<br>
+ * \c RdKafka::ERR__STATE if SASL/OAUTHBEARER is supported but is
+ * not configured as the client's authentication mechanism.
+ *
+ * @sa RdKafka::oauthbearer_set_token
+ * @sa RdKafka::Conf::set() \c "oauthbearer_token_refresh_cb"
+ */
+ virtual ErrorCode oauthbearer_set_token_failure(
+ const std::string &errstr) = 0;
+
+ /**
+ * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka
+ * background thread.
+ *
+ * This serves as an alternative for applications that do not
+ * call RdKafka::Handle::poll() (et.al.) at regular intervals.
+ */
+ virtual Error *sasl_background_callbacks_enable() = 0;
+
+
+ /**
+ * @returns the SASL callback queue, if enabled, else NULL.
+ *
+ * @sa RdKafka::Conf::enable_sasl_queue()
+ */
+ virtual Queue *get_sasl_queue() = 0;
+
+ /**
+ * @returns the librdkafka background thread queue.
+ */
+ virtual Queue *get_background_queue() = 0;
+
+
+
+ /**
+ * @brief Allocate memory using the same allocator librdkafka uses.
+ *
+ * This is typically an abstraction for the malloc(3) call and makes sure
+ * the application can use the same memory allocator as librdkafka for
+ * allocating pointers that are used by librdkafka.
+ *
+ * @remark Memory allocated by mem_malloc() must be freed using
+ * mem_free().
+ */
+ virtual void *mem_malloc(size_t size) = 0;
+
+ /**
+ * @brief Free pointer returned by librdkafka
+ *
+ * This is typically an abstraction for the free(3) call and makes sure
+ * the application can use the same memory allocator as librdkafka for
+ * freeing pointers returned by librdkafka.
+ *
+ * In standard setups it is usually not necessary to use this interface
+ * rather than the free(3) function.
+ *
+ * @remark mem_free() must only be used for pointers returned by APIs
+ * that explicitly mention using this function for freeing.
+ */
+ virtual void mem_free(void *ptr) = 0;
+
+ /**
+ * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by
+ * this Kafka client.
+ *
+ * This function sets or resets the SASL username and password credentials
+ * used by this Kafka client. The new credentials will be used the next time
+ * this client needs to authenticate to a broker.
+ * will not disconnect existing connections that might have been made using
+ * the old credentials.
+ *
+ * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms.
+ *
+ * @returns NULL on success or an error object on error.
+ */
+ virtual Error *sasl_set_credentials(const std::string &username,
+ const std::string &password) = 0;
+};
+
+
+/**@}*/
+
+
+/**
+ * @name Topic and partition objects
+ * @{
+ *
+ */
+
+/**
+ * @brief Topic+Partition
+ *
+ * This is a generic type to hold a single partition and various
+ * information about it.
+ *
+ * Is typically used with std::vector<RdKafka::TopicPartition*> to provide
+ * a list of partitions for different operations.
+ */
+class RD_EXPORT TopicPartition {
+ public:
+ /**
+ * @brief Create topic+partition object for \p topic and \p partition.
+ *
+ * Use \c delete to deconstruct.
+ */
+ static TopicPartition *create(const std::string &topic, int partition);
+
+ /**
+ * @brief Create topic+partition object for \p topic and \p partition
+ * with offset \p offset.
+ *
+ * Use \c delete to deconstruct.
+ */
+ static TopicPartition *create(const std::string &topic,
+ int partition,
+ int64_t offset);
+
+ virtual ~TopicPartition() = 0;
+
+ /**
+ * @brief Destroy/delete the TopicPartitions in \p partitions
+ * and clear the vector.
+ */
+ static void destroy(std::vector<TopicPartition *> &partitions);
+
+ /** @returns topic name */
+ virtual const std::string &topic() const = 0;
+
+ /** @returns partition id */
+ virtual int partition() const = 0;
+
+ /** @returns offset (if applicable) */
+ virtual int64_t offset() const = 0;
+
+ /** @brief Set offset */
+ virtual void set_offset(int64_t offset) = 0;
+
+ /** @returns error code (if applicable) */
+ virtual ErrorCode err() const = 0;
+
+ /** @brief Get partition leader epoch, or -1 if not known or relevant. */
+ virtual int32_t get_leader_epoch() = 0;
+
+ /** @brief Set partition leader epoch. */
+ virtual void set_leader_epoch(int32_t leader_epoch) = 0;
+};
+
+
+
+/**
+ * @brief Topic handle
+ *
+ */
+class RD_EXPORT Topic {
+ public:
+ /**
+ * @brief Unassigned partition.
+ *
+ * The unassigned partition is used by the producer API for messages
+ * that should be partitioned using the configured or default partitioner.
+ */
+ static const int32_t PARTITION_UA;
+
+ /** @brief Special offsets */
+ static const int64_t OFFSET_BEGINNING; /**< Consume from beginning */
+ static const int64_t OFFSET_END; /**< Consume from end */
+ static const int64_t OFFSET_STORED; /**< Use offset storage */
+ static const int64_t OFFSET_INVALID; /**< Invalid offset */
+
+
+ /**
+ * @brief Creates a new topic handle for topic named \p topic_str
+ *
+ * \p conf is an optional configuration for the topic that will be used
+ * instead of the default topic configuration.
+ * The \p conf object is reusable after this call.
+ *
+ * @returns the new topic handle or NULL on error (see \p errstr).
+ */
+ static Topic *create(Handle *base,
+ const std::string &topic_str,
+ const Conf *conf,
+ std::string &errstr);
+
+ virtual ~Topic() = 0;
+
+
+ /** @returns the topic name */
+ virtual std::string name() const = 0;
+
+ /**
+ * @returns true if \p partition is available for the topic (has leader).
+ * @warning \b MUST \b ONLY be called from within a
+ * RdKafka::PartitionerCb callback.
+ */
+ virtual bool partition_available(int32_t partition) const = 0;
+
+ /**
+ * @brief Store offset \p offset + 1 for topic partition \p partition.
+ * The offset will be committed (written) to the broker (or file) according
+ * to \p auto.commit.interval.ms or next manual offset-less commit call.
+ *
+ * @deprecated This API lacks support for partition leader epochs, which makes
+ * it at risk for unclean leader election log truncation issues.
+ * Use KafkaConsumer::offsets_store() or
+ * Message::offset_store() instead.
+ *
+ * @remark \c enable.auto.offset.store must be set to \c false when using
+ * this API.
+ *
+ * @returns RdKafka::ERR_NO_ERROR on success or an error code if none of the
+ * offsets could be stored.
+ */
+ virtual ErrorCode offset_store(int32_t partition, int64_t offset) = 0;
+
+ /**
+ * @brief Returns the underlying librdkafka C rd_kafka_topic_t handle.
+ *
+ * @warning Calling the C API on this handle is not recommended and there
+ * is no official support for it, but for cases where the C++ API
+ * does not provide the underlying functionality this C handle can be
+ * used to interact directly with the core librdkafka API.
+ *
+ * @remark The lifetime of the returned pointer is the same as the Topic
+ * object this method is called on.
+ *
+ * @remark Include <rdkafka/rdkafka.h> prior to including
+ * <rdkafka/rdkafkacpp.h>
+ *
+ * @returns \c rd_kafka_topic_t*
+ */
+ virtual struct rd_kafka_topic_s *c_ptr() = 0;
+};
+
+
+/**@}*/
+
+
+/**
+ * @name Message object
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Message timestamp object
+ *
+ * Represents the number of milliseconds since the epoch (UTC).
+ *
+ * The MessageTimestampType dictates the timestamp type or origin.
+ *
+ * @remark Requires Apache Kafka broker version >= 0.10.0
+ *
+ */
+
+class RD_EXPORT MessageTimestamp {
+ public:
+ /*! Message timestamp type */
+ enum MessageTimestampType {
+ MSG_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */
+ MSG_TIMESTAMP_CREATE_TIME, /**< Message creation time (source) */
+ MSG_TIMESTAMP_LOG_APPEND_TIME /**< Message log append time (broker) */
+ };
+
+ MessageTimestampType type; /**< Timestamp type */
+ int64_t timestamp; /**< Milliseconds since epoch (UTC). */
+};
+
+
+/**
+ * @brief Headers object
+ *
+ * Represents message headers.
+ *
+ * https://cwiki.apache.org/confluence/display/KAFKA/KIP-82+-+Add+Record+Headers
+ *
+ * @remark Requires Apache Kafka >= 0.11.0 brokers
+ */
+class RD_EXPORT Headers {
+ public:
+ virtual ~Headers() = 0;
+
+ /**
+ * @brief Header object
+ *
+ * This object represents a single Header with a key value pair
+ * and an ErrorCode
+ *
+ * @remark dynamic allocation of this object is not supported.
+ */
+ class Header {
+ public:
+ /**
+ * @brief Header object to encapsulate a single Header
+ *
+ * @param key the string value for the header key
+ * @param value the bytes of the header value, or NULL
+ * @param value_size the length in bytes of the header value
+ *
+ * @remark key and value are copied.
+ *
+ */
+ Header(const std::string &key, const void *value, size_t value_size) :
+ key_(key), err_(ERR_NO_ERROR), value_size_(value_size) {
+ value_ = copy_value(value, value_size);
+ }
+
+ /**
+ * @brief Header object to encapsulate a single Header
+ *
+ * @param key the string value for the header key
+ * @param value the bytes of the header value
+ * @param value_size the length in bytes of the header value
+ * @param err the error code if one returned
+ *
+ * @remark The error code is used for when the Header is constructed
+ * internally by using RdKafka::Headers::get_last which constructs
+ * a Header encapsulating the ErrorCode in the process.
+ * If err is set, the value and value_size fields will be undefined.
+ */
+ Header(const std::string &key,
+ const void *value,
+ size_t value_size,
+ const RdKafka::ErrorCode err) :
+ key_(key), err_(err), value_(NULL), value_size_(value_size) {
+ if (err == ERR_NO_ERROR)
+ value_ = copy_value(value, value_size);
+ }
+
+ /**
+ * @brief Copy constructor
+ *
+ * @param other Header to make a copy of.
+ */
+ Header(const Header &other) :
+ key_(other.key_), err_(other.err_), value_size_(other.value_size_) {
+ value_ = copy_value(other.value_, value_size_);
+ }
+
+ /**
+ * @brief Assignment operator
+ *
+ * @param other Header to make a copy of.
+ */
+ Header &operator=(const Header &other) {
+ if (&other == this) {
+ return *this;
+ }
+
+ key_ = other.key_;
+ err_ = other.err_;
+ value_size_ = other.value_size_;
+
+ if (value_ != NULL)
+ mem_free(value_);
+
+ value_ = copy_value(other.value_, value_size_);
+
+ return *this;
+ }
+
+ ~Header() {
+ if (value_ != NULL)
+ mem_free(value_);
+ }
+
+ /** @returns the key/name associated with this Header */
+ std::string key() const {
+ return key_;
+ }
+
+ /** @returns returns the binary value, or NULL */
+ const void *value() const {
+ return value_;
+ }
+
+ /** @returns returns the value casted to a nul-terminated C string,
+ * or NULL. */
+ const char *value_string() const {
+ return static_cast<const char *>(value_);
+ }
+
+ /** @returns Value Size the length of the Value in bytes */
+ size_t value_size() const {
+ return value_size_;
+ }
+
+ /** @returns the error code of this Header (usually ERR_NO_ERROR) */
+ RdKafka::ErrorCode err() const {
+ return err_;
+ }
+
+ private:
+ char *copy_value(const void *value, size_t value_size) {
+ if (!value)
+ return NULL;
+
+ char *dest = (char *)mem_malloc(value_size + 1);
+ memcpy(dest, (const char *)value, value_size);
+ dest[value_size] = '\0';
+
+ return dest;
+ }
+
+ std::string key_;
+ RdKafka::ErrorCode err_;
+ char *value_;
+ size_t value_size_;
+ void *operator new(size_t); /* Prevent dynamic allocation */
+ };
+
+ /**
+ * @brief Create a new instance of the Headers object
+ *
+ * @returns an empty Headers list
+ */
+ static Headers *create();
+
+ /**
+ * @brief Create a new instance of the Headers object from a std::vector
+ *
+ * @param headers std::vector of RdKafka::Headers::Header objects.
+ * The headers are copied, not referenced.
+ *
+ * @returns a Headers list from std::vector set to the size of the std::vector
+ */
+ static Headers *create(const std::vector<Header> &headers);
+
+ /**
+ * @brief Adds a Header to the end of the list.
+ *
+ * @param key header key/name
+ * @param value binary value, or NULL
+ * @param value_size size of the value
+ *
+ * @returns an ErrorCode signalling success or failure to add the header.
+ */
+ virtual ErrorCode add(const std::string &key,
+ const void *value,
+ size_t value_size) = 0;
+
+ /**
+ * @brief Adds a Header to the end of the list.
+ *
+ * Convenience method for adding a std::string as a value for the header.
+ *
+ * @param key header key/name
+ * @param value value string
+ *
+ * @returns an ErrorCode signalling success or failure to add the header.
+ */
+ virtual ErrorCode add(const std::string &key, const std::string &value) = 0;
+
+ /**
+ * @brief Adds a Header to the end of the list.
+ *
+ * This method makes a copy of the passed header.
+ *
+ * @param header Existing header to copy
+ *
+ * @returns an ErrorCode signalling success or failure to add the header.
+ */
+ virtual ErrorCode add(const Header &header) = 0;
+
+ /**
+ * @brief Removes all the Headers of a given key
+ *
+ * @param key header key/name to remove
+ *
+ * @returns An ErrorCode signalling a success or failure to remove the Header.
+ */
+ virtual ErrorCode remove(const std::string &key) = 0;
+
+ /**
+ * @brief Gets all of the Headers of a given key
+ *
+ * @param key header key/name
+ *
+ * @remark If duplicate keys exist this will return them all as a std::vector
+ *
+ * @returns a std::vector containing all the Headers of the given key.
+ */
+ virtual std::vector<Header> get(const std::string &key) const = 0;
+
+ /**
+ * @brief Gets the last occurrence of a Header of a given key
+ *
+ * @param key header key/name
+ *
+ * @remark This will only return the most recently added header
+ *
+ * @returns the Header if found, otherwise a Header with an err set to
+ * ERR__NOENT.
+ */
+ virtual Header get_last(const std::string &key) const = 0;
+
+ /**
+ * @brief Returns all Headers
+ *
+ * @returns a std::vector containing all of the Headers
+ */
+ virtual std::vector<Header> get_all() const = 0;
+
+ /**
+ * @returns the number of headers.
+ */
+ virtual size_t size() const = 0;
+};
+
+
+/**
+ * @brief Message object
+ *
+ * This object represents either a single consumed or produced message,
+ * or an event (\p err() is set).
+ *
+ * An application must check RdKafka::Message::err() to see if the
+ * object is a proper message (error is RdKafka::ERR_NO_ERROR) or a
+ * an error event.
+ *
+ */
+class RD_EXPORT Message {
+ public:
+ /** @brief Message persistence status can be used by the application to
+ * find out if a produced message was persisted in the topic log. */
+ enum Status {
+ /** Message was never transmitted to the broker, or failed with
+ * an error indicating it was not written to the log.
+ * Application retry risks ordering, but not duplication. */
+ MSG_STATUS_NOT_PERSISTED = 0,
+
+ /** Message was transmitted to broker, but no acknowledgement was
+ * received.
+ * Application retry risks ordering and duplication. */
+ MSG_STATUS_POSSIBLY_PERSISTED = 1,
+
+ /** Message was written to the log and fully acknowledged.
+ * No reason for application to retry.
+ * Note: this value should only be trusted with \c acks=all. */
+ MSG_STATUS_PERSISTED = 2,
+ };
+
+ /**
+ * @brief Accessor functions*
+ * @remark Not all fields are present in all types of callbacks.
+ */
+
+ /** @returns The error string if object represent an error event,
+ * else an empty string. */
+ virtual std::string errstr() const = 0;
+
+ /** @returns The error code if object represents an error event, else 0. */
+ virtual ErrorCode err() const = 0;
+
+ /** @returns the RdKafka::Topic object for a message (if applicable),
+ * or NULL if a corresponding RdKafka::Topic object has not been
+ * explicitly created with RdKafka::Topic::create().
+ * In this case use topic_name() instead. */
+ virtual Topic *topic() const = 0;
+
+ /** @returns Topic name (if applicable, else empty string) */
+ virtual std::string topic_name() const = 0;
+
+ /** @returns Partition (if applicable) */
+ virtual int32_t partition() const = 0;
+
+ /** @returns Message payload (if applicable) */
+ virtual void *payload() const = 0;
+
+ /** @returns Message payload length (if applicable) */
+ virtual size_t len() const = 0;
+
+ /** @returns Message key as string (if applicable) */
+ virtual const std::string *key() const = 0;
+
+ /** @returns Message key as void pointer (if applicable) */
+ virtual const void *key_pointer() const = 0;
+
+ /** @returns Message key's binary length (if applicable) */
+ virtual size_t key_len() const = 0;
+
+ /** @returns Message or error offset (if applicable) */
+ virtual int64_t offset() const = 0;
+
+ /** @returns Message timestamp (if applicable) */
+ virtual MessageTimestamp timestamp() const = 0;
+
+ /** @returns The \p msg_opaque as provided to RdKafka::Producer::produce() */
+ virtual void *msg_opaque() const = 0;
+
+ virtual ~Message() = 0;
+
+ /** @returns the latency in microseconds for a produced message measured
+ * from the produce() call, or -1 if latency is not available. */
+ virtual int64_t latency() const = 0;
+
+ /**
+ * @brief Returns the underlying librdkafka C rd_kafka_message_t handle.
+ *
+ * @warning Calling the C API on this handle is not recommended and there
+ * is no official support for it, but for cases where the C++ API
+ * does not provide the underlying functionality this C handle can be
+ * used to interact directly with the core librdkafka API.
+ *
+ * @remark The lifetime of the returned pointer is the same as the Message
+ * object this method is called on.
+ *
+ * @remark Include <rdkafka/rdkafka.h> prior to including
+ * <rdkafka/rdkafkacpp.h>
+ *
+ * @returns \c rd_kafka_message_t*
+ */
+ virtual struct rd_kafka_message_s *c_ptr() = 0;
+
+ /**
+ * @brief Returns the message's persistence status in the topic log.
+ */
+ virtual Status status() const = 0;
+
+ /** @returns the Headers instance for this Message, or NULL if there
+ * are no headers.
+ *
+ * @remark The lifetime of the Headers are the same as the Message. */
+ virtual RdKafka::Headers *headers() = 0;
+
+ /** @returns the Headers instance for this Message (if applicable).
+ * If NULL is returned the reason is given in \p err, which
+ * is either ERR__NOENT if there were no headers, or another
+ * error code if header parsing failed.
+ *
+ * @remark The lifetime of the Headers are the same as the Message. */
+ virtual RdKafka::Headers *headers(RdKafka::ErrorCode *err) = 0;
+
+ /** @returns the broker id of the broker the message was produced to or
+ * fetched from, or -1 if not known/applicable. */
+ virtual int32_t broker_id() const = 0;
+
+ /** @returns the message's partition leader epoch at the time the message was
+ * fetched and if known, else -1. */
+ virtual int32_t leader_epoch() const = 0;
+
+ /**
+ * @brief Store offset +1 for the consumed message.
+ *
+ * The message offset + 1 will be committed to broker according
+ * to \c `auto.commit.interval.ms` or manual offset-less commit()
+ *
+ * @warning This method may only be called for partitions that are currently
+ * assigned.
+ * Non-assigned partitions will fail with ERR__STATE.
+ *
+ * @warning Avoid storing offsets after calling seek() (et.al) as
+ * this may later interfere with resuming a paused partition, instead
+ * store offsets prior to calling seek.
+ *
+ * @remark \c `enable.auto.offset.store` must be set to "false" when using
+ * this API.
+ *
+ * @returns NULL on success or an error object on failure.
+ */
+ virtual Error *offset_store() = 0;
+};
+
+/**@}*/
+
+
+/**
+ * @name Queue interface
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Queue interface
+ *
+ * Create a new message queue. Message queues allows the application
+ * to re-route consumed messages from multiple topic+partitions into
+ * one single queue point. This queue point, containing messages from
+ * a number of topic+partitions, may then be served by a single
+ * consume() method, rather than one per topic+partition combination.
+ *
+ * See the RdKafka::Consumer::start(), RdKafka::Consumer::consume(), and
+ * RdKafka::Consumer::consume_callback() methods that take a queue as the first
+ * parameter for more information.
+ */
+class RD_EXPORT Queue {
+ public:
+ /**
+ * @brief Create Queue object
+ */
+ static Queue *create(Handle *handle);
+
+ /**
+ * @brief Forward/re-route queue to \p dst.
+ * If \p dst is \c NULL, the forwarding is removed.
+ *
+ * The internal refcounts for both queues are increased.
+ *
+ * @remark Regardless of whether \p dst is NULL or not, after calling this
+ * function, \p src will not forward it's fetch queue to the consumer
+ * queue.
+ */
+ virtual ErrorCode forward(Queue *dst) = 0;
+
+
+ /**
+ * @brief Consume message or get error event from the queue.
+ *
+ * @remark Use \c delete to free the message.
+ *
+ * @returns One of:
+ * - proper message (RdKafka::Message::err() is ERR_NO_ERROR)
+ * - error event (RdKafka::Message::err() is != ERR_NO_ERROR)
+ * - timeout due to no message or event in \p timeout_ms
+ * (RdKafka::Message::err() is ERR__TIMED_OUT)
+ */
+ virtual Message *consume(int timeout_ms) = 0;
+
+ /**
+ * @brief Poll queue, serving any enqueued callbacks.
+ *
+ * @remark Must NOT be used for queues containing messages.
+ *
+ * @returns the number of events served or 0 on timeout.
+ */
+ virtual int poll(int timeout_ms) = 0;
+
+ virtual ~Queue() = 0;
+
+ /**
+ * @brief Enable IO event triggering for queue.
+ *
+ * To ease integration with IO based polling loops this API
+ * allows an application to create a separate file-descriptor
+ * that librdkafka will write \p payload (of size \p size) to
+ * whenever a new element is enqueued on a previously empty queue.
+ *
+ * To remove event triggering call with \p fd = -1.
+ *
+ * librdkafka will maintain a copy of the \p payload.
+ *
+ * @remark When using forwarded queues the IO event must only be enabled
+ * on the final forwarded-to (destination) queue.
+ */
+ virtual void io_event_enable(int fd, const void *payload, size_t size) = 0;
+};
+
+/**@}*/
+
+/**
+ * @name ConsumerGroupMetadata
+ * @{
+ *
+ */
+/**
+ * @brief ConsumerGroupMetadata holds a consumer instance's group
+ * metadata state.
+ *
+ * This class currently does not have any public methods.
+ */
+class RD_EXPORT ConsumerGroupMetadata {
+ public:
+ virtual ~ConsumerGroupMetadata() = 0;
+};
+
+/**@}*/
+
+/**
+ * @name KafkaConsumer
+ * @{
+ *
+ */
+
+
+/**
+ * @brief High-level KafkaConsumer (for brokers 0.9 and later)
+ *
+ * @remark Requires Apache Kafka >= 0.9.0 brokers
+ *
+ * Currently supports the \c range and \c roundrobin partition assignment
+ * strategies (see \c partition.assignment.strategy)
+ */
+class RD_EXPORT KafkaConsumer : public virtual Handle {
+ public:
+ /**
+ * @brief Creates a KafkaConsumer.
+ *
+ * The \p conf object must have \c group.id set to the consumer group to join.
+ *
+ * Use RdKafka::KafkaConsumer::close() to shut down the consumer.
+ *
+ * @sa RdKafka::RebalanceCb
+ * @sa CONFIGURATION.md for \c group.id, \c session.timeout.ms,
+ * \c partition.assignment.strategy, etc.
+ */
+ static KafkaConsumer *create(const Conf *conf, std::string &errstr);
+
+ virtual ~KafkaConsumer() = 0;
+
+
+ /** @brief Returns the current partition assignment as set by
+ * RdKafka::KafkaConsumer::assign() */
+ virtual ErrorCode assignment(
+ std::vector<RdKafka::TopicPartition *> &partitions) = 0;
+
+ /** @brief Returns the current subscription as set by
+ * RdKafka::KafkaConsumer::subscribe() */
+ virtual ErrorCode subscription(std::vector<std::string> &topics) = 0;
+
+ /**
+ * @brief Update the subscription set to \p topics.
+ *
+ * Any previous subscription will be unassigned and unsubscribed first.
+ *
+ * The subscription set denotes the desired topics to consume and this
+ * set is provided to the partition assignor (one of the elected group
+ * members) for all clients which then uses the configured
+ * \c partition.assignment.strategy to assign the subscription sets's
+ * topics's partitions to the consumers, depending on their subscription.
+ *
+ * The result of such an assignment is a rebalancing which is either
+ * handled automatically in librdkafka or can be overridden by the application
+ * by providing a RdKafka::RebalanceCb.
+ *
+ * The rebalancing passes the assigned partition set to
+ * RdKafka::KafkaConsumer::assign() to update what partitions are actually
+ * being fetched by the KafkaConsumer.
+ *
+ * Regex pattern matching automatically performed for topics prefixed
+ * with \c \"^\" (e.g. \c \"^myPfx[0-9]_.*\"
+ *
+ * @remark A consumer error will be raised for each unavailable topic in the
+ * \p topics. The error will be ERR_UNKNOWN_TOPIC_OR_PART
+ * for non-existent topics, and
+ * ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics.
+ * The consumer error will be raised through consume() (et.al.)
+ * with the \c RdKafka::Message::err() returning one of the
+ * error codes mentioned above.
+ * The subscribe function itself is asynchronous and will not return
+ * an error on unavailable topics.
+ *
+ * @returns an error if the provided list of topics is invalid.
+ */
+ virtual ErrorCode subscribe(const std::vector<std::string> &topics) = 0;
+
+ /** @brief Unsubscribe from the current subscription set. */
+ virtual ErrorCode unsubscribe() = 0;
+
+ /**
+ * @brief Update the assignment set to \p partitions.
+ *
+ * The assignment set is the set of partitions actually being consumed
+ * by the KafkaConsumer.
+ */
+ virtual ErrorCode assign(const std::vector<TopicPartition *> &partitions) = 0;
+
+ /**
+ * @brief Stop consumption and remove the current assignment.
+ */
+ virtual ErrorCode unassign() = 0;
+
+ /**
+ * @brief Consume message or get error event, triggers callbacks.
+ *
+ * Will automatically call registered callbacks for any such queued events,
+ * including RdKafka::RebalanceCb, RdKafka::EventCb, RdKafka::OffsetCommitCb,
+ * etc.
+ *
+ * @remark Use \c delete to free the message.
+ *
+ * @remark An application should make sure to call consume() at regular
+ * intervals, even if no messages are expected, to serve any
+ * queued callbacks waiting to be called. This is especially
+ * important when a RebalanceCb has been registered as it needs
+ * to be called and handled properly to synchronize internal
+ * consumer state.
+ *
+ * @remark Application MUST NOT call \p poll() on KafkaConsumer objects.
+ *
+ * @returns One of:
+ * - proper message (RdKafka::Message::err() is ERR_NO_ERROR)
+ * - error event (RdKafka::Message::err() is != ERR_NO_ERROR)
+ * - timeout due to no message or event in \p timeout_ms
+ * (RdKafka::Message::err() is ERR__TIMED_OUT)
+ */
+ virtual Message *consume(int timeout_ms) = 0;
+
+ /**
+ * @brief Commit offsets for the current assignment.
+ *
+ * @remark This is the synchronous variant that blocks until offsets
+ * are committed or the commit fails (see return value).
+ *
+ * @remark If a RdKafka::OffsetCommitCb callback is registered it will
+ * be called with commit details on a future call to
+ * RdKafka::KafkaConsumer::consume()
+
+ *
+ * @returns ERR_NO_ERROR or error code.
+ */
+ virtual ErrorCode commitSync() = 0;
+
+ /**
+ * @brief Asynchronous version of RdKafka::KafkaConsumer::CommitSync()
+ *
+ * @sa RdKafka::KafkaConsumer::commitSync()
+ */
+ virtual ErrorCode commitAsync() = 0;
+
+ /**
+ * @brief Commit offset for a single topic+partition based on \p message
+ *
+ * @remark The offset committed will be the message's offset + 1.
+ *
+ * @remark This is the synchronous variant.
+ *
+ * @sa RdKafka::KafkaConsumer::commitSync()
+ */
+ virtual ErrorCode commitSync(Message *message) = 0;
+
+ /**
+ * @brief Commit offset for a single topic+partition based on \p message
+ *
+ * @remark The offset committed will be the message's offset + 1.
+ *
+ * @remark This is the asynchronous variant.
+ *
+ * @sa RdKafka::KafkaConsumer::commitSync()
+ */
+ virtual ErrorCode commitAsync(Message *message) = 0;
+
+ /**
+ * @brief Commit offsets for the provided list of partitions.
+ *
+ * @remark The \c .offset of the partitions in \p offsets should be the
+ * offset where consumption will resume, i.e., the last
+ * processed offset + 1.
+ *
+ * @remark This is the synchronous variant.
+ */
+ virtual ErrorCode commitSync(std::vector<TopicPartition *> &offsets) = 0;
+
+ /**
+ * @brief Commit offset for the provided list of partitions.
+ *
+ * @remark The \c .offset of the partitions in \p offsets should be the
+ * offset where consumption will resume, i.e., the last
+ * processed offset + 1.
+ *
+ * @remark This is the asynchronous variant.
+ */
+ virtual ErrorCode commitAsync(
+ const std::vector<TopicPartition *> &offsets) = 0;
+
+ /**
+ * @brief Commit offsets for the current assignment.
+ *
+ * @remark This is the synchronous variant that blocks until offsets
+ * are committed or the commit fails (see return value).
+ *
+ * @remark The provided callback will be called from this function.
+ *
+ * @returns ERR_NO_ERROR or error code.
+ */
+ virtual ErrorCode commitSync(OffsetCommitCb *offset_commit_cb) = 0;
+
+ /**
+ * @brief Commit offsets for the provided list of partitions.
+ *
+ * @remark This is the synchronous variant that blocks until offsets
+ * are committed or the commit fails (see return value).
+ *
+ * @remark The provided callback will be called from this function.
+ *
+ * @returns ERR_NO_ERROR or error code.
+ */
+ virtual ErrorCode commitSync(std::vector<TopicPartition *> &offsets,
+ OffsetCommitCb *offset_commit_cb) = 0;
+
+
+
+ /**
+ * @brief Retrieve committed offsets for topics+partitions.
+ *
+ * @returns ERR_NO_ERROR on success in which case the
+ * \p offset or \p err field of each \p partitions' element is filled
+ * in with the stored offset, or a partition specific error.
+ * Else returns an error code.
+ */
+ virtual ErrorCode committed(std::vector<TopicPartition *> &partitions,
+ int timeout_ms) = 0;
+
+ /**
+ * @brief Retrieve current positions (offsets) for topics+partitions.
+ *
+ * @returns ERR_NO_ERROR on success in which case the
+ * \p offset or \p err field of each \p partitions' element is filled
+ * in with the stored offset, or a partition specific error.
+ * Else returns an error code.
+ */
+ virtual ErrorCode position(std::vector<TopicPartition *> &partitions) = 0;
+
+
+ /**
+ * For pausing and resuming consumption, see
+ * @sa RdKafka::Handle::pause() and RdKafka::Handle::resume()
+ */
+
+
+ /**
+ * @brief Close and shut down the consumer.
+ *
+ * This call will block until the following operations are finished:
+ * - Trigger a local rebalance to void the current assignment (if any).
+ * - Stop consumption for current assignment (if any).
+ * - Commit offsets (if any).
+ * - Leave group (if applicable).
+ *
+ * The maximum blocking time is roughly limited to session.timeout.ms.
+ *
+ * @remark Callbacks, such as RdKafka::RebalanceCb and
+ * RdKafka::OffsetCommitCb, etc, may be called.
+ *
+ * @remark The consumer object must later be freed with \c delete
+ */
+ virtual ErrorCode close() = 0;
+
+
+ /**
+ * @brief Seek consumer for topic+partition to offset which is either an
+ * absolute or logical offset.
+ *
+ * If \p timeout_ms is not 0 the call will wait this long for the
+ * seek to be performed. If the timeout is reached the internal state
+ * will be unknown and this function returns `ERR__TIMED_OUT`.
+ * If \p timeout_ms is 0 it will initiate the seek but return
+ * immediately without any error reporting (e.g., async).
+ *
+ * This call triggers a fetch queue barrier flush.
+ *
+ * @remark Consumption for the given partition must have started for the
+ * seek to work. Use assign() to set the starting offset.
+ *
+ * @returns an ErrorCode to indicate success or failure.
+ */
+ virtual ErrorCode seek(const TopicPartition &partition, int timeout_ms) = 0;
+
+
+ /**
+ * @brief Store offset \p offset for topic partition \p partition.
+ * The offset will be committed (written) to the offset store according
+ * to \p auto.commit.interval.ms or the next manual offset-less commit*()
+ *
+ * Per-partition success/error status propagated through TopicPartition.err()
+ *
+ * @remark The \c .offset field is stored as is, it will NOT be + 1.
+ *
+ * @remark \c enable.auto.offset.store must be set to \c false when using
+ * this API.
+ *
+ * @remark The leader epoch, if set, will be used to fence outdated partition
+ * leaders. See TopicPartition::set_leader_epoch().
+ *
+ * @returns RdKafka::ERR_NO_ERROR on success, or
+ * RdKafka::ERR___UNKNOWN_PARTITION if none of the offsets could
+ * be stored, or
+ * RdKafka::ERR___INVALID_ARG if \c enable.auto.offset.store is true.
+ */
+ virtual ErrorCode offsets_store(std::vector<TopicPartition *> &offsets) = 0;
+
+
+ /**
+ * @returns the current consumer group metadata associated with this consumer,
+ * or NULL if the consumer is configured with a \c group.id.
+ * This metadata object should be passed to the transactional
+ * producer's RdKafka::Producer::send_offsets_to_transaction() API.
+ *
+ * @remark The returned object must be deleted by the application.
+ *
+ * @sa RdKafka::Producer::send_offsets_to_transaction()
+ */
+ virtual ConsumerGroupMetadata *groupMetadata() = 0;
+
+
+ /** @brief Check whether the consumer considers the current assignment to
+ * have been lost involuntarily. This method is only applicable for
+ * use with a subscribing consumer. Assignments are revoked
+ * immediately when determined to have been lost, so this method is
+ * only useful within a rebalance callback. Partitions that have
+ * been lost may already be owned by other members in the group and
+ * therefore commiting offsets, for example, may fail.
+ *
+ * @remark Calling assign(), incremental_assign() or incremental_unassign()
+ * resets this flag.
+ *
+ * @returns Returns true if the current partition assignment is considered
+ * lost, false otherwise.
+ */
+ virtual bool assignment_lost() = 0;
+
+ /**
+ * @brief The rebalance protocol currently in use. This will be
+ * "NONE" if the consumer has not (yet) joined a group, else it will
+ * match the rebalance protocol ("EAGER", "COOPERATIVE") of the
+ * configured and selected assignor(s). All configured
+ * assignors must have the same protocol type, meaning
+ * online migration of a consumer group from using one
+ * protocol to another (in particular upgading from EAGER
+ * to COOPERATIVE) without a restart is not currently
+ * supported.
+ *
+ * @returns an empty string on error, or one of
+ * "NONE", "EAGER", "COOPERATIVE" on success.
+ */
+
+ virtual std::string rebalance_protocol() = 0;
+
+
+ /**
+ * @brief Incrementally add \p partitions to the current assignment.
+ *
+ * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
+ * this method should be used in a rebalance callback to adjust the current
+ * assignment appropriately in the case where the rebalance type is
+ * ERR__ASSIGN_PARTITIONS. The application must pass the partition list
+ * passed to the callback (or a copy of it), even if the list is empty.
+ * This method may also be used outside the context of a rebalance callback.
+ *
+ * @returns NULL on success, or an error object if the operation was
+ * unsuccessful.
+ *
+ * @remark The returned object must be deleted by the application.
+ */
+ virtual Error *incremental_assign(
+ const std::vector<TopicPartition *> &partitions) = 0;
+
+
+ /**
+ * @brief Incrementally remove \p partitions from the current assignment.
+ *
+ * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
+ * this method should be used in a rebalance callback to adjust the current
+ * assignment appropriately in the case where the rebalance type is
+ * ERR__REVOKE_PARTITIONS. The application must pass the partition list
+ * passed to the callback (or a copy of it), even if the list is empty.
+ * This method may also be used outside the context of a rebalance callback.
+ *
+ * @returns NULL on success, or an error object if the operation was
+ * unsuccessful.
+ *
+ * @remark The returned object must be deleted by the application.
+ */
+ virtual Error *incremental_unassign(
+ const std::vector<TopicPartition *> &partitions) = 0;
+
+ /**
+ * @brief Close and shut down the consumer.
+ *
+ * Performs the same actions as RdKafka::KafkaConsumer::close() but in a
+ * background thread.
+ *
+ * Rebalance events/callbacks (etc) will be forwarded to the
+ * application-provided \p queue. The application must poll this queue until
+ * RdKafka::KafkaConsumer::closed() returns true.
+ *
+ * @remark Depending on consumer group join state there may or may not be
+ * rebalance events emitted on \p rkqu.
+ *
+ * @returns an error object if the consumer close failed, else NULL.
+ *
+ * @sa RdKafka::KafkaConsumer::closed()
+ */
+ virtual Error *close(Queue *queue) = 0;
+
+
+ /** @returns true if the consumer is closed, else 0.
+ *
+ * @sa RdKafka::KafkaConsumer::close()
+ */
+ virtual bool closed() = 0;
+};
+
+
+/**@}*/
+
+
+/**
+ * @name Simple Consumer (legacy)
+ * @{
+ *
+ */
+
+/**
+ * @brief Simple Consumer (legacy)
+ *
+ * A simple non-balanced, non-group-aware, consumer.
+ */
+class RD_EXPORT Consumer : public virtual Handle {
+ public:
+ /**
+ * @brief Creates a new Kafka consumer handle.
+ *
+ * \p conf is an optional object that will be used instead of the default
+ * configuration.
+ * The \p conf object is reusable after this call.
+ *
+ * @returns the new handle on success or NULL on error in which case
+ * \p errstr is set to a human readable error message.
+ */
+ static Consumer *create(const Conf *conf, std::string &errstr);
+
+ virtual ~Consumer() = 0;
+
+
+ /**
+ * @brief Start consuming messages for topic and \p partition
+ * at offset \p offset which may either be a proper offset (0..N)
+ * or one of the the special offsets: \p OFFSET_BEGINNING or \p OFFSET_END.
+ *
+ * rdkafka will attempt to keep \p queued.min.messages (config property)
+ * messages in the local queue by repeatedly fetching batches of messages
+ * from the broker until the threshold is reached.
+ *
+ * The application shall use one of the \p ..->consume*() functions
+ * to consume messages from the local queue, each kafka message being
+ * represented as a `RdKafka::Message *` object.
+ *
+ * \p ..->start() must not be called multiple times for the same
+ * topic and partition without stopping consumption first with
+ * \p ..->stop().
+ *
+ * @returns an ErrorCode to indicate success or failure.
+ */
+ virtual ErrorCode start(Topic *topic, int32_t partition, int64_t offset) = 0;
+
+ /**
+ * @brief Start consuming messages for topic and \p partition on
+ * queue \p queue.
+ *
+ * @sa RdKafka::Consumer::start()
+ */
+ virtual ErrorCode start(Topic *topic,
+ int32_t partition,
+ int64_t offset,
+ Queue *queue) = 0;
+
+ /**
+ * @brief Stop consuming messages for topic and \p partition, purging
+ * all messages currently in the local queue.
+ *
+ * The application needs to be stop all consumers before destroying
+ * the Consumer handle.
+ *
+ * @returns an ErrorCode to indicate success or failure.
+ */
+ virtual ErrorCode stop(Topic *topic, int32_t partition) = 0;
+
+ /**
+ * @brief Seek consumer for topic+partition to \p offset which is either an
+ * absolute or logical offset.
+ *
+ * If \p timeout_ms is not 0 the call will wait this long for the
+ * seek to be performed. If the timeout is reached the internal state
+ * will be unknown and this function returns `ERR__TIMED_OUT`.
+ * If \p timeout_ms is 0 it will initiate the seek but return
+ * immediately without any error reporting (e.g., async).
+ *
+ * This call triggers a fetch queue barrier flush.
+ *
+ * @returns an ErrorCode to indicate success or failure.
+ */
+ virtual ErrorCode seek(Topic *topic,
+ int32_t partition,
+ int64_t offset,
+ int timeout_ms) = 0;
+
+ /**
+ * @brief Consume a single message from \p topic and \p partition.
+ *
+ * \p timeout_ms is maximum amount of time to wait for a message to be
+ * received.
+ * Consumer must have been previously started with \p ..->start().
+ *
+ * @returns a Message object, the application needs to check if message
+ * is an error or a proper message RdKafka::Message::err() and checking for
+ * \p ERR_NO_ERROR.
+ *
+ * The message object must be destroyed when the application is done with it.
+ *
+ * Errors (in RdKafka::Message::err()):
+ * - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched.
+ * - ERR__PARTITION_EOF - End of partition reached, not an error.
+ */
+ virtual Message *consume(Topic *topic, int32_t partition, int timeout_ms) = 0;
+
+ /**
+ * @brief Consume a single message from the specified queue.
+ *
+ * \p timeout_ms is maximum amount of time to wait for a message to be
+ * received.
+ * Consumer must have been previously started on the queue with
+ * \p ..->start().
+ *
+ * @returns a Message object, the application needs to check if message
+ * is an error or a proper message \p Message->err() and checking for
+ * \p ERR_NO_ERROR.
+ *
+ * The message object must be destroyed when the application is done with it.
+ *
+ * Errors (in RdKafka::Message::err()):
+ * - ERR__TIMED_OUT - \p timeout_ms was reached with no new messages fetched
+ *
+ * Note that Message->topic() may be nullptr after certain kinds of
+ * errors, so applications should check that it isn't null before
+ * dereferencing it.
+ */
+ virtual Message *consume(Queue *queue, int timeout_ms) = 0;
+
+ /**
+ * @brief Consumes messages from \p topic and \p partition, calling
+ * the provided callback for each consumed messsage.
+ *
+ * \p consume_callback() provides higher throughput performance
+ * than \p consume().
+ *
+ * \p timeout_ms is the maximum amount of time to wait for one or
+ * more messages to arrive.
+ *
+ * The provided \p consume_cb instance has its \p consume_cb function
+ * called for every message received.
+ *
+ * The \p opaque argument is passed to the \p consume_cb as \p opaque.
+ *
+ * @returns the number of messages processed or -1 on error.
+ *
+ * @sa RdKafka::Consumer::consume()
+ */
+ virtual int consume_callback(Topic *topic,
+ int32_t partition,
+ int timeout_ms,
+ ConsumeCb *consume_cb,
+ void *opaque) = 0;
+
+ /**
+ * @brief Consumes messages from \p queue, calling the provided callback for
+ * each consumed messsage.
+ *
+ * @sa RdKafka::Consumer::consume_callback()
+ */
+ virtual int consume_callback(Queue *queue,
+ int timeout_ms,
+ RdKafka::ConsumeCb *consume_cb,
+ void *opaque) = 0;
+
+ /**
+ * @brief Converts an offset into the logical offset from the tail of a topic.
+ *
+ * \p offset is the (positive) number of items from the end.
+ *
+ * @returns the logical offset for message \p offset from the tail, this value
+ * may be passed to Consumer::start, et.al.
+ * @remark The returned logical offset is specific to librdkafka.
+ */
+ static int64_t OffsetTail(int64_t offset);
+};
+
+/**@}*/
+
+
+/**
+ * @name Producer
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Producer
+ */
+class RD_EXPORT Producer : public virtual Handle {
+ public:
+ /**
+ * @brief Creates a new Kafka producer handle.
+ *
+ * \p conf is an optional object that will be used instead of the default
+ * configuration.
+ * The \p conf object is reusable after this call.
+ *
+ * @returns the new handle on success or NULL on error in which case
+ * \p errstr is set to a human readable error message.
+ */
+ static Producer *create(const Conf *conf, std::string &errstr);
+
+
+ virtual ~Producer() = 0;
+
+ /**
+ * @brief RdKafka::Producer::produce() \p msgflags
+ *
+ * These flags are optional.
+ */
+ enum {
+ RK_MSG_FREE = 0x1, /**< rdkafka will free(3) \p payload
+ * when it is done with it.
+ * Mutually exclusive with RK_MSG_COPY. */
+ RK_MSG_COPY = 0x2, /**< the \p payload data will be copied
+ * and the \p payload pointer will not
+ * be used by rdkafka after the
+ * call returns.
+ * Mutually exclusive with RK_MSG_FREE. */
+ RK_MSG_BLOCK = 0x4 /**< Block produce*() on message queue
+ * full.
+ * WARNING:
+ * If a delivery report callback
+ * is used the application MUST
+ * call rd_kafka_poll() (or equiv.)
+ * to make sure delivered messages
+ * are drained from the internal
+ * delivery report queue.
+ * Failure to do so will result
+ * in indefinately blocking on
+ * the produce() call when the
+ * message queue is full.
+ */
+
+
+ /**@cond NO_DOC*/
+ /* For backwards compatibility: */
+#ifndef MSG_COPY /* defined in sys/msg.h */
+ , /** this comma must exist betwen
+ * RK_MSG_BLOCK and MSG_FREE
+ */
+ MSG_FREE = RK_MSG_FREE,
+ MSG_COPY = RK_MSG_COPY
+#endif
+ /**@endcond*/
+ };
+
+ /**
+ * @brief Produce and send a single message to broker.
+ *
+ * This is an asynch non-blocking API.
+ *
+ * \p partition is the target partition, either:
+ * - RdKafka::Topic::PARTITION_UA (unassigned) for
+ * automatic partitioning using the topic's partitioner function, or
+ * - a fixed partition (0..N)
+ *
+ * \p msgflags is zero or more of the following flags OR:ed together:
+ * RK_MSG_BLOCK - block \p produce*() call if
+ * \p queue.buffering.max.messages or
+ * \p queue.buffering.max.kbytes are exceeded.
+ * Messages are considered in-queue from the point they
+ * are accepted by produce() until their corresponding
+ * delivery report callback/event returns.
+ * It is thus a requirement to call
+ * poll() (or equiv.) from a separate
+ * thread when RK_MSG_BLOCK is used.
+ * See WARNING on \c RK_MSG_BLOCK above.
+ * RK_MSG_FREE - rdkafka will free(3) \p payload when it is done with it.
+ * RK_MSG_COPY - the \p payload data will be copied and the \p payload
+ * pointer will not be used by rdkafka after the
+ * call returns.
+ *
+ * NOTE: RK_MSG_FREE and RK_MSG_COPY are mutually exclusive.
+ *
+ * If the function returns an error code and RK_MSG_FREE was specified, then
+ * the memory associated with the payload is still the caller's
+ * responsibility.
+ *
+ * \p payload is the message payload of size \p len bytes.
+ *
+ * \p key is an optional message key, if non-NULL it
+ * will be passed to the topic partitioner as well as be sent with the
+ * message to the broker and passed on to the consumer.
+ *
+ * \p msg_opaque is an optional application-provided per-message opaque
+ * pointer that will provided in the delivery report callback (\p dr_cb) for
+ * referencing this message.
+ *
+ * @returns an ErrorCode to indicate success or failure:
+ * - ERR_NO_ERROR - message successfully enqueued for transmission.
+ *
+ * - ERR__QUEUE_FULL - maximum number of outstanding messages has been
+ * reached: \c queue.buffering.max.message
+ *
+ * - ERR_MSG_SIZE_TOO_LARGE - message is larger than configured max size:
+ * \c messages.max.bytes
+ *
+ * - ERR__UNKNOWN_PARTITION - requested \p partition is unknown in the
+ * Kafka cluster.
+ *
+ * - ERR__UNKNOWN_TOPIC - topic is unknown in the Kafka cluster.
+ */
+ virtual ErrorCode produce(Topic *topic,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const std::string *key,
+ void *msg_opaque) = 0;
+
+ /**
+ * @brief Variant produce() that passes the key as a pointer and length
+ * instead of as a const std::string *.
+ */
+ virtual ErrorCode produce(Topic *topic,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ void *msg_opaque) = 0;
+
+ /**
+ * @brief produce() variant that takes topic as a string (no need for
+ * creating a Topic object), and also allows providing the
+ * message timestamp (milliseconds since beginning of epoch, UTC).
+ * Otherwise identical to produce() above.
+ */
+ virtual ErrorCode produce(const std::string topic_name,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ int64_t timestamp,
+ void *msg_opaque) = 0;
+
+ /**
+ * @brief produce() variant that that allows for Header support on produce
+ * Otherwise identical to produce() above.
+ *
+ * @warning The \p headers will be freed/deleted if the produce() call
+ * succeeds, or left untouched if produce() fails.
+ */
+ virtual ErrorCode produce(const std::string topic_name,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ int64_t timestamp,
+ RdKafka::Headers *headers,
+ void *msg_opaque) = 0;
+
+
+ /**
+ * @brief Variant produce() that accepts vectors for key and payload.
+ * The vector data will be copied.
+ */
+ virtual ErrorCode produce(Topic *topic,
+ int32_t partition,
+ const std::vector<char> *payload,
+ const std::vector<char> *key,
+ void *msg_opaque) = 0;
+
+
+ /**
+ * @brief Wait until all outstanding produce requests, et.al, are completed.
+ * This should typically be done prior to destroying a producer
+ * instance to make sure all queued and in-flight produce requests are
+ * completed before terminating.
+ *
+ * @remark The \c linger.ms time will be ignored for the duration of the call,
+ * queued messages will be sent to the broker as soon as possible.
+ *
+ * @remark This function will call Producer::poll() and thus
+ * trigger callbacks.
+ *
+ * @returns ERR__TIMED_OUT if \p timeout_ms was reached before all
+ * outstanding requests were completed, else ERR_NO_ERROR
+ */
+ virtual ErrorCode flush(int timeout_ms) = 0;
+
+
+ /**
+ * @brief Purge messages currently handled by the producer instance.
+ *
+ * @param purge_flags tells which messages should be purged and how.
+ *
+ * The application will need to call Handle::poll() or Producer::flush()
+ * afterwards to serve the delivery report callbacks of the purged messages.
+ *
+ * Messages purged from internal queues fail with the delivery report
+ * error code set to ERR__PURGE_QUEUE, while purged messages that
+ * are in-flight to or from the broker will fail with the error code set to
+ * ERR__PURGE_INFLIGHT.
+ *
+ * @warning Purging messages that are in-flight to or from the broker
+ * will ignore any sub-sequent acknowledgement for these messages
+ * received from the broker, effectively making it impossible
+ * for the application to know if the messages were successfully
+ * produced or not. This may result in duplicate messages if the
+ * application retries these messages at a later time.
+ *
+ * @remark This call may block for a short time while background thread
+ * queues are purged.
+ *
+ * @returns ERR_NO_ERROR on success,
+ * ERR__INVALID_ARG if the \p purge flags are invalid or unknown,
+ * ERR__NOT_IMPLEMENTED if called on a non-producer client instance.
+ */
+ virtual ErrorCode purge(int purge_flags) = 0;
+
+ /**
+ * @brief RdKafka::Handle::purge() \p purge_flags
+ */
+ enum {
+ PURGE_QUEUE = 0x1, /**< Purge messages in internal queues */
+
+ PURGE_INFLIGHT = 0x2, /*! Purge messages in-flight to or from the broker.
+ * Purging these messages will void any future
+ * acknowledgements from the broker, making it
+ * impossible for the application to know if these
+ * messages were successfully delivered or not.
+ * Retrying these messages may lead to duplicates. */
+
+ PURGE_NON_BLOCKING = 0x4 /* Don't wait for background queue
+ * purging to finish. */
+ };
+
+ /**
+ * @name Transactional API
+ * @{
+ *
+ * Requires Kafka broker version v0.11.0 or later
+ *
+ * See the Transactional API documentation in rdkafka.h for more information.
+ */
+
+ /**
+ * @brief Initialize transactions for the producer instance.
+ *
+ * @param timeout_ms The maximum time to block. On timeout the operation
+ * may continue in the background, depending on state,
+ * and it is okay to call init_transactions() again.
+ *
+ * @returns an RdKafka::Error object on error, or NULL on success.
+ * Check whether the returned error object permits retrying
+ * by calling RdKafka::Error::is_retriable(), or whether a fatal
+ * error has been raised by calling RdKafka::Error::is_fatal().
+ *
+ * @remark The returned error object (if not NULL) must be deleted.
+ *
+ * See rd_kafka_init_transactions() in rdkafka.h for more information.
+ *
+ */
+ virtual Error *init_transactions(int timeout_ms) = 0;
+
+
+ /**
+ * @brief init_transactions() must have been called successfully
+ * (once) before this function is called.
+ *
+ * @returns an RdKafka::Error object on error, or NULL on success.
+ * Check whether a fatal error has been raised by calling
+ * RdKafka::Error::is_fatal_error().
+ *
+ * @remark The returned error object (if not NULL) must be deleted.
+ *
+ * See rd_kafka_begin_transaction() in rdkafka.h for more information.
+ */
+ virtual Error *begin_transaction() = 0;
+
+ /**
+ * @brief Sends a list of topic partition offsets to the consumer group
+ * coordinator for \p group_metadata, and marks the offsets as part
+ * part of the current transaction.
+ * These offsets will be considered committed only if the transaction
+ * is committed successfully.
+ *
+ * The offsets should be the next message your application will
+ * consume,
+ * i.e., the last processed message's offset + 1 for each partition.
+ * Either track the offsets manually during processing or use
+ * RdKafka::KafkaConsumer::position() (on the consumer) to get the
+ * current offsets for
+ * the partitions assigned to the consumer.
+ *
+ * Use this method at the end of a consume-transform-produce loop prior
+ * to committing the transaction with commit_transaction().
+ *
+ * @param offsets List of offsets to commit to the consumer group upon
+ * successful commit of the transaction. Offsets should be
+ * the next message to consume,
+ * e.g., last processed message + 1.
+ * @param group_metadata The current consumer group metadata as returned by
+ * RdKafka::KafkaConsumer::groupMetadata() on the consumer
+ * instance the provided offsets were consumed from.
+ * @param timeout_ms Maximum time allowed to register the
+ * offsets on the broker.
+ *
+ * @remark This function must be called on the transactional producer
+ * instance, not the consumer.
+ *
+ * @remark The consumer must disable auto commits
+ * (set \c enable.auto.commit to false on the consumer).
+ *
+ * @returns an RdKafka::Error object on error, or NULL on success.
+ * Check whether the returned error object permits retrying
+ * by calling RdKafka::Error::is_retriable(), or whether an abortable
+ * or fatal error has been raised by calling
+ * RdKafka::Error::txn_requires_abort() or RdKafka::Error::is_fatal()
+ * respectively.
+ *
+ * @remark The returned error object (if not NULL) must be deleted.
+ *
+ * See rd_kafka_send_offsets_to_transaction() in rdkafka.h for
+ * more information.
+ */
+ virtual Error *send_offsets_to_transaction(
+ const std::vector<TopicPartition *> &offsets,
+ const ConsumerGroupMetadata *group_metadata,
+ int timeout_ms) = 0;
+
+ /**
+ * @brief Commit the current transaction as started with begin_transaction().
+ *
+ * Any outstanding messages will be flushed (delivered) before actually
+ * committing the transaction.
+ *
+ * @param timeout_ms The maximum time to block. On timeout the operation
+ * may continue in the background, depending on state,
+ * and it is okay to call this function again.
+ * Pass -1 to use the remaining transaction timeout,
+ * this is the recommended use.
+ *
+ * @remark It is strongly recommended to always pass -1 (remaining transaction
+ * time) as the \p timeout_ms. Using other values risk internal
+ * state desynchronization in case any of the underlying protocol
+ * requests fail.
+ *
+ * @returns an RdKafka::Error object on error, or NULL on success.
+ * Check whether the returned error object permits retrying
+ * by calling RdKafka::Error::is_retriable(), or whether an abortable
+ * or fatal error has been raised by calling
+ * RdKafka::Error::txn_requires_abort() or RdKafka::Error::is_fatal()
+ * respectively.
+ *
+ * @remark The returned error object (if not NULL) must be deleted.
+ *
+ * See rd_kafka_commit_transaction() in rdkafka.h for more information.
+ */
+ virtual Error *commit_transaction(int timeout_ms) = 0;
+
+ /**
+ * @brief Aborts the ongoing transaction.
+ *
+ * This function should also be used to recover from non-fatal
+ * abortable transaction errors.
+ *
+ * Any outstanding messages will be purged and fail with
+ * RdKafka::ERR__PURGE_INFLIGHT or RdKafka::ERR__PURGE_QUEUE.
+ * See RdKafka::Producer::purge() for details.
+ *
+ * @param timeout_ms The maximum time to block. On timeout the operation
+ * may continue in the background, depending on state,
+ * and it is okay to call this function again.
+ * Pass -1 to use the remaining transaction timeout,
+ * this is the recommended use.
+ *
+ * @remark It is strongly recommended to always pass -1 (remaining transaction
+ * time) as the \p timeout_ms. Using other values risk internal
+ * state desynchronization in case any of the underlying protocol
+ * requests fail.
+ *
+ * @returns an RdKafka::Error object on error, or NULL on success.
+ * Check whether the returned error object permits retrying
+ * by calling RdKafka::Error::is_retriable(), or whether a
+ * fatal error has been raised by calling RdKafka::Error::is_fatal().
+ *
+ * @remark The returned error object (if not NULL) must be deleted.
+ *
+ * See rd_kafka_abort_transaction() in rdkafka.h for more information.
+ */
+ virtual Error *abort_transaction(int timeout_ms) = 0;
+
+ /**@}*/
+};
+
+/**@}*/
+
+
+/**
+ * @name Metadata interface
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Metadata: Broker information
+ */
+class BrokerMetadata {
+ public:
+ /** @returns Broker id */
+ virtual int32_t id() const = 0;
+
+ /** @returns Broker hostname */
+ virtual std::string host() const = 0;
+
+ /** @returns Broker listening port */
+ virtual int port() const = 0;
+
+ virtual ~BrokerMetadata() = 0;
+};
+
+
+
+/**
+ * @brief Metadata: Partition information
+ */
+class PartitionMetadata {
+ public:
+ /** @brief Replicas */
+ typedef std::vector<int32_t> ReplicasVector;
+ /** @brief ISRs (In-Sync-Replicas) */
+ typedef std::vector<int32_t> ISRSVector;
+
+ /** @brief Replicas iterator */
+ typedef ReplicasVector::const_iterator ReplicasIterator;
+ /** @brief ISRs iterator */
+ typedef ISRSVector::const_iterator ISRSIterator;
+
+
+ /** @returns Partition id */
+ virtual int32_t id() const = 0;
+
+ /** @returns Partition error reported by broker */
+ virtual ErrorCode err() const = 0;
+
+ /** @returns Leader broker (id) for partition */
+ virtual int32_t leader() const = 0;
+
+ /** @returns Replica brokers */
+ virtual const std::vector<int32_t> *replicas() const = 0;
+
+ /** @returns In-Sync-Replica brokers
+ * @warning The broker may return a cached/outdated list of ISRs.
+ */
+ virtual const std::vector<int32_t> *isrs() const = 0;
+
+ virtual ~PartitionMetadata() = 0;
+};
+
+
+
+/**
+ * @brief Metadata: Topic information
+ */
+class TopicMetadata {
+ public:
+ /** @brief Partitions */
+ typedef std::vector<const PartitionMetadata *> PartitionMetadataVector;
+ /** @brief Partitions iterator */
+ typedef PartitionMetadataVector::const_iterator PartitionMetadataIterator;
+
+ /** @returns Topic name */
+ virtual std::string topic() const = 0;
+
+ /** @returns Partition list */
+ virtual const PartitionMetadataVector *partitions() const = 0;
+
+ /** @returns Topic error reported by broker */
+ virtual ErrorCode err() const = 0;
+
+ virtual ~TopicMetadata() = 0;
+};
+
+
+/**
+ * @brief Metadata container
+ */
+class Metadata {
+ public:
+ /** @brief Brokers */
+ typedef std::vector<const BrokerMetadata *> BrokerMetadataVector;
+ /** @brief Topics */
+ typedef std::vector<const TopicMetadata *> TopicMetadataVector;
+
+ /** @brief Brokers iterator */
+ typedef BrokerMetadataVector::const_iterator BrokerMetadataIterator;
+ /** @brief Topics iterator */
+ typedef TopicMetadataVector::const_iterator TopicMetadataIterator;
+
+
+ /**
+ * @brief Broker list
+ * @remark Ownership of the returned pointer is retained by the instance of
+ * Metadata that is called.
+ */
+ virtual const BrokerMetadataVector *brokers() const = 0;
+
+ /**
+ * @brief Topic list
+ * @remark Ownership of the returned pointer is retained by the instance of
+ * Metadata that is called.
+ */
+ virtual const TopicMetadataVector *topics() const = 0;
+
+ /** @brief Broker (id) originating this metadata */
+ virtual int32_t orig_broker_id() const = 0;
+
+ /** @brief Broker (name) originating this metadata */
+ virtual std::string orig_broker_name() const = 0;
+
+ virtual ~Metadata() = 0;
+};
+
+/**@}*/
+
+} // namespace RdKafka
+
+
+#endif /* _RDKAFKACPP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp_int.h b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp_int.h
new file mode 100644
index 000000000..bc024ebe9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src-cpp/rdkafkacpp_int.h
@@ -0,0 +1,1628 @@
+/*
+ * librdkafka - Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKACPP_INT_H_
+#define _RDKAFKACPP_INT_H_
+
+#include <string>
+#include <iostream>
+#include <cstring>
+#include <stdlib.h>
+
+#include "rdkafkacpp.h"
+
+extern "C" {
+#include "../src/rdkafka.h"
+}
+
+#ifdef _WIN32
+/* Visual Studio */
+#include "../src/win32_config.h"
+#else
+/* POSIX / UNIX based systems */
+#include "../config.h" /* mklove output */
+#endif
+
+#ifdef _MSC_VER
+typedef int mode_t;
+#pragma warning(disable : 4250)
+#endif
+
+
+namespace RdKafka {
+
+void consume_cb_trampoline(rd_kafka_message_t *msg, void *opaque);
+void log_cb_trampoline(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf);
+void error_cb_trampoline(rd_kafka_t *rk,
+ int err,
+ const char *reason,
+ void *opaque);
+void throttle_cb_trampoline(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int throttle_time_ms,
+ void *opaque);
+int stats_cb_trampoline(rd_kafka_t *rk,
+ char *json,
+ size_t json_len,
+ void *opaque);
+int socket_cb_trampoline(int domain, int type, int protocol, void *opaque);
+int open_cb_trampoline(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque);
+void rebalance_cb_trampoline(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *c_partitions,
+ void *opaque);
+void offset_commit_cb_trampoline0(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *c_offsets,
+ void *opaque);
+void oauthbearer_token_refresh_cb_trampoline(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque);
+
+int ssl_cert_verify_cb_trampoline(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque);
+
+rd_kafka_topic_partition_list_t *partitions_to_c_parts(
+ const std::vector<TopicPartition *> &partitions);
+
+/**
+ * @brief Update the application provided 'partitions' with info from 'c_parts'
+ */
+void update_partitions_from_c_parts(
+ std::vector<TopicPartition *> &partitions,
+ const rd_kafka_topic_partition_list_t *c_parts);
+
+
+class ErrorImpl : public Error {
+ public:
+ ~ErrorImpl() {
+ rd_kafka_error_destroy(c_error_);
+ }
+
+ ErrorImpl(ErrorCode code, const std::string *errstr) {
+ c_error_ = rd_kafka_error_new(static_cast<rd_kafka_resp_err_t>(code),
+ errstr ? "%s" : NULL,
+ errstr ? errstr->c_str() : NULL);
+ }
+
+ ErrorImpl(rd_kafka_error_t *c_error) : c_error_(c_error) {
+ }
+
+ static Error *create(ErrorCode code, const std::string *errstr) {
+ return new ErrorImpl(code, errstr);
+ }
+
+ ErrorCode code() const {
+ return static_cast<ErrorCode>(rd_kafka_error_code(c_error_));
+ }
+
+ std::string name() const {
+ return std::string(rd_kafka_error_name(c_error_));
+ }
+
+ std::string str() const {
+ return std::string(rd_kafka_error_string(c_error_));
+ }
+
+ bool is_fatal() const {
+ return !!rd_kafka_error_is_fatal(c_error_);
+ }
+
+ bool is_retriable() const {
+ return !!rd_kafka_error_is_retriable(c_error_);
+ }
+
+ bool txn_requires_abort() const {
+ return !!rd_kafka_error_txn_requires_abort(c_error_);
+ }
+
+ rd_kafka_error_t *c_error_;
+};
+
+
+class EventImpl : public Event {
+ public:
+ ~EventImpl() {
+ }
+
+ EventImpl(Type type,
+ ErrorCode err,
+ Severity severity,
+ const char *fac,
+ const char *str) :
+ type_(type),
+ err_(err),
+ severity_(severity),
+ fac_(fac ? fac : ""),
+ str_(str),
+ id_(0),
+ throttle_time_(0),
+ fatal_(false) {
+ }
+
+ EventImpl(Type type) :
+ type_(type),
+ err_(ERR_NO_ERROR),
+ severity_(EVENT_SEVERITY_EMERG),
+ fac_(""),
+ str_(""),
+ id_(0),
+ throttle_time_(0),
+ fatal_(false) {
+ }
+
+ Type type() const {
+ return type_;
+ }
+ ErrorCode err() const {
+ return err_;
+ }
+ Severity severity() const {
+ return severity_;
+ }
+ std::string fac() const {
+ return fac_;
+ }
+ std::string str() const {
+ return str_;
+ }
+ std::string broker_name() const {
+ if (type_ == EVENT_THROTTLE)
+ return str_;
+ else
+ return std::string("");
+ }
+ int broker_id() const {
+ return id_;
+ }
+ int throttle_time() const {
+ return throttle_time_;
+ }
+
+ bool fatal() const {
+ return fatal_;
+ }
+
+ Type type_;
+ ErrorCode err_;
+ Severity severity_;
+ std::string fac_;
+ std::string str_; /* reused for THROTTLE broker_name */
+ int id_;
+ int throttle_time_;
+ bool fatal_;
+};
+
+class QueueImpl : virtual public Queue {
+ public:
+ QueueImpl(rd_kafka_queue_t *c_rkqu) : queue_(c_rkqu) {
+ }
+ ~QueueImpl() {
+ rd_kafka_queue_destroy(queue_);
+ }
+ static Queue *create(Handle *base);
+ ErrorCode forward(Queue *queue);
+ Message *consume(int timeout_ms);
+ int poll(int timeout_ms);
+ void io_event_enable(int fd, const void *payload, size_t size);
+
+ rd_kafka_queue_t *queue_;
+};
+
+
+
+class HeadersImpl : public Headers {
+ public:
+ HeadersImpl() : headers_(rd_kafka_headers_new(8)) {
+ }
+
+ HeadersImpl(rd_kafka_headers_t *headers) : headers_(headers) {
+ }
+
+ HeadersImpl(const std::vector<Header> &headers) {
+ if (headers.size() > 0) {
+ headers_ = rd_kafka_headers_new(headers.size());
+ from_vector(headers);
+ } else {
+ headers_ = rd_kafka_headers_new(8);
+ }
+ }
+
+ ~HeadersImpl() {
+ if (headers_) {
+ rd_kafka_headers_destroy(headers_);
+ }
+ }
+
+ ErrorCode add(const std::string &key, const char *value) {
+ rd_kafka_resp_err_t err;
+ err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value, -1);
+ return static_cast<RdKafka::ErrorCode>(err);
+ }
+
+ ErrorCode add(const std::string &key, const void *value, size_t value_size) {
+ rd_kafka_resp_err_t err;
+ err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value,
+ value_size);
+ return static_cast<RdKafka::ErrorCode>(err);
+ }
+
+ ErrorCode add(const std::string &key, const std::string &value) {
+ rd_kafka_resp_err_t err;
+ err = rd_kafka_header_add(headers_, key.c_str(), key.size(), value.c_str(),
+ value.size());
+ return static_cast<RdKafka::ErrorCode>(err);
+ }
+
+ ErrorCode add(const Header &header) {
+ rd_kafka_resp_err_t err;
+ err =
+ rd_kafka_header_add(headers_, header.key().c_str(), header.key().size(),
+ header.value(), header.value_size());
+ return static_cast<RdKafka::ErrorCode>(err);
+ }
+
+ ErrorCode remove(const std::string &key) {
+ rd_kafka_resp_err_t err;
+ err = rd_kafka_header_remove(headers_, key.c_str());
+ return static_cast<RdKafka::ErrorCode>(err);
+ }
+
+ std::vector<Headers::Header> get(const std::string &key) const {
+ std::vector<Headers::Header> headers;
+ const void *value;
+ size_t size;
+ rd_kafka_resp_err_t err;
+ for (size_t idx = 0; !(err = rd_kafka_header_get(headers_, idx, key.c_str(),
+ &value, &size));
+ idx++) {
+ headers.push_back(Headers::Header(key, value, size));
+ }
+ return headers;
+ }
+
+ Headers::Header get_last(const std::string &key) const {
+ const void *value;
+ size_t size;
+ rd_kafka_resp_err_t err;
+ err = rd_kafka_header_get_last(headers_, key.c_str(), &value, &size);
+ return Headers::Header(key, value, size,
+ static_cast<RdKafka::ErrorCode>(err));
+ }
+
+ std::vector<Headers::Header> get_all() const {
+ std::vector<Headers::Header> headers;
+ size_t idx = 0;
+ const char *name;
+ const void *valuep;
+ size_t size;
+ while (!rd_kafka_header_get_all(headers_, idx++, &name, &valuep, &size)) {
+ headers.push_back(Headers::Header(name, valuep, size));
+ }
+ return headers;
+ }
+
+ size_t size() const {
+ return rd_kafka_header_cnt(headers_);
+ }
+
+ /** @brief Reset the C headers pointer to NULL. */
+ void c_headers_destroyed() {
+ headers_ = NULL;
+ }
+
+ /** @returns the underlying C headers, or NULL. */
+ rd_kafka_headers_t *c_ptr() {
+ return headers_;
+ }
+
+
+ private:
+ void from_vector(const std::vector<Header> &headers) {
+ if (headers.size() == 0)
+ return;
+ for (std::vector<Header>::const_iterator it = headers.begin();
+ it != headers.end(); it++)
+ this->add(*it);
+ }
+
+ HeadersImpl(HeadersImpl const &) /*= delete*/;
+ HeadersImpl &operator=(HeadersImpl const &) /*= delete*/;
+
+ rd_kafka_headers_t *headers_;
+};
+
+
+
+class MessageImpl : public Message {
+ public:
+ ~MessageImpl() {
+ if (free_rkmessage_)
+ rd_kafka_message_destroy(const_cast<rd_kafka_message_t *>(rkmessage_));
+ if (key_)
+ delete key_;
+ if (headers_)
+ delete headers_;
+ }
+
+ MessageImpl(rd_kafka_type_t rk_type,
+ RdKafka::Topic *topic,
+ rd_kafka_message_t *rkmessage) :
+ topic_(topic),
+ rkmessage_(rkmessage),
+ free_rkmessage_(true),
+ key_(NULL),
+ headers_(NULL),
+ rk_type_(rk_type) {
+ }
+
+ MessageImpl(rd_kafka_type_t rk_type,
+ RdKafka::Topic *topic,
+ rd_kafka_message_t *rkmessage,
+ bool dofree) :
+ topic_(topic),
+ rkmessage_(rkmessage),
+ free_rkmessage_(dofree),
+ key_(NULL),
+ headers_(NULL),
+ rk_type_(rk_type) {
+ }
+
+ MessageImpl(rd_kafka_type_t rk_type, rd_kafka_message_t *rkmessage) :
+ topic_(NULL),
+ rkmessage_(rkmessage),
+ free_rkmessage_(true),
+ key_(NULL),
+ headers_(NULL),
+ rk_type_(rk_type) {
+ if (rkmessage->rkt) {
+ /* Possibly NULL */
+ topic_ = static_cast<Topic *>(rd_kafka_topic_opaque(rkmessage->rkt));
+ }
+ }
+
+ /* Create errored message */
+ MessageImpl(rd_kafka_type_t rk_type,
+ RdKafka::Topic *topic,
+ RdKafka::ErrorCode err) :
+ topic_(topic),
+ free_rkmessage_(false),
+ key_(NULL),
+ headers_(NULL),
+ rk_type_(rk_type) {
+ rkmessage_ = &rkmessage_err_;
+ memset(&rkmessage_err_, 0, sizeof(rkmessage_err_));
+ rkmessage_err_.err = static_cast<rd_kafka_resp_err_t>(err);
+ }
+
+ std::string errstr() const {
+ const char *es;
+ /* message_errstr() is only available for the consumer. */
+ if (rk_type_ == RD_KAFKA_CONSUMER)
+ es = rd_kafka_message_errstr(rkmessage_);
+ else
+ es = rd_kafka_err2str(rkmessage_->err);
+
+ return std::string(es ? es : "");
+ }
+
+ ErrorCode err() const {
+ return static_cast<RdKafka::ErrorCode>(rkmessage_->err);
+ }
+
+ Topic *topic() const {
+ return topic_;
+ }
+ std::string topic_name() const {
+ if (rkmessage_->rkt)
+ return rd_kafka_topic_name(rkmessage_->rkt);
+ else
+ return "";
+ }
+ int32_t partition() const {
+ return rkmessage_->partition;
+ }
+ void *payload() const {
+ return rkmessage_->payload;
+ }
+ size_t len() const {
+ return rkmessage_->len;
+ }
+ const std::string *key() const {
+ if (key_) {
+ return key_;
+ } else if (rkmessage_->key) {
+ key_ = new std::string(static_cast<char const *>(rkmessage_->key),
+ rkmessage_->key_len);
+ return key_;
+ }
+ return NULL;
+ }
+ const void *key_pointer() const {
+ return rkmessage_->key;
+ }
+ size_t key_len() const {
+ return rkmessage_->key_len;
+ }
+
+ int64_t offset() const {
+ return rkmessage_->offset;
+ }
+
+ MessageTimestamp timestamp() const {
+ MessageTimestamp ts;
+ rd_kafka_timestamp_type_t tstype;
+ ts.timestamp = rd_kafka_message_timestamp(rkmessage_, &tstype);
+ ts.type = static_cast<MessageTimestamp::MessageTimestampType>(tstype);
+ return ts;
+ }
+
+ void *msg_opaque() const {
+ return rkmessage_->_private;
+ }
+
+ int64_t latency() const {
+ return rd_kafka_message_latency(rkmessage_);
+ }
+
+ struct rd_kafka_message_s *c_ptr() {
+ return rkmessage_;
+ }
+
+ Status status() const {
+ return static_cast<Status>(rd_kafka_message_status(rkmessage_));
+ }
+
+ Headers *headers() {
+ ErrorCode err;
+ return headers(&err);
+ }
+
+ Headers *headers(ErrorCode *err) {
+ *err = ERR_NO_ERROR;
+
+ if (!headers_) {
+ rd_kafka_headers_t *c_hdrs;
+ rd_kafka_resp_err_t c_err;
+
+ if ((c_err = rd_kafka_message_detach_headers(rkmessage_, &c_hdrs))) {
+ *err = static_cast<RdKafka::ErrorCode>(c_err);
+ return NULL;
+ }
+
+ headers_ = new HeadersImpl(c_hdrs);
+ }
+
+ return headers_;
+ }
+
+ int32_t broker_id() const {
+ return rd_kafka_message_broker_id(rkmessage_);
+ }
+
+ int32_t leader_epoch() const {
+ return rd_kafka_message_leader_epoch(rkmessage_);
+ }
+
+
+ Error *offset_store() {
+ rd_kafka_error_t *c_error;
+
+ c_error = rd_kafka_offset_store_message(rkmessage_);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+ else
+ return NULL;
+ }
+
+ RdKafka::Topic *topic_;
+ rd_kafka_message_t *rkmessage_;
+ bool free_rkmessage_;
+ /* For error signalling by the C++ layer the .._err_ message is
+ * used as a place holder and rkmessage_ is set to point to it. */
+ rd_kafka_message_t rkmessage_err_;
+ mutable std::string *key_; /* mutable because it's a cached value */
+
+ private:
+ /* "delete" copy ctor + copy assignment, for safety of key_ */
+ MessageImpl(MessageImpl const &) /*= delete*/;
+ MessageImpl &operator=(MessageImpl const &) /*= delete*/;
+
+ RdKafka::Headers *headers_;
+ const rd_kafka_type_t rk_type_; /**< Client type */
+};
+
+
+class ConfImpl : public Conf {
+ public:
+ ConfImpl(ConfType conf_type) :
+ consume_cb_(NULL),
+ dr_cb_(NULL),
+ event_cb_(NULL),
+ socket_cb_(NULL),
+ open_cb_(NULL),
+ partitioner_cb_(NULL),
+ partitioner_kp_cb_(NULL),
+ rebalance_cb_(NULL),
+ offset_commit_cb_(NULL),
+ oauthbearer_token_refresh_cb_(NULL),
+ ssl_cert_verify_cb_(NULL),
+ conf_type_(conf_type),
+ rk_conf_(NULL),
+ rkt_conf_(NULL) {
+ }
+ ~ConfImpl() {
+ if (rk_conf_)
+ rd_kafka_conf_destroy(rk_conf_);
+ else if (rkt_conf_)
+ rd_kafka_topic_conf_destroy(rkt_conf_);
+ }
+
+ Conf::ConfResult set(const std::string &name,
+ const std::string &value,
+ std::string &errstr);
+
+ Conf::ConfResult set(const std::string &name,
+ DeliveryReportCb *dr_cb,
+ std::string &errstr) {
+ if (name != "dr_cb") {
+ errstr = "Invalid value type, expected RdKafka::DeliveryReportCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ dr_cb_ = dr_cb;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult set(const std::string &name,
+ OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb,
+ std::string &errstr) {
+ if (name != "oauthbearer_token_refresh_cb") {
+ errstr =
+ "Invalid value type, expected RdKafka::OAuthBearerTokenRefreshCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ oauthbearer_token_refresh_cb_ = oauthbearer_token_refresh_cb;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult set(const std::string &name,
+ EventCb *event_cb,
+ std::string &errstr) {
+ if (name != "event_cb") {
+ errstr = "Invalid value type, expected RdKafka::EventCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ event_cb_ = event_cb;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult set(const std::string &name,
+ const Conf *topic_conf,
+ std::string &errstr) {
+ const ConfImpl *tconf_impl =
+ dynamic_cast<const RdKafka::ConfImpl *>(topic_conf);
+ if (name != "default_topic_conf" || !tconf_impl->rkt_conf_) {
+ errstr = "Invalid value type, expected RdKafka::Conf";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ rd_kafka_conf_set_default_topic_conf(
+ rk_conf_, rd_kafka_topic_conf_dup(tconf_impl->rkt_conf_));
+
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult set(const std::string &name,
+ PartitionerCb *partitioner_cb,
+ std::string &errstr) {
+ if (name != "partitioner_cb") {
+ errstr = "Invalid value type, expected RdKafka::PartitionerCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rkt_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_TOPIC object";
+ return Conf::CONF_INVALID;
+ }
+
+ partitioner_cb_ = partitioner_cb;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult set(const std::string &name,
+ PartitionerKeyPointerCb *partitioner_kp_cb,
+ std::string &errstr) {
+ if (name != "partitioner_key_pointer_cb") {
+ errstr = "Invalid value type, expected RdKafka::PartitionerKeyPointerCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rkt_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_TOPIC object";
+ return Conf::CONF_INVALID;
+ }
+
+ partitioner_kp_cb_ = partitioner_kp_cb;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult set(const std::string &name,
+ SocketCb *socket_cb,
+ std::string &errstr) {
+ if (name != "socket_cb") {
+ errstr = "Invalid value type, expected RdKafka::SocketCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ socket_cb_ = socket_cb;
+ return Conf::CONF_OK;
+ }
+
+
+ Conf::ConfResult set(const std::string &name,
+ OpenCb *open_cb,
+ std::string &errstr) {
+ if (name != "open_cb") {
+ errstr = "Invalid value type, expected RdKafka::OpenCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ open_cb_ = open_cb;
+ return Conf::CONF_OK;
+ }
+
+
+
+ Conf::ConfResult set(const std::string &name,
+ RebalanceCb *rebalance_cb,
+ std::string &errstr) {
+ if (name != "rebalance_cb") {
+ errstr = "Invalid value type, expected RdKafka::RebalanceCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ rebalance_cb_ = rebalance_cb;
+ return Conf::CONF_OK;
+ }
+
+
+ Conf::ConfResult set(const std::string &name,
+ OffsetCommitCb *offset_commit_cb,
+ std::string &errstr) {
+ if (name != "offset_commit_cb") {
+ errstr = "Invalid value type, expected RdKafka::OffsetCommitCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ offset_commit_cb_ = offset_commit_cb;
+ return Conf::CONF_OK;
+ }
+
+
+ Conf::ConfResult set(const std::string &name,
+ SslCertificateVerifyCb *ssl_cert_verify_cb,
+ std::string &errstr) {
+ if (name != "ssl_cert_verify_cb") {
+ errstr = "Invalid value type, expected RdKafka::SslCertificateVerifyCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ ssl_cert_verify_cb_ = ssl_cert_verify_cb;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult set_engine_callback_data(void *value, std::string &errstr) {
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ rd_kafka_conf_set_engine_callback_data(rk_conf_, value);
+ return Conf::CONF_OK;
+ }
+
+
+ Conf::ConfResult set_ssl_cert(RdKafka::CertificateType cert_type,
+ RdKafka::CertificateEncoding cert_enc,
+ const void *buffer,
+ size_t size,
+ std::string &errstr) {
+ rd_kafka_conf_res_t res;
+ char errbuf[512];
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ res = rd_kafka_conf_set_ssl_cert(
+ rk_conf_, static_cast<rd_kafka_cert_type_t>(cert_type),
+ static_cast<rd_kafka_cert_enc_t>(cert_enc), buffer, size, errbuf,
+ sizeof(errbuf));
+
+ if (res != RD_KAFKA_CONF_OK)
+ errstr = errbuf;
+
+ return static_cast<Conf::ConfResult>(res);
+ }
+
+ Conf::ConfResult enable_sasl_queue(bool enable, std::string &errstr) {
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ rd_kafka_conf_enable_sasl_queue(rk_conf_, enable ? 1 : 0);
+
+ return Conf::CONF_OK;
+ }
+
+
+ Conf::ConfResult get(const std::string &name, std::string &value) const {
+ if (name.compare("dr_cb") == 0 || name.compare("event_cb") == 0 ||
+ name.compare("partitioner_cb") == 0 ||
+ name.compare("partitioner_key_pointer_cb") == 0 ||
+ name.compare("socket_cb") == 0 || name.compare("open_cb") == 0 ||
+ name.compare("rebalance_cb") == 0 ||
+ name.compare("offset_commit_cb") == 0 ||
+ name.compare("oauthbearer_token_refresh_cb") == 0 ||
+ name.compare("ssl_cert_verify_cb") == 0 ||
+ name.compare("set_engine_callback_data") == 0 ||
+ name.compare("enable_sasl_queue") == 0) {
+ return Conf::CONF_INVALID;
+ }
+ rd_kafka_conf_res_t res = RD_KAFKA_CONF_INVALID;
+
+ /* Get size of property */
+ size_t size;
+ if (rk_conf_)
+ res = rd_kafka_conf_get(rk_conf_, name.c_str(), NULL, &size);
+ else if (rkt_conf_)
+ res = rd_kafka_topic_conf_get(rkt_conf_, name.c_str(), NULL, &size);
+ if (res != RD_KAFKA_CONF_OK)
+ return static_cast<Conf::ConfResult>(res);
+
+ char *tmpValue = new char[size];
+
+ if (rk_conf_)
+ res = rd_kafka_conf_get(rk_conf_, name.c_str(), tmpValue, &size);
+ else if (rkt_conf_)
+ res = rd_kafka_topic_conf_get(rkt_conf_, name.c_str(), tmpValue, &size);
+
+ if (res == RD_KAFKA_CONF_OK)
+ value.assign(tmpValue);
+ delete[] tmpValue;
+
+ return static_cast<Conf::ConfResult>(res);
+ }
+
+ Conf::ConfResult get(DeliveryReportCb *&dr_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ dr_cb = this->dr_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(
+ OAuthBearerTokenRefreshCb *&oauthbearer_token_refresh_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ oauthbearer_token_refresh_cb = this->oauthbearer_token_refresh_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(EventCb *&event_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ event_cb = this->event_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(PartitionerCb *&partitioner_cb) const {
+ if (!rkt_conf_)
+ return Conf::CONF_INVALID;
+ partitioner_cb = this->partitioner_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(PartitionerKeyPointerCb *&partitioner_kp_cb) const {
+ if (!rkt_conf_)
+ return Conf::CONF_INVALID;
+ partitioner_kp_cb = this->partitioner_kp_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(SocketCb *&socket_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ socket_cb = this->socket_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(OpenCb *&open_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ open_cb = this->open_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(RebalanceCb *&rebalance_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ rebalance_cb = this->rebalance_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(OffsetCommitCb *&offset_commit_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ offset_commit_cb = this->offset_commit_cb_;
+ return Conf::CONF_OK;
+ }
+
+ Conf::ConfResult get(SslCertificateVerifyCb *&ssl_cert_verify_cb) const {
+ if (!rk_conf_)
+ return Conf::CONF_INVALID;
+ ssl_cert_verify_cb = this->ssl_cert_verify_cb_;
+ return Conf::CONF_OK;
+ }
+
+ std::list<std::string> *dump();
+
+
+ Conf::ConfResult set(const std::string &name,
+ ConsumeCb *consume_cb,
+ std::string &errstr) {
+ if (name != "consume_cb") {
+ errstr = "Invalid value type, expected RdKafka::ConsumeCb";
+ return Conf::CONF_INVALID;
+ }
+
+ if (!rk_conf_) {
+ errstr = "Requires RdKafka::Conf::CONF_GLOBAL object";
+ return Conf::CONF_INVALID;
+ }
+
+ consume_cb_ = consume_cb;
+ return Conf::CONF_OK;
+ }
+
+ struct rd_kafka_conf_s *c_ptr_global() {
+ if (conf_type_ == CONF_GLOBAL)
+ return rk_conf_;
+ else
+ return NULL;
+ }
+
+ struct rd_kafka_topic_conf_s *c_ptr_topic() {
+ if (conf_type_ == CONF_TOPIC)
+ return rkt_conf_;
+ else
+ return NULL;
+ }
+
+ ConsumeCb *consume_cb_;
+ DeliveryReportCb *dr_cb_;
+ EventCb *event_cb_;
+ SocketCb *socket_cb_;
+ OpenCb *open_cb_;
+ PartitionerCb *partitioner_cb_;
+ PartitionerKeyPointerCb *partitioner_kp_cb_;
+ RebalanceCb *rebalance_cb_;
+ OffsetCommitCb *offset_commit_cb_;
+ OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb_;
+ SslCertificateVerifyCb *ssl_cert_verify_cb_;
+ ConfType conf_type_;
+ rd_kafka_conf_t *rk_conf_;
+ rd_kafka_topic_conf_t *rkt_conf_;
+};
+
+
+class HandleImpl : virtual public Handle {
+ public:
+ ~HandleImpl() {
+ }
+ HandleImpl() {
+ }
+ std::string name() const {
+ return std::string(rd_kafka_name(rk_));
+ }
+ std::string memberid() const {
+ char *str = rd_kafka_memberid(rk_);
+ std::string memberid = str ? str : "";
+ if (str)
+ rd_kafka_mem_free(rk_, str);
+ return memberid;
+ }
+ int poll(int timeout_ms) {
+ return rd_kafka_poll(rk_, timeout_ms);
+ }
+ int outq_len() {
+ return rd_kafka_outq_len(rk_);
+ }
+
+ void set_common_config(const RdKafka::ConfImpl *confimpl);
+
+ RdKafka::ErrorCode metadata(bool all_topics,
+ const Topic *only_rkt,
+ Metadata **metadatap,
+ int timeout_ms);
+
+ ErrorCode pause(std::vector<TopicPartition *> &partitions);
+ ErrorCode resume(std::vector<TopicPartition *> &partitions);
+
+ ErrorCode query_watermark_offsets(const std::string &topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high,
+ int timeout_ms) {
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_query_watermark_offsets(
+ rk_, topic.c_str(), partition, low, high, timeout_ms));
+ }
+
+ ErrorCode get_watermark_offsets(const std::string &topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high) {
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_get_watermark_offsets(
+ rk_, topic.c_str(), partition, low, high));
+ }
+
+ Queue *get_partition_queue(const TopicPartition *partition);
+
+ Queue *get_sasl_queue() {
+ rd_kafka_queue_t *rkqu;
+ rkqu = rd_kafka_queue_get_sasl(rk_);
+
+ if (rkqu == NULL)
+ return NULL;
+
+ return new QueueImpl(rkqu);
+ }
+
+ Queue *get_background_queue() {
+ rd_kafka_queue_t *rkqu;
+ rkqu = rd_kafka_queue_get_background(rk_);
+
+ if (rkqu == NULL)
+ return NULL;
+
+ return new QueueImpl(rkqu);
+ }
+
+
+ ErrorCode offsetsForTimes(std::vector<TopicPartition *> &offsets,
+ int timeout_ms) {
+ rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets);
+ ErrorCode err = static_cast<ErrorCode>(
+ rd_kafka_offsets_for_times(rk_, c_offsets, timeout_ms));
+ update_partitions_from_c_parts(offsets, c_offsets);
+ rd_kafka_topic_partition_list_destroy(c_offsets);
+ return err;
+ }
+
+ ErrorCode set_log_queue(Queue *queue);
+
+ void yield() {
+ rd_kafka_yield(rk_);
+ }
+
+ std::string clusterid(int timeout_ms) {
+ char *str = rd_kafka_clusterid(rk_, timeout_ms);
+ std::string clusterid = str ? str : "";
+ if (str)
+ rd_kafka_mem_free(rk_, str);
+ return clusterid;
+ }
+
+ struct rd_kafka_s *c_ptr() {
+ return rk_;
+ }
+
+ int32_t controllerid(int timeout_ms) {
+ return rd_kafka_controllerid(rk_, timeout_ms);
+ }
+
+ ErrorCode fatal_error(std::string &errstr) const {
+ char errbuf[512];
+ RdKafka::ErrorCode err = static_cast<RdKafka::ErrorCode>(
+ rd_kafka_fatal_error(rk_, errbuf, sizeof(errbuf)));
+ if (err)
+ errstr = errbuf;
+ return err;
+ }
+
+ ErrorCode oauthbearer_set_token(const std::string &token_value,
+ int64_t md_lifetime_ms,
+ const std::string &md_principal_name,
+ const std::list<std::string> &extensions,
+ std::string &errstr) {
+ char errbuf[512];
+ ErrorCode err;
+ const char **extensions_copy = new const char *[extensions.size()];
+ int elem = 0;
+
+ for (std::list<std::string>::const_iterator it = extensions.begin();
+ it != extensions.end(); it++)
+ extensions_copy[elem++] = it->c_str();
+ err = static_cast<ErrorCode>(rd_kafka_oauthbearer_set_token(
+ rk_, token_value.c_str(), md_lifetime_ms, md_principal_name.c_str(),
+ extensions_copy, extensions.size(), errbuf, sizeof(errbuf)));
+ delete[] extensions_copy;
+
+ if (err != ERR_NO_ERROR)
+ errstr = errbuf;
+
+ return err;
+ }
+
+ ErrorCode oauthbearer_set_token_failure(const std::string &errstr) {
+ return static_cast<ErrorCode>(
+ rd_kafka_oauthbearer_set_token_failure(rk_, errstr.c_str()));
+ }
+
+ Error *sasl_background_callbacks_enable() {
+ rd_kafka_error_t *c_error = rd_kafka_sasl_background_callbacks_enable(rk_);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+
+ return NULL;
+ }
+
+ Error *sasl_set_credentials(const std::string &username,
+ const std::string &password) {
+ rd_kafka_error_t *c_error =
+ rd_kafka_sasl_set_credentials(rk_, username.c_str(), password.c_str());
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+
+ return NULL;
+ };
+
+ void *mem_malloc(size_t size) {
+ return rd_kafka_mem_malloc(rk_, size);
+ }
+
+ void mem_free(void *ptr) {
+ rd_kafka_mem_free(rk_, ptr);
+ }
+
+ rd_kafka_t *rk_;
+ /* All Producer and Consumer callbacks must reside in HandleImpl and
+ * the opaque provided to rdkafka must be a pointer to HandleImpl, since
+ * ProducerImpl and ConsumerImpl classes cannot be safely directly cast to
+ * HandleImpl due to the skewed diamond inheritance. */
+ ConsumeCb *consume_cb_;
+ EventCb *event_cb_;
+ SocketCb *socket_cb_;
+ OpenCb *open_cb_;
+ DeliveryReportCb *dr_cb_;
+ PartitionerCb *partitioner_cb_;
+ PartitionerKeyPointerCb *partitioner_kp_cb_;
+ RebalanceCb *rebalance_cb_;
+ OffsetCommitCb *offset_commit_cb_;
+ OAuthBearerTokenRefreshCb *oauthbearer_token_refresh_cb_;
+ SslCertificateVerifyCb *ssl_cert_verify_cb_;
+};
+
+
+class TopicImpl : public Topic {
+ public:
+ ~TopicImpl() {
+ rd_kafka_topic_destroy(rkt_);
+ }
+
+ std::string name() const {
+ return rd_kafka_topic_name(rkt_);
+ }
+
+ bool partition_available(int32_t partition) const {
+ return !!rd_kafka_topic_partition_available(rkt_, partition);
+ }
+
+ ErrorCode offset_store(int32_t partition, int64_t offset) {
+ return static_cast<RdKafka::ErrorCode>(
+ rd_kafka_offset_store(rkt_, partition, offset));
+ }
+
+ static Topic *create(Handle &base, const std::string &topic, Conf *conf);
+
+ struct rd_kafka_topic_s *c_ptr() {
+ return rkt_;
+ }
+
+ rd_kafka_topic_t *rkt_;
+ PartitionerCb *partitioner_cb_;
+ PartitionerKeyPointerCb *partitioner_kp_cb_;
+};
+
+
+/**
+ * Topic and Partition
+ */
+class TopicPartitionImpl : public TopicPartition {
+ public:
+ ~TopicPartitionImpl() {
+ }
+
+ static TopicPartition *create(const std::string &topic, int partition);
+
+ TopicPartitionImpl(const std::string &topic, int partition) :
+ topic_(topic),
+ partition_(partition),
+ offset_(RdKafka::Topic::OFFSET_INVALID),
+ err_(ERR_NO_ERROR),
+ leader_epoch_(-1) {
+ }
+
+ TopicPartitionImpl(const std::string &topic, int partition, int64_t offset) :
+ topic_(topic),
+ partition_(partition),
+ offset_(offset),
+ err_(ERR_NO_ERROR),
+ leader_epoch_(-1) {
+ }
+
+ TopicPartitionImpl(const rd_kafka_topic_partition_t *c_part) {
+ topic_ = std::string(c_part->topic);
+ partition_ = c_part->partition;
+ offset_ = c_part->offset;
+ err_ = static_cast<ErrorCode>(c_part->err);
+ leader_epoch_ = rd_kafka_topic_partition_get_leader_epoch(c_part);
+ // FIXME: metadata
+ }
+
+ static void destroy(std::vector<TopicPartition *> &partitions);
+
+ int partition() const {
+ return partition_;
+ }
+ const std::string &topic() const {
+ return topic_;
+ }
+
+ int64_t offset() const {
+ return offset_;
+ }
+
+ ErrorCode err() const {
+ return err_;
+ }
+
+ void set_offset(int64_t offset) {
+ offset_ = offset;
+ }
+
+ int32_t get_leader_epoch() {
+ return leader_epoch_;
+ }
+
+ void set_leader_epoch(int32_t leader_epoch) {
+ leader_epoch_ = leader_epoch_;
+ }
+
+ std::ostream &operator<<(std::ostream &ostrm) const {
+ return ostrm << topic_ << " [" << partition_ << "]";
+ }
+
+ std::string topic_;
+ int partition_;
+ int64_t offset_;
+ ErrorCode err_;
+ int32_t leader_epoch_;
+};
+
+
+/**
+ * @class ConsumerGroupMetadata wraps the
+ * C rd_kafka_consumer_group_metadata_t object.
+ */
+class ConsumerGroupMetadataImpl : public ConsumerGroupMetadata {
+ public:
+ ~ConsumerGroupMetadataImpl() {
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata_);
+ }
+
+ ConsumerGroupMetadataImpl(rd_kafka_consumer_group_metadata_t *cgmetadata) :
+ cgmetadata_(cgmetadata) {
+ }
+
+ rd_kafka_consumer_group_metadata_t *cgmetadata_;
+};
+
+
+class KafkaConsumerImpl : virtual public KafkaConsumer,
+ virtual public HandleImpl {
+ public:
+ ~KafkaConsumerImpl() {
+ if (rk_)
+ rd_kafka_destroy_flags(rk_, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
+ }
+
+ static KafkaConsumer *create(Conf *conf, std::string &errstr);
+
+ ErrorCode assignment(std::vector<TopicPartition *> &partitions);
+ bool assignment_lost();
+ std::string rebalance_protocol() {
+ const char *str = rd_kafka_rebalance_protocol(rk_);
+ return std::string(str ? str : "");
+ }
+ ErrorCode subscription(std::vector<std::string> &topics);
+ ErrorCode subscribe(const std::vector<std::string> &topics);
+ ErrorCode unsubscribe();
+ ErrorCode assign(const std::vector<TopicPartition *> &partitions);
+ ErrorCode unassign();
+ Error *incremental_assign(const std::vector<TopicPartition *> &partitions);
+ Error *incremental_unassign(const std::vector<TopicPartition *> &partitions);
+
+ Message *consume(int timeout_ms);
+ ErrorCode commitSync() {
+ return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 0 /*sync*/));
+ }
+ ErrorCode commitAsync() {
+ return static_cast<ErrorCode>(rd_kafka_commit(rk_, NULL, 1 /*async*/));
+ }
+ ErrorCode commitSync(Message *message) {
+ MessageImpl *msgimpl = dynamic_cast<MessageImpl *>(message);
+ return static_cast<ErrorCode>(
+ rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 0 /*sync*/));
+ }
+ ErrorCode commitAsync(Message *message) {
+ MessageImpl *msgimpl = dynamic_cast<MessageImpl *>(message);
+ return static_cast<ErrorCode>(
+ rd_kafka_commit_message(rk_, msgimpl->rkmessage_, 1 /*async*/));
+ }
+
+ ErrorCode commitSync(std::vector<TopicPartition *> &offsets) {
+ rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+ rd_kafka_resp_err_t err = rd_kafka_commit(rk_, c_parts, 0);
+ if (!err)
+ update_partitions_from_c_parts(offsets, c_parts);
+ rd_kafka_topic_partition_list_destroy(c_parts);
+ return static_cast<ErrorCode>(err);
+ }
+
+ ErrorCode commitAsync(const std::vector<TopicPartition *> &offsets) {
+ rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+ rd_kafka_resp_err_t err = rd_kafka_commit(rk_, c_parts, 1);
+ rd_kafka_topic_partition_list_destroy(c_parts);
+ return static_cast<ErrorCode>(err);
+ }
+
+ ErrorCode commitSync(OffsetCommitCb *offset_commit_cb) {
+ return static_cast<ErrorCode>(rd_kafka_commit_queue(
+ rk_, NULL, NULL, RdKafka::offset_commit_cb_trampoline0,
+ offset_commit_cb));
+ }
+
+ ErrorCode commitSync(std::vector<TopicPartition *> &offsets,
+ OffsetCommitCb *offset_commit_cb) {
+ rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+ rd_kafka_resp_err_t err = rd_kafka_commit_queue(
+ rk_, c_parts, NULL, RdKafka::offset_commit_cb_trampoline0,
+ offset_commit_cb);
+ rd_kafka_topic_partition_list_destroy(c_parts);
+ return static_cast<ErrorCode>(err);
+ }
+
+ ErrorCode committed(std::vector<TopicPartition *> &partitions,
+ int timeout_ms);
+ ErrorCode position(std::vector<TopicPartition *> &partitions);
+
+ ConsumerGroupMetadata *groupMetadata() {
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ cgmetadata = rd_kafka_consumer_group_metadata(rk_);
+ if (!cgmetadata)
+ return NULL;
+
+ return new ConsumerGroupMetadataImpl(cgmetadata);
+ }
+
+ ErrorCode close();
+
+ Error *close(Queue *queue);
+
+ bool closed() {
+ return rd_kafka_consumer_closed(rk_) ? true : false;
+ }
+
+ ErrorCode seek(const TopicPartition &partition, int timeout_ms);
+
+ ErrorCode offsets_store(std::vector<TopicPartition *> &offsets) {
+ rd_kafka_topic_partition_list_t *c_parts = partitions_to_c_parts(offsets);
+ rd_kafka_resp_err_t err = rd_kafka_offsets_store(rk_, c_parts);
+ update_partitions_from_c_parts(offsets, c_parts);
+ rd_kafka_topic_partition_list_destroy(c_parts);
+ return static_cast<ErrorCode>(err);
+ }
+};
+
+
+class MetadataImpl : public Metadata {
+ public:
+ MetadataImpl(const rd_kafka_metadata_t *metadata);
+ ~MetadataImpl();
+
+ const std::vector<const BrokerMetadata *> *brokers() const {
+ return &brokers_;
+ }
+
+ const std::vector<const TopicMetadata *> *topics() const {
+ return &topics_;
+ }
+
+ std::string orig_broker_name() const {
+ return std::string(metadata_->orig_broker_name);
+ }
+
+ int32_t orig_broker_id() const {
+ return metadata_->orig_broker_id;
+ }
+
+ private:
+ const rd_kafka_metadata_t *metadata_;
+ std::vector<const BrokerMetadata *> brokers_;
+ std::vector<const TopicMetadata *> topics_;
+ std::string orig_broker_name_;
+};
+
+
+
+class ConsumerImpl : virtual public Consumer, virtual public HandleImpl {
+ public:
+ ~ConsumerImpl() {
+ if (rk_)
+ rd_kafka_destroy(rk_);
+ }
+ static Consumer *create(Conf *conf, std::string &errstr);
+
+ ErrorCode start(Topic *topic, int32_t partition, int64_t offset);
+ ErrorCode start(Topic *topic,
+ int32_t partition,
+ int64_t offset,
+ Queue *queue);
+ ErrorCode stop(Topic *topic, int32_t partition);
+ ErrorCode seek(Topic *topic,
+ int32_t partition,
+ int64_t offset,
+ int timeout_ms);
+ Message *consume(Topic *topic, int32_t partition, int timeout_ms);
+ Message *consume(Queue *queue, int timeout_ms);
+ int consume_callback(Topic *topic,
+ int32_t partition,
+ int timeout_ms,
+ ConsumeCb *cb,
+ void *opaque);
+ int consume_callback(Queue *queue,
+ int timeout_ms,
+ RdKafka::ConsumeCb *consume_cb,
+ void *opaque);
+};
+
+
+
+class ProducerImpl : virtual public Producer, virtual public HandleImpl {
+ public:
+ ~ProducerImpl() {
+ if (rk_)
+ rd_kafka_destroy(rk_);
+ }
+
+ ErrorCode produce(Topic *topic,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const std::string *key,
+ void *msg_opaque);
+
+ ErrorCode produce(Topic *topic,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ void *msg_opaque);
+
+ ErrorCode produce(Topic *topic,
+ int32_t partition,
+ const std::vector<char> *payload,
+ const std::vector<char> *key,
+ void *msg_opaque);
+
+ ErrorCode produce(const std::string topic_name,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ int64_t timestamp,
+ void *msg_opaque);
+
+ ErrorCode produce(const std::string topic_name,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t key_len,
+ int64_t timestamp,
+ RdKafka::Headers *headers,
+ void *msg_opaque);
+
+ ErrorCode flush(int timeout_ms) {
+ return static_cast<RdKafka::ErrorCode>(rd_kafka_flush(rk_, timeout_ms));
+ }
+
+ ErrorCode purge(int purge_flags) {
+ return static_cast<RdKafka::ErrorCode>(
+ rd_kafka_purge(rk_, (int)purge_flags));
+ }
+
+ Error *init_transactions(int timeout_ms) {
+ rd_kafka_error_t *c_error;
+
+ c_error = rd_kafka_init_transactions(rk_, timeout_ms);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+ else
+ return NULL;
+ }
+
+ Error *begin_transaction() {
+ rd_kafka_error_t *c_error;
+
+ c_error = rd_kafka_begin_transaction(rk_);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+ else
+ return NULL;
+ }
+
+ Error *send_offsets_to_transaction(
+ const std::vector<TopicPartition *> &offsets,
+ const ConsumerGroupMetadata *group_metadata,
+ int timeout_ms) {
+ rd_kafka_error_t *c_error;
+ const RdKafka::ConsumerGroupMetadataImpl *cgmdimpl =
+ dynamic_cast<const RdKafka::ConsumerGroupMetadataImpl *>(
+ group_metadata);
+ rd_kafka_topic_partition_list_t *c_offsets = partitions_to_c_parts(offsets);
+
+ c_error = rd_kafka_send_offsets_to_transaction(
+ rk_, c_offsets, cgmdimpl->cgmetadata_, timeout_ms);
+
+ rd_kafka_topic_partition_list_destroy(c_offsets);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+ else
+ return NULL;
+ }
+
+ Error *commit_transaction(int timeout_ms) {
+ rd_kafka_error_t *c_error;
+
+ c_error = rd_kafka_commit_transaction(rk_, timeout_ms);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+ else
+ return NULL;
+ }
+
+ Error *abort_transaction(int timeout_ms) {
+ rd_kafka_error_t *c_error;
+
+ c_error = rd_kafka_abort_transaction(rk_, timeout_ms);
+
+ if (c_error)
+ return new ErrorImpl(c_error);
+ else
+ return NULL;
+ }
+
+ static Producer *create(Conf *conf, std::string &errstr);
+};
+
+
+
+} // namespace RdKafka
+
+#endif /* _RDKAFKACPP_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt
new file mode 100644
index 000000000..37b43c499
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/CMakeLists.txt
@@ -0,0 +1,364 @@
+set(LIBVER 1)
+
+set(
+ sources
+ crc32c.c
+ rdaddr.c
+ rdavl.c
+ rdbuf.c
+ rdcrc32.c
+ rdfnv1a.c
+ rdkafka.c
+ rdkafka_assignor.c
+ rdkafka_broker.c
+ rdkafka_buf.c
+ rdkafka_cgrp.c
+ rdkafka_conf.c
+ rdkafka_event.c
+ rdkafka_feature.c
+ rdkafka_lz4.c
+ rdkafka_metadata.c
+ rdkafka_metadata_cache.c
+ rdkafka_msg.c
+ rdkafka_msgset_reader.c
+ rdkafka_msgset_writer.c
+ rdkafka_offset.c
+ rdkafka_op.c
+ rdkafka_partition.c
+ rdkafka_pattern.c
+ rdkafka_queue.c
+ rdkafka_range_assignor.c
+ rdkafka_request.c
+ rdkafka_roundrobin_assignor.c
+ rdkafka_sasl.c
+ rdkafka_sasl_plain.c
+ rdkafka_sticky_assignor.c
+ rdkafka_subscription.c
+ rdkafka_assignment.c
+ rdkafka_timer.c
+ rdkafka_topic.c
+ rdkafka_transport.c
+ rdkafka_interceptor.c
+ rdkafka_header.c
+ rdkafka_admin.c
+ rdkafka_aux.c
+ rdkafka_background.c
+ rdkafka_idempotence.c
+ rdkafka_txnmgr.c
+ rdkafka_cert.c
+ rdkafka_coord.c
+ rdkafka_mock.c
+ rdkafka_mock_handlers.c
+ rdkafka_mock_cgrp.c
+ rdkafka_error.c
+ rdkafka_fetcher.c
+ rdlist.c
+ rdlog.c
+ rdmurmur2.c
+ rdports.c
+ rdrand.c
+ rdregex.c
+ rdstring.c
+ rdunittest.c
+ rdvarint.c
+ rdmap.c
+ snappy.c
+ tinycthread.c
+ tinycthread_extra.c
+ rdxxhash.c
+ cJSON.c
+)
+
+if(WITH_SSL)
+ list(APPEND sources rdkafka_ssl.c)
+endif()
+
+if(WITH_CURL)
+ list(APPEND sources rdhttp.c)
+endif()
+
+if(WITH_HDRHISTOGRAM)
+ list(APPEND sources rdhdrhistogram.c)
+endif()
+
+if(WITH_LIBDL OR WIN32)
+ list(APPEND sources rddl.c)
+endif()
+
+if(WITH_PLUGINS)
+ list(APPEND sources rdkafka_plugin.c)
+endif()
+
+if(WIN32)
+ list(APPEND sources rdkafka_sasl_win32.c)
+elseif(WITH_SASL_CYRUS)
+ list(APPEND sources rdkafka_sasl_cyrus.c)
+endif()
+
+if(WITH_SASL_SCRAM)
+ list(APPEND sources rdkafka_sasl_scram.c)
+endif()
+
+if(WITH_SASL_OAUTHBEARER)
+ list(APPEND sources rdkafka_sasl_oauthbearer.c)
+endif()
+
+if(WITH_OAUTHBEARER_OIDC)
+ list(APPEND sources rdkafka_sasl_oauthbearer_oidc.c)
+endif()
+
+if(WITH_ZLIB)
+ list(APPEND sources rdgz.c)
+endif()
+
+if(WITH_ZSTD)
+ list(APPEND sources rdkafka_zstd.c)
+endif()
+
+if(NOT WITH_LZ4_EXT)
+ list(APPEND sources lz4.c lz4frame.c lz4hc.c)
+endif()
+
+if(NOT HAVE_REGEX)
+ list(APPEND sources regexp.c)
+endif()
+
+# Define flags with cmake instead of by defining them on win32_config.h
+if(WITHOUT_WIN32_CONFIG)
+ list(APPEND rdkafka_compile_definitions WITHOUT_WIN32_CONFIG)
+ if(WITH_SSL)
+ list(APPEND rdkafka_compile_definitions WITH_SSL=1)
+ else()
+ list(APPEND rdkafka_compile_definitions WITH_SSL=0)
+ endif(WITH_SSL)
+ if(WITH_ZLIB)
+ list(APPEND rdkafka_compile_definitions WITH_ZLIB=1)
+ else()
+ list(APPEND rdkafka_compile_definitions WITH_ZLIB=0)
+ endif(WITH_ZLIB)
+ if(WITH_SNAPPY)
+ list(APPEND rdkafka_compile_definitions WITH_SNAPPY=1)
+ else()
+ list(APPEND rdkafka_compile_definitions WITH_SNAPPY=0)
+ endif(WITH_SNAPPY)
+ if(WITH_ZSTD)
+ list(APPEND rdkafka_compile_definitions WITH_ZSTD=1)
+ else()
+ list(APPEND rdkafka_compile_definitions WITH_ZSTD=0)
+ endif(WITH_ZSTD)
+ if(WITH_SASL_SCRAM)
+ list(APPEND rdkafka_compile_definitions WITH_SASL_SCRAM=1)
+ else()
+ list(APPEND rdkafka_compile_definitions WITH_SASL_SCRAM=0)
+ endif(WITH_SASL_SCRAM)
+ if(WITH_SASL_OAUTHBEARER)
+ list(APPEND rdkafka_compile_definitions WITH_SASL_OAUTHBEARER=1)
+ else()
+ list(APPEND rdkafka_compile_definitions WITH_SASL_OAUTHBEARER=0)
+ endif(WITH_SASL_OAUTHBEARER)
+ if(ENABLE_DEVEL)
+ list(APPEND rdkafka_compile_definitions ENABLE_DEVEL=1)
+ else()
+ list(APPEND rdkafka_compile_definitions ENABLE_DEVEL=0)
+ endif(ENABLE_DEVEL)
+ if(WITH_PLUGINS)
+ list(APPEND rdkafka_compile_definitions WITH_PLUGINS=1)
+ else()
+ list(APPEND rdkafka_compile_definitions WITH_PLUGINS=0)
+ endif(WITH_PLUGINS)
+endif()
+
+if(RDKAFKA_BUILD_STATIC)
+ set(CMAKE_POSITION_INDEPENDENT_CODE ON)
+ set(RDKAFKA_BUILD_MODE STATIC)
+else()
+ set(RDKAFKA_BUILD_MODE SHARED)
+endif()
+
+add_library(rdkafka ${RDKAFKA_BUILD_MODE} ${sources})
+if(NOT RDKAFKA_BUILD_STATIC)
+ set_property(TARGET rdkafka PROPERTY SOVERSION ${LIBVER})
+endif()
+
+if(MINGW)
+ # Target Windows 8.1 to match the VS projects (MinGW defaults to an older WinAPI version)
+ list(APPEND rdkafka_compile_definitions WINVER=0x0603 _WIN32_WINNT=0x0603 UNICODE)
+endif(MINGW)
+
+# Support '#include <rdkafka.h>'
+target_include_directories(rdkafka PUBLIC $<BUILD_INTERFACE:${CMAKE_CURRENT_LIST_DIR}> $<INSTALL_INTERFACE:${CMAKE_INSTALL_INCLUDEDIR}>)
+target_compile_definitions(rdkafka PUBLIC ${rdkafka_compile_definitions})
+if(RDKAFKA_BUILD_STATIC)
+ target_compile_definitions(rdkafka PUBLIC LIBRDKAFKA_STATICLIB)
+endif()
+
+# We need 'dummy' directory to support `#include "../config.h"` path
+set(dummy "${GENERATED_DIR}/dummy")
+file(MAKE_DIRECTORY "${dummy}")
+target_include_directories(rdkafka PUBLIC "$<BUILD_INTERFACE:${dummy}>")
+
+if(WITH_CURL)
+ find_package(CURL REQUIRED)
+ target_include_directories(rdkafka PUBLIC ${CURL_INCLUDE_DIRS})
+ target_link_libraries(rdkafka PUBLIC ${CURL_LIBRARIES})
+endif()
+
+if(WITH_HDRHISTOGRAM)
+ target_link_libraries(rdkafka PUBLIC m)
+endif()
+
+if(WITH_ZLIB)
+ find_package(ZLIB REQUIRED)
+ target_include_directories(rdkafka PRIVATE ${ZLIB_INCLUDE_DIRS})
+ target_link_libraries(rdkafka PUBLIC ZLIB::ZLIB)
+endif()
+
+if(WITH_ZSTD)
+ target_link_libraries(rdkafka PRIVATE ${ZSTD_LIBRARY})
+ target_include_directories(rdkafka PRIVATE ${ZSTD_INCLUDE_DIR})
+ message(STATUS "Found ZSTD: ${ZSTD_LIBRARY}")
+endif()
+
+if(WITH_SSL)
+ if(WITH_BUNDLED_SSL) # option from 'h2o' parent project
+ if(NOT TARGET bundled-ssl)
+ message(FATAL_ERROR "bundled-ssl target not exist")
+ endif()
+ target_include_directories(rdkafka BEFORE PRIVATE ${BUNDLED_SSL_INCLUDE_DIR})
+ target_link_libraries(rdkafka PUBLIC ${BUNDLED_SSL_LIBRARIES})
+ add_dependencies(rdkafka bundled-ssl)
+ else()
+ find_package(OpenSSL REQUIRED)
+ target_include_directories(rdkafka PRIVATE ${OPENSSL_INCLUDE_DIR})
+ target_link_libraries(rdkafka PUBLIC OpenSSL::SSL OpenSSL::Crypto)
+ get_target_property(OPENSSL_TARGET_TYPE OpenSSL::SSL TYPE)
+ if(OPENSSL_CRYPTO_LIBRARY MATCHES "\\.a$")
+ target_compile_definitions(rdkafka PUBLIC WITH_STATIC_LIB_libcrypto)
+ endif()
+ endif()
+endif()
+
+if(LINK_ATOMIC)
+ target_link_libraries(rdkafka PUBLIC "-latomic")
+endif()
+
+find_package(Threads REQUIRED)
+target_link_libraries(rdkafka PUBLIC Threads::Threads)
+
+if(WITH_SASL_CYRUS)
+ target_include_directories(rdkafka PRIVATE ${SASL_INCLUDE_DIRS})
+ target_link_libraries(rdkafka PUBLIC ${SASL_LIBRARIES})
+endif()
+
+if(WITH_LIBDL)
+ target_link_libraries(rdkafka PUBLIC ${CMAKE_DL_LIBS})
+endif()
+
+if(WITH_LZ4_EXT)
+ target_include_directories(rdkafka PRIVATE ${LZ4_INCLUDE_DIRS})
+ target_link_libraries(rdkafka PUBLIC LZ4::LZ4)
+endif()
+
+if(WIN32)
+ if(WITH_SSL)
+ target_link_libraries(rdkafka PUBLIC crypt32)
+ endif()
+
+ target_link_libraries(rdkafka PUBLIC ws2_32 secur32)
+ if(NOT RDKAFKA_BUILD_STATIC)
+ target_compile_definitions(rdkafka PRIVATE LIBRDKAFKA_EXPORTS)
+ endif()
+endif()
+
+# Generate pkg-config file
+set(PKG_CONFIG_VERSION "${PROJECT_VERSION}")
+set(PKG_CONFIG_REQUIRES "")
+if (WIN32)
+ set(PKG_CONFIG_LIBS_PRIVATE "-lws2_32 -lsecur32 -lcrypt32")
+else()
+ set(PKG_CONFIG_LIBS_PRIVATE "-lpthread")
+ find_library(RT_LIBRARY rt)
+ if(RT_LIBRARY)
+ string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lrt")
+ endif()
+
+ if(WITH_LIBDL)
+ string(APPEND PKG_CONFIG_LIBS_PRIVATE " -ldl")
+ endif()
+
+ if(WITH_HDRHISTOGRAM)
+ string(APPEND PKG_CONFIG_LIBS_PRIVATE " -lm")
+ endif()
+endif()
+
+if(NOT RDKAFKA_BUILD_STATIC)
+ set(PKG_CONFIG_NAME "librdkafka")
+ set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library")
+
+ if(WITH_CURL)
+ string(APPEND PKG_CONFIG_REQUIRES "curl ")
+ endif()
+
+ if(WITH_ZLIB)
+ string(APPEND PKG_CONFIG_REQUIRES "zlib ")
+ endif()
+
+ if(WITH_SSL)
+ string(APPEND PKG_CONFIG_REQUIRES "libssl ")
+ endif()
+
+ if(WITH_SASL_CYRUS)
+ string(APPEND PKG_CONFIG_REQUIRES "libsasl2 ")
+ endif()
+
+ if(WITH_ZSTD)
+ string(APPEND PKG_CONFIG_REQUIRES "libzstd ")
+ endif()
+
+ if(WITH_LZ4_EXT)
+ string(APPEND PKG_CONFIG_REQUIRES "liblz4 ")
+ endif()
+
+ set(PKG_CONFIG_CFLAGS "-I\${includedir}")
+ set(PKG_CONFIG_LIBS "-L\${libdir} -lrdkafka")
+
+ configure_file(
+ "../packaging/cmake/rdkafka.pc.in"
+ "${GENERATED_DIR}/rdkafka.pc"
+ @ONLY
+ )
+ install(
+ FILES ${GENERATED_DIR}/rdkafka.pc
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
+ )
+else()
+ set(PKG_CONFIG_NAME "librdkafka-static")
+ set(PKG_CONFIG_DESCRIPTION "The Apache Kafka C/C++ library (static)")
+ set(PKG_CONFIG_CFLAGS "-I\${includedir} -DLIBRDKAFKA_STATICLIB")
+ set(PKG_CONFIG_LIBS "-L\${libdir} \${libdir}/librdkafka.a")
+ string(APPEND PKG_CONFIG_LIBS " ${PKG_CONFIG_LIBS_PRIVATE}")
+ set(PKG_CONFIG_LIBS_PRIVATE "")
+ configure_file(
+ "../packaging/cmake/rdkafka.pc.in"
+ "${GENERATED_DIR}/rdkafka-static.pc"
+ @ONLY
+ )
+ install(
+ FILES ${GENERATED_DIR}/rdkafka-static.pc
+ DESTINATION "${CMAKE_INSTALL_LIBDIR}/pkgconfig"
+ )
+endif()
+
+install(
+ TARGETS rdkafka
+ EXPORT "${targets_export_name}"
+ LIBRARY DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ ARCHIVE DESTINATION "${CMAKE_INSTALL_LIBDIR}"
+ RUNTIME DESTINATION "${CMAKE_INSTALL_BINDIR}"
+ INCLUDES DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}"
+)
+
+install(
+ FILES "rdkafka.h" "rdkafka_mock.h"
+ DESTINATION "${CMAKE_INSTALL_INCLUDEDIR}/librdkafka"
+)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/Makefile b/fluent-bit/lib/librdkafka-2.1.0/src/Makefile
new file mode 100644
index 000000000..26df5723b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/Makefile
@@ -0,0 +1,97 @@
+PKGNAME= librdkafka
+LIBNAME= librdkafka
+LIBVER= 1
+
+-include ../Makefile.config
+
+ifneq ($(wildcard ../.git),)
+# Add librdkafka version string from git tag if this is a git checkout
+CPPFLAGS += -DLIBRDKAFKA_GIT_VERSION="\"$(shell git describe --abbrev=6 --dirty --tags 2>/dev/null)\""
+endif
+
+SRCS_$(WITH_SASL_CYRUS) += rdkafka_sasl_cyrus.c
+SRCS_$(WITH_SASL_SCRAM) += rdkafka_sasl_scram.c
+SRCS_$(WITH_SASL_OAUTHBEARER) += rdkafka_sasl_oauthbearer.c
+SRCS_$(WITH_SNAPPY) += snappy.c
+SRCS_$(WITH_ZLIB) += rdgz.c
+SRCS_$(WITH_ZSTD) += rdkafka_zstd.c
+SRCS_$(WITH_HDRHISTOGRAM) += rdhdrhistogram.c
+SRCS_$(WITH_SSL) += rdkafka_ssl.c
+SRCS_$(WITH_CURL) += rdhttp.c
+SRCS_$(WITH_OAUTHBEARER_OIDC) += rdkafka_sasl_oauthbearer_oidc.c
+
+SRCS_LZ4 = rdxxhash.c
+ifneq ($(WITH_LZ4_EXT), y)
+# Use built-in liblz4
+SRCS_LZ4 += lz4.c lz4frame.c lz4hc.c
+endif
+SRCS_y += rdkafka_lz4.c $(SRCS_LZ4)
+
+SRCS_$(WITH_LIBDL) += rddl.c
+SRCS_$(WITH_PLUGINS) += rdkafka_plugin.c
+
+ifneq ($(HAVE_REGEX), y)
+SRCS_y += regexp.c
+endif
+
+SRCS= rdkafka.c rdkafka_broker.c rdkafka_msg.c rdkafka_topic.c \
+ rdkafka_conf.c rdkafka_timer.c rdkafka_offset.c \
+ rdkafka_transport.c rdkafka_buf.c rdkafka_queue.c rdkafka_op.c \
+ rdkafka_request.c rdkafka_cgrp.c rdkafka_pattern.c \
+ rdkafka_partition.c rdkafka_subscription.c \
+ rdkafka_assignment.c \
+ rdkafka_assignor.c rdkafka_range_assignor.c \
+ rdkafka_roundrobin_assignor.c rdkafka_sticky_assignor.c \
+ rdkafka_feature.c \
+ rdcrc32.c crc32c.c rdmurmur2.c rdfnv1a.c cJSON.c \
+ rdaddr.c rdrand.c rdlist.c \
+ tinycthread.c tinycthread_extra.c \
+ rdlog.c rdstring.c rdkafka_event.c rdkafka_metadata.c \
+ rdregex.c rdports.c rdkafka_metadata_cache.c rdavl.c \
+ rdkafka_sasl.c rdkafka_sasl_plain.c rdkafka_interceptor.c \
+ rdkafka_msgset_writer.c rdkafka_msgset_reader.c \
+ rdkafka_header.c rdkafka_admin.c rdkafka_aux.c \
+ rdkafka_background.c rdkafka_idempotence.c rdkafka_cert.c \
+ rdkafka_txnmgr.c rdkafka_coord.c \
+ rdvarint.c rdbuf.c rdmap.c rdunittest.c \
+ rdkafka_mock.c rdkafka_mock_handlers.c rdkafka_mock_cgrp.c \
+ rdkafka_error.c rdkafka_fetcher.c \
+ $(SRCS_y)
+
+HDRS= rdkafka.h rdkafka_mock.h
+
+OBJS= $(SRCS:.c=.o)
+
+
+all: lib check
+
+include ../mklove/Makefile.base
+
+CHECK_FILES+= $(LIBFILENAME) $(LIBNAME).a
+
+file-check: lib
+check: file-check
+ @(printf "%-30s " "Symbol visibility" ; \
+ (($(SYMDUMPER) $(LIBFILENAME) | grep rd_kafka_new >/dev/null) && \
+ ($(SYMDUMPER) $(LIBFILENAME) | grep -v rd_kafka_destroy >/dev/null) && \
+ printf "$(MKL_GREEN)OK$(MKL_CLR_RESET)\n") || \
+ printf "$(MKL_RED)FAILED$(MKL_CLR_RESET)\n")
+
+install: lib-install
+uninstall: lib-uninstall
+
+clean: lib-clean
+
+# Compile LZ4 with -O3
+$(SRCS_LZ4:.c=.o): CFLAGS:=$(CFLAGS) -O3
+
+ifeq ($(WITH_LDS),y)
+# Enable linker script if supported by platform
+LIB_LDFLAGS+= $(LDFLAG_LINKERSCRIPT)$(LIBNAME_LDS)
+
+$(LIBNAME_LDS): $(HDRS)
+ @(printf "$(MKL_YELLOW)Generating linker script $@ from $(HDRS)$(MKL_CLR_RESET)\n" ; \
+ cat $(HDRS) | ../lds-gen.py > $@)
+endif
+
+-include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c b/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c
new file mode 100644
index 000000000..9aec18469
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.c
@@ -0,0 +1,2834 @@
+/*
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+
+/* cJSON */
+/* JSON parser in C. */
+
+/* disable warnings about old C89 functions in MSVC */
+#if !defined(_CRT_SECURE_NO_DEPRECATE) && defined(_MSC_VER)
+#define _CRT_SECURE_NO_DEPRECATE
+#endif
+
+#ifdef __GNUC__
+#pragma GCC visibility push(default)
+#endif
+#if defined(_MSC_VER)
+#pragma warning(push)
+/* disable warning about single line comments in system headers */
+#pragma warning(disable : 4001)
+#endif
+
+#include <string.h>
+#include <stdio.h>
+#include <math.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <ctype.h>
+#include <float.h>
+
+#ifdef ENABLE_LOCALES
+#include <locale.h>
+#endif
+
+#if defined(_MSC_VER)
+#pragma warning(pop)
+#endif
+#ifdef __GNUC__
+#pragma GCC visibility pop
+#endif
+
+#include "cJSON.h"
+
+/* define our own boolean type */
+#ifdef true
+#undef true
+#endif
+#define true ((cJSON_bool)1)
+
+#ifdef false
+#undef false
+#endif
+#define false ((cJSON_bool)0)
+
+/* define isnan and isinf for ANSI C, if in C99 or above, isnan and isinf has
+ * been defined in math.h */
+#ifndef isinf
+#define isinf(d) (isnan((d - d)) && !isnan(d))
+#endif
+#ifndef isnan
+#define isnan(d) (d != d)
+#endif
+
+#ifndef NAN
+#define NAN 0.0 / 0.0
+#endif
+
+typedef struct {
+ const unsigned char *json;
+ size_t position;
+} error;
+static error global_error = {NULL, 0};
+
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void) {
+ return (const char *)(global_error.json + global_error.position);
+}
+
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item) {
+ if (!cJSON_IsString(item)) {
+ return NULL;
+ }
+
+ return item->valuestring;
+}
+
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item) {
+ if (!cJSON_IsNumber(item)) {
+ return (double)NAN;
+ }
+
+ return item->valuedouble;
+}
+
+/* This is a safeguard to prevent copy-pasters from using incompatible C and
+ * header files */
+#if (CJSON_VERSION_MAJOR != 1) || (CJSON_VERSION_MINOR != 7) || \
+ (CJSON_VERSION_PATCH != 14)
+#error cJSON.h and cJSON.c have different versions. Make sure that both have the same.
+#endif
+
+CJSON_PUBLIC(const char *) cJSON_Version(void) {
+ static char version[15];
+ sprintf(version, "%i.%i.%i", CJSON_VERSION_MAJOR, CJSON_VERSION_MINOR,
+ CJSON_VERSION_PATCH);
+
+ return version;
+}
+
+/* Case insensitive string comparison, doesn't consider two NULL pointers equal
+ * though */
+static int case_insensitive_strcmp(const unsigned char *string1,
+ const unsigned char *string2) {
+ if ((string1 == NULL) || (string2 == NULL)) {
+ return 1;
+ }
+
+ if (string1 == string2) {
+ return 0;
+ }
+
+ for (; tolower(*string1) == tolower(*string2);
+ (void)string1++, string2++) {
+ if (*string1 == '\0') {
+ return 0;
+ }
+ }
+
+ return tolower(*string1) - tolower(*string2);
+}
+
+typedef struct internal_hooks {
+ void *(CJSON_CDECL *allocate)(size_t size);
+ void(CJSON_CDECL *deallocate)(void *pointer);
+ void *(CJSON_CDECL *reallocate)(void *pointer, size_t size);
+} internal_hooks;
+
+#if defined(_MSC_VER)
+/* work around MSVC error C2322: '...' address of dllimport '...' is not static
+ */
+static void *CJSON_CDECL internal_malloc(size_t size) {
+ return malloc(size);
+}
+static void CJSON_CDECL internal_free(void *pointer) {
+ free(pointer);
+}
+static void *CJSON_CDECL internal_realloc(void *pointer, size_t size) {
+ return realloc(pointer, size);
+}
+#else
+#define internal_malloc malloc
+#define internal_free free
+#define internal_realloc realloc
+#endif
+
+/* strlen of character literals resolved at compile time */
+#define static_strlen(string_literal) (sizeof(string_literal) - sizeof(""))
+
+static internal_hooks global_hooks = {internal_malloc, internal_free,
+ internal_realloc};
+
+static unsigned char *cJSON_strdup(const unsigned char *string,
+ const internal_hooks *const hooks) {
+ size_t length = 0;
+ unsigned char *copy = NULL;
+
+ if (string == NULL) {
+ return NULL;
+ }
+
+ length = strlen((const char *)string) + sizeof("");
+ copy = (unsigned char *)hooks->allocate(length);
+ if (copy == NULL) {
+ return NULL;
+ }
+ memcpy(copy, string, length);
+
+ return copy;
+}
+
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks) {
+ if (hooks == NULL) {
+ /* Reset hooks */
+ global_hooks.allocate = malloc;
+ global_hooks.deallocate = free;
+ global_hooks.reallocate = realloc;
+ return;
+ }
+
+ global_hooks.allocate = malloc;
+ if (hooks->malloc_fn != NULL) {
+ global_hooks.allocate = hooks->malloc_fn;
+ }
+
+ global_hooks.deallocate = free;
+ if (hooks->free_fn != NULL) {
+ global_hooks.deallocate = hooks->free_fn;
+ }
+
+ /* use realloc only if both free and malloc are used */
+ global_hooks.reallocate = NULL;
+ if ((global_hooks.allocate == malloc) &&
+ (global_hooks.deallocate == free)) {
+ global_hooks.reallocate = realloc;
+ }
+}
+
+/* Internal constructor. */
+static cJSON *cJSON_New_Item(const internal_hooks *const hooks) {
+ cJSON *node = (cJSON *)hooks->allocate(sizeof(cJSON));
+ if (node) {
+ memset(node, '\0', sizeof(cJSON));
+ }
+
+ return node;
+}
+
+/* Delete a cJSON structure. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item) {
+ cJSON *next = NULL;
+ while (item != NULL) {
+ next = item->next;
+ if (!(item->type & cJSON_IsReference) &&
+ (item->child != NULL)) {
+ cJSON_Delete(item->child);
+ }
+ if (!(item->type & cJSON_IsReference) &&
+ (item->valuestring != NULL)) {
+ global_hooks.deallocate(item->valuestring);
+ }
+ if (!(item->type & cJSON_StringIsConst) &&
+ (item->string != NULL)) {
+ global_hooks.deallocate(item->string);
+ }
+ global_hooks.deallocate(item);
+ item = next;
+ }
+}
+
+/* get the decimal point character of the current locale */
+static unsigned char get_decimal_point(void) {
+#ifdef ENABLE_LOCALES
+ struct lconv *lconv = localeconv();
+ return (unsigned char)lconv->decimal_point[0];
+#else
+ return '.';
+#endif
+}
+
+typedef struct {
+ const unsigned char *content;
+ size_t length;
+ size_t offset;
+ size_t depth; /* How deeply nested (in arrays/objects) is the input at
+ the current offset. */
+ internal_hooks hooks;
+} parse_buffer;
+
+/* check if the given size is left to read in a given parse buffer (starting
+ * with 1) */
+#define can_read(buffer, size) \
+ ((buffer != NULL) && (((buffer)->offset + size) <= (buffer)->length))
+/* check if the buffer can be accessed at the given index (starting with 0) */
+#define can_access_at_index(buffer, index) \
+ ((buffer != NULL) && (((buffer)->offset + index) < (buffer)->length))
+#define cannot_access_at_index(buffer, index) \
+ (!can_access_at_index(buffer, index))
+/* get a pointer to the buffer at the position */
+#define buffer_at_offset(buffer) ((buffer)->content + (buffer)->offset)
+
+/* Parse the input text to generate a number, and populate the result into item.
+ */
+static cJSON_bool parse_number(cJSON *const item,
+ parse_buffer *const input_buffer) {
+ double number = 0;
+ unsigned char *after_end = NULL;
+ unsigned char number_c_string[64];
+ unsigned char decimal_point = get_decimal_point();
+ size_t i = 0;
+
+ if ((input_buffer == NULL) || (input_buffer->content == NULL)) {
+ return false;
+ }
+
+ /* copy the number into a temporary buffer and replace '.' with the
+ * decimal point of the current locale (for strtod)
+ * This also takes care of '\0' not necessarily being available for
+ * marking the end of the input */
+ for (i = 0; (i < (sizeof(number_c_string) - 1)) &&
+ can_access_at_index(input_buffer, i);
+ i++) {
+ switch (buffer_at_offset(input_buffer)[i]) {
+ case '0':
+ case '1':
+ case '2':
+ case '3':
+ case '4':
+ case '5':
+ case '6':
+ case '7':
+ case '8':
+ case '9':
+ case '+':
+ case '-':
+ case 'e':
+ case 'E':
+ number_c_string[i] = buffer_at_offset(input_buffer)[i];
+ break;
+
+ case '.':
+ number_c_string[i] = decimal_point;
+ break;
+
+ default:
+ goto loop_end;
+ }
+ }
+loop_end:
+ number_c_string[i] = '\0';
+
+ number = strtod((const char *)number_c_string, (char **)&after_end);
+ if (number_c_string == after_end) {
+ return false; /* parse_error */
+ }
+
+ item->valuedouble = number;
+
+ /* use saturation in case of overflow */
+ if (number >= INT_MAX) {
+ item->valueint = INT_MAX;
+ } else if (number <= (double)INT_MIN) {
+ item->valueint = INT_MIN;
+ } else {
+ item->valueint = (int)number;
+ }
+
+ item->type = cJSON_Number;
+
+ input_buffer->offset += (size_t)(after_end - number_c_string);
+ return true;
+}
+
+/* don't ask me, but the original cJSON_SetNumberValue returns an integer or
+ * double */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number) {
+ if (number >= INT_MAX) {
+ object->valueint = INT_MAX;
+ } else if (number <= (double)INT_MIN) {
+ object->valueint = INT_MIN;
+ } else {
+ object->valueint = (int)number;
+ }
+
+ return object->valuedouble = number;
+}
+
+CJSON_PUBLIC(char *)
+cJSON_SetValuestring(cJSON *object, const char *valuestring) {
+ char *copy = NULL;
+ /* if object's type is not cJSON_String or is cJSON_IsReference, it
+ * should not set valuestring */
+ if (!(object->type & cJSON_String) ||
+ (object->type & cJSON_IsReference)) {
+ return NULL;
+ }
+ if (strlen(valuestring) <= strlen(object->valuestring)) {
+ strcpy(object->valuestring, valuestring);
+ return object->valuestring;
+ }
+ copy = (char *)cJSON_strdup((const unsigned char *)valuestring,
+ &global_hooks);
+ if (copy == NULL) {
+ return NULL;
+ }
+ if (object->valuestring != NULL) {
+ cJSON_free(object->valuestring);
+ }
+ object->valuestring = copy;
+
+ return copy;
+}
+
+typedef struct {
+ unsigned char *buffer;
+ size_t length;
+ size_t offset;
+ size_t depth; /* current nesting depth (for formatted printing) */
+ cJSON_bool noalloc;
+ cJSON_bool format; /* is this print a formatted print */
+ internal_hooks hooks;
+} printbuffer;
+
+/* realloc printbuffer if necessary to have at least "needed" bytes more */
+static unsigned char *ensure(printbuffer *const p, size_t needed) {
+ unsigned char *newbuffer = NULL;
+ size_t newsize = 0;
+
+ if ((p == NULL) || (p->buffer == NULL)) {
+ return NULL;
+ }
+
+ if ((p->length > 0) && (p->offset >= p->length)) {
+ /* make sure that offset is valid */
+ return NULL;
+ }
+
+ if (needed > INT_MAX) {
+ /* sizes bigger than INT_MAX are currently not supported */
+ return NULL;
+ }
+
+ needed += p->offset + 1;
+ if (needed <= p->length) {
+ return p->buffer + p->offset;
+ }
+
+ if (p->noalloc) {
+ return NULL;
+ }
+
+ /* calculate new buffer size */
+ if (needed > (INT_MAX / 2)) {
+ /* overflow of int, use INT_MAX if possible */
+ if (needed <= INT_MAX) {
+ newsize = INT_MAX;
+ } else {
+ return NULL;
+ }
+ } else {
+ newsize = needed * 2;
+ }
+
+ if (p->hooks.reallocate != NULL) {
+ /* reallocate with realloc if available */
+ newbuffer =
+ (unsigned char *)p->hooks.reallocate(p->buffer, newsize);
+ if (newbuffer == NULL) {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+ } else {
+ /* otherwise reallocate manually */
+ newbuffer = (unsigned char *)p->hooks.allocate(newsize);
+ if (!newbuffer) {
+ p->hooks.deallocate(p->buffer);
+ p->length = 0;
+ p->buffer = NULL;
+
+ return NULL;
+ }
+ if (newbuffer) {
+ memcpy(newbuffer, p->buffer, p->offset + 1);
+ }
+ p->hooks.deallocate(p->buffer);
+ }
+ p->length = newsize;
+ p->buffer = newbuffer;
+
+ return newbuffer + p->offset;
+}
+
+/* calculate the new length of the string in a printbuffer and update the offset
+ */
+static void update_offset(printbuffer *const buffer) {
+ const unsigned char *buffer_pointer = NULL;
+ if ((buffer == NULL) || (buffer->buffer == NULL)) {
+ return;
+ }
+ buffer_pointer = buffer->buffer + buffer->offset;
+
+ buffer->offset += strlen((const char *)buffer_pointer);
+}
+
+/* securely comparison of floating-point variables */
+static cJSON_bool compare_double(double a, double b) {
+ double maxVal = fabs(a) > fabs(b) ? fabs(a) : fabs(b);
+ return (fabs(a - b) <= maxVal * DBL_EPSILON);
+}
+
+/* Render the number nicely from the given item into a string. */
+static cJSON_bool print_number(const cJSON *const item,
+ printbuffer *const output_buffer) {
+ unsigned char *output_pointer = NULL;
+ double d = item->valuedouble;
+ int length = 0;
+ size_t i = 0;
+ unsigned char number_buffer[26] = {
+ 0}; /* temporary buffer to print the number into */
+ unsigned char decimal_point = get_decimal_point();
+ double test = 0.0;
+
+ if (output_buffer == NULL) {
+ return false;
+ }
+
+ /* This checks for NaN and Infinity */
+ if (isnan(d) || isinf(d)) {
+ length = sprintf((char *)number_buffer, "null");
+ } else {
+ /* Try 15 decimal places of precision to avoid nonsignificant
+ * nonzero digits */
+ length = sprintf((char *)number_buffer, "%1.15g", d);
+
+ /* Check whether the original double can be recovered */
+ if ((sscanf((char *)number_buffer, "%lg", &test) != 1) ||
+ !compare_double((double)test, d)) {
+ /* If not, print with 17 decimal places of precision */
+ length = sprintf((char *)number_buffer, "%1.17g", d);
+ }
+ }
+
+ /* sprintf failed or buffer overrun occurred */
+ if ((length < 0) || (length > (int)(sizeof(number_buffer) - 1))) {
+ return false;
+ }
+
+ /* reserve appropriate space in the output */
+ output_pointer = ensure(output_buffer, (size_t)length + sizeof(""));
+ if (output_pointer == NULL) {
+ return false;
+ }
+
+ /* copy the printed number to the output and replace locale
+ * dependent decimal point with '.' */
+ for (i = 0; i < ((size_t)length); i++) {
+ if (number_buffer[i] == decimal_point) {
+ output_pointer[i] = '.';
+ continue;
+ }
+
+ output_pointer[i] = number_buffer[i];
+ }
+ output_pointer[i] = '\0';
+
+ output_buffer->offset += (size_t)length;
+
+ return true;
+}
+
+/* parse 4 digit hexadecimal number */
+static unsigned parse_hex4(const unsigned char *const input) {
+ unsigned int h = 0;
+ size_t i = 0;
+
+ for (i = 0; i < 4; i++) {
+ /* parse digit */
+ if ((input[i] >= '0') && (input[i] <= '9')) {
+ h += (unsigned int)input[i] - '0';
+ } else if ((input[i] >= 'A') && (input[i] <= 'F')) {
+ h += (unsigned int)10 + input[i] - 'A';
+ } else if ((input[i] >= 'a') && (input[i] <= 'f')) {
+ h += (unsigned int)10 + input[i] - 'a';
+ } else /* invalid */
+ {
+ return 0;
+ }
+
+ if (i < 3) {
+ /* shift left to make place for the next nibble */
+ h = h << 4;
+ }
+ }
+
+ return h;
+}
+
+/* converts a UTF-16 literal to UTF-8
+ * A literal can be one or two sequences of the form \uXXXX */
+static unsigned char
+utf16_literal_to_utf8(const unsigned char *const input_pointer,
+ const unsigned char *const input_end,
+ unsigned char **output_pointer) {
+ long unsigned int codepoint = 0;
+ unsigned int first_code = 0;
+ const unsigned char *first_sequence = input_pointer;
+ unsigned char utf8_length = 0;
+ unsigned char utf8_position = 0;
+ unsigned char sequence_length = 0;
+ unsigned char first_byte_mark = 0;
+
+ if ((input_end - first_sequence) < 6) {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ /* get the first utf16 sequence */
+ first_code = parse_hex4(first_sequence + 2);
+
+ /* check that the code is valid */
+ if (((first_code >= 0xDC00) && (first_code <= 0xDFFF))) {
+ goto fail;
+ }
+
+ /* UTF16 surrogate pair */
+ if ((first_code >= 0xD800) && (first_code <= 0xDBFF)) {
+ const unsigned char *second_sequence = first_sequence + 6;
+ unsigned int second_code = 0;
+ sequence_length = 12; /* \uXXXX\uXXXX */
+
+ if ((input_end - second_sequence) < 6) {
+ /* input ends unexpectedly */
+ goto fail;
+ }
+
+ if ((second_sequence[0] != '\\') ||
+ (second_sequence[1] != 'u')) {
+ /* missing second half of the surrogate pair */
+ goto fail;
+ }
+
+ /* get the second utf16 sequence */
+ second_code = parse_hex4(second_sequence + 2);
+ /* check that the code is valid */
+ if ((second_code < 0xDC00) || (second_code > 0xDFFF)) {
+ /* invalid second half of the surrogate pair */
+ goto fail;
+ }
+
+
+ /* calculate the unicode codepoint from the surrogate pair */
+ codepoint = 0x10000 + (((first_code & 0x3FF) << 10) |
+ (second_code & 0x3FF));
+ } else {
+ sequence_length = 6; /* \uXXXX */
+ codepoint = first_code;
+ }
+
+ /* encode as UTF-8
+ * takes at maximum 4 bytes to encode:
+ * 11110xxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ if (codepoint < 0x80) {
+ /* normal ascii, encoding 0xxxxxxx */
+ utf8_length = 1;
+ } else if (codepoint < 0x800) {
+ /* two bytes, encoding 110xxxxx 10xxxxxx */
+ utf8_length = 2;
+ first_byte_mark = 0xC0; /* 11000000 */
+ } else if (codepoint < 0x10000) {
+ /* three bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 3;
+ first_byte_mark = 0xE0; /* 11100000 */
+ } else if (codepoint <= 0x10FFFF) {
+ /* four bytes, encoding 1110xxxx 10xxxxxx 10xxxxxx 10xxxxxx */
+ utf8_length = 4;
+ first_byte_mark = 0xF0; /* 11110000 */
+ } else {
+ /* invalid unicode codepoint */
+ goto fail;
+ }
+
+ /* encode as utf8 */
+ for (utf8_position = (unsigned char)(utf8_length - 1);
+ utf8_position > 0; utf8_position--) {
+ /* 10xxxxxx */
+ (*output_pointer)[utf8_position] =
+ (unsigned char)((codepoint | 0x80) & 0xBF);
+ codepoint >>= 6;
+ }
+ /* encode first byte */
+ if (utf8_length > 1) {
+ (*output_pointer)[0] =
+ (unsigned char)((codepoint | first_byte_mark) & 0xFF);
+ } else {
+ (*output_pointer)[0] = (unsigned char)(codepoint & 0x7F);
+ }
+
+ *output_pointer += utf8_length;
+
+ return sequence_length;
+
+fail:
+ return 0;
+}
+
+/* Parse the input text into an unescaped cinput, and populate item. */
+static cJSON_bool parse_string(cJSON *const item,
+ parse_buffer *const input_buffer) {
+ const unsigned char *input_pointer = buffer_at_offset(input_buffer) + 1;
+ const unsigned char *input_end = buffer_at_offset(input_buffer) + 1;
+ unsigned char *output_pointer = NULL;
+ unsigned char *output = NULL;
+
+ /* not a string */
+ if (buffer_at_offset(input_buffer)[0] != '\"') {
+ goto fail;
+ }
+
+ {
+ /* calculate approximate size of the output (overestimate) */
+ size_t allocation_length = 0;
+ size_t skipped_bytes = 0;
+ while (((size_t)(input_end - input_buffer->content) <
+ input_buffer->length) &&
+ (*input_end != '\"')) {
+ /* is escape sequence */
+ if (input_end[0] == '\\') {
+ if ((size_t)(input_end + 1 -
+ input_buffer->content) >=
+ input_buffer->length) {
+ /* prevent buffer overflow when last
+ * input character is a backslash */
+ goto fail;
+ }
+ skipped_bytes++;
+ input_end++;
+ }
+ input_end++;
+ }
+ if (((size_t)(input_end - input_buffer->content) >=
+ input_buffer->length) ||
+ (*input_end != '\"')) {
+ goto fail; /* string ended unexpectedly */
+ }
+
+ /* This is at most how much we need for the output */
+ allocation_length =
+ (size_t)(input_end - buffer_at_offset(input_buffer)) -
+ skipped_bytes;
+ output = (unsigned char *)input_buffer->hooks.allocate(
+ allocation_length + sizeof(""));
+ if (output == NULL) {
+ goto fail; /* allocation failure */
+ }
+ }
+
+ output_pointer = output;
+ /* loop through the string literal */
+ while (input_pointer < input_end) {
+ if (*input_pointer != '\\') {
+ *output_pointer++ = *input_pointer++;
+ }
+ /* escape sequence */
+ else {
+ unsigned char sequence_length = 2;
+ if ((input_end - input_pointer) < 1) {
+ goto fail;
+ }
+
+ switch (input_pointer[1]) {
+ case 'b':
+ *output_pointer++ = '\b';
+ break;
+ case 'f':
+ *output_pointer++ = '\f';
+ break;
+ case 'n':
+ *output_pointer++ = '\n';
+ break;
+ case 'r':
+ *output_pointer++ = '\r';
+ break;
+ case 't':
+ *output_pointer++ = '\t';
+ break;
+ case '\"':
+ case '\\':
+ case '/':
+ *output_pointer++ = input_pointer[1];
+ break;
+
+ /* UTF-16 literal */
+ case 'u':
+ sequence_length = utf16_literal_to_utf8(
+ input_pointer, input_end, &output_pointer);
+ if (sequence_length == 0) {
+ /* failed to convert UTF16-literal to
+ * UTF-8 */
+ goto fail;
+ }
+ break;
+
+ default:
+ goto fail;
+ }
+ input_pointer += sequence_length;
+ }
+ }
+
+ /* zero terminate the output */
+ *output_pointer = '\0';
+
+ item->type = cJSON_String;
+ item->valuestring = (char *)output;
+
+ input_buffer->offset = (size_t)(input_end - input_buffer->content);
+ input_buffer->offset++;
+
+ return true;
+
+fail:
+ if (output != NULL) {
+ input_buffer->hooks.deallocate(output);
+ }
+
+ if (input_pointer != NULL) {
+ input_buffer->offset =
+ (size_t)(input_pointer - input_buffer->content);
+ }
+
+ return false;
+}
+
+/* Render the cstring provided to an escaped version that can be printed. */
+static cJSON_bool print_string_ptr(const unsigned char *const input,
+ printbuffer *const output_buffer) {
+ const unsigned char *input_pointer = NULL;
+ unsigned char *output = NULL;
+ unsigned char *output_pointer = NULL;
+ size_t output_length = 0;
+ /* numbers of additional characters needed for escaping */
+ size_t escape_characters = 0;
+
+ if (output_buffer == NULL) {
+ return false;
+ }
+
+ /* empty string */
+ if (input == NULL) {
+ output = ensure(output_buffer, sizeof("\"\""));
+ if (output == NULL) {
+ return false;
+ }
+ strcpy((char *)output, "\"\"");
+
+ return true;
+ }
+
+ /* set "flag" to 1 if something needs to be escaped */
+ for (input_pointer = input; *input_pointer; input_pointer++) {
+ switch (*input_pointer) {
+ case '\"':
+ case '\\':
+ case '\b':
+ case '\f':
+ case '\n':
+ case '\r':
+ case '\t':
+ /* one character escape sequence */
+ escape_characters++;
+ break;
+ default:
+ if (*input_pointer < 32) {
+ /* UTF-16 escape sequence uXXXX */
+ escape_characters += 5;
+ }
+ break;
+ }
+ }
+ output_length = (size_t)(input_pointer - input) + escape_characters;
+
+ output = ensure(output_buffer, output_length + sizeof("\"\""));
+ if (output == NULL) {
+ return false;
+ }
+
+ /* no characters have to be escaped */
+ if (escape_characters == 0) {
+ output[0] = '\"';
+ memcpy(output + 1, input, output_length);
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+ }
+
+ output[0] = '\"';
+ output_pointer = output + 1;
+ /* copy the string */
+ for (input_pointer = input; *input_pointer != '\0';
+ (void)input_pointer++, output_pointer++) {
+ if ((*input_pointer > 31) && (*input_pointer != '\"') &&
+ (*input_pointer != '\\')) {
+ /* normal character, copy */
+ *output_pointer = *input_pointer;
+ } else {
+ /* character needs to be escaped */
+ *output_pointer++ = '\\';
+ switch (*input_pointer) {
+ case '\\':
+ *output_pointer = '\\';
+ break;
+ case '\"':
+ *output_pointer = '\"';
+ break;
+ case '\b':
+ *output_pointer = 'b';
+ break;
+ case '\f':
+ *output_pointer = 'f';
+ break;
+ case '\n':
+ *output_pointer = 'n';
+ break;
+ case '\r':
+ *output_pointer = 'r';
+ break;
+ case '\t':
+ *output_pointer = 't';
+ break;
+ default:
+ /* escape and print as unicode codepoint */
+ sprintf((char *)output_pointer, "u%04x",
+ *input_pointer);
+ output_pointer += 4;
+ break;
+ }
+ }
+ }
+ output[output_length + 1] = '\"';
+ output[output_length + 2] = '\0';
+
+ return true;
+}
+
+/* Invoke print_string_ptr (which is useful) on an item. */
+static cJSON_bool print_string(const cJSON *const item, printbuffer *const p) {
+ return print_string_ptr((unsigned char *)item->valuestring, p);
+}
+
+/* Predeclare these prototypes. */
+static cJSON_bool parse_value(cJSON *const item,
+ parse_buffer *const input_buffer);
+static cJSON_bool print_value(const cJSON *const item,
+ printbuffer *const output_buffer);
+static cJSON_bool parse_array(cJSON *const item,
+ parse_buffer *const input_buffer);
+static cJSON_bool print_array(const cJSON *const item,
+ printbuffer *const output_buffer);
+static cJSON_bool parse_object(cJSON *const item,
+ parse_buffer *const input_buffer);
+static cJSON_bool print_object(const cJSON *const item,
+ printbuffer *const output_buffer);
+
+/* Utility to jump whitespace and cr/lf */
+static parse_buffer *buffer_skip_whitespace(parse_buffer *const buffer) {
+ if ((buffer == NULL) || (buffer->content == NULL)) {
+ return NULL;
+ }
+
+ if (cannot_access_at_index(buffer, 0)) {
+ return buffer;
+ }
+
+ while (can_access_at_index(buffer, 0) &&
+ (buffer_at_offset(buffer)[0] <= 32)) {
+ buffer->offset++;
+ }
+
+ if (buffer->offset == buffer->length) {
+ buffer->offset--;
+ }
+
+ return buffer;
+}
+
+/* skip the UTF-8 BOM (byte order mark) if it is at the beginning of a buffer */
+static parse_buffer *skip_utf8_bom(parse_buffer *const buffer) {
+ if ((buffer == NULL) || (buffer->content == NULL) ||
+ (buffer->offset != 0)) {
+ return NULL;
+ }
+
+ if (can_access_at_index(buffer, 4) &&
+ (strncmp((const char *)buffer_at_offset(buffer), "\xEF\xBB\xBF",
+ 3) == 0)) {
+ buffer->offset += 3;
+ }
+
+ return buffer;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithOpts(const char *value,
+ const char **return_parse_end,
+ cJSON_bool require_null_terminated) {
+ size_t buffer_length;
+
+ if (NULL == value) {
+ return NULL;
+ }
+
+ /* Adding null character size due to require_null_terminated. */
+ buffer_length = strlen(value) + sizeof("");
+
+ return cJSON_ParseWithLengthOpts(value, buffer_length, return_parse_end,
+ require_null_terminated);
+}
+
+/* Parse an object - create a new root, and populate. */
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithLengthOpts(const char *value,
+ size_t buffer_length,
+ const char **return_parse_end,
+ cJSON_bool require_null_terminated) {
+ parse_buffer buffer = {0, 0, 0, 0, {0, 0, 0}};
+ cJSON *item = NULL;
+
+ /* reset error position */
+ global_error.json = NULL;
+ global_error.position = 0;
+
+ if (value == NULL || 0 == buffer_length) {
+ goto fail;
+ }
+
+ buffer.content = (const unsigned char *)value;
+ buffer.length = buffer_length;
+ buffer.offset = 0;
+ buffer.hooks = global_hooks;
+
+ item = cJSON_New_Item(&global_hooks);
+ if (item == NULL) /* memory fail */
+ {
+ goto fail;
+ }
+
+ if (!parse_value(item,
+ buffer_skip_whitespace(skip_utf8_bom(&buffer)))) {
+ /* parse failure. ep is set. */
+ goto fail;
+ }
+
+ /* if we require null-terminated JSON without appended garbage, skip and
+ * then check for a null terminator */
+ if (require_null_terminated) {
+ buffer_skip_whitespace(&buffer);
+ if ((buffer.offset >= buffer.length) ||
+ buffer_at_offset(&buffer)[0] != '\0') {
+ goto fail;
+ }
+ }
+ if (return_parse_end) {
+ *return_parse_end = (const char *)buffer_at_offset(&buffer);
+ }
+
+ return item;
+
+fail:
+ if (item != NULL) {
+ cJSON_Delete(item);
+ }
+
+ if (value != NULL) {
+ error local_error;
+ local_error.json = (const unsigned char *)value;
+ local_error.position = 0;
+
+ if (buffer.offset < buffer.length) {
+ local_error.position = buffer.offset;
+ } else if (buffer.length > 0) {
+ local_error.position = buffer.length - 1;
+ }
+
+ if (return_parse_end != NULL) {
+ *return_parse_end = (const char *)local_error.json +
+ local_error.position;
+ }
+
+ global_error = local_error;
+ }
+
+ return NULL;
+}
+
+/* Default options for cJSON_Parse */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value) {
+ return cJSON_ParseWithOpts(value, 0, 0);
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithLength(const char *value, size_t buffer_length) {
+ return cJSON_ParseWithLengthOpts(value, buffer_length, 0, 0);
+}
+
+#define cjson_min(a, b) (((a) < (b)) ? (a) : (b))
+
+static unsigned char *print(const cJSON *const item,
+ cJSON_bool format,
+ const internal_hooks *const hooks) {
+ static const size_t default_buffer_size = 256;
+ printbuffer buffer[1];
+ unsigned char *printed = NULL;
+
+ memset(buffer, 0, sizeof(buffer));
+
+ /* create buffer */
+ buffer->buffer = (unsigned char *)hooks->allocate(default_buffer_size);
+ buffer->length = default_buffer_size;
+ buffer->format = format;
+ buffer->hooks = *hooks;
+ if (buffer->buffer == NULL) {
+ goto fail;
+ }
+
+ /* print the value */
+ if (!print_value(item, buffer)) {
+ goto fail;
+ }
+ update_offset(buffer);
+
+ /* check if reallocate is available */
+ if (hooks->reallocate != NULL) {
+ printed = (unsigned char *)hooks->reallocate(
+ buffer->buffer, buffer->offset + 1);
+ if (printed == NULL) {
+ goto fail;
+ }
+ buffer->buffer = NULL;
+ } else /* otherwise copy the JSON over to a new buffer */
+ {
+ printed = (unsigned char *)hooks->allocate(buffer->offset + 1);
+ if (printed == NULL) {
+ goto fail;
+ }
+ memcpy(printed, buffer->buffer,
+ cjson_min(buffer->length, buffer->offset + 1));
+ printed[buffer->offset] = '\0'; /* just to be sure */
+
+ /* free the buffer */
+ hooks->deallocate(buffer->buffer);
+ }
+
+ return printed;
+
+fail:
+ if (buffer->buffer != NULL) {
+ hooks->deallocate(buffer->buffer);
+ }
+
+ if (printed != NULL) {
+ hooks->deallocate(printed);
+ }
+
+ return NULL;
+}
+
+/* Render a cJSON item/entity/structure to text. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item) {
+ return (char *)print(item, true, &global_hooks);
+}
+
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item) {
+ return (char *)print(item, false, &global_hooks);
+}
+
+CJSON_PUBLIC(char *)
+cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt) {
+ printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}};
+
+ if (prebuffer < 0) {
+ return NULL;
+ }
+
+ p.buffer = (unsigned char *)global_hooks.allocate((size_t)prebuffer);
+ if (!p.buffer) {
+ return NULL;
+ }
+
+ p.length = (size_t)prebuffer;
+ p.offset = 0;
+ p.noalloc = false;
+ p.format = fmt;
+ p.hooks = global_hooks;
+
+ if (!print_value(item, &p)) {
+ global_hooks.deallocate(p.buffer);
+ return NULL;
+ }
+
+ return (char *)p.buffer;
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_PrintPreallocated(cJSON *item,
+ char *buffer,
+ const int length,
+ const cJSON_bool format) {
+ printbuffer p = {0, 0, 0, 0, 0, 0, {0, 0, 0}};
+
+ if ((length < 0) || (buffer == NULL)) {
+ return false;
+ }
+
+ p.buffer = (unsigned char *)buffer;
+ p.length = (size_t)length;
+ p.offset = 0;
+ p.noalloc = true;
+ p.format = format;
+ p.hooks = global_hooks;
+
+ return print_value(item, &p);
+}
+
+/* Parser core - when encountering text, process appropriately. */
+static cJSON_bool parse_value(cJSON *const item,
+ parse_buffer *const input_buffer) {
+ if ((input_buffer == NULL) || (input_buffer->content == NULL)) {
+ return false; /* no input */
+ }
+
+ /* parse the different types of values */
+ /* null */
+ if (can_read(input_buffer, 4) &&
+ (strncmp((const char *)buffer_at_offset(input_buffer), "null", 4) ==
+ 0)) {
+ item->type = cJSON_NULL;
+ input_buffer->offset += 4;
+ return true;
+ }
+ /* false */
+ if (can_read(input_buffer, 5) &&
+ (strncmp((const char *)buffer_at_offset(input_buffer), "false",
+ 5) == 0)) {
+ item->type = cJSON_False;
+ input_buffer->offset += 5;
+ return true;
+ }
+ /* true */
+ if (can_read(input_buffer, 4) &&
+ (strncmp((const char *)buffer_at_offset(input_buffer), "true", 4) ==
+ 0)) {
+ item->type = cJSON_True;
+ item->valueint = 1;
+ input_buffer->offset += 4;
+ return true;
+ }
+ /* string */
+ if (can_access_at_index(input_buffer, 0) &&
+ (buffer_at_offset(input_buffer)[0] == '\"')) {
+ return parse_string(item, input_buffer);
+ }
+ /* number */
+ if (can_access_at_index(input_buffer, 0) &&
+ ((buffer_at_offset(input_buffer)[0] == '-') ||
+ ((buffer_at_offset(input_buffer)[0] >= '0') &&
+ (buffer_at_offset(input_buffer)[0] <= '9')))) {
+ return parse_number(item, input_buffer);
+ }
+ /* array */
+ if (can_access_at_index(input_buffer, 0) &&
+ (buffer_at_offset(input_buffer)[0] == '[')) {
+ return parse_array(item, input_buffer);
+ }
+ /* object */
+ if (can_access_at_index(input_buffer, 0) &&
+ (buffer_at_offset(input_buffer)[0] == '{')) {
+ return parse_object(item, input_buffer);
+ }
+
+ return false;
+}
+
+/* Render a value to text. */
+static cJSON_bool print_value(const cJSON *const item,
+ printbuffer *const output_buffer) {
+ unsigned char *output = NULL;
+
+ if ((item == NULL) || (output_buffer == NULL)) {
+ return false;
+ }
+
+ switch ((item->type) & 0xFF) {
+ case cJSON_NULL:
+ output = ensure(output_buffer, 5);
+ if (output == NULL) {
+ return false;
+ }
+ strcpy((char *)output, "null");
+ return true;
+
+ case cJSON_False:
+ output = ensure(output_buffer, 6);
+ if (output == NULL) {
+ return false;
+ }
+ strcpy((char *)output, "false");
+ return true;
+
+ case cJSON_True:
+ output = ensure(output_buffer, 5);
+ if (output == NULL) {
+ return false;
+ }
+ strcpy((char *)output, "true");
+ return true;
+
+ case cJSON_Number:
+ return print_number(item, output_buffer);
+
+ case cJSON_Raw: {
+ size_t raw_length = 0;
+ if (item->valuestring == NULL) {
+ return false;
+ }
+
+ raw_length = strlen(item->valuestring) + sizeof("");
+ output = ensure(output_buffer, raw_length);
+ if (output == NULL) {
+ return false;
+ }
+ memcpy(output, item->valuestring, raw_length);
+ return true;
+ }
+
+ case cJSON_String:
+ return print_string(item, output_buffer);
+
+ case cJSON_Array:
+ return print_array(item, output_buffer);
+
+ case cJSON_Object:
+ return print_object(item, output_buffer);
+
+ default:
+ return false;
+ }
+}
+
+/* Build an array from input text. */
+static cJSON_bool parse_array(cJSON *const item,
+ parse_buffer *const input_buffer) {
+ cJSON *head = NULL; /* head of the linked list */
+ cJSON *current_item = NULL;
+
+ if (input_buffer->depth >= CJSON_NESTING_LIMIT) {
+ return false; /* to deeply nested */
+ }
+ input_buffer->depth++;
+
+ if (buffer_at_offset(input_buffer)[0] != '[') {
+ /* not an array */
+ goto fail;
+ }
+
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (can_access_at_index(input_buffer, 0) &&
+ (buffer_at_offset(input_buffer)[0] == ']')) {
+ /* empty array */
+ goto success;
+ }
+
+ /* check if we skipped to the end of the buffer */
+ if (cannot_access_at_index(input_buffer, 0)) {
+ input_buffer->offset--;
+ goto fail;
+ }
+
+ /* step back to character in front of the first element */
+ input_buffer->offset--;
+ /* loop through the comma separated array elements */
+ do {
+ /* allocate next item */
+ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+ if (new_item == NULL) {
+ goto fail; /* allocation failure */
+ }
+
+ /* attach next item to list */
+ if (head == NULL) {
+ /* start the linked list */
+ current_item = head = new_item;
+ } else {
+ /* add to the end and advance */
+ current_item->next = new_item;
+ new_item->prev = current_item;
+ current_item = new_item;
+ }
+
+ /* parse next value */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_value(current_item, input_buffer)) {
+ goto fail; /* failed to parse value */
+ }
+ buffer_skip_whitespace(input_buffer);
+ } while (can_access_at_index(input_buffer, 0) &&
+ (buffer_at_offset(input_buffer)[0] == ','));
+
+ if (cannot_access_at_index(input_buffer, 0) ||
+ buffer_at_offset(input_buffer)[0] != ']') {
+ goto fail; /* expected end of array */
+ }
+
+success:
+ input_buffer->depth--;
+
+ if (head != NULL) {
+ head->prev = current_item;
+ }
+
+ item->type = cJSON_Array;
+ item->child = head;
+
+ input_buffer->offset++;
+
+ return true;
+
+fail:
+ if (head != NULL) {
+ cJSON_Delete(head);
+ }
+
+ return false;
+}
+
+/* Render an array to text */
+static cJSON_bool print_array(const cJSON *const item,
+ printbuffer *const output_buffer) {
+ unsigned char *output_pointer = NULL;
+ size_t length = 0;
+ cJSON *current_element = item->child;
+
+ if (output_buffer == NULL) {
+ return false;
+ }
+
+ /* Compose the output array. */
+ /* opening square bracket */
+ output_pointer = ensure(output_buffer, 1);
+ if (output_pointer == NULL) {
+ return false;
+ }
+
+ *output_pointer = '[';
+ output_buffer->offset++;
+ output_buffer->depth++;
+
+ while (current_element != NULL) {
+ if (!print_value(current_element, output_buffer)) {
+ return false;
+ }
+ update_offset(output_buffer);
+ if (current_element->next) {
+ length = (size_t)(output_buffer->format ? 2 : 1);
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL) {
+ return false;
+ }
+ *output_pointer++ = ',';
+ if (output_buffer->format) {
+ *output_pointer++ = ' ';
+ }
+ *output_pointer = '\0';
+ output_buffer->offset += length;
+ }
+ current_element = current_element->next;
+ }
+
+ output_pointer = ensure(output_buffer, 2);
+ if (output_pointer == NULL) {
+ return false;
+ }
+ *output_pointer++ = ']';
+ *output_pointer = '\0';
+ output_buffer->depth--;
+
+ return true;
+}
+
+/* Build an object from the text. */
+static cJSON_bool parse_object(cJSON *const item,
+ parse_buffer *const input_buffer) {
+ cJSON *head = NULL; /* linked list head */
+ cJSON *current_item = NULL;
+
+ if (input_buffer->depth >= CJSON_NESTING_LIMIT) {
+ return false; /* to deeply nested */
+ }
+ input_buffer->depth++;
+
+ if (cannot_access_at_index(input_buffer, 0) ||
+ (buffer_at_offset(input_buffer)[0] != '{')) {
+ goto fail; /* not an object */
+ }
+
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (can_access_at_index(input_buffer, 0) &&
+ (buffer_at_offset(input_buffer)[0] == '}')) {
+ goto success; /* empty object */
+ }
+
+ /* check if we skipped to the end of the buffer */
+ if (cannot_access_at_index(input_buffer, 0)) {
+ input_buffer->offset--;
+ goto fail;
+ }
+
+ /* step back to character in front of the first element */
+ input_buffer->offset--;
+ /* loop through the comma separated array elements */
+ do {
+ /* allocate next item */
+ cJSON *new_item = cJSON_New_Item(&(input_buffer->hooks));
+ if (new_item == NULL) {
+ goto fail; /* allocation failure */
+ }
+
+ /* attach next item to list */
+ if (head == NULL) {
+ /* start the linked list */
+ current_item = head = new_item;
+ } else {
+ /* add to the end and advance */
+ current_item->next = new_item;
+ new_item->prev = current_item;
+ current_item = new_item;
+ }
+
+ /* parse the name of the child */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_string(current_item, input_buffer)) {
+ goto fail; /* failed to parse name */
+ }
+ buffer_skip_whitespace(input_buffer);
+
+ /* swap valuestring and string, because we parsed the name */
+ current_item->string = current_item->valuestring;
+ current_item->valuestring = NULL;
+
+ if (cannot_access_at_index(input_buffer, 0) ||
+ (buffer_at_offset(input_buffer)[0] != ':')) {
+ goto fail; /* invalid object */
+ }
+
+ /* parse the value */
+ input_buffer->offset++;
+ buffer_skip_whitespace(input_buffer);
+ if (!parse_value(current_item, input_buffer)) {
+ goto fail; /* failed to parse value */
+ }
+ buffer_skip_whitespace(input_buffer);
+ } while (can_access_at_index(input_buffer, 0) &&
+ (buffer_at_offset(input_buffer)[0] == ','));
+
+ if (cannot_access_at_index(input_buffer, 0) ||
+ (buffer_at_offset(input_buffer)[0] != '}')) {
+ goto fail; /* expected end of object */
+ }
+
+success:
+ input_buffer->depth--;
+
+ if (head != NULL) {
+ head->prev = current_item;
+ }
+
+ item->type = cJSON_Object;
+ item->child = head;
+
+ input_buffer->offset++;
+ return true;
+
+fail:
+ if (head != NULL) {
+ cJSON_Delete(head);
+ }
+
+ return false;
+}
+
+/* Render an object to text. */
+static cJSON_bool print_object(const cJSON *const item,
+ printbuffer *const output_buffer) {
+ unsigned char *output_pointer = NULL;
+ size_t length = 0;
+ cJSON *current_item = item->child;
+
+ if (output_buffer == NULL) {
+ return false;
+ }
+
+ /* Compose the output: */
+ length = (size_t)(output_buffer->format ? 2 : 1); /* fmt: {\n */
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL) {
+ return false;
+ }
+
+ *output_pointer++ = '{';
+ output_buffer->depth++;
+ if (output_buffer->format) {
+ *output_pointer++ = '\n';
+ }
+ output_buffer->offset += length;
+
+ while (current_item) {
+ if (output_buffer->format) {
+ size_t i;
+ output_pointer =
+ ensure(output_buffer, output_buffer->depth);
+ if (output_pointer == NULL) {
+ return false;
+ }
+ for (i = 0; i < output_buffer->depth; i++) {
+ *output_pointer++ = '\t';
+ }
+ output_buffer->offset += output_buffer->depth;
+ }
+
+ /* print key */
+ if (!print_string_ptr((unsigned char *)current_item->string,
+ output_buffer)) {
+ return false;
+ }
+ update_offset(output_buffer);
+
+ length = (size_t)(output_buffer->format ? 2 : 1);
+ output_pointer = ensure(output_buffer, length);
+ if (output_pointer == NULL) {
+ return false;
+ }
+ *output_pointer++ = ':';
+ if (output_buffer->format) {
+ *output_pointer++ = '\t';
+ }
+ output_buffer->offset += length;
+
+ /* print value */
+ if (!print_value(current_item, output_buffer)) {
+ return false;
+ }
+ update_offset(output_buffer);
+
+ /* print comma if not last */
+ length = ((size_t)(output_buffer->format ? 1 : 0) +
+ (size_t)(current_item->next ? 1 : 0));
+ output_pointer = ensure(output_buffer, length + 1);
+ if (output_pointer == NULL) {
+ return false;
+ }
+ if (current_item->next) {
+ *output_pointer++ = ',';
+ }
+
+ if (output_buffer->format) {
+ *output_pointer++ = '\n';
+ }
+ *output_pointer = '\0';
+ output_buffer->offset += length;
+
+ current_item = current_item->next;
+ }
+
+ output_pointer =
+ ensure(output_buffer,
+ output_buffer->format ? (output_buffer->depth + 1) : 2);
+ if (output_pointer == NULL) {
+ return false;
+ }
+ if (output_buffer->format) {
+ size_t i;
+ for (i = 0; i < (output_buffer->depth - 1); i++) {
+ *output_pointer++ = '\t';
+ }
+ }
+ *output_pointer++ = '}';
+ *output_pointer = '\0';
+ output_buffer->depth--;
+
+ return true;
+}
+
+/* Get Array size/item / object item. */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array) {
+ cJSON *child = NULL;
+ size_t size = 0;
+
+ if (array == NULL) {
+ return 0;
+ }
+
+ child = array->child;
+
+ while (child != NULL) {
+ size++;
+ child = child->next;
+ }
+
+ /* FIXME: Can overflow here. Cannot be fixed without breaking the API */
+
+ return (int)size;
+}
+
+static cJSON *get_array_item(const cJSON *array, size_t index) {
+ cJSON *current_child = NULL;
+
+ if (array == NULL) {
+ return NULL;
+ }
+
+ current_child = array->child;
+ while ((current_child != NULL) && (index > 0)) {
+ index--;
+ current_child = current_child->next;
+ }
+
+ return current_child;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index) {
+ if (index < 0) {
+ return NULL;
+ }
+
+ return get_array_item(array, (size_t)index);
+}
+
+static cJSON *get_object_item(const cJSON *const object,
+ const char *const name,
+ const cJSON_bool case_sensitive) {
+ cJSON *current_element = NULL;
+
+ if ((object == NULL) || (name == NULL)) {
+ return NULL;
+ }
+
+ current_element = object->child;
+ if (case_sensitive) {
+ while ((current_element != NULL) &&
+ (current_element->string != NULL) &&
+ (strcmp(name, current_element->string) != 0)) {
+ current_element = current_element->next;
+ }
+ } else {
+ while ((current_element != NULL) &&
+ (case_insensitive_strcmp(
+ (const unsigned char *)name,
+ (const unsigned char *)(current_element->string)) !=
+ 0)) {
+ current_element = current_element->next;
+ }
+ }
+
+ if ((current_element == NULL) || (current_element->string == NULL)) {
+ return NULL;
+ }
+
+ return current_element;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_GetObjectItem(const cJSON *const object, const char *const string) {
+ return get_object_item(object, string, false);
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_GetObjectItemCaseSensitive(const cJSON *const object,
+ const char *const string) {
+ return get_object_item(object, string, true);
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_HasObjectItem(const cJSON *object, const char *string) {
+ return cJSON_GetObjectItem(object, string) ? 1 : 0;
+}
+
+/* Utility for array list handling. */
+static void suffix_object(cJSON *prev, cJSON *item) {
+ prev->next = item;
+ item->prev = prev;
+}
+
+/* Utility for handling references. */
+static cJSON *create_reference(const cJSON *item,
+ const internal_hooks *const hooks) {
+ cJSON *reference = NULL;
+ if (item == NULL) {
+ return NULL;
+ }
+
+ reference = cJSON_New_Item(hooks);
+ if (reference == NULL) {
+ return NULL;
+ }
+
+ memcpy(reference, item, sizeof(cJSON));
+ reference->string = NULL;
+ reference->type |= cJSON_IsReference;
+ reference->next = reference->prev = NULL;
+ return reference;
+}
+
+static cJSON_bool add_item_to_array(cJSON *array, cJSON *item) {
+ cJSON *child = NULL;
+
+ if ((item == NULL) || (array == NULL) || (array == item)) {
+ return false;
+ }
+
+ child = array->child;
+ /*
+ * To find the last item in array quickly, we use prev in array
+ */
+ if (child == NULL) {
+ /* list is empty, start new one */
+ array->child = item;
+ item->prev = item;
+ item->next = NULL;
+ } else {
+ /* append to the end */
+ if (child->prev) {
+ suffix_object(child->prev, item);
+ array->child->prev = item;
+ }
+ }
+
+ return true;
+}
+
+/* Add item to array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item) {
+ return add_item_to_array(array, item);
+}
+
+#if defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+#pragma GCC diagnostic push
+#endif
+#ifdef __GNUC__
+#pragma GCC diagnostic ignored "-Wcast-qual"
+#endif
+/* helper function to cast away const */
+static void *cast_away_const(const void *string) {
+ return (void *)string;
+}
+#if defined(__clang__) || \
+ (defined(__GNUC__) && \
+ ((__GNUC__ > 4) || ((__GNUC__ == 4) && (__GNUC_MINOR__ > 5))))
+#pragma GCC diagnostic pop
+#endif
+
+
+static cJSON_bool add_item_to_object(cJSON *const object,
+ const char *const string,
+ cJSON *const item,
+ const internal_hooks *const hooks,
+ const cJSON_bool constant_key) {
+ char *new_key = NULL;
+ int new_type = cJSON_Invalid;
+
+ if ((object == NULL) || (string == NULL) || (item == NULL) ||
+ (object == item)) {
+ return false;
+ }
+
+ if (constant_key) {
+ new_key = (char *)cast_away_const(string);
+ new_type = item->type | cJSON_StringIsConst;
+ } else {
+ new_key =
+ (char *)cJSON_strdup((const unsigned char *)string, hooks);
+ if (new_key == NULL) {
+ return false;
+ }
+
+ new_type = item->type & ~cJSON_StringIsConst;
+ }
+
+ if (!(item->type & cJSON_StringIsConst) && (item->string != NULL)) {
+ hooks->deallocate(item->string);
+ }
+
+ item->string = new_key;
+ item->type = new_type;
+
+ return add_item_to_array(object, item);
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item) {
+ return add_item_to_object(object, string, item, &global_hooks, false);
+}
+
+/* Add an item to an object with constant string as key */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item) {
+ return add_item_to_object(object, string, item, &global_hooks, true);
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item) {
+ if (array == NULL) {
+ return false;
+ }
+
+ return add_item_to_array(array, create_reference(item, &global_hooks));
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item) {
+ if ((object == NULL) || (string == NULL)) {
+ return false;
+ }
+
+ return add_item_to_object(object, string,
+ create_reference(item, &global_hooks),
+ &global_hooks, false);
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNullToObject(cJSON *const object, const char *const name) {
+ cJSON *null = cJSON_CreateNull();
+ if (add_item_to_object(object, name, null, &global_hooks, false)) {
+ return null;
+ }
+
+ cJSON_Delete(null);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddTrueToObject(cJSON *const object, const char *const name) {
+ cJSON *true_item = cJSON_CreateTrue();
+ if (add_item_to_object(object, name, true_item, &global_hooks, false)) {
+ return true_item;
+ }
+
+ cJSON_Delete(true_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddFalseToObject(cJSON *const object, const char *const name) {
+ cJSON *false_item = cJSON_CreateFalse();
+ if (add_item_to_object(object, name, false_item, &global_hooks,
+ false)) {
+ return false_item;
+ }
+
+ cJSON_Delete(false_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddBoolToObject(cJSON *const object,
+ const char *const name,
+ const cJSON_bool boolean) {
+ cJSON *bool_item = cJSON_CreateBool(boolean);
+ if (add_item_to_object(object, name, bool_item, &global_hooks, false)) {
+ return bool_item;
+ }
+
+ cJSON_Delete(bool_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNumberToObject(cJSON *const object,
+ const char *const name,
+ const double number) {
+ cJSON *number_item = cJSON_CreateNumber(number);
+ if (add_item_to_object(object, name, number_item, &global_hooks,
+ false)) {
+ return number_item;
+ }
+
+ cJSON_Delete(number_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddStringToObject(cJSON *const object,
+ const char *const name,
+ const char *const string) {
+ cJSON *string_item = cJSON_CreateString(string);
+ if (add_item_to_object(object, name, string_item, &global_hooks,
+ false)) {
+ return string_item;
+ }
+
+ cJSON_Delete(string_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddRawToObject(cJSON *const object,
+ const char *const name,
+ const char *const raw) {
+ cJSON *raw_item = cJSON_CreateRaw(raw);
+ if (add_item_to_object(object, name, raw_item, &global_hooks, false)) {
+ return raw_item;
+ }
+
+ cJSON_Delete(raw_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddObjectToObject(cJSON *const object, const char *const name) {
+ cJSON *object_item = cJSON_CreateObject();
+ if (add_item_to_object(object, name, object_item, &global_hooks,
+ false)) {
+ return object_item;
+ }
+
+ cJSON_Delete(object_item);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_AddArrayToObject(cJSON *const object, const char *const name) {
+ cJSON *array = cJSON_CreateArray();
+ if (add_item_to_object(object, name, array, &global_hooks, false)) {
+ return array;
+ }
+
+ cJSON_Delete(array);
+ return NULL;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item) {
+ if ((parent == NULL) || (item == NULL)) {
+ return NULL;
+ }
+
+ if (item != parent->child) {
+ /* not the first element */
+ item->prev->next = item->next;
+ }
+ if (item->next != NULL) {
+ /* not the last element */
+ item->next->prev = item->prev;
+ }
+
+ if (item == parent->child) {
+ /* first element */
+ parent->child = item->next;
+ } else if (item->next == NULL) {
+ /* last element */
+ parent->child->prev = item->prev;
+ }
+
+ /* make sure the detached item doesn't point anywhere anymore */
+ item->prev = NULL;
+ item->next = NULL;
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which) {
+ if (which < 0) {
+ return NULL;
+ }
+
+ return cJSON_DetachItemViaPointer(array,
+ get_array_item(array, (size_t)which));
+}
+
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which) {
+ cJSON_Delete(cJSON_DetachItemFromArray(array, which));
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObject(cJSON *object, const char *string) {
+ cJSON *to_detach = cJSON_GetObjectItem(object, string);
+
+ return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string) {
+ cJSON *to_detach = cJSON_GetObjectItemCaseSensitive(object, string);
+
+ return cJSON_DetachItemViaPointer(object, to_detach);
+}
+
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObject(cJSON *object, const char *string) {
+ cJSON_Delete(cJSON_DetachItemFromObject(object, string));
+}
+
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string) {
+ cJSON_Delete(cJSON_DetachItemFromObjectCaseSensitive(object, string));
+}
+
+/* Replace array/object items with new ones. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_InsertItemInArray(cJSON *array, int which, cJSON *newitem) {
+ cJSON *after_inserted = NULL;
+
+ if (which < 0) {
+ return false;
+ }
+
+ after_inserted = get_array_item(array, (size_t)which);
+ if (after_inserted == NULL) {
+ return add_item_to_array(array, newitem);
+ }
+
+ newitem->next = after_inserted;
+ newitem->prev = after_inserted->prev;
+ after_inserted->prev = newitem;
+ if (after_inserted == array->child) {
+ array->child = newitem;
+ } else {
+ newitem->prev->next = newitem;
+ }
+ return true;
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemViaPointer(cJSON *const parent,
+ cJSON *const item,
+ cJSON *replacement) {
+ if ((parent == NULL) || (replacement == NULL) || (item == NULL)) {
+ return false;
+ }
+
+ if (replacement == item) {
+ return true;
+ }
+
+ replacement->next = item->next;
+ replacement->prev = item->prev;
+
+ if (replacement->next != NULL) {
+ replacement->next->prev = replacement;
+ }
+ if (parent->child == item) {
+ if (parent->child->prev == parent->child) {
+ replacement->prev = replacement;
+ }
+ parent->child = replacement;
+ } else { /*
+ * To find the last item in array quickly, we use prev in
+ * array. We can't modify the last item's next pointer where
+ * this item was the parent's child
+ */
+ if (replacement->prev != NULL) {
+ replacement->prev->next = replacement;
+ }
+ if (replacement->next == NULL) {
+ parent->child->prev = replacement;
+ }
+ }
+
+ item->next = NULL;
+ item->prev = NULL;
+ cJSON_Delete(item);
+
+ return true;
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem) {
+ if (which < 0) {
+ return false;
+ }
+
+ return cJSON_ReplaceItemViaPointer(
+ array, get_array_item(array, (size_t)which), newitem);
+}
+
+static cJSON_bool replace_item_in_object(cJSON *object,
+ const char *string,
+ cJSON *replacement,
+ cJSON_bool case_sensitive) {
+ if ((replacement == NULL) || (string == NULL)) {
+ return false;
+ }
+
+ /* replace the name in the replacement */
+ if (!(replacement->type & cJSON_StringIsConst) &&
+ (replacement->string != NULL)) {
+ cJSON_free(replacement->string);
+ }
+ replacement->string =
+ (char *)cJSON_strdup((const unsigned char *)string, &global_hooks);
+ replacement->type &= ~cJSON_StringIsConst;
+
+ return cJSON_ReplaceItemViaPointer(
+ object, get_object_item(object, string, case_sensitive),
+ replacement);
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem) {
+ return replace_item_in_object(object, string, newitem, false);
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
+ const char *string,
+ cJSON *newitem) {
+ return replace_item_in_object(object, string, newitem, true);
+}
+
+/* Create basic types: */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_NULL;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_True;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_False;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = boolean ? cJSON_True : cJSON_False;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_Number;
+ item->valuedouble = num;
+
+ /* use saturation in case of overflow */
+ if (num >= INT_MAX) {
+ item->valueint = INT_MAX;
+ } else if (num <= (double)INT_MIN) {
+ item->valueint = INT_MIN;
+ } else {
+ item->valueint = (int)num;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_String;
+ item->valuestring = (char *)cJSON_strdup(
+ (const unsigned char *)string, &global_hooks);
+ if (!item->valuestring) {
+ cJSON_Delete(item);
+ return NULL;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL) {
+ item->type = cJSON_String | cJSON_IsReference;
+ item->valuestring = (char *)cast_away_const(string);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL) {
+ item->type = cJSON_Object | cJSON_IsReference;
+ item->child = (cJSON *)cast_away_const(child);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item != NULL) {
+ item->type = cJSON_Array | cJSON_IsReference;
+ item->child = (cJSON *)cast_away_const(child);
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_Raw;
+ item->valuestring = (char *)cJSON_strdup(
+ (const unsigned char *)raw, &global_hooks);
+ if (!item->valuestring) {
+ cJSON_Delete(item);
+ return NULL;
+ }
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_Array;
+ }
+
+ return item;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void) {
+ cJSON *item = cJSON_New_Item(&global_hooks);
+ if (item) {
+ item->type = cJSON_Object;
+ }
+
+ return item;
+}
+
+/* Create Arrays: */
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count) {
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL)) {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+ for (i = 0; a && (i < (size_t)count); i++) {
+ n = cJSON_CreateNumber(numbers[i]);
+ if (!n) {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if (!i) {
+ a->child = n;
+ } else {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+ a->child->prev = n;
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count) {
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL)) {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for (i = 0; a && (i < (size_t)count); i++) {
+ n = cJSON_CreateNumber((double)numbers[i]);
+ if (!n) {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if (!i) {
+ a->child = n;
+ } else {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+ a->child->prev = n;
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_CreateDoubleArray(const double *numbers, int count) {
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (numbers == NULL)) {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for (i = 0; a && (i < (size_t)count); i++) {
+ n = cJSON_CreateNumber(numbers[i]);
+ if (!n) {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if (!i) {
+ a->child = n;
+ } else {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+ a->child->prev = n;
+
+ return a;
+}
+
+CJSON_PUBLIC(cJSON *)
+cJSON_CreateStringArray(const char *const *strings, int count) {
+ size_t i = 0;
+ cJSON *n = NULL;
+ cJSON *p = NULL;
+ cJSON *a = NULL;
+
+ if ((count < 0) || (strings == NULL)) {
+ return NULL;
+ }
+
+ a = cJSON_CreateArray();
+
+ for (i = 0; a && (i < (size_t)count); i++) {
+ n = cJSON_CreateString(strings[i]);
+ if (!n) {
+ cJSON_Delete(a);
+ return NULL;
+ }
+ if (!i) {
+ a->child = n;
+ } else {
+ suffix_object(p, n);
+ }
+ p = n;
+ }
+ a->child->prev = n;
+
+ return a;
+}
+
+/* Duplication */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse) {
+ cJSON *newitem = NULL;
+ cJSON *child = NULL;
+ cJSON *next = NULL;
+ cJSON *newchild = NULL;
+
+ /* Bail on bad ptr */
+ if (!item) {
+ goto fail;
+ }
+ /* Create new item */
+ newitem = cJSON_New_Item(&global_hooks);
+ if (!newitem) {
+ goto fail;
+ }
+ /* Copy over all vars */
+ newitem->type = item->type & (~cJSON_IsReference);
+ newitem->valueint = item->valueint;
+ newitem->valuedouble = item->valuedouble;
+ if (item->valuestring) {
+ newitem->valuestring = (char *)cJSON_strdup(
+ (unsigned char *)item->valuestring, &global_hooks);
+ if (!newitem->valuestring) {
+ goto fail;
+ }
+ }
+ if (item->string) {
+ newitem->string =
+ (item->type & cJSON_StringIsConst)
+ ? item->string
+ : (char *)cJSON_strdup((unsigned char *)item->string,
+ &global_hooks);
+ if (!newitem->string) {
+ goto fail;
+ }
+ }
+ /* If non-recursive, then we're done! */
+ if (!recurse) {
+ return newitem;
+ }
+ /* Walk the ->next chain for the child. */
+ child = item->child;
+ while (child != NULL) {
+ newchild = cJSON_Duplicate(
+ child, true); /* Duplicate (with recurse) each item in the
+ ->next chain */
+ if (!newchild) {
+ goto fail;
+ }
+ if (next != NULL) {
+ /* If newitem->child already set, then crosswire ->prev
+ * and ->next and move on */
+ next->next = newchild;
+ newchild->prev = next;
+ next = newchild;
+ } else {
+ /* Set newitem->child and move to it */
+ newitem->child = newchild;
+ next = newchild;
+ }
+ child = child->next;
+ }
+ if (newitem && newitem->child) {
+ newitem->child->prev = newchild;
+ }
+
+ return newitem;
+
+fail:
+ if (newitem != NULL) {
+ cJSON_Delete(newitem);
+ }
+
+ return NULL;
+}
+
+static void skip_oneline_comment(char **input) {
+ *input += static_strlen("//");
+
+ for (; (*input)[0] != '\0'; ++(*input)) {
+ if ((*input)[0] == '\n') {
+ *input += static_strlen("\n");
+ return;
+ }
+ }
+}
+
+static void skip_multiline_comment(char **input) {
+ *input += static_strlen("/*");
+
+ for (; (*input)[0] != '\0'; ++(*input)) {
+ if (((*input)[0] == '*') && ((*input)[1] == '/')) {
+ *input += static_strlen("*/");
+ return;
+ }
+ }
+}
+
+static void minify_string(char **input, char **output) {
+ (*output)[0] = (*input)[0];
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+
+
+ for (; (*input)[0] != '\0'; (void)++(*input), ++(*output)) {
+ (*output)[0] = (*input)[0];
+
+ if ((*input)[0] == '\"') {
+ (*output)[0] = '\"';
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+ return;
+ } else if (((*input)[0] == '\\') && ((*input)[1] == '\"')) {
+ (*output)[1] = (*input)[1];
+ *input += static_strlen("\"");
+ *output += static_strlen("\"");
+ }
+ }
+}
+
+CJSON_PUBLIC(void) cJSON_Minify(char *json) {
+ char *into = json;
+
+ if (json == NULL) {
+ return;
+ }
+
+ while (json[0] != '\0') {
+ switch (json[0]) {
+ case ' ':
+ case '\t':
+ case '\r':
+ case '\n':
+ json++;
+ break;
+
+ case '/':
+ if (json[1] == '/') {
+ skip_oneline_comment(&json);
+ } else if (json[1] == '*') {
+ skip_multiline_comment(&json);
+ } else {
+ json++;
+ }
+ break;
+
+ case '\"':
+ minify_string(&json, (char **)&into);
+ break;
+
+ default:
+ into[0] = json[0];
+ json++;
+ into++;
+ }
+ }
+
+ /* and null-terminate. */
+ *into = '\0';
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Invalid;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_False;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xff) == cJSON_True;
+}
+
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & (cJSON_True | cJSON_False)) != 0;
+}
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_NULL;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Number;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_String;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Array;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Object;
+}
+
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item) {
+ if (item == NULL) {
+ return false;
+ }
+
+ return (item->type & 0xFF) == cJSON_Raw;
+}
+
+CJSON_PUBLIC(cJSON_bool)
+cJSON_Compare(const cJSON *const a,
+ const cJSON *const b,
+ const cJSON_bool case_sensitive) {
+ if ((a == NULL) || (b == NULL) ||
+ ((a->type & 0xFF) != (b->type & 0xFF)) || cJSON_IsInvalid(a)) {
+ return false;
+ }
+
+ /* check if type is valid */
+ switch (a->type & 0xFF) {
+ case cJSON_False:
+ case cJSON_True:
+ case cJSON_NULL:
+ case cJSON_Number:
+ case cJSON_String:
+ case cJSON_Raw:
+ case cJSON_Array:
+ case cJSON_Object:
+ break;
+
+ default:
+ return false;
+ }
+
+ /* identical objects are equal */
+ if (a == b) {
+ return true;
+ }
+
+ switch (a->type & 0xFF) {
+ /* in these cases and equal type is enough */
+ case cJSON_False:
+ case cJSON_True:
+ case cJSON_NULL:
+ return true;
+
+ case cJSON_Number:
+ if (compare_double(a->valuedouble, b->valuedouble)) {
+ return true;
+ }
+ return false;
+
+ case cJSON_String:
+ case cJSON_Raw:
+ if ((a->valuestring == NULL) || (b->valuestring == NULL)) {
+ return false;
+ }
+ if (strcmp(a->valuestring, b->valuestring) == 0) {
+ return true;
+ }
+
+ return false;
+
+ case cJSON_Array: {
+ cJSON *a_element = a->child;
+ cJSON *b_element = b->child;
+
+ for (; (a_element != NULL) && (b_element != NULL);) {
+ if (!cJSON_Compare(a_element, b_element,
+ case_sensitive)) {
+ return false;
+ }
+
+ a_element = a_element->next;
+ b_element = b_element->next;
+ }
+
+ /* one of the arrays is longer than the other */
+ if (a_element != b_element) {
+ return false;
+ }
+
+ return true;
+ }
+
+ case cJSON_Object: {
+ cJSON *a_element = NULL;
+ cJSON *b_element = NULL;
+ cJSON_ArrayForEach(a_element, a) {
+ /* TODO This has O(n^2) runtime, which is horrible! */
+ b_element = get_object_item(b, a_element->string,
+ case_sensitive);
+ if (b_element == NULL) {
+ return false;
+ }
+
+ if (!cJSON_Compare(a_element, b_element,
+ case_sensitive)) {
+ return false;
+ }
+ }
+
+ /* doing this twice, once on a and b to prevent true comparison
+ * if a subset of b
+ * TODO: Do this the proper way, this is just a fix for now */
+ cJSON_ArrayForEach(b_element, b) {
+ a_element = get_object_item(a, b_element->string,
+ case_sensitive);
+ if (a_element == NULL) {
+ return false;
+ }
+
+ if (!cJSON_Compare(b_element, a_element,
+ case_sensitive)) {
+ return false;
+ }
+ }
+
+ return true;
+ }
+
+ default:
+ return false;
+ }
+}
+
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size) {
+ return global_hooks.allocate(size);
+}
+
+CJSON_PUBLIC(void) cJSON_free(void *object) {
+ global_hooks.deallocate(object);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h b/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h
new file mode 100644
index 000000000..1b5655c7b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/cJSON.h
@@ -0,0 +1,398 @@
+/*
+ Copyright (c) 2009-2017 Dave Gamble and cJSON contributors
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in
+ all copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ THE SOFTWARE.
+*/
+
+#ifndef cJSON__h
+#define cJSON__h
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#if !defined(__WINDOWS__) && \
+ (defined(WIN32) || defined(WIN64) || defined(_MSC_VER) || defined(_WIN32))
+#define __WINDOWS__
+#endif
+
+#ifdef __WINDOWS__
+
+/* When compiling for windows, we specify a specific calling convention to avoid
+issues where we are being called from a project with a different default calling
+convention. For windows you have 3 define options:
+
+CJSON_HIDE_SYMBOLS - Define this in the case where you don't want to ever
+dllexport symbols CJSON_EXPORT_SYMBOLS - Define this on library build when you
+want to dllexport symbols (default) CJSON_IMPORT_SYMBOLS - Define this if you
+want to dllimport symbol
+
+For *nix builds that support visibility attribute, you can define similar
+behavior by
+
+setting default visibility to hidden by adding
+-fvisibility=hidden (for gcc)
+or
+-xldscope=hidden (for sun cc)
+to CFLAGS
+
+then using the CJSON_API_VISIBILITY flag to "export" the same symbols the way
+CJSON_EXPORT_SYMBOLS does
+
+*/
+
+#define CJSON_CDECL __cdecl
+#define CJSON_STDCALL __stdcall
+
+/* export symbols by default, this is necessary for copy pasting the C and
+ * header file */
+#if !defined(CJSON_HIDE_SYMBOLS) && !defined(CJSON_IMPORT_SYMBOLS) && \
+ !defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_EXPORT_SYMBOLS
+#endif
+
+#if defined(CJSON_HIDE_SYMBOLS)
+#define CJSON_PUBLIC(type) type CJSON_STDCALL
+#elif defined(CJSON_EXPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllexport) type CJSON_STDCALL
+#elif defined(CJSON_IMPORT_SYMBOLS)
+#define CJSON_PUBLIC(type) __declspec(dllimport) type CJSON_STDCALL
+#endif
+#else /* !__WINDOWS__ */
+#define CJSON_CDECL
+#define CJSON_STDCALL
+
+#if (defined(__GNUC__) || defined(__SUNPRO_CC) || defined(__SUNPRO_C)) && \
+ defined(CJSON_API_VISIBILITY)
+#define CJSON_PUBLIC(type) __attribute__((visibility("default"))) type
+#else
+#define CJSON_PUBLIC(type) type
+#endif
+#endif
+
+/* project version */
+#define CJSON_VERSION_MAJOR 1
+#define CJSON_VERSION_MINOR 7
+#define CJSON_VERSION_PATCH 14
+
+#include <stddef.h>
+
+/* cJSON Types: */
+#define cJSON_Invalid (0)
+#define cJSON_False (1 << 0)
+#define cJSON_True (1 << 1)
+#define cJSON_NULL (1 << 2)
+#define cJSON_Number (1 << 3)
+#define cJSON_String (1 << 4)
+#define cJSON_Array (1 << 5)
+#define cJSON_Object (1 << 6)
+#define cJSON_Raw (1 << 7) /* raw json */
+
+#define cJSON_IsReference 256
+#define cJSON_StringIsConst 512
+
+/* The cJSON structure: */
+typedef struct cJSON {
+ /* next/prev allow you to walk array/object chains. Alternatively, use
+ * GetArraySize/GetArrayItem/GetObjectItem */
+ struct cJSON *next;
+ struct cJSON *prev;
+ /* An array or object item will have a child pointer pointing to a chain
+ * of the items in the array/object. */
+ struct cJSON *child;
+
+ /* The type of the item, as above. */
+ int type;
+
+ /* The item's string, if type==cJSON_String and type == cJSON_Raw */
+ char *valuestring;
+ /* writing to valueint is DEPRECATED, use cJSON_SetNumberValue instead
+ */
+ int valueint;
+ /* The item's number, if type==cJSON_Number */
+ double valuedouble;
+
+ /* The item's name string, if this item is the child of, or is in the
+ * list of subitems of an object. */
+ char *string;
+} cJSON;
+
+typedef struct cJSON_Hooks {
+ /* malloc/free are CDECL on Windows regardless of the default calling
+ * convention of the compiler, so ensure the hooks allow passing those
+ * functions directly. */
+ void *(CJSON_CDECL *malloc_fn)(size_t sz);
+ void(CJSON_CDECL *free_fn)(void *ptr);
+} cJSON_Hooks;
+
+typedef int cJSON_bool;
+
+/* Limits how deeply nested arrays/objects can be before cJSON rejects to parse
+ * them. This is to prevent stack overflows. */
+#ifndef CJSON_NESTING_LIMIT
+#define CJSON_NESTING_LIMIT 1000
+#endif
+
+/* returns the version of cJSON as a string */
+CJSON_PUBLIC(const char *) cJSON_Version(void);
+
+/* Supply malloc, realloc and free functions to cJSON */
+CJSON_PUBLIC(void) cJSON_InitHooks(cJSON_Hooks *hooks);
+
+/* Memory Management: the caller is always responsible to free the results from
+ * all variants of cJSON_Parse (with cJSON_Delete) and cJSON_Print (with stdlib
+ * free, cJSON_Hooks.free_fn, or cJSON_free as appropriate). The exception is
+ * cJSON_PrintPreallocated, where the caller has full responsibility of the
+ * buffer. */
+/* Supply a block of JSON, and this returns a cJSON object you can interrogate.
+ */
+CJSON_PUBLIC(cJSON *) cJSON_Parse(const char *value);
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithLength(const char *value, size_t buffer_length);
+/* ParseWithOpts allows you to require (and check) that the JSON is null
+ * terminated, and to retrieve the pointer to the final byte parsed. */
+/* If you supply a ptr in return_parse_end and parsing fails, then
+ * return_parse_end will contain a pointer to the error so will match
+ * cJSON_GetErrorPtr(). */
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithOpts(const char *value,
+ const char **return_parse_end,
+ cJSON_bool require_null_terminated);
+CJSON_PUBLIC(cJSON *)
+cJSON_ParseWithLengthOpts(const char *value,
+ size_t buffer_length,
+ const char **return_parse_end,
+ cJSON_bool require_null_terminated);
+
+/* Render a cJSON entity to text for transfer/storage. */
+CJSON_PUBLIC(char *) cJSON_Print(const cJSON *item);
+/* Render a cJSON entity to text for transfer/storage without any formatting. */
+CJSON_PUBLIC(char *) cJSON_PrintUnformatted(const cJSON *item);
+/* Render a cJSON entity to text using a buffered strategy. prebuffer is a guess
+ * at the final size. guessing well reduces reallocation. fmt=0 gives
+ * unformatted, =1 gives formatted */
+CJSON_PUBLIC(char *)
+cJSON_PrintBuffered(const cJSON *item, int prebuffer, cJSON_bool fmt);
+/* Render a cJSON entity to text using a buffer already allocated in memory with
+ * given length. Returns 1 on success and 0 on failure. */
+/* NOTE: cJSON is not always 100% accurate in estimating how much memory it will
+ * use, so to be safe allocate 5 bytes more than you actually need */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_PrintPreallocated(cJSON *item,
+ char *buffer,
+ const int length,
+ const cJSON_bool format);
+/* Delete a cJSON entity and all subentities. */
+CJSON_PUBLIC(void) cJSON_Delete(cJSON *item);
+
+/* Returns the number of items in an array (or object). */
+CJSON_PUBLIC(int) cJSON_GetArraySize(const cJSON *array);
+/* Retrieve item number "index" from array "array". Returns NULL if
+ * unsuccessful. */
+CJSON_PUBLIC(cJSON *) cJSON_GetArrayItem(const cJSON *array, int index);
+/* Get item "string" from object. Case insensitive. */
+CJSON_PUBLIC(cJSON *)
+cJSON_GetObjectItem(const cJSON *const object, const char *const string);
+CJSON_PUBLIC(cJSON *)
+cJSON_GetObjectItemCaseSensitive(const cJSON *const object,
+ const char *const string);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_HasObjectItem(const cJSON *object, const char *string);
+/* For analysing failed parses. This returns a pointer to the parse error.
+ * You'll probably need to look a few chars back to make sense of it. Defined
+ * when cJSON_Parse() returns 0. 0 when cJSON_Parse() succeeds. */
+CJSON_PUBLIC(const char *) cJSON_GetErrorPtr(void);
+
+/* Check item type and return its value */
+CJSON_PUBLIC(char *) cJSON_GetStringValue(const cJSON *const item);
+CJSON_PUBLIC(double) cJSON_GetNumberValue(const cJSON *const item);
+
+/* These functions check the type of an item */
+CJSON_PUBLIC(cJSON_bool) cJSON_IsInvalid(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsFalse(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsTrue(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsBool(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNull(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsNumber(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsString(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsArray(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsObject(const cJSON *const item);
+CJSON_PUBLIC(cJSON_bool) cJSON_IsRaw(const cJSON *const item);
+
+/* These calls create a cJSON item of the appropriate type. */
+CJSON_PUBLIC(cJSON *) cJSON_CreateNull(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateTrue(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFalse(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateBool(cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *) cJSON_CreateNumber(double num);
+CJSON_PUBLIC(cJSON *) cJSON_CreateString(const char *string);
+/* raw json */
+CJSON_PUBLIC(cJSON *) cJSON_CreateRaw(const char *raw);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArray(void);
+CJSON_PUBLIC(cJSON *) cJSON_CreateObject(void);
+
+/* Create a string where valuestring references a string so
+ * it will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateStringReference(const char *string);
+/* Create an object/array that only references it's elements so
+ * they will not be freed by cJSON_Delete */
+CJSON_PUBLIC(cJSON *) cJSON_CreateObjectReference(const cJSON *child);
+CJSON_PUBLIC(cJSON *) cJSON_CreateArrayReference(const cJSON *child);
+
+/* These utilities create an Array of count items.
+ * The parameter count cannot be greater than the number of elements in the
+ * number array, otherwise array access will be out of bounds.*/
+CJSON_PUBLIC(cJSON *) cJSON_CreateIntArray(const int *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateFloatArray(const float *numbers, int count);
+CJSON_PUBLIC(cJSON *) cJSON_CreateDoubleArray(const double *numbers, int count);
+CJSON_PUBLIC(cJSON *)
+cJSON_CreateStringArray(const char *const *strings, int count);
+
+/* Append item to the specified array/object. */
+CJSON_PUBLIC(cJSON_bool) cJSON_AddItemToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemToObject(cJSON *object, const char *string, cJSON *item);
+/* Use this when string is definitely const (i.e. a literal, or as good as), and
+ * will definitely survive the cJSON object. WARNING: When this function was
+ * used, make sure to always check that (item->type & cJSON_StringIsConst) is
+ * zero before writing to `item->string` */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemToObjectCS(cJSON *object, const char *string, cJSON *item);
+/* Append reference to item to the specified array/object. Use this when you
+ * want to add an existing cJSON to a new cJSON, but don't want to corrupt your
+ * existing cJSON. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemReferenceToArray(cJSON *array, cJSON *item);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_AddItemReferenceToObject(cJSON *object, const char *string, cJSON *item);
+
+/* Remove/Detach items from Arrays/Objects. */
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemViaPointer(cJSON *parent, cJSON *const item);
+CJSON_PUBLIC(cJSON *) cJSON_DetachItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(void) cJSON_DeleteItemFromArray(cJSON *array, int which);
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(cJSON *)
+cJSON_DetachItemFromObjectCaseSensitive(cJSON *object, const char *string);
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObject(cJSON *object, const char *string);
+CJSON_PUBLIC(void)
+cJSON_DeleteItemFromObjectCaseSensitive(cJSON *object, const char *string);
+
+/* Update array items. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_InsertItemInArray(
+ cJSON *array,
+ int which,
+ cJSON *newitem); /* Shifts pre-existing items to the right. */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemViaPointer(cJSON *const parent,
+ cJSON *const item,
+ cJSON *replacement);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInArray(cJSON *array, int which, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObject(cJSON *object, const char *string, cJSON *newitem);
+CJSON_PUBLIC(cJSON_bool)
+cJSON_ReplaceItemInObjectCaseSensitive(cJSON *object,
+ const char *string,
+ cJSON *newitem);
+
+/* Duplicate a cJSON item */
+CJSON_PUBLIC(cJSON *) cJSON_Duplicate(const cJSON *item, cJSON_bool recurse);
+/* Duplicate will create a new, identical cJSON item to the one you pass, in new
+ * memory that will need to be released. With recurse!=0, it will duplicate any
+ * children connected to the item.
+ * The item->next and ->prev pointers are always zero on return from Duplicate.
+ */
+/* Recursively compare two cJSON items for equality. If either a or b is NULL or
+ * invalid, they will be considered unequal.
+ * case_sensitive determines if object keys are treated case sensitive (1) or
+ * case insensitive (0) */
+CJSON_PUBLIC(cJSON_bool)
+cJSON_Compare(const cJSON *const a,
+ const cJSON *const b,
+ const cJSON_bool case_sensitive);
+
+/* Minify a strings, remove blank characters(such as ' ', '\t', '\r', '\n') from
+ * strings. The input pointer json cannot point to a read-only address area,
+ * such as a string constant,
+ * but should point to a readable and writable adress area. */
+CJSON_PUBLIC(void) cJSON_Minify(char *json);
+
+/* Helper functions for creating and adding items to an object at the same time.
+ * They return the added item or NULL on failure. */
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNullToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddTrueToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddFalseToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddBoolToObject(cJSON *const object,
+ const char *const name,
+ const cJSON_bool boolean);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddNumberToObject(cJSON *const object,
+ const char *const name,
+ const double number);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddStringToObject(cJSON *const object,
+ const char *const name,
+ const char *const string);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddRawToObject(cJSON *const object,
+ const char *const name,
+ const char *const raw);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddObjectToObject(cJSON *const object, const char *const name);
+CJSON_PUBLIC(cJSON *)
+cJSON_AddArrayToObject(cJSON *const object, const char *const name);
+
+/* When assigning an integer value, it needs to be propagated to valuedouble
+ * too. */
+#define cJSON_SetIntValue(object, number) \
+ ((object) ? (object)->valueint = (object)->valuedouble = (number) \
+ : (number))
+/* helper for the cJSON_SetNumberValue macro */
+CJSON_PUBLIC(double) cJSON_SetNumberHelper(cJSON *object, double number);
+#define cJSON_SetNumberValue(object, number) \
+ ((object != NULL) ? cJSON_SetNumberHelper(object, (double)number) \
+ : (number))
+/* Change the valuestring of a cJSON_String object, only takes effect when type
+ * of object is cJSON_String */
+CJSON_PUBLIC(char *)
+cJSON_SetValuestring(cJSON *object, const char *valuestring);
+
+/* Macro for iterating over an array or object */
+#define cJSON_ArrayForEach(element, array) \
+ for (element = (array != NULL) ? (array)->child : NULL; \
+ element != NULL; element = element->next)
+
+/* malloc/free objects using the malloc/free functions that have been set with
+ * cJSON_InitHooks */
+CJSON_PUBLIC(void *) cJSON_malloc(size_t size);
+CJSON_PUBLIC(void) cJSON_free(void *object);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c b/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c
new file mode 100644
index 000000000..f1a716dc6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.c
@@ -0,0 +1,430 @@
+/* Copied from http://stackoverflow.com/a/17646775/1821055
+ * with the following modifications:
+ * * remove test code
+ * * global hw/sw initialization to be called once per process
+ * * HW support is determined by configure's WITH_CRC32C_HW
+ * * Windows porting (no hardware support on Windows yet)
+ *
+ * FIXME:
+ * * Hardware support on Windows (MSVC assembler)
+ * * Hardware support on ARM
+ */
+
+/* crc32c.c -- compute CRC-32C using the Intel crc32 instruction
+ * Copyright (C) 2013 Mark Adler
+ * Version 1.1 1 Aug 2013 Mark Adler
+ */
+
+/*
+ This software is provided 'as-is', without any express or implied
+ warranty. In no event will the author be held liable for any damages
+ arising from the use of this software.
+
+ Permission is granted to anyone to use this software for any purpose,
+ including commercial applications, and to alter it and redistribute it
+ freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+ 3. This notice may not be removed or altered from any source distribution.
+
+ Mark Adler
+ madler@alumni.caltech.edu
+ */
+
+/* Use hardware CRC instruction on Intel SSE 4.2 processors. This computes a
+ CRC-32C, *not* the CRC-32 used by Ethernet and zip, gzip, etc. A software
+ version is provided as a fall-back, as well as for speed comparisons. */
+
+/* Version history:
+ 1.0 10 Feb 2013 First version
+ 1.1 1 Aug 2013 Correct comments on why three crc instructions in parallel
+ */
+
+#include "rd.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+
+#include "rdunittest.h"
+#include "rdendian.h"
+
+#include "crc32c.h"
+
+/* CRC-32C (iSCSI) polynomial in reversed bit order. */
+#define POLY 0x82f63b78
+
+/* Table for a quadword-at-a-time software crc. */
+static uint32_t crc32c_table[8][256];
+
+/* Construct table for software CRC-32C calculation. */
+static void crc32c_init_sw(void)
+{
+ uint32_t n, crc, k;
+
+ for (n = 0; n < 256; n++) {
+ crc = n;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc = crc & 1 ? (crc >> 1) ^ POLY : crc >> 1;
+ crc32c_table[0][n] = crc;
+ }
+ for (n = 0; n < 256; n++) {
+ crc = crc32c_table[0][n];
+ for (k = 1; k < 8; k++) {
+ crc = crc32c_table[0][crc & 0xff] ^ (crc >> 8);
+ crc32c_table[k][n] = crc;
+ }
+ }
+}
+
+/* Table-driven software version as a fall-back. This is about 15 times slower
+ than using the hardware instructions. This assumes little-endian integers,
+ as is the case on Intel processors that the assembler code here is for. */
+static uint32_t crc32c_sw(uint32_t crci, const void *buf, size_t len)
+{
+ const unsigned char *next = buf;
+ uint64_t crc;
+
+ crc = crci ^ 0xffffffff;
+ while (len && ((uintptr_t)next & 7) != 0) {
+ crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
+ len--;
+ }
+ while (len >= 8) {
+ /* Alignment-safe */
+ uint64_t ncopy;
+ memcpy(&ncopy, next, sizeof(ncopy));
+ crc ^= le64toh(ncopy);
+ crc = crc32c_table[7][crc & 0xff] ^
+ crc32c_table[6][(crc >> 8) & 0xff] ^
+ crc32c_table[5][(crc >> 16) & 0xff] ^
+ crc32c_table[4][(crc >> 24) & 0xff] ^
+ crc32c_table[3][(crc >> 32) & 0xff] ^
+ crc32c_table[2][(crc >> 40) & 0xff] ^
+ crc32c_table[1][(crc >> 48) & 0xff] ^
+ crc32c_table[0][crc >> 56];
+ next += 8;
+ len -= 8;
+ }
+ while (len) {
+ crc = crc32c_table[0][(crc ^ *next++) & 0xff] ^ (crc >> 8);
+ len--;
+ }
+ return (uint32_t)crc ^ 0xffffffff;
+}
+
+
+#if WITH_CRC32C_HW
+static int sse42; /* Cached SSE42 support */
+
+/* Multiply a matrix times a vector over the Galois field of two elements,
+ GF(2). Each element is a bit in an unsigned integer. mat must have at
+ least as many entries as the power of two for most significant one bit in
+ vec. */
+static RD_INLINE uint32_t gf2_matrix_times(uint32_t *mat, uint32_t vec)
+{
+ uint32_t sum;
+
+ sum = 0;
+ while (vec) {
+ if (vec & 1)
+ sum ^= *mat;
+ vec >>= 1;
+ mat++;
+ }
+ return sum;
+}
+
+/* Multiply a matrix by itself over GF(2). Both mat and square must have 32
+ rows. */
+static RD_INLINE void gf2_matrix_square(uint32_t *square, uint32_t *mat)
+{
+ int n;
+
+ for (n = 0; n < 32; n++)
+ square[n] = gf2_matrix_times(mat, mat[n]);
+}
+
+/* Construct an operator to apply len zeros to a crc. len must be a power of
+ two. If len is not a power of two, then the result is the same as for the
+ largest power of two less than len. The result for len == 0 is the same as
+ for len == 1. A version of this routine could be easily written for any
+ len, but that is not needed for this application. */
+static void crc32c_zeros_op(uint32_t *even, size_t len)
+{
+ int n;
+ uint32_t row;
+ uint32_t odd[32]; /* odd-power-of-two zeros operator */
+
+ /* put operator for one zero bit in odd */
+ odd[0] = POLY; /* CRC-32C polynomial */
+ row = 1;
+ for (n = 1; n < 32; n++) {
+ odd[n] = row;
+ row <<= 1;
+ }
+
+ /* put operator for two zero bits in even */
+ gf2_matrix_square(even, odd);
+
+ /* put operator for four zero bits in odd */
+ gf2_matrix_square(odd, even);
+
+ /* first square will put the operator for one zero byte (eight zero bits),
+ in even -- next square puts operator for two zero bytes in odd, and so
+ on, until len has been rotated down to zero */
+ do {
+ gf2_matrix_square(even, odd);
+ len >>= 1;
+ if (len == 0)
+ return;
+ gf2_matrix_square(odd, even);
+ len >>= 1;
+ } while (len);
+
+ /* answer ended up in odd -- copy to even */
+ for (n = 0; n < 32; n++)
+ even[n] = odd[n];
+}
+
+/* Take a length and build four lookup tables for applying the zeros operator
+ for that length, byte-by-byte on the operand. */
+static void crc32c_zeros(uint32_t zeros[][256], size_t len)
+{
+ uint32_t n;
+ uint32_t op[32];
+
+ crc32c_zeros_op(op, len);
+ for (n = 0; n < 256; n++) {
+ zeros[0][n] = gf2_matrix_times(op, n);
+ zeros[1][n] = gf2_matrix_times(op, n << 8);
+ zeros[2][n] = gf2_matrix_times(op, n << 16);
+ zeros[3][n] = gf2_matrix_times(op, n << 24);
+ }
+}
+
+/* Apply the zeros operator table to crc. */
+static RD_INLINE uint32_t crc32c_shift(uint32_t zeros[][256], uint32_t crc)
+{
+ return zeros[0][crc & 0xff] ^ zeros[1][(crc >> 8) & 0xff] ^
+ zeros[2][(crc >> 16) & 0xff] ^ zeros[3][crc >> 24];
+}
+
+/* Block sizes for three-way parallel crc computation. LONG and SHORT must
+ both be powers of two. The associated string constants must be set
+ accordingly, for use in constructing the assembler instructions. */
+#define LONG 8192
+#define LONGx1 "8192"
+#define LONGx2 "16384"
+#define SHORT 256
+#define SHORTx1 "256"
+#define SHORTx2 "512"
+
+/* Tables for hardware crc that shift a crc by LONG and SHORT zeros. */
+static uint32_t crc32c_long[4][256];
+static uint32_t crc32c_short[4][256];
+
+/* Initialize tables for shifting crcs. */
+static void crc32c_init_hw(void)
+{
+ crc32c_zeros(crc32c_long, LONG);
+ crc32c_zeros(crc32c_short, SHORT);
+}
+
+/* Compute CRC-32C using the Intel hardware instruction. */
+static uint32_t crc32c_hw(uint32_t crc, const void *buf, size_t len)
+{
+ const unsigned char *next = buf;
+ const unsigned char *end;
+ uint64_t crc0, crc1, crc2; /* need to be 64 bits for crc32q */
+
+ /* pre-process the crc */
+ crc0 = crc ^ 0xffffffff;
+
+ /* compute the crc for up to seven leading bytes to bring the data pointer
+ to an eight-byte boundary */
+ while (len && ((uintptr_t)next & 7) != 0) {
+ __asm__("crc32b\t" "(%1), %0"
+ : "=r"(crc0)
+ : "r"(next), "0"(crc0));
+ next++;
+ len--;
+ }
+
+ /* compute the crc on sets of LONG*3 bytes, executing three independent crc
+ instructions, each on LONG bytes -- this is optimized for the Nehalem,
+ Westmere, Sandy Bridge, and Ivy Bridge architectures, which have a
+ throughput of one crc per cycle, but a latency of three cycles */
+ while (len >= LONG*3) {
+ crc1 = 0;
+ crc2 = 0;
+ end = next + LONG;
+ do {
+ __asm__("crc32q\t" "(%3), %0\n\t"
+ "crc32q\t" LONGx1 "(%3), %1\n\t"
+ "crc32q\t" LONGx2 "(%3), %2"
+ : "=r"(crc0), "=r"(crc1), "=r"(crc2)
+ : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
+ next += 8;
+ } while (next < end);
+ crc0 = crc32c_shift(crc32c_long, crc0) ^ crc1;
+ crc0 = crc32c_shift(crc32c_long, crc0) ^ crc2;
+ next += LONG*2;
+ len -= LONG*3;
+ }
+
+ /* do the same thing, but now on SHORT*3 blocks for the remaining data less
+ than a LONG*3 block */
+ while (len >= SHORT*3) {
+ crc1 = 0;
+ crc2 = 0;
+ end = next + SHORT;
+ do {
+ __asm__("crc32q\t" "(%3), %0\n\t"
+ "crc32q\t" SHORTx1 "(%3), %1\n\t"
+ "crc32q\t" SHORTx2 "(%3), %2"
+ : "=r"(crc0), "=r"(crc1), "=r"(crc2)
+ : "r"(next), "0"(crc0), "1"(crc1), "2"(crc2));
+ next += 8;
+ } while (next < end);
+ crc0 = crc32c_shift(crc32c_short, crc0) ^ crc1;
+ crc0 = crc32c_shift(crc32c_short, crc0) ^ crc2;
+ next += SHORT*2;
+ len -= SHORT*3;
+ }
+
+ /* compute the crc on the remaining eight-byte units less than a SHORT*3
+ block */
+ end = next + (len - (len & 7));
+ while (next < end) {
+ __asm__("crc32q\t" "(%1), %0"
+ : "=r"(crc0)
+ : "r"(next), "0"(crc0));
+ next += 8;
+ }
+ len &= 7;
+
+ /* compute the crc for up to seven trailing bytes */
+ while (len) {
+ __asm__("crc32b\t" "(%1), %0"
+ : "=r"(crc0)
+ : "r"(next), "0"(crc0));
+ next++;
+ len--;
+ }
+
+ /* return a post-processed crc */
+ return (uint32_t)crc0 ^ 0xffffffff;
+}
+
+/* Check for SSE 4.2. SSE 4.2 was first supported in Nehalem processors
+ introduced in November, 2008. This does not check for the existence of the
+ cpuid instruction itself, which was introduced on the 486SL in 1992, so this
+ will fail on earlier x86 processors. cpuid works on all Pentium and later
+ processors. */
+#define SSE42(have) \
+ do { \
+ uint32_t eax, ecx; \
+ eax = 1; \
+ __asm__("cpuid" \
+ : "=c"(ecx) \
+ : "a"(eax) \
+ : "%ebx", "%edx"); \
+ (have) = (ecx >> 20) & 1; \
+ } while (0)
+
+#endif /* WITH_CRC32C_HW */
+
+/* Compute a CRC-32C. If the crc32 instruction is available, use the hardware
+ version. Otherwise, use the software version. */
+uint32_t rd_crc32c(uint32_t crc, const void *buf, size_t len)
+{
+#if WITH_CRC32C_HW
+ if (sse42)
+ return crc32c_hw(crc, buf, len);
+ else
+#endif
+ return crc32c_sw(crc, buf, len);
+}
+
+
+
+
+
+
+/**
+ * @brief Populate shift tables once
+ */
+void rd_crc32c_global_init (void) {
+#if WITH_CRC32C_HW
+ SSE42(sse42);
+ if (sse42)
+ crc32c_init_hw();
+ else
+#endif
+ crc32c_init_sw();
+}
+
+int unittest_rd_crc32c (void) {
+ const char *buf =
+" This software is provided 'as-is', without any express or implied\n"
+" warranty. In no event will the author be held liable for any damages\n"
+" arising from the use of this software.\n"
+"\n"
+" Permission is granted to anyone to use this software for any purpose,\n"
+" including commercial applications, and to alter it and redistribute it\n"
+" freely, subject to the following restrictions:\n"
+"\n"
+" 1. The origin of this software must not be misrepresented; you must not\n"
+" claim that you wrote the original software. If you use this software\n"
+" in a product, an acknowledgment in the product documentation would be\n"
+" appreciated but is not required.\n"
+" 2. Altered source versions must be plainly marked as such, and must not be\n"
+" misrepresented as being the original software.\n"
+" 3. This notice may not be removed or altered from any source distribution.";
+ const uint32_t expected_crc = 0x7dcde113;
+ uint32_t crc;
+ const char *how;
+
+#if WITH_CRC32C_HW
+ if (sse42)
+ how = "hardware (SSE42)";
+ else
+ how = "software (SSE42 supported in build but not at runtime)";
+#else
+ how = "software";
+#endif
+ RD_UT_SAY("Calculate CRC32C using %s", how);
+
+ crc = rd_crc32c(0, buf, strlen(buf));
+ RD_UT_ASSERT(crc == expected_crc,
+ "Calculated CRC (%s) 0x%"PRIx32
+ " not matching expected CRC 0x%"PRIx32,
+ how, crc, expected_crc);
+
+ /* Verify software version too, regardless of which
+ * version was used above. */
+ crc32c_init_sw();
+ RD_UT_SAY("Calculate CRC32C using software");
+ crc = crc32c_sw(0, buf, strlen(buf));
+ RD_UT_ASSERT(crc == expected_crc,
+ "Calculated CRC (software) 0x%"PRIx32
+ " not matching expected CRC 0x%"PRIx32,
+ crc, expected_crc);
+
+ RD_UT_PASS();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h b/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h
new file mode 100644
index 000000000..21c7badc7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/crc32c.h
@@ -0,0 +1,38 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RD_CRC32C_H_
+#define _RD_CRC32C_H_
+
+uint32_t rd_crc32c(uint32_t crc, const void *buf, size_t len);
+
+void rd_crc32c_global_init (void);
+
+int unittest_rd_crc32c (void);
+
+#endif /* _RD_CRC32C_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh b/fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh
new file mode 100755
index 000000000..c7023f47a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/generate_proto.sh
@@ -0,0 +1,66 @@
+#!/bin/bash
+#
+# librdkafka - Apache Kafka C library
+#
+# Copyright (c) 2020 Magnus Edenhill
+# All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions are met:
+#
+# 1. Redistributions of source code must retain the above copyright notice,
+# this list of conditions and the following disclaimer.
+# 2. Redistributions in binary form must reproduce the above copyright notice,
+# this list of conditions and the following disclaimer in the documentation
+# and/or other materials provided with the distribution.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+
+# Generate ApiKey / protocol request defines and rd_kafka_ApiKey2str() fields.
+# Cut'n'paste as needed to rdkafka_protocol.h and rdkafka_proto.h
+#
+#
+# Usage:
+# src/generate_proto.sh /path/to/apache-kafka-source
+
+set -e
+
+KAFKA_DIR="$1"
+
+if [[ ! -d $KAFKA_DIR ]]; then
+ echo "Usage: $0 <path-to-kafka-source-directory>"
+ exit 1
+fi
+
+cd "$KAFKA_DIR"
+
+echo "################## Protocol defines (add to rdkafka_protocol.h) ###################"
+grep apiKey clients/src/main/resources/common/message/*Request.json | \
+ awk '{print $3, $1 }' | \
+ sort -n | \
+ sed -E -s 's/ cli.*\///' | \
+ sed -E 's/\.json:$//' | \
+ awk -F, '{print "#define RD_KAFKAP_" $2 " " $1}'
+echo "!! Don't forget to update RD_KAFKAP__NUM !!"
+echo
+echo
+
+echo "################## Protocol names (add to rdkafka_proto.h) ###################"
+grep apiKey clients/src/main/resources/common/message/*Request.json | \
+ awk '{print $3, $1 }' | \
+ sort -n | \
+ sed -E -s 's/ cli.*\///' | \
+ sed -E 's/\.json:$//' | \
+ awk -F, '{print "[RD_KAFKAP_" $2 "] = \"" $2 "\","}'
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png b/fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png
new file mode 100644
index 000000000..8df1eda82
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/librdkafka_cgrp_synch.png
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c b/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c
new file mode 100644
index 000000000..c19b11b7f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/lz4.c
@@ -0,0 +1,2498 @@
+/*
+ LZ4 - Fast LZ compression algorithm
+ Copyright (C) 2011-2020, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
+ * LZ4_HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
+#ifndef LZ4_HEAPMODE
+# define LZ4_HEAPMODE 0
+#endif
+
+/*
+ * LZ4_ACCELERATION_DEFAULT :
+ * Select "acceleration" for LZ4_compress_fast() when parameter value <= 0
+ */
+#define LZ4_ACCELERATION_DEFAULT 1
+/*
+ * LZ4_ACCELERATION_MAX :
+ * Any "acceleration" value higher than this threshold
+ * get treated as LZ4_ACCELERATION_MAX instead (fix #876)
+ */
+#define LZ4_ACCELERATION_MAX 65537
+
+
+/*-************************************
+* CPU Feature Detection
+**************************************/
+/* LZ4_FORCE_MEMORY_ACCESS
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is safe and portable.
+ * Unfortunately, on some target/compiler combinations, the generated assembly is sub-optimal.
+ * The below switch allow to select different access method for improved performance.
+ * Method 0 (default) : use `memcpy()`. Safe and portable.
+ * Method 1 : `__packed` statement. It depends on compiler extension (ie, not portable).
+ * This method is safe if your compiler supports it, and *generally* as fast or faster than `memcpy`.
+ * Method 2 : direct access. This method is portable but violate C standard.
+ * It can generate buggy code on targets which assembly generation depends on alignment.
+ * But in some circumstances, it's the only known way to get the most performance (ie GCC + ARMv6)
+ * See https://fastcompression.blogspot.fr/2015/08/accessing-unaligned-memory.html for details.
+ * Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef LZ4_FORCE_MEMORY_ACCESS /* can be defined externally */
+# if defined(__GNUC__) && \
+ ( defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || defined(__ARM_ARCH_6K__) \
+ || defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__) )
+# define LZ4_FORCE_MEMORY_ACCESS 2
+# elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || defined(__GNUC__)
+# define LZ4_FORCE_MEMORY_ACCESS 1
+# endif
+#endif
+
+/*
+ * LZ4_FORCE_SW_BITCOUNT
+ * Define this parameter if your target system or compiler does not support hardware bit count
+ */
+#if defined(_MSC_VER) && defined(_WIN32_WCE) /* Visual Studio for WinCE doesn't support Hardware bit count */
+# undef LZ4_FORCE_SW_BITCOUNT /* avoid double def */
+# define LZ4_FORCE_SW_BITCOUNT
+#endif
+
+
+
+/*-************************************
+* Dependency
+**************************************/
+/*
+ * LZ4_SRC_INCLUDED:
+ * Amalgamation flag, whether lz4.c is included
+ */
+#ifndef LZ4_SRC_INCLUDED
+# define LZ4_SRC_INCLUDED 1
+#endif
+
+#ifndef LZ4_STATIC_LINKING_ONLY
+#define LZ4_STATIC_LINKING_ONLY
+#endif
+
+#ifndef LZ4_DISABLE_DEPRECATE_WARNINGS
+#define LZ4_DISABLE_DEPRECATE_WARNINGS /* due to LZ4_decompress_safe_withPrefix64k */
+#endif
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4_DISTANCE_MAX */
+#include "lz4.h"
+/* see also "memory routines" below */
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#if defined(_MSC_VER) && (_MSC_VER >= 1400) /* Visual Studio 2005+ */
+# include <intrin.h> /* only present in VS2005+ */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif /* _MSC_VER */
+
+#ifndef LZ4_FORCE_INLINE
+# ifdef _MSC_VER /* Visual Studio */
+# define LZ4_FORCE_INLINE static __forceinline
+# else
+# if defined (__cplusplus) || defined (__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+# ifdef __GNUC__
+# define LZ4_FORCE_INLINE static inline __attribute__((always_inline))
+# else
+# define LZ4_FORCE_INLINE static inline
+# endif
+# else
+# define LZ4_FORCE_INLINE static
+# endif /* __STDC_VERSION__ */
+# endif /* _MSC_VER */
+#endif /* LZ4_FORCE_INLINE */
+
+/* LZ4_FORCE_O2 and LZ4_FORCE_INLINE
+ * gcc on ppc64le generates an unrolled SIMDized loop for LZ4_wildCopy8,
+ * together with a simple 8-byte copy loop as a fall-back path.
+ * However, this optimization hurts the decompression speed by >30%,
+ * because the execution does not go to the optimized loop
+ * for typical compressible data, and all of the preamble checks
+ * before going to the fall-back path become useless overhead.
+ * This optimization happens only with the -O3 flag, and -O2 generates
+ * a simple 8-byte copy loop.
+ * With gcc on ppc64le, all of the LZ4_decompress_* and LZ4_wildCopy8
+ * functions are annotated with __attribute__((optimize("O2"))),
+ * and also LZ4_wildCopy8 is forcibly inlined, so that the O2 attribute
+ * of LZ4_wildCopy8 does not affect the compression speed.
+ */
+#if defined(__PPC64__) && defined(__LITTLE_ENDIAN__) && defined(__GNUC__) && !defined(__clang__)
+# define LZ4_FORCE_O2 __attribute__((optimize("O2")))
+# undef LZ4_FORCE_INLINE
+# define LZ4_FORCE_INLINE static __inline __attribute__((optimize("O2"),always_inline))
+#else
+# define LZ4_FORCE_O2
+#endif
+
+#if (defined(__GNUC__) && (__GNUC__ >= 3)) || (defined(__INTEL_COMPILER) && (__INTEL_COMPILER >= 800)) || defined(__clang__)
+# define expect(expr,value) (__builtin_expect ((expr),(value)) )
+#else
+# define expect(expr,value) (expr)
+#endif
+
+#ifndef likely
+#define likely(expr) expect((expr) != 0, 1)
+#endif
+#ifndef unlikely
+#define unlikely(expr) expect((expr) != 0, 0)
+#endif
+
+/* Should the alignment test prove unreliable, for some reason,
+ * it can be disabled by setting LZ4_ALIGN_TEST to 0 */
+#ifndef LZ4_ALIGN_TEST /* can be externally provided */
+# define LZ4_ALIGN_TEST 1
+#endif
+
+
+/*-************************************
+* Memory routines
+**************************************/
+#ifdef LZ4_USER_MEMORY_FUNCTIONS
+/* memory management functions can be customized by user project.
+ * Below functions must exist somewhere in the Project
+ * and be available at link time */
+void* LZ4_malloc(size_t s);
+void* LZ4_calloc(size_t n, size_t s);
+void LZ4_free(void* p);
+# define ALLOC(s) LZ4_malloc(s)
+# define ALLOC_AND_ZERO(s) LZ4_calloc(1,s)
+# define FREEMEM(p) LZ4_free(p)
+#else
+struct rdkafka_s;
+extern void *rd_kafka_mem_malloc(struct rdkafka_s *rk, size_t s);
+extern void *rd_kafka_mem_calloc(struct rdkafka_s *rk, size_t n, size_t s);
+extern void rd_kafka_mem_free(struct rdkafka_s *rk, void *p);
+# define ALLOC(s) rd_kafka_mem_malloc(NULL, s)
+# define ALLOC_AND_ZERO(s) rd_kafka_mem_calloc(NULL, 1, s)
+# define FREEMEM(p) rd_kafka_mem_free(NULL, p)
+#endif
+
+#include <string.h> /* memset, memcpy */
+#define MEM_INIT(p,v,s) memset((p),(v),(s))
+
+
+/*-************************************
+* Common Constants
+**************************************/
+#define MINMATCH 4
+
+#define WILDCOPYLENGTH 8
+#define LASTLITERALS 5 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MFLIMIT 12 /* see ../doc/lz4_Block_format.md#parsing-restrictions */
+#define MATCH_SAFEGUARD_DISTANCE ((2*WILDCOPYLENGTH) - MINMATCH) /* ensure it's possible to write 2 x wildcopyLength without overflowing output buffer */
+#define FASTLOOP_SAFE_DISTANCE 64
+static const int LZ4_minLength = (MFLIMIT+1);
+
+#define KB *(1 <<10)
+#define MB *(1 <<20)
+#define GB *(1U<<30)
+
+#define LZ4_DISTANCE_ABSOLUTE_MAX 65535
+#if (LZ4_DISTANCE_MAX > LZ4_DISTANCE_ABSOLUTE_MAX) /* max supported by LZ4 format */
+# error "LZ4_DISTANCE_MAX is too big : must be <= 65535"
+#endif
+
+#define ML_BITS 4
+#define ML_MASK ((1U<<ML_BITS)-1)
+#define RUN_BITS (8-ML_BITS)
+#define RUN_MASK ((1U<<RUN_BITS)-1)
+
+
+/*-************************************
+* Error detection
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4_STATIC_ASSERT(c) { enum { LZ4_static_assert = 1/(int)(!!(c)) }; } /* use after variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2)
+# include <stdio.h>
+ static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
+
+static int LZ4_isAligned(const void* ptr, size_t alignment)
+{
+ return ((size_t)ptr & (alignment -1)) == 0;
+}
+
+
+/*-************************************
+* Types
+**************************************/
+#include <limits.h>
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+ typedef uintptr_t uptrval;
+#else
+# if UINT_MAX != 4294967295UL
+# error "LZ4 code (when not C++ or C99) assumes that sizeof(int) == 4"
+# endif
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+ typedef size_t uptrval; /* generally true, except OpenVMS-64 */
+#endif
+
+#if defined(__x86_64__)
+ typedef U64 reg_t; /* 64-bits in x32 mode */
+#else
+ typedef size_t reg_t; /* 32-bits in x32 mode */
+#endif
+
+typedef enum {
+ notLimited = 0,
+ limitedOutput = 1,
+ fillOutput = 2
+} limitedOutput_directive;
+
+
+/*-************************************
+* Reading and writing into memory
+**************************************/
+
+/**
+ * LZ4 relies on memcpy with a constant size being inlined. In freestanding
+ * environments, the compiler can't assume the implementation of memcpy() is
+ * standard compliant, so it can't apply its specialized memcpy() inlining
+ * logic. When possible, use __builtin_memcpy() to tell the compiler to analyze
+ * memcpy() as if it were standard compliant, so it can inline it in freestanding
+ * environments. This is needed when decompressing the Linux Kernel, for example.
+ */
+#if defined(__GNUC__) && (__GNUC__ >= 4)
+#define LZ4_memcpy(dst, src, size) __builtin_memcpy(dst, src, size)
+#else
+#define LZ4_memcpy(dst, src, size) memcpy(dst, src, size)
+#endif
+
+static unsigned LZ4_isLittleEndian(void)
+{
+ const union { U32 u; BYTE c[4]; } one = { 1 }; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+
+
+#if defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==2)
+/* lie to the compiler about data alignment; use with caution */
+
+static U16 LZ4_read16(const void* memPtr) { return *(const U16*) memPtr; }
+static U32 LZ4_read32(const void* memPtr) { return *(const U32*) memPtr; }
+static reg_t LZ4_read_ARCH(const void* memPtr) { return *(const reg_t*) memPtr; }
+
+static void LZ4_write16(void* memPtr, U16 value) { *(U16*)memPtr = value; }
+static void LZ4_write32(void* memPtr, U32 value) { *(U32*)memPtr = value; }
+
+#elif defined(LZ4_FORCE_MEMORY_ACCESS) && (LZ4_FORCE_MEMORY_ACCESS==1)
+
+/* __pack instructions are safer, but compiler specific, hence potentially problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union { U16 u16; U32 u32; reg_t uArch; } __attribute__((packed)) unalign;
+
+static U16 LZ4_read16(const void* ptr) { return ((const unalign*)ptr)->u16; }
+static U32 LZ4_read32(const void* ptr) { return ((const unalign*)ptr)->u32; }
+static reg_t LZ4_read_ARCH(const void* ptr) { return ((const unalign*)ptr)->uArch; }
+
+static void LZ4_write16(void* memPtr, U16 value) { ((unalign*)memPtr)->u16 = value; }
+static void LZ4_write32(void* memPtr, U32 value) { ((unalign*)memPtr)->u32 = value; }
+
+#else /* safe and portable access using memcpy() */
+
+static U16 LZ4_read16(const void* memPtr)
+{
+ U16 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static U32 LZ4_read32(const void* memPtr)
+{
+ U32 val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static reg_t LZ4_read_ARCH(const void* memPtr)
+{
+ reg_t val; LZ4_memcpy(&val, memPtr, sizeof(val)); return val;
+}
+
+static void LZ4_write16(void* memPtr, U16 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+static void LZ4_write32(void* memPtr, U32 value)
+{
+ LZ4_memcpy(memPtr, &value, sizeof(value));
+}
+
+#endif /* LZ4_FORCE_MEMORY_ACCESS */
+
+
+static U16 LZ4_readLE16(const void* memPtr)
+{
+ if (LZ4_isLittleEndian()) {
+ return LZ4_read16(memPtr);
+ } else {
+ const BYTE* p = (const BYTE*)memPtr;
+ return (U16)((U16)p[0] + (p[1]<<8));
+ }
+}
+
+static void LZ4_writeLE16(void* memPtr, U16 value)
+{
+ if (LZ4_isLittleEndian()) {
+ LZ4_write16(memPtr, value);
+ } else {
+ BYTE* p = (BYTE*)memPtr;
+ p[0] = (BYTE) value;
+ p[1] = (BYTE)(value>>8);
+ }
+}
+
+/* customized variant of memcpy, which can overwrite up to 8 bytes beyond dstEnd */
+LZ4_FORCE_INLINE
+void LZ4_wildCopy8(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { LZ4_memcpy(d,s,8); d+=8; s+=8; } while (d<e);
+}
+
+static const unsigned inc32table[8] = {0, 1, 2, 1, 0, 4, 4, 4};
+static const int dec64table[8] = {0, 0, 0, -1, -4, 1, 2, 3};
+
+
+#ifndef LZ4_FAST_DEC_LOOP
+# if defined __i386__ || defined _M_IX86 || defined __x86_64__ || defined _M_X64
+# define LZ4_FAST_DEC_LOOP 1
+# elif defined(__aarch64__) && !defined(__clang__)
+ /* On aarch64, we disable this optimization for clang because on certain
+ * mobile chipsets, performance is reduced with clang. For information
+ * refer to https://github.com/lz4/lz4/pull/707 */
+# define LZ4_FAST_DEC_LOOP 1
+# else
+# define LZ4_FAST_DEC_LOOP 0
+# endif
+#endif
+
+#if LZ4_FAST_DEC_LOOP
+
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset_base(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ assert(srcPtr + offset == dstPtr);
+ if (offset < 8) {
+ LZ4_write32(dstPtr, 0); /* silence an msan warning when offset==0 */
+ dstPtr[0] = srcPtr[0];
+ dstPtr[1] = srcPtr[1];
+ dstPtr[2] = srcPtr[2];
+ dstPtr[3] = srcPtr[3];
+ srcPtr += inc32table[offset];
+ LZ4_memcpy(dstPtr+4, srcPtr, 4);
+ srcPtr -= dec64table[offset];
+ dstPtr += 8;
+ } else {
+ LZ4_memcpy(dstPtr, srcPtr, 8);
+ dstPtr += 8;
+ srcPtr += 8;
+ }
+
+ LZ4_wildCopy8(dstPtr, srcPtr, dstEnd);
+}
+
+/* customized variant of memcpy, which can overwrite up to 32 bytes beyond dstEnd
+ * this version copies two times 16 bytes (instead of one time 32 bytes)
+ * because it must be compatible with offsets >= 16. */
+LZ4_FORCE_INLINE void
+LZ4_wildCopy32(void* dstPtr, const void* srcPtr, void* dstEnd)
+{
+ BYTE* d = (BYTE*)dstPtr;
+ const BYTE* s = (const BYTE*)srcPtr;
+ BYTE* const e = (BYTE*)dstEnd;
+
+ do { LZ4_memcpy(d,s,16); LZ4_memcpy(d+16,s+16,16); d+=32; s+=32; } while (d<e);
+}
+
+/* LZ4_memcpy_using_offset() presumes :
+ * - dstEnd >= dstPtr + MINMATCH
+ * - there is at least 8 bytes available to write after dstEnd */
+LZ4_FORCE_INLINE void
+LZ4_memcpy_using_offset(BYTE* dstPtr, const BYTE* srcPtr, BYTE* dstEnd, const size_t offset)
+{
+ BYTE v[8];
+
+ assert(dstEnd >= dstPtr + MINMATCH);
+
+ switch(offset) {
+ case 1:
+ MEM_INIT(v, *srcPtr, 8);
+ break;
+ case 2:
+ LZ4_memcpy(v, srcPtr, 2);
+ LZ4_memcpy(&v[2], srcPtr, 2);
+ LZ4_memcpy(&v[4], v, 4);
+ break;
+ case 4:
+ LZ4_memcpy(v, srcPtr, 4);
+ LZ4_memcpy(&v[4], srcPtr, 4);
+ break;
+ default:
+ LZ4_memcpy_using_offset_base(dstPtr, srcPtr, dstEnd, offset);
+ return;
+ }
+
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ while (dstPtr < dstEnd) {
+ LZ4_memcpy(dstPtr, v, 8);
+ dstPtr += 8;
+ }
+}
+#endif
+
+
+/*-************************************
+* Common functions
+**************************************/
+static unsigned LZ4_NbCommonBytes (reg_t val)
+{
+ assert(val != 0);
+ if (LZ4_isLittleEndian()) {
+ if (sizeof(val) == 8) {
+# if defined(_MSC_VER) && (_MSC_VER >= 1800) && defined(_M_AMD64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ /* x64 CPUS without BMI support interpret `TZCNT` as `REP BSF` */
+ return (unsigned)_tzcnt_u64(val) >> 3;
+# elif defined(_MSC_VER) && defined(_WIN64) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r = 0;
+ _BitScanForward64(&r, (U64)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctzll((U64)val) >> 3;
+# else
+ const U64 m = 0x0101010101010101ULL;
+ val ^= val - 1;
+ return (unsigned)(((U64)((val & (m - 1)) * m)) >> 56);
+# endif
+ } else /* 32 bits */ {
+# if defined(_MSC_VER) && (_MSC_VER >= 1400) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ unsigned long r;
+ _BitScanForward(&r, (U32)val);
+ return (unsigned)r >> 3;
+# elif (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_ctz((U32)val) >> 3;
+# else
+ const U32 m = 0x01010101;
+ return (unsigned)((((val - 1) ^ val) & (m - 1)) * m) >> 24;
+# endif
+ }
+ } else /* Big Endian CPU */ {
+ if (sizeof(val)==8) {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(__TINYC__) && !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clzll((U64)val) >> 3;
+# else
+#if 1
+ /* this method is probably faster,
+ * but adds a 128 bytes lookup table */
+ static const unsigned char ctz7_tab[128] = {
+ 7, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 6, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 5, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ 4, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0,
+ };
+ U64 const mask = 0x0101010101010101ULL;
+ U64 const t = (((val >> 8) - mask) | val) & mask;
+ return ctz7_tab[(t * 0x0080402010080402ULL) >> 57];
+#else
+ /* this method doesn't consume memory space like the previous one,
+ * but it contains several branches,
+ * that may end up slowing execution */
+ static const U32 by32 = sizeof(val)*4; /* 32 on 64 bits (goal), 16 on 32 bits.
+ Just to avoid some static analyzer complaining about shift by 32 on 32-bits target.
+ Note that this code path is never triggered in 32-bits mode. */
+ unsigned r;
+ if (!(val>>by32)) { r=4; } else { r=0; val>>=by32; }
+ if (!(val>>16)) { r+=2; val>>=8; } else { val>>=24; }
+ r += (!val);
+ return r;
+#endif
+# endif
+ } else /* 32 bits */ {
+# if (defined(__clang__) || (defined(__GNUC__) && ((__GNUC__ > 3) || \
+ ((__GNUC__ == 3) && (__GNUC_MINOR__ >= 4))))) && \
+ !defined(LZ4_FORCE_SW_BITCOUNT)
+ return (unsigned)__builtin_clz((U32)val) >> 3;
+# else
+ val >>= 8;
+ val = ((((val + 0x00FFFF00) | 0x00FFFFFF) + val) |
+ (val + 0x00FF0000)) >> 24;
+ return (unsigned)val ^ 3;
+# endif
+ }
+ }
+}
+
+
+#define STEPSIZE sizeof(reg_t)
+LZ4_FORCE_INLINE
+unsigned LZ4_count(const BYTE* pIn, const BYTE* pMatch, const BYTE* pInLimit)
+{
+ const BYTE* const pStart = pIn;
+
+ if (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) {
+ pIn+=STEPSIZE; pMatch+=STEPSIZE;
+ } else {
+ return LZ4_NbCommonBytes(diff);
+ } }
+
+ while (likely(pIn < pInLimit-(STEPSIZE-1))) {
+ reg_t const diff = LZ4_read_ARCH(pMatch) ^ LZ4_read_ARCH(pIn);
+ if (!diff) { pIn+=STEPSIZE; pMatch+=STEPSIZE; continue; }
+ pIn += LZ4_NbCommonBytes(diff);
+ return (unsigned)(pIn - pStart);
+ }
+
+ if ((STEPSIZE==8) && (pIn<(pInLimit-3)) && (LZ4_read32(pMatch) == LZ4_read32(pIn))) { pIn+=4; pMatch+=4; }
+ if ((pIn<(pInLimit-1)) && (LZ4_read16(pMatch) == LZ4_read16(pIn))) { pIn+=2; pMatch+=2; }
+ if ((pIn<pInLimit) && (*pMatch == *pIn)) pIn++;
+ return (unsigned)(pIn - pStart);
+}
+
+
+#ifndef LZ4_COMMONDEFS_ONLY
+/*-************************************
+* Local Constants
+**************************************/
+static const int LZ4_64Klimit = ((64 KB) + (MFLIMIT-1));
+static const U32 LZ4_skipTrigger = 6; /* Increase this value ==> compression run slower on incompressible data */
+
+
+/*-************************************
+* Local Structures and types
+**************************************/
+typedef enum { clearedTable = 0, byPtr, byU32, byU16 } tableType_t;
+
+/**
+ * This enum distinguishes several different modes of accessing previous
+ * content in the stream.
+ *
+ * - noDict : There is no preceding content.
+ * - withPrefix64k : Table entries up to ctx->dictSize before the current blob
+ * blob being compressed are valid and refer to the preceding
+ * content (of length ctx->dictSize), which is available
+ * contiguously preceding in memory the content currently
+ * being compressed.
+ * - usingExtDict : Like withPrefix64k, but the preceding content is somewhere
+ * else in memory, starting at ctx->dictionary with length
+ * ctx->dictSize.
+ * - usingDictCtx : Like usingExtDict, but everything concerning the preceding
+ * content is in a separate context, pointed to by
+ * ctx->dictCtx. ctx->dictionary, ctx->dictSize, and table
+ * entries in the current context that refer to positions
+ * preceding the beginning of the current compression are
+ * ignored. Instead, ctx->dictCtx->dictionary and ctx->dictCtx
+ * ->dictSize describe the location and size of the preceding
+ * content, and matches are found by looking in the ctx
+ * ->dictCtx->hashTable.
+ */
+typedef enum { noDict = 0, withPrefix64k, usingExtDict, usingDictCtx } dict_directive;
+typedef enum { noDictIssue = 0, dictSmall } dictIssue_directive;
+
+
+/*-************************************
+* Local Utils
+**************************************/
+int LZ4_versionNumber (void) { return LZ4_VERSION_NUMBER; }
+const char* LZ4_versionString(void) { return LZ4_VERSION_STRING; }
+int LZ4_compressBound(int isize) { return LZ4_COMPRESSBOUND(isize); }
+int LZ4_sizeofState(void) { return LZ4_STREAMSIZE; }
+
+
+/*-************************************
+* Internal Definitions used in Tests
+**************************************/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize);
+
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize);
+
+#if defined (__cplusplus)
+}
+#endif
+
+/*-******************************
+* Compression functions
+********************************/
+LZ4_FORCE_INLINE U32 LZ4_hash4(U32 sequence, tableType_t const tableType)
+{
+ if (tableType == byU16)
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-(LZ4_HASHLOG+1)));
+ else
+ return ((sequence * 2654435761U) >> ((MINMATCH*8)-LZ4_HASHLOG));
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hash5(U64 sequence, tableType_t const tableType)
+{
+ const U32 hashLog = (tableType == byU16) ? LZ4_HASHLOG+1 : LZ4_HASHLOG;
+ if (LZ4_isLittleEndian()) {
+ const U64 prime5bytes = 889523592379ULL;
+ return (U32)(((sequence << 24) * prime5bytes) >> (64 - hashLog));
+ } else {
+ const U64 prime8bytes = 11400714785074694791ULL;
+ return (U32)(((sequence >> 24) * prime8bytes) >> (64 - hashLog));
+ }
+}
+
+LZ4_FORCE_INLINE U32 LZ4_hashPosition(const void* const p, tableType_t const tableType)
+{
+ if ((sizeof(reg_t)==8) && (tableType != byU16)) return LZ4_hash5(LZ4_read_ARCH(p), tableType);
+ return LZ4_hash4(LZ4_read32(p), tableType);
+}
+
+LZ4_FORCE_INLINE void LZ4_clearHash(U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = NULL; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = 0; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = 0; return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putIndexOnHash(U32 idx, U32 h, void* tableBase, tableType_t const tableType)
+{
+ switch (tableType)
+ {
+ default: /* fallthrough */
+ case clearedTable: /* fallthrough */
+ case byPtr: { /* illegal! */ assert(0); return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = idx; return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; assert(idx < 65536); hashTable[h] = (U16)idx; return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putPositionOnHash(const BYTE* p, U32 h,
+ void* tableBase, tableType_t const tableType,
+ const BYTE* srcBase)
+{
+ switch (tableType)
+ {
+ case clearedTable: { /* illegal! */ assert(0); return; }
+ case byPtr: { const BYTE** hashTable = (const BYTE**)tableBase; hashTable[h] = p; return; }
+ case byU32: { U32* hashTable = (U32*) tableBase; hashTable[h] = (U32)(p-srcBase); return; }
+ case byU16: { U16* hashTable = (U16*) tableBase; hashTable[h] = (U16)(p-srcBase); return; }
+ }
+}
+
+LZ4_FORCE_INLINE void LZ4_putPosition(const BYTE* p, void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ LZ4_putPositionOnHash(p, h, tableBase, tableType, srcBase);
+}
+
+/* LZ4_getIndexOnHash() :
+ * Index of match position registered in hash table.
+ * hash position must be calculated by using base+index, or dictBase+index.
+ * Assumption 1 : only valid if tableType == byU32 or byU16.
+ * Assumption 2 : h is presumed valid (within limits of hash table)
+ */
+LZ4_FORCE_INLINE U32 LZ4_getIndexOnHash(U32 h, const void* tableBase, tableType_t tableType)
+{
+ LZ4_STATIC_ASSERT(LZ4_MEMORY_USAGE > 2);
+ if (tableType == byU32) {
+ const U32* const hashTable = (const U32*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-2)));
+ return hashTable[h];
+ }
+ if (tableType == byU16) {
+ const U16* const hashTable = (const U16*) tableBase;
+ assert(h < (1U << (LZ4_MEMORY_USAGE-1)));
+ return hashTable[h];
+ }
+ assert(0); return 0; /* forbidden case */
+}
+
+static const BYTE* LZ4_getPositionOnHash(U32 h, const void* tableBase, tableType_t tableType, const BYTE* srcBase)
+{
+ if (tableType == byPtr) { const BYTE* const* hashTable = (const BYTE* const*) tableBase; return hashTable[h]; }
+ if (tableType == byU32) { const U32* const hashTable = (const U32*) tableBase; return hashTable[h] + srcBase; }
+ { const U16* const hashTable = (const U16*) tableBase; return hashTable[h] + srcBase; } /* default, to ensure a return */
+}
+
+LZ4_FORCE_INLINE const BYTE*
+LZ4_getPosition(const BYTE* p,
+ const void* tableBase, tableType_t tableType,
+ const BYTE* srcBase)
+{
+ U32 const h = LZ4_hashPosition(p, tableType);
+ return LZ4_getPositionOnHash(h, tableBase, tableType, srcBase);
+}
+
+LZ4_FORCE_INLINE void
+LZ4_prepareTable(LZ4_stream_t_internal* const cctx,
+ const int inputSize,
+ const tableType_t tableType) {
+ /* If the table hasn't been used, it's guaranteed to be zeroed out, and is
+ * therefore safe to use no matter what mode we're in. Otherwise, we figure
+ * out if it's safe to leave as is or whether it needs to be reset.
+ */
+ if ((tableType_t)cctx->tableType != clearedTable) {
+ assert(inputSize >= 0);
+ if ((tableType_t)cctx->tableType != tableType
+ || ((tableType == byU16) && cctx->currentOffset + (unsigned)inputSize >= 0xFFFFU)
+ || ((tableType == byU32) && cctx->currentOffset > 1 GB)
+ || tableType == byPtr
+ || inputSize >= 4 KB)
+ {
+ DEBUGLOG(4, "LZ4_prepareTable: Resetting table in %p", cctx);
+ MEM_INIT(cctx->hashTable, 0, LZ4_HASHTABLESIZE);
+ cctx->currentOffset = 0;
+ cctx->tableType = (U32)clearedTable;
+ } else {
+ DEBUGLOG(4, "LZ4_prepareTable: Re-use hash table (no reset)");
+ }
+ }
+
+ /* Adding a gap, so all previous entries are > LZ4_DISTANCE_MAX back, is faster
+ * than compressing without a gap. However, compressing with
+ * currentOffset == 0 is faster still, so we preserve that case.
+ */
+ if (cctx->currentOffset != 0 && tableType == byU32) {
+ DEBUGLOG(5, "LZ4_prepareTable: adding 64KB to currentOffset");
+ cctx->currentOffset += 64 KB;
+ }
+
+ /* Finally, clear history */
+ cctx->dictCtx = NULL;
+ cctx->dictionary = NULL;
+ cctx->dictSize = 0;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time.
+ * Presumed already validated at this stage:
+ * - source != NULL
+ * - inputSize > 0
+ */
+LZ4_FORCE_INLINE int LZ4_compress_generic_validated(
+ LZ4_stream_t_internal* const cctx,
+ const char* const source,
+ char* const dest,
+ const int inputSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int maxOutputSize,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ int result;
+ const BYTE* ip = (const BYTE*) source;
+
+ U32 const startIndex = cctx->currentOffset;
+ const BYTE* base = (const BYTE*) source - startIndex;
+ const BYTE* lowLimit;
+
+ const LZ4_stream_t_internal* dictCtx = (const LZ4_stream_t_internal*) cctx->dictCtx;
+ const BYTE* const dictionary =
+ dictDirective == usingDictCtx ? dictCtx->dictionary : cctx->dictionary;
+ const U32 dictSize =
+ dictDirective == usingDictCtx ? dictCtx->dictSize : cctx->dictSize;
+ const U32 dictDelta = (dictDirective == usingDictCtx) ? startIndex - dictCtx->currentOffset : 0; /* make indexes in dictCtx comparable with index in current context */
+
+ int const maybe_extMem = (dictDirective == usingExtDict) || (dictDirective == usingDictCtx);
+ U32 const prefixIdxLimit = startIndex - dictSize; /* used when dictDirective == dictSmall */
+ const BYTE* const dictEnd = dictionary ? dictionary + dictSize : dictionary;
+ const BYTE* anchor = (const BYTE*) source;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimitPlusOne = iend - MFLIMIT + 1;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+
+ /* the dictCtx currentOffset is indexed on the start of the dictionary,
+ * while a dictionary in the current context precedes the currentOffset */
+ const BYTE* dictBase = !dictionary ? NULL : (dictDirective == usingDictCtx) ?
+ dictionary + dictSize - dictCtx->currentOffset :
+ dictionary + dictSize - startIndex;
+
+ BYTE* op = (BYTE*) dest;
+ BYTE* const olimit = op + maxOutputSize;
+
+ U32 offset = 0;
+ U32 forwardH;
+
+ DEBUGLOG(5, "LZ4_compress_generic_validated: srcSize=%i, tableType=%u", inputSize, tableType);
+ assert(ip != NULL);
+ /* If init conditions are not met, we don't have to mark stream
+ * as having dirty context, since no action was taken yet */
+ if (outputDirective == fillOutput && maxOutputSize < 1) { return 0; } /* Impossible to store anything */
+ if ((tableType == byU16) && (inputSize>=LZ4_64Klimit)) { return 0; } /* Size too large (not within 64K limit) */
+ if (tableType==byPtr) assert(dictDirective==noDict); /* only supported use case with byPtr */
+ assert(acceleration >= 1);
+
+ lowLimit = (const BYTE*)source - (dictDirective == withPrefix64k ? dictSize : 0);
+
+ /* Update context state */
+ if (dictDirective == usingDictCtx) {
+ /* Subsequent linked blocks can't use the dictionary. */
+ /* Instead, they use the block we just compressed. */
+ cctx->dictCtx = NULL;
+ cctx->dictSize = (U32)inputSize;
+ } else {
+ cctx->dictSize += (U32)inputSize;
+ }
+ cctx->currentOffset += (U32)inputSize;
+ cctx->tableType = (U32)tableType;
+
+ if (inputSize<LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* First Byte */
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ ip++; forwardH = LZ4_hashPosition(ip, tableType);
+
+ /* Main Loop */
+ for ( ; ; ) {
+ const BYTE* match;
+ BYTE* token;
+ const BYTE* filledIp;
+
+ /* Find a match */
+ if (tableType == byPtr) {
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ match = LZ4_getPositionOnHash(h, cctx->hashTable, tableType, base);
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putPositionOnHash(ip, h, cctx->hashTable, tableType, base);
+
+ } while ( (match+LZ4_DISTANCE_MAX < ip)
+ || (LZ4_read32(match) != LZ4_read32(ip)) );
+
+ } else { /* byU32, byU16 */
+
+ const BYTE* forwardIp = ip;
+ int step = 1;
+ int searchMatchNb = acceleration << LZ4_skipTrigger;
+ do {
+ U32 const h = forwardH;
+ U32 const current = (U32)(forwardIp - base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex <= current);
+ assert(forwardIp - base < (ptrdiff_t)(2 GB - 1));
+ ip = forwardIp;
+ forwardIp += step;
+ step = (searchMatchNb++ >> LZ4_skipTrigger);
+
+ if (unlikely(forwardIp > mflimitPlusOne)) goto _last_literals;
+ assert(ip < mflimitPlusOne);
+
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ assert(tableType == byU32);
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ matchIndex += dictDelta; /* make dictCtx index comparable with current context */
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ DEBUGLOG(7, "extDict candidate: matchIndex=%5u < startIndex=%5u", matchIndex, startIndex);
+ assert(startIndex - matchIndex >= MINMATCH);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source;
+ }
+ } else { /* single continuous memory segment */
+ match = base + matchIndex;
+ }
+ forwardH = LZ4_hashPosition(forwardIp, tableType);
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+
+ DEBUGLOG(7, "candidate at pos=%u (offset=%u \n", matchIndex, current - matchIndex);
+ if ((dictIssue == dictSmall) && (matchIndex < prefixIdxLimit)) { continue; } /* match outside of valid area */
+ assert(matchIndex < current);
+ if ( ((tableType != byU16) || (LZ4_DISTANCE_MAX < LZ4_DISTANCE_ABSOLUTE_MAX))
+ && (matchIndex+LZ4_DISTANCE_MAX < current)) {
+ continue;
+ } /* too far */
+ assert((current - matchIndex) <= LZ4_DISTANCE_MAX); /* match now expected within distance */
+
+ if (LZ4_read32(match) == LZ4_read32(ip)) {
+ if (maybe_extMem) offset = current - matchIndex;
+ break; /* match found */
+ }
+
+ } while(1);
+ }
+
+ /* Catch up */
+ filledIp = ip;
+ while (((ip>anchor) & (match > lowLimit)) && (unlikely(ip[-1]==match[-1]))) { ip--; match--; }
+
+ /* Encode Literals */
+ { unsigned const litLength = (unsigned)(ip - anchor);
+ token = op++;
+ if ((outputDirective == limitedOutput) && /* Check output buffer overflow */
+ (unlikely(op + litLength + (2 + 1 + LASTLITERALS) + (litLength/255) > olimit)) ) {
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ if ((outputDirective == fillOutput) &&
+ (unlikely(op + (litLength+240)/255 /* litlen */ + litLength /* literals */ + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit))) {
+ op--;
+ goto _last_literals;
+ }
+ if (litLength >= RUN_MASK) {
+ int len = (int)(litLength - RUN_MASK);
+ *token = (RUN_MASK<<ML_BITS);
+ for(; len >= 255 ; len-=255) *op++ = 255;
+ *op++ = (BYTE)len;
+ }
+ else *token = (BYTE)(litLength<<ML_BITS);
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op+litLength);
+ op+=litLength;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), litLength, (int)(ip-(const BYTE*)source));
+ }
+
+_next_match:
+ /* at this stage, the following variables must be correctly set :
+ * - ip : at start of LZ operation
+ * - match : at start of previous pattern occurrence; can be within current prefix, or within extDict
+ * - offset : if maybe_ext_memSegment==1 (constant)
+ * - lowLimit : must be == dictionary to mean "match is within extDict"; must be == source otherwise
+ * - token and *token : position to write 4-bits for match length; higher 4-bits for literal length supposed already written
+ */
+
+ if ((outputDirective == fillOutput) &&
+ (op + 2 /* offset */ + 1 /* token */ + MFLIMIT - MINMATCH /* min last literals so last match is <= end - MFLIMIT */ > olimit)) {
+ /* the match was too close to the end, rewind and go to last literals */
+ op = token;
+ goto _last_literals;
+ }
+
+ /* Encode Offset */
+ if (maybe_extMem) { /* static test */
+ DEBUGLOG(6, " with offset=%u (ext if > %i)", offset, (int)(ip - (const BYTE*)source));
+ assert(offset <= LZ4_DISTANCE_MAX && offset > 0);
+ LZ4_writeLE16(op, (U16)offset); op+=2;
+ } else {
+ DEBUGLOG(6, " with offset=%u (same segment)", (U32)(ip - match));
+ assert(ip-match <= LZ4_DISTANCE_MAX);
+ LZ4_writeLE16(op, (U16)(ip - match)); op+=2;
+ }
+
+ /* Encode MatchLength */
+ { unsigned matchCode;
+
+ if ( (dictDirective==usingExtDict || dictDirective==usingDictCtx)
+ && (lowLimit==dictionary) /* match within extDict */ ) {
+ const BYTE* limit = ip + (dictEnd-match);
+ assert(dictEnd > match);
+ if (limit > matchlimit) limit = matchlimit;
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, limit);
+ ip += (size_t)matchCode + MINMATCH;
+ if (ip==limit) {
+ unsigned const more = LZ4_count(limit, (const BYTE*)source, matchlimit);
+ matchCode += more;
+ ip += more;
+ }
+ DEBUGLOG(6, " with matchLength=%u starting in extDict", matchCode+MINMATCH);
+ } else {
+ matchCode = LZ4_count(ip+MINMATCH, match+MINMATCH, matchlimit);
+ ip += (size_t)matchCode + MINMATCH;
+ DEBUGLOG(6, " with matchLength=%u", matchCode+MINMATCH);
+ }
+
+ if ((outputDirective) && /* Check output buffer overflow */
+ (unlikely(op + (1 + LASTLITERALS) + (matchCode+240)/255 > olimit)) ) {
+ if (outputDirective == fillOutput) {
+ /* Match description too long : reduce it */
+ U32 newMatchCode = 15 /* in token */ - 1 /* to avoid needing a zero byte */ + ((U32)(olimit - op) - 1 - LASTLITERALS) * 255;
+ ip -= matchCode - newMatchCode;
+ assert(newMatchCode < matchCode);
+ matchCode = newMatchCode;
+ if (unlikely(ip <= filledIp)) {
+ /* We have already filled up to filledIp so if ip ends up less than filledIp
+ * we have positions in the hash table beyond the current position. This is
+ * a problem if we reuse the hash table. So we have to remove these positions
+ * from the hash table.
+ */
+ const BYTE* ptr;
+ DEBUGLOG(5, "Clearing %u positions", (U32)(filledIp - ip));
+ for (ptr = ip; ptr <= filledIp; ++ptr) {
+ U32 const h = LZ4_hashPosition(ptr, tableType);
+ LZ4_clearHash(h, cctx->hashTable, tableType);
+ }
+ }
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ if (matchCode >= ML_MASK) {
+ *token += ML_MASK;
+ matchCode -= ML_MASK;
+ LZ4_write32(op, 0xFFFFFFFF);
+ while (matchCode >= 4*255) {
+ op+=4;
+ LZ4_write32(op, 0xFFFFFFFF);
+ matchCode -= 4*255;
+ }
+ op += matchCode / 255;
+ *op++ = (BYTE)(matchCode % 255);
+ } else
+ *token += (BYTE)(matchCode);
+ }
+ /* Ensure we have enough space for the last literals. */
+ assert(!(outputDirective == fillOutput && op + 1 + LASTLITERALS > olimit));
+
+ anchor = ip;
+
+ /* Test end of chunk */
+ if (ip >= mflimitPlusOne) break;
+
+ /* Fill table */
+ LZ4_putPosition(ip-2, cctx->hashTable, tableType, base);
+
+ /* Test next position */
+ if (tableType == byPtr) {
+
+ match = LZ4_getPosition(ip, cctx->hashTable, tableType, base);
+ LZ4_putPosition(ip, cctx->hashTable, tableType, base);
+ if ( (match+LZ4_DISTANCE_MAX >= ip)
+ && (LZ4_read32(match) == LZ4_read32(ip)) )
+ { token=op++; *token=0; goto _next_match; }
+
+ } else { /* byU32, byU16 */
+
+ U32 const h = LZ4_hashPosition(ip, tableType);
+ U32 const current = (U32)(ip-base);
+ U32 matchIndex = LZ4_getIndexOnHash(h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if (dictDirective == usingDictCtx) {
+ if (matchIndex < startIndex) {
+ /* there was no match, try the dictionary */
+ matchIndex = LZ4_getIndexOnHash(h, dictCtx->hashTable, byU32);
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ matchIndex += dictDelta;
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else if (dictDirective==usingExtDict) {
+ if (matchIndex < startIndex) {
+ match = dictBase + matchIndex;
+ lowLimit = dictionary; /* required for match length counter */
+ } else {
+ match = base + matchIndex;
+ lowLimit = (const BYTE*)source; /* required for match length counter */
+ }
+ } else { /* single memory segment */
+ match = base + matchIndex;
+ }
+ LZ4_putIndexOnHash(current, h, cctx->hashTable, tableType);
+ assert(matchIndex < current);
+ if ( ((dictIssue==dictSmall) ? (matchIndex >= prefixIdxLimit) : 1)
+ && (((tableType==byU16) && (LZ4_DISTANCE_MAX == LZ4_DISTANCE_ABSOLUTE_MAX)) ? 1 : (matchIndex+LZ4_DISTANCE_MAX >= current))
+ && (LZ4_read32(match) == LZ4_read32(ip)) ) {
+ token=op++;
+ *token=0;
+ if (maybe_extMem) offset = current - matchIndex;
+ DEBUGLOG(6, "seq.start:%i, literals=%u, match.start:%i",
+ (int)(anchor-(const BYTE*)source), 0, (int)(ip-(const BYTE*)source));
+ goto _next_match;
+ }
+ }
+
+ /* Prepare next loop */
+ forwardH = LZ4_hashPosition(++ip, tableType);
+
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRun = (size_t)(iend - anchor);
+ if ( (outputDirective) && /* Check output buffer overflow */
+ (op + lastRun + 1 + ((lastRun+255-RUN_MASK)/255) > olimit)) {
+ if (outputDirective == fillOutput) {
+ /* adapt lastRun to fill 'dst' */
+ assert(olimit >= op);
+ lastRun = (size_t)(olimit-op) - 1/*token*/;
+ lastRun -= (lastRun + 256 - RUN_MASK) / 256; /*additional length tokens*/
+ } else {
+ assert(outputDirective == limitedOutput);
+ return 0; /* cannot compress within `dst` budget. Stored indexes in hash table are nonetheless fine */
+ }
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRun);
+ if (lastRun >= RUN_MASK) {
+ size_t accumulator = lastRun - RUN_MASK;
+ *op++ = RUN_MASK << ML_BITS;
+ for(; accumulator >= 255 ; accumulator-=255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRun<<ML_BITS);
+ }
+ LZ4_memcpy(op, anchor, lastRun);
+ ip = anchor + lastRun;
+ op += lastRun;
+ }
+
+ if (outputDirective == fillOutput) {
+ *inputConsumed = (int) (((const char*)ip)-source);
+ }
+ result = (int)(((char*)op) - dest);
+ assert(result > 0);
+ DEBUGLOG(5, "LZ4_compress_generic: compressed %i bytes into %i bytes", inputSize, result);
+ return result;
+}
+
+/** LZ4_compress_generic() :
+ * inlined, to ensure branches are decided at compilation time;
+ * takes care of src == (NULL, 0)
+ * and forward the rest to LZ4_compress_generic_validated */
+LZ4_FORCE_INLINE int LZ4_compress_generic(
+ LZ4_stream_t_internal* const cctx,
+ const char* const src,
+ char* const dst,
+ const int srcSize,
+ int *inputConsumed, /* only written when outputDirective == fillOutput */
+ const int dstCapacity,
+ const limitedOutput_directive outputDirective,
+ const tableType_t tableType,
+ const dict_directive dictDirective,
+ const dictIssue_directive dictIssue,
+ const int acceleration)
+{
+ DEBUGLOG(5, "LZ4_compress_generic: srcSize=%i, dstCapacity=%i",
+ srcSize, dstCapacity);
+
+ if ((U32)srcSize > (U32)LZ4_MAX_INPUT_SIZE) { return 0; } /* Unsupported srcSize, too large (or negative) */
+ if (srcSize == 0) { /* src == NULL supported if srcSize == 0 */
+ if (outputDirective != notLimited && dstCapacity <= 0) return 0; /* no output, can't write anything */
+ DEBUGLOG(5, "Generating an empty block");
+ assert(outputDirective == notLimited || dstCapacity >= 1);
+ assert(dst != NULL);
+ dst[0] = 0;
+ if (outputDirective == fillOutput) {
+ assert (inputConsumed != NULL);
+ *inputConsumed = 0;
+ }
+ return 1;
+ }
+ assert(src != NULL);
+
+ return LZ4_compress_generic_validated(cctx, src, dst, srcSize,
+ inputConsumed, /* only written into if outputDirective == fillOutput */
+ dstCapacity, outputDirective,
+ tableType, dictDirective, dictIssue, acceleration);
+}
+
+
+int LZ4_compress_fast_extState(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ LZ4_stream_t_internal* const ctx = & LZ4_initStream(state, sizeof(LZ4_stream_t)) -> internal_donotuse;
+ assert(ctx != NULL);
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+ if (maxOutputSize >= LZ4_compressBound(inputSize)) {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (inputSize < LZ4_64Klimit) {
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, byU16, noDict, noDictIssue, acceleration);
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)source > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(ctx, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
+
+/**
+ * LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see comment in lz4.h on LZ4_resetStream_fast() for a definition of
+ * "correctly initialized").
+ */
+int LZ4_compress_fast_extState_fastReset(void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration)
+{
+ LZ4_stream_t_internal* ctx = &((LZ4_stream_t*)state)->internal_donotuse;
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+ if (dstCapacity >= LZ4_compressBound(srcSize)) {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, 0, notLimited, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ if (srcSize < LZ4_64Klimit) {
+ const tableType_t tableType = byU16;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ if (ctx->currentOffset) {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, dictSmall, acceleration);
+ } else {
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ } else {
+ const tableType_t tableType = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ LZ4_prepareTable(ctx, srcSize, tableType);
+ return LZ4_compress_generic(ctx, src, dst, srcSize, NULL, dstCapacity, limitedOutput, tableType, noDict, noDictIssue, acceleration);
+ }
+ }
+}
+
+
+int LZ4_compress_fast(const char* source, char* dest, int inputSize, int maxOutputSize, int acceleration)
+{
+ int result;
+#if (LZ4_HEAPMODE)
+ LZ4_stream_t* ctxPtr = ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ if (ctxPtr == NULL) return 0;
+#else
+ LZ4_stream_t ctx;
+ LZ4_stream_t* const ctxPtr = &ctx;
+#endif
+ result = LZ4_compress_fast_extState(ctxPtr, source, dest, inputSize, maxOutputSize, acceleration);
+
+#if (LZ4_HEAPMODE)
+ FREEMEM(ctxPtr);
+#endif
+ return result;
+}
+
+
+int LZ4_compress_default(const char* src, char* dst, int srcSize, int maxOutputSize)
+{
+ return LZ4_compress_fast(src, dst, srcSize, maxOutputSize, 1);
+}
+
+
+/* Note!: This function leaves the stream in an unclean/broken state!
+ * It is not safe to subsequently use the same state with a _fastReset() or
+ * _continue() call without resetting it. */
+static int LZ4_compress_destSize_extState (LZ4_stream_t* state, const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
+ void* const s = LZ4_initStream(state, sizeof (*state));
+ assert(s != NULL); (void)s;
+
+ if (targetDstSize >= LZ4_compressBound(*srcSizePtr)) { /* compression success is guaranteed */
+ return LZ4_compress_fast_extState(state, src, dst, *srcSizePtr, targetDstSize, 1);
+ } else {
+ if (*srcSizePtr < LZ4_64Klimit) {
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, byU16, noDict, noDictIssue, 1);
+ } else {
+ tableType_t const addrMode = ((sizeof(void*)==4) && ((uptrval)src > LZ4_DISTANCE_MAX)) ? byPtr : byU32;
+ return LZ4_compress_generic(&state->internal_donotuse, src, dst, *srcSizePtr, srcSizePtr, targetDstSize, fillOutput, addrMode, noDict, noDictIssue, 1);
+ } }
+}
+
+
+int LZ4_compress_destSize(const char* src, char* dst, int* srcSizePtr, int targetDstSize)
+{
+#if (LZ4_HEAPMODE)
+ LZ4_stream_t* ctx = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t)); /* malloc-calloc always properly aligned */
+ if (ctx == NULL) return 0;
+#else
+ LZ4_stream_t ctxBody;
+ LZ4_stream_t* ctx = &ctxBody;
+#endif
+
+ int result = LZ4_compress_destSize_extState(ctx, src, dst, srcSizePtr, targetDstSize);
+
+#if (LZ4_HEAPMODE)
+ FREEMEM(ctx);
+#endif
+ return result;
+}
+
+
+
+/*-******************************
+* Streaming functions
+********************************/
+
+LZ4_stream_t* LZ4_createStream(void)
+{
+ LZ4_stream_t* const lz4s = (LZ4_stream_t*)ALLOC(sizeof(LZ4_stream_t));
+ LZ4_STATIC_ASSERT(LZ4_STREAMSIZE >= sizeof(LZ4_stream_t_internal)); /* A compilation error here means LZ4_STREAMSIZE is not large enough */
+ DEBUGLOG(4, "LZ4_createStream %p", lz4s);
+ if (lz4s == NULL) return NULL;
+ LZ4_initStream(lz4s, sizeof(*lz4s));
+ return lz4s;
+}
+
+static size_t LZ4_stream_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_stream_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_stream_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+LZ4_stream_t* LZ4_initStream (void* buffer, size_t size)
+{
+ DEBUGLOG(5, "LZ4_initStream");
+ if (buffer == NULL) { return NULL; }
+ if (size < sizeof(LZ4_stream_t)) { return NULL; }
+ if (!LZ4_isAligned(buffer, LZ4_stream_t_alignment())) return NULL;
+ MEM_INIT(buffer, 0, sizeof(LZ4_stream_t_internal));
+ return (LZ4_stream_t*)buffer;
+}
+
+/* resetStream is now deprecated,
+ * prefer initStream() which is more general */
+void LZ4_resetStream (LZ4_stream_t* LZ4_stream)
+{
+ DEBUGLOG(5, "LZ4_resetStream (ctx:%p)", LZ4_stream);
+ MEM_INIT(LZ4_stream, 0, sizeof(LZ4_stream_t_internal));
+}
+
+void LZ4_resetStream_fast(LZ4_stream_t* ctx) {
+ LZ4_prepareTable(&(ctx->internal_donotuse), 0, byU32);
+}
+
+int LZ4_freeStream (LZ4_stream_t* LZ4_stream)
+{
+ if (!LZ4_stream) return 0; /* support free on NULL */
+ DEBUGLOG(5, "LZ4_freeStream %p", LZ4_stream);
+ FREEMEM(LZ4_stream);
+ return (0);
+}
+
+
+#define HASH_UNIT sizeof(reg_t)
+int LZ4_loadDict (LZ4_stream_t* LZ4_dict, const char* dictionary, int dictSize)
+{
+ LZ4_stream_t_internal* dict = &LZ4_dict->internal_donotuse;
+ const tableType_t tableType = byU32;
+ const BYTE* p = (const BYTE*)dictionary;
+ const BYTE* const dictEnd = p + dictSize;
+ const BYTE* base;
+
+ DEBUGLOG(4, "LZ4_loadDict (%i bytes from %p into %p)", dictSize, dictionary, LZ4_dict);
+
+ /* It's necessary to reset the context,
+ * and not just continue it with prepareTable()
+ * to avoid any risk of generating overflowing matchIndex
+ * when compressing using this dictionary */
+ LZ4_resetStream(LZ4_dict);
+
+ /* We always increment the offset by 64 KB, since, if the dict is longer,
+ * we truncate it to the last 64k, and if it's shorter, we still want to
+ * advance by a whole window length so we can provide the guarantee that
+ * there are only valid offsets in the window, which allows an optimization
+ * in LZ4_compress_fast_continue() where it uses noDictIssue even when the
+ * dictionary isn't a full 64k. */
+ dict->currentOffset += 64 KB;
+
+ if (dictSize < (int)HASH_UNIT) {
+ return 0;
+ }
+
+ if ((dictEnd - p) > 64 KB) p = dictEnd - 64 KB;
+ base = dictEnd - dict->currentOffset;
+ dict->dictionary = p;
+ dict->dictSize = (U32)(dictEnd - p);
+ dict->tableType = (U32)tableType;
+
+ while (p <= dictEnd-HASH_UNIT) {
+ LZ4_putPosition(p, dict->hashTable, tableType, base);
+ p+=3;
+ }
+
+ return (int)dict->dictSize;
+}
+
+void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream) {
+ const LZ4_stream_t_internal* dictCtx = dictionaryStream == NULL ? NULL :
+ &(dictionaryStream->internal_donotuse);
+
+ DEBUGLOG(4, "LZ4_attach_dictionary (%p, %p, size %u)",
+ workingStream, dictionaryStream,
+ dictCtx != NULL ? dictCtx->dictSize : 0);
+
+ if (dictCtx != NULL) {
+ /* If the current offset is zero, we will never look in the
+ * external dictionary context, since there is no value a table
+ * entry can take that indicate a miss. In that case, we need
+ * to bump the offset to something non-zero.
+ */
+ if (workingStream->internal_donotuse.currentOffset == 0) {
+ workingStream->internal_donotuse.currentOffset = 64 KB;
+ }
+
+ /* Don't actually attach an empty dictionary.
+ */
+ if (dictCtx->dictSize == 0) {
+ dictCtx = NULL;
+ }
+ }
+ workingStream->internal_donotuse.dictCtx = dictCtx;
+}
+
+
+static void LZ4_renormDictT(LZ4_stream_t_internal* LZ4_dict, int nextSize)
+{
+ assert(nextSize >= 0);
+ if (LZ4_dict->currentOffset + (unsigned)nextSize > 0x80000000) { /* potential ptrdiff_t overflow (32-bits mode) */
+ /* rescale hash table */
+ U32 const delta = LZ4_dict->currentOffset - 64 KB;
+ const BYTE* dictEnd = LZ4_dict->dictionary + LZ4_dict->dictSize;
+ int i;
+ DEBUGLOG(4, "LZ4_renormDictT");
+ for (i=0; i<LZ4_HASH_SIZE_U32; i++) {
+ if (LZ4_dict->hashTable[i] < delta) LZ4_dict->hashTable[i]=0;
+ else LZ4_dict->hashTable[i] -= delta;
+ }
+ LZ4_dict->currentOffset = 64 KB;
+ if (LZ4_dict->dictSize > 64 KB) LZ4_dict->dictSize = 64 KB;
+ LZ4_dict->dictionary = dictEnd - LZ4_dict->dictSize;
+ }
+}
+
+
+int LZ4_compress_fast_continue (LZ4_stream_t* LZ4_stream,
+ const char* source, char* dest,
+ int inputSize, int maxOutputSize,
+ int acceleration)
+{
+ const tableType_t tableType = byU32;
+ LZ4_stream_t_internal* streamPtr = &LZ4_stream->internal_donotuse;
+ const BYTE* dictEnd = streamPtr->dictionary + streamPtr->dictSize;
+
+ DEBUGLOG(5, "LZ4_compress_fast_continue (inputSize=%i)", inputSize);
+
+ LZ4_renormDictT(streamPtr, inputSize); /* avoid index overflow */
+ if (acceleration < 1) acceleration = LZ4_ACCELERATION_DEFAULT;
+ if (acceleration > LZ4_ACCELERATION_MAX) acceleration = LZ4_ACCELERATION_MAX;
+
+ /* invalidate tiny dictionaries */
+ if ( (streamPtr->dictSize-1 < 4-1) /* intentional underflow */
+ && (dictEnd != (const BYTE*)source) ) {
+ DEBUGLOG(5, "LZ4_compress_fast_continue: dictSize(%u) at addr:%p is too small", streamPtr->dictSize, streamPtr->dictionary);
+ streamPtr->dictSize = 0;
+ streamPtr->dictionary = (const BYTE*)source;
+ dictEnd = (const BYTE*)source;
+ }
+
+ /* Check overlapping input/dictionary space */
+ { const BYTE* sourceEnd = (const BYTE*) source + inputSize;
+ if ((sourceEnd > streamPtr->dictionary) && (sourceEnd < dictEnd)) {
+ streamPtr->dictSize = (U32)(dictEnd - sourceEnd);
+ if (streamPtr->dictSize > 64 KB) streamPtr->dictSize = 64 KB;
+ if (streamPtr->dictSize < 4) streamPtr->dictSize = 0;
+ streamPtr->dictionary = dictEnd - streamPtr->dictSize;
+ }
+ }
+
+ /* prefix mode : source data follows dictionary */
+ if (dictEnd == (const BYTE*)source) {
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset))
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, dictSmall, acceleration);
+ else
+ return LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, withPrefix64k, noDictIssue, acceleration);
+ }
+
+ /* external dictionary mode */
+ { int result;
+ if (streamPtr->dictCtx) {
+ /* We depend here on the fact that dictCtx'es (produced by
+ * LZ4_loadDict) guarantee that their tables contain no references
+ * to offsets between dictCtx->currentOffset - 64 KB and
+ * dictCtx->currentOffset - dictCtx->dictSize. This makes it safe
+ * to use noDictIssue even when the dict isn't a full 64 KB.
+ */
+ if (inputSize > 4 KB) {
+ /* For compressing large blobs, it is faster to pay the setup
+ * cost to copy the dictionary's tables into the active context,
+ * so that the compression loop is only looking into one table.
+ */
+ LZ4_memcpy(streamPtr, streamPtr->dictCtx, sizeof(*streamPtr));
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingDictCtx, noDictIssue, acceleration);
+ }
+ } else {
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, dictSmall, acceleration);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, inputSize, NULL, maxOutputSize, limitedOutput, tableType, usingExtDict, noDictIssue, acceleration);
+ }
+ }
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)inputSize;
+ return result;
+ }
+}
+
+
+/* Hidden debug function, to force-test external dictionary mode */
+int LZ4_compress_forceExtDict (LZ4_stream_t* LZ4_dict, const char* source, char* dest, int srcSize)
+{
+ LZ4_stream_t_internal* streamPtr = &LZ4_dict->internal_donotuse;
+ int result;
+
+ LZ4_renormDictT(streamPtr, srcSize);
+
+ if ((streamPtr->dictSize < 64 KB) && (streamPtr->dictSize < streamPtr->currentOffset)) {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, dictSmall, 1);
+ } else {
+ result = LZ4_compress_generic(streamPtr, source, dest, srcSize, NULL, 0, notLimited, byU32, usingExtDict, noDictIssue, 1);
+ }
+
+ streamPtr->dictionary = (const BYTE*)source;
+ streamPtr->dictSize = (U32)srcSize;
+
+ return result;
+}
+
+
+/*! LZ4_saveDict() :
+ * If previously compressed data block is not guaranteed to remain available at its memory location,
+ * save it into a safer place (char* safeBuffer).
+ * Note : you don't need to call LZ4_loadDict() afterwards,
+ * dictionary is immediately usable, you can therefore call LZ4_compress_fast_continue().
+ * Return : saved dictionary size in bytes (necessarily <= dictSize), or 0 if error.
+ */
+int LZ4_saveDict (LZ4_stream_t* LZ4_dict, char* safeBuffer, int dictSize)
+{
+ LZ4_stream_t_internal* const dict = &LZ4_dict->internal_donotuse;
+ const BYTE* const previousDictEnd = dict->dictionary + dict->dictSize;
+
+ if ((U32)dictSize > 64 KB) { dictSize = 64 KB; } /* useless to define a dictionary > 64 KB */
+ if ((U32)dictSize > dict->dictSize) { dictSize = (int)dict->dictSize; }
+
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, previousDictEnd - dictSize, dictSize);
+
+ dict->dictionary = (const BYTE*)safeBuffer;
+ dict->dictSize = (U32)dictSize;
+
+ return dictSize;
+}
+
+
+
+/*-*******************************
+ * Decompression functions
+ ********************************/
+
+typedef enum { endOnOutputSize = 0, endOnInputSize = 1 } endCondition_directive;
+typedef enum { decode_full_block = 0, partial_decode = 1 } earlyEnd_directive;
+
+#undef MIN
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+
+/* Read the variable-length literal or match length.
+ *
+ * ip - pointer to use as input.
+ * lencheck - end ip. Return an error if ip advances >= lencheck.
+ * loop_check - check ip >= lencheck in body of loop. Returns loop_error if so.
+ * initial_check - check ip >= lencheck before start of loop. Returns initial_error if so.
+ * error (output) - error code. Should be set to 0 before call.
+ */
+typedef enum { loop_error = -2, initial_error = -1, ok = 0 } variable_length_error;
+LZ4_FORCE_INLINE unsigned
+read_variable_length(const BYTE**ip, const BYTE* lencheck,
+ int loop_check, int initial_check,
+ variable_length_error* error)
+{
+ U32 length = 0;
+ U32 s;
+ if (initial_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = initial_error;
+ return length;
+ }
+ do {
+ s = **ip;
+ (*ip)++;
+ length += s;
+ if (loop_check && unlikely((*ip) >= lencheck)) { /* overflow detection */
+ *error = loop_error;
+ return length;
+ }
+ } while (s==255);
+
+ return length;
+}
+
+/*! LZ4_decompress_generic() :
+ * This generic decompression function covers all use cases.
+ * It shall be instantiated several times, using different sets of directives.
+ * Note that it is important for performance that this function really get inlined,
+ * in order to remove useless branches during compilation optimization.
+ */
+LZ4_FORCE_INLINE int
+LZ4_decompress_generic(
+ const char* const src,
+ char* const dst,
+ int srcSize,
+ int outputSize, /* If endOnInput==endOnInputSize, this value is `dstCapacity` */
+
+ endCondition_directive endOnInput, /* endOnOutputSize, endOnInputSize */
+ earlyEnd_directive partialDecoding, /* full, partial */
+ dict_directive dict, /* noDict, withPrefix64k, usingExtDict */
+ const BYTE* const lowPrefix, /* always <= dst, == dst when no prefix */
+ const BYTE* const dictStart, /* only if dict==usingExtDict */
+ const size_t dictSize /* note : = 0 if noDict */
+ )
+{
+ if ((src == NULL) || (outputSize < 0)) { return -1; }
+
+ { const BYTE* ip = (const BYTE*) src;
+ const BYTE* const iend = ip + srcSize;
+
+ BYTE* op = (BYTE*) dst;
+ BYTE* const oend = op + outputSize;
+ BYTE* cpy;
+
+ const BYTE* const dictEnd = (dictStart == NULL) ? NULL : dictStart + dictSize;
+
+ const int safeDecode = (endOnInput==endOnInputSize);
+ const int checkOffset = ((safeDecode) && (dictSize < (int)(64 KB)));
+
+
+ /* Set up the "end" pointers for the shortcut. */
+ const BYTE* const shortiend = iend - (endOnInput ? 14 : 8) /*maxLL*/ - 2 /*offset*/;
+ const BYTE* const shortoend = oend - (endOnInput ? 14 : 8) /*maxLL*/ - 18 /*maxML*/;
+
+ const BYTE* match;
+ size_t offset;
+ unsigned token;
+ size_t length;
+
+
+ DEBUGLOG(5, "LZ4_decompress_generic (srcSize:%i, dstSize:%i)", srcSize, outputSize);
+
+ /* Special cases */
+ assert(lowPrefix <= op);
+ if ((endOnInput) && (unlikely(outputSize==0))) {
+ /* Empty output buffer */
+ if (partialDecoding) return 0;
+ return ((srcSize==1) && (*ip==0)) ? 0 : -1;
+ }
+ if ((!endOnInput) && (unlikely(outputSize==0))) { return (*ip==0 ? 1 : -1); }
+ if ((endOnInput) && unlikely(srcSize==0)) { return -1; }
+
+ /* Currently the fast loop shows a regression on qualcomm arm chips. */
+#if LZ4_FAST_DEC_LOOP
+ if ((oend - op) < FASTLOOP_SAFE_DISTANCE) {
+ DEBUGLOG(6, "skip fast decode loop");
+ goto safe_decode;
+ }
+
+ /* Fast loop : decode sequences as long as output < iend-FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ /* Main fastloop assertion: We can always wildcopy FASTLOOP_SAFE_DISTANCE */
+ assert(oend - op >= FASTLOOP_SAFE_DISTANCE);
+ if (endOnInput) { assert(ip < iend); }
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+
+ /* copy literals */
+ cpy = op+length;
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if (endOnInput) { /* LZ4_decompress_safe() */
+ if ((cpy>oend-32) || (ip+length>iend-32)) { goto safe_literal_copy; }
+ LZ4_wildCopy32(op, ip, cpy);
+ } else { /* LZ4_decompress_fast() */
+ if (cpy>oend-8) { goto safe_literal_copy; }
+ LZ4_wildCopy8(op, ip, cpy); /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
+ * it doesn't know input length, and only relies on end-of-block properties */
+ }
+ ip += length; op = cpy;
+ } else {
+ cpy = op+length;
+ if (endOnInput) { /* LZ4_decompress_safe() */
+ DEBUGLOG(7, "copy %u bytes in a 16-bytes stripe", (unsigned)length);
+ /* We don't need to check oend, since we check it once for each loop below */
+ if (ip > iend-(16 + 1/*max lit + offset + nextToken*/)) { goto safe_literal_copy; }
+ /* Literals can only be 14, but hope compilers optimize if we copy by a register size */
+ LZ4_memcpy(op, ip, 16);
+ } else { /* LZ4_decompress_fast() */
+ /* LZ4_decompress_fast() cannot copy more than 8 bytes at a time :
+ * it doesn't know input length, and relies on end-of-block properties */
+ LZ4_memcpy(op, ip, 8);
+ if (length > 8) { LZ4_memcpy(op+8, ip+8, 8); }
+ }
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+ assert(match <= op);
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ if (length == ML_MASK) {
+ variable_length_error error = ok;
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ if (error != ok) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) { goto _output_error; } /* overflow detection */
+ length += MINMATCH;
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+ } else {
+ length += MINMATCH;
+ if (op + length >= oend - FASTLOOP_SAFE_DISTANCE) {
+ goto safe_match_copy;
+ }
+
+ /* Fastpath check: Avoids a branch in LZ4_wildCopy32 if true */
+ if ((dict == withPrefix64k) || (match >= lowPrefix)) {
+ if (offset >= 8) {
+ assert(match >= lowPrefix);
+ assert(match <= op);
+ assert(op + 18 <= oend);
+
+ LZ4_memcpy(op, match, 8);
+ LZ4_memcpy(op+8, match+8, 8);
+ LZ4_memcpy(op+16, match+16, 2);
+ op += length;
+ continue;
+ } } }
+
+ if (checkOffset && (unlikely(match + dictSize < lowPrefix))) { goto _output_error; } /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) {
+ DEBUGLOG(7, "partialDecoding: dictionary match, close to dstEnd");
+ length = MIN(length, (size_t)(oend-op));
+ } else {
+ goto _output_error; /* end-of-block condition violated */
+ } }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) { *op++ = *copyFrom++; }
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+
+ /* copy match within block */
+ cpy = op + length;
+
+ assert((op <= oend) && (oend-op >= 32));
+ if (unlikely(offset<16)) {
+ LZ4_memcpy_using_offset(op, match, cpy, offset);
+ } else {
+ LZ4_wildCopy32(op, match, cpy);
+ }
+
+ op = cpy; /* wildcopy correction */
+ }
+ safe_decode:
+#endif
+
+ /* Main Loop : decode remaining sequences where output < FASTLOOP_SAFE_DISTANCE */
+ while (1) {
+ token = *ip++;
+ length = token >> ML_BITS; /* literal length */
+
+ assert(!endOnInput || ip <= iend); /* ip < iend before the increment */
+
+ /* A two-stage shortcut for the most common case:
+ * 1) If the literal length is 0..14, and there is enough space,
+ * enter the shortcut and copy 16 bytes on behalf of the literals
+ * (in the fast mode, only 8 bytes can be safely copied this way).
+ * 2) Further if the match length is 4..18, copy 18 bytes in a similar
+ * manner; but we ensure that there's enough space in the output for
+ * those 18 bytes earlier, upon entering the shortcut (in other words,
+ * there is a combined check for both stages).
+ */
+ if ( (endOnInput ? length != RUN_MASK : length <= 8)
+ /* strictly "less than" on input, to re-enter the loop with at least one byte */
+ && likely((endOnInput ? ip < shortiend : 1) & (op <= shortoend)) ) {
+ /* Copy the literals */
+ LZ4_memcpy(op, ip, endOnInput ? 16 : 8);
+ op += length; ip += length;
+
+ /* The second stage: prepare for match copying, decode full info.
+ * If it doesn't work out, the info won't be wasted. */
+ length = token & ML_MASK; /* match length */
+ offset = LZ4_readLE16(ip); ip += 2;
+ match = op - offset;
+ assert(match <= op); /* check overflow */
+
+ /* Do not deal with overlapping matches. */
+ if ( (length != ML_MASK)
+ && (offset >= 8)
+ && (dict==withPrefix64k || match >= lowPrefix) ) {
+ /* Copy the match. */
+ LZ4_memcpy(op + 0, match + 0, 8);
+ LZ4_memcpy(op + 8, match + 8, 8);
+ LZ4_memcpy(op +16, match +16, 2);
+ op += length + MINMATCH;
+ /* Both stages worked, load the next token. */
+ continue;
+ }
+
+ /* The second stage didn't work out, but the info is ready.
+ * Propel it right to the point of match copying. */
+ goto _copy_match;
+ }
+
+ /* decode literal length */
+ if (length == RUN_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend-RUN_MASK, (int)endOnInput, (int)endOnInput, &error);
+ if (error == initial_error) { goto _output_error; }
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)(op))) { goto _output_error; } /* overflow detection */
+ if ((safeDecode) && unlikely((uptrval)(ip)+length<(uptrval)(ip))) { goto _output_error; } /* overflow detection */
+ }
+
+ /* copy literals */
+ cpy = op+length;
+#if LZ4_FAST_DEC_LOOP
+ safe_literal_copy:
+#endif
+ LZ4_STATIC_ASSERT(MFLIMIT >= WILDCOPYLENGTH);
+ if ( ((endOnInput) && ((cpy>oend-MFLIMIT) || (ip+length>iend-(2+1+LASTLITERALS))) )
+ || ((!endOnInput) && (cpy>oend-WILDCOPYLENGTH)) )
+ {
+ /* We've either hit the input parsing restriction or the output parsing restriction.
+ * In the normal scenario, decoding a full block, it must be the last sequence,
+ * otherwise it's an error (invalid input or dimensions).
+ * In partialDecoding scenario, it's necessary to ensure there is no buffer overflow.
+ */
+ if (partialDecoding) {
+ /* Since we are partial decoding we may be in this block because of the output parsing
+ * restriction, which is not valid since the output buffer is allowed to be undersized.
+ */
+ assert(endOnInput);
+ DEBUGLOG(7, "partialDecoding: copying literals, close to input or output end")
+ DEBUGLOG(7, "partialDecoding: literal length = %u", (unsigned)length);
+ DEBUGLOG(7, "partialDecoding: remaining space in dstBuffer : %i", (int)(oend - op));
+ DEBUGLOG(7, "partialDecoding: remaining space in srcBuffer : %i", (int)(iend - ip));
+ /* Finishing in the middle of a literals segment,
+ * due to lack of input.
+ */
+ if (ip+length > iend) {
+ length = (size_t)(iend-ip);
+ cpy = op + length;
+ }
+ /* Finishing in the middle of a literals segment,
+ * due to lack of output space.
+ */
+ if (cpy > oend) {
+ cpy = oend;
+ assert(op<=oend);
+ length = (size_t)(oend-op);
+ }
+ } else {
+ /* We must be on the last sequence because of the parsing limitations so check
+ * that we exactly regenerate the original size (must be exact when !endOnInput).
+ */
+ if ((!endOnInput) && (cpy != oend)) { goto _output_error; }
+ /* We must be on the last sequence (or invalid) because of the parsing limitations
+ * so check that we exactly consume the input and don't overrun the output buffer.
+ */
+ if ((endOnInput) && ((ip+length != iend) || (cpy > oend))) {
+ DEBUGLOG(6, "should have been last run of literals")
+ DEBUGLOG(6, "ip(%p) + length(%i) = %p != iend (%p)", ip, (int)length, ip+length, iend);
+ DEBUGLOG(6, "or cpy(%p) > oend(%p)", cpy, oend);
+ goto _output_error;
+ }
+ }
+ memmove(op, ip, length); /* supports overlapping memory regions; only matters for in-place decompression scenarios */
+ ip += length;
+ op += length;
+ /* Necessarily EOF when !partialDecoding.
+ * When partialDecoding, it is EOF if we've either
+ * filled the output buffer or
+ * can't proceed with reading an offset for following match.
+ */
+ if (!partialDecoding || (cpy == oend) || (ip >= (iend-2))) {
+ break;
+ }
+ } else {
+ LZ4_wildCopy8(op, ip, cpy); /* may overwrite up to WILDCOPYLENGTH beyond cpy */
+ ip += length; op = cpy;
+ }
+
+ /* get offset */
+ offset = LZ4_readLE16(ip); ip+=2;
+ match = op - offset;
+
+ /* get matchlength */
+ length = token & ML_MASK;
+
+ _copy_match:
+ if (length == ML_MASK) {
+ variable_length_error error = ok;
+ length += read_variable_length(&ip, iend - LASTLITERALS + 1, (int)endOnInput, 0, &error);
+ if (error != ok) goto _output_error;
+ if ((safeDecode) && unlikely((uptrval)(op)+length<(uptrval)op)) goto _output_error; /* overflow detection */
+ }
+ length += MINMATCH;
+
+#if LZ4_FAST_DEC_LOOP
+ safe_match_copy:
+#endif
+ if ((checkOffset) && (unlikely(match + dictSize < lowPrefix))) goto _output_error; /* Error : offset outside buffers */
+ /* match starting within external dictionary */
+ if ((dict==usingExtDict) && (match < lowPrefix)) {
+ if (unlikely(op+length > oend-LASTLITERALS)) {
+ if (partialDecoding) length = MIN(length, (size_t)(oend-op));
+ else goto _output_error; /* doesn't respect parsing restriction */
+ }
+
+ if (length <= (size_t)(lowPrefix-match)) {
+ /* match fits entirely within external dictionary : just copy */
+ memmove(op, dictEnd - (lowPrefix-match), length);
+ op += length;
+ } else {
+ /* match stretches into both external dictionary and current block */
+ size_t const copySize = (size_t)(lowPrefix - match);
+ size_t const restSize = length - copySize;
+ LZ4_memcpy(op, dictEnd - copySize, copySize);
+ op += copySize;
+ if (restSize > (size_t)(op - lowPrefix)) { /* overlap copy */
+ BYTE* const endOfMatch = op + restSize;
+ const BYTE* copyFrom = lowPrefix;
+ while (op < endOfMatch) *op++ = *copyFrom++;
+ } else {
+ LZ4_memcpy(op, lowPrefix, restSize);
+ op += restSize;
+ } }
+ continue;
+ }
+ assert(match >= lowPrefix);
+
+ /* copy match within block */
+ cpy = op + length;
+
+ /* partialDecoding : may end anywhere within the block */
+ assert(op<=oend);
+ if (partialDecoding && (cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ size_t const mlen = MIN(length, (size_t)(oend-op));
+ const BYTE* const matchEnd = match + mlen;
+ BYTE* const copyEnd = op + mlen;
+ if (matchEnd > op) { /* overlap copy */
+ while (op < copyEnd) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, mlen);
+ }
+ op = copyEnd;
+ if (op == oend) { break; }
+ continue;
+ }
+
+ if (unlikely(offset<8)) {
+ LZ4_write32(op, 0); /* silence msan warning when offset==0 */
+ op[0] = match[0];
+ op[1] = match[1];
+ op[2] = match[2];
+ op[3] = match[3];
+ match += inc32table[offset];
+ LZ4_memcpy(op+4, match, 4);
+ match -= dec64table[offset];
+ } else {
+ LZ4_memcpy(op, match, 8);
+ match += 8;
+ }
+ op += 8;
+
+ if (unlikely(cpy > oend-MATCH_SAFEGUARD_DISTANCE)) {
+ BYTE* const oCopyLimit = oend - (WILDCOPYLENGTH-1);
+ if (cpy > oend-LASTLITERALS) { goto _output_error; } /* Error : last LASTLITERALS bytes must be literals (uncompressed) */
+ if (op < oCopyLimit) {
+ LZ4_wildCopy8(op, match, oCopyLimit);
+ match += oCopyLimit - op;
+ op = oCopyLimit;
+ }
+ while (op < cpy) { *op++ = *match++; }
+ } else {
+ LZ4_memcpy(op, match, 8);
+ if (length > 16) { LZ4_wildCopy8(op+8, match+8, cpy); }
+ }
+ op = cpy; /* wildcopy correction */
+ }
+
+ /* end of decoding */
+ if (endOnInput) {
+ DEBUGLOG(5, "decoded %i bytes", (int) (((char*)op)-dst));
+ return (int) (((char*)op)-dst); /* Nb of output bytes decoded */
+ } else {
+ return (int) (((const char*)ip)-src); /* Nb of input bytes read */
+ }
+
+ /* Overflow error detected */
+ _output_error:
+ return (int) (-(((const char*)ip)-src))-1;
+ }
+}
+
+
+/*===== Instantiate the API decoding functions. =====*/
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe(const char* source, char* dest, int compressedSize, int maxDecompressedSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxDecompressedSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_partial(const char* src, char* dst, int compressedSize, int targetOutputSize, int dstCapacity)
+{
+ dstCapacity = MIN(targetOutputSize, dstCapacity);
+ return LZ4_decompress_generic(src, dst, compressedSize, dstCapacity,
+ endOnInputSize, partial_decode,
+ noDict, (BYTE*)dst, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_fast(const char* source, char* dest, int originalSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/*===== Instantiate a few more decoding cases, used more than once. =====*/
+
+LZ4_FORCE_O2 /* Exported, an obsolete API function. */
+int LZ4_decompress_safe_withPrefix64k(const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, withPrefix64k,
+ (BYTE*)dest - 64 KB, NULL, 0);
+}
+
+/* Another obsolete API function, paired with the previous one. */
+int LZ4_decompress_fast_withPrefix64k(const char* source, char* dest, int originalSize)
+{
+ /* LZ4_decompress_fast doesn't validate match offsets,
+ * and thus serves well with any prefixed dictionary. */
+ return LZ4_decompress_fast(source, dest, originalSize);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_safe_withSmallPrefix(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, noDict,
+ (BYTE*)dest-prefixSize, NULL, 0);
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_safe_forceExtDict(const char* source, char* dest,
+ int compressedSize, int maxOutputSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_O2
+static int LZ4_decompress_fast_extDict(const char* source, char* dest, int originalSize,
+ const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest, (const BYTE*)dictStart, dictSize);
+}
+
+/* The "double dictionary" mode, for use with e.g. ring buffers: the first part
+ * of the dictionary is passed as prefix, and the second via dictStart + dictSize.
+ * These routines are used only once, in LZ4_decompress_*_continue().
+ */
+LZ4_FORCE_INLINE
+int LZ4_decompress_safe_doubleDict(const char* source, char* dest, int compressedSize, int maxOutputSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, compressedSize, maxOutputSize,
+ endOnInputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+LZ4_FORCE_INLINE
+int LZ4_decompress_fast_doubleDict(const char* source, char* dest, int originalSize,
+ size_t prefixSize, const void* dictStart, size_t dictSize)
+{
+ return LZ4_decompress_generic(source, dest, 0, originalSize,
+ endOnOutputSize, decode_full_block, usingExtDict,
+ (BYTE*)dest-prefixSize, (const BYTE*)dictStart, dictSize);
+}
+
+/*===== streaming decompression functions =====*/
+
+LZ4_streamDecode_t* LZ4_createStreamDecode(void)
+{
+ LZ4_streamDecode_t* lz4s = (LZ4_streamDecode_t*) ALLOC_AND_ZERO(sizeof(LZ4_streamDecode_t));
+ LZ4_STATIC_ASSERT(LZ4_STREAMDECODESIZE >= sizeof(LZ4_streamDecode_t_internal)); /* A compilation error here means LZ4_STREAMDECODESIZE is not large enough */
+ return lz4s;
+}
+
+int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream)
+{
+ if (LZ4_stream == NULL) { return 0; } /* support free on NULL */
+ FREEMEM(LZ4_stream);
+ return 0;
+}
+
+/*! LZ4_setStreamDecode() :
+ * Use this function to instruct where to find the dictionary.
+ * This function is not necessary if previous data is still available where it was decoded.
+ * Loading a size of 0 is allowed (same effect as no dictionary).
+ * @return : 1 if OK, 0 if error
+ */
+int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ lz4sd->prefixSize = (size_t) dictSize;
+ lz4sd->prefixEnd = (const BYTE*) dictionary + dictSize;
+ lz4sd->externalDict = NULL;
+ lz4sd->extDictSize = 0;
+ return 1;
+}
+
+/*! LZ4_decoderRingBufferSize() :
+ * when setting a ring buffer for streaming decompression (optional scenario),
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * Note : in a ring buffer scenario,
+ * blocks are presumed decompressed next to each other.
+ * When not enough space remains for next block (remainingSize < maxBlockSize),
+ * decoding resumes from beginning of ring buffer.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+int LZ4_decoderRingBufferSize(int maxBlockSize)
+{
+ if (maxBlockSize < 0) return 0;
+ if (maxBlockSize > LZ4_MAX_INPUT_SIZE) return 0;
+ if (maxBlockSize < 16) maxBlockSize = 16;
+ return LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize);
+}
+
+/*
+*_continue() :
+ These decoding functions allow decompression of multiple blocks in "streaming" mode.
+ Previously decoded blocks must still be available at the memory position where they were decoded.
+ If it's not possible, save the relevant part of decoded data into a safe buffer,
+ and indicate where it stands using LZ4_setStreamDecode()
+*/
+LZ4_FORCE_O2
+int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int compressedSize, int maxOutputSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+
+ if (lz4sd->prefixSize == 0) {
+ /* The first call, no dictionary yet. */
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ /* They're rolling the current segment. */
+ if (lz4sd->prefixSize >= 64 KB - 1)
+ result = LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ else if (lz4sd->extDictSize == 0)
+ result = LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize);
+ else
+ result = LZ4_decompress_safe_doubleDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += (size_t)result;
+ lz4sd->prefixEnd += result;
+ } else {
+ /* The buffer wraps around, or they're switching to another buffer. */
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)result;
+ lz4sd->prefixEnd = (BYTE*)dest + result;
+ }
+
+ return result;
+}
+
+LZ4_FORCE_O2
+int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* source, char* dest, int originalSize)
+{
+ LZ4_streamDecode_t_internal* lz4sd = &LZ4_streamDecode->internal_donotuse;
+ int result;
+ assert(originalSize >= 0);
+
+ if (lz4sd->prefixSize == 0) {
+ assert(lz4sd->extDictSize == 0);
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ } else if (lz4sd->prefixEnd == (BYTE*)dest) {
+ if (lz4sd->prefixSize >= 64 KB - 1 || lz4sd->extDictSize == 0)
+ result = LZ4_decompress_fast(source, dest, originalSize);
+ else
+ result = LZ4_decompress_fast_doubleDict(source, dest, originalSize,
+ lz4sd->prefixSize, lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize += (size_t)originalSize;
+ lz4sd->prefixEnd += originalSize;
+ } else {
+ lz4sd->extDictSize = lz4sd->prefixSize;
+ lz4sd->externalDict = lz4sd->prefixEnd - lz4sd->extDictSize;
+ result = LZ4_decompress_fast_extDict(source, dest, originalSize,
+ lz4sd->externalDict, lz4sd->extDictSize);
+ if (result <= 0) return result;
+ lz4sd->prefixSize = (size_t)originalSize;
+ lz4sd->prefixEnd = (BYTE*)dest + originalSize;
+ }
+
+ return result;
+}
+
+
+/*
+Advanced decoding functions :
+*_usingDict() :
+ These decoding functions work the same as "_continue" ones,
+ the dictionary must be explicitly provided within parameters
+*/
+
+int LZ4_decompress_safe_usingDict(const char* source, char* dest, int compressedSize, int maxOutputSize, const char* dictStart, int dictSize)
+{
+ if (dictSize==0)
+ return LZ4_decompress_safe(source, dest, compressedSize, maxOutputSize);
+ if (dictStart+dictSize == dest) {
+ if (dictSize >= 64 KB - 1) {
+ return LZ4_decompress_safe_withPrefix64k(source, dest, compressedSize, maxOutputSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_withSmallPrefix(source, dest, compressedSize, maxOutputSize, (size_t)dictSize);
+ }
+ assert(dictSize >= 0);
+ return LZ4_decompress_safe_forceExtDict(source, dest, compressedSize, maxOutputSize, dictStart, (size_t)dictSize);
+}
+
+int LZ4_decompress_fast_usingDict(const char* source, char* dest, int originalSize, const char* dictStart, int dictSize)
+{
+ if (dictSize==0 || dictStart+dictSize == dest)
+ return LZ4_decompress_fast(source, dest, originalSize);
+ assert(dictSize >= 0);
+ return LZ4_decompress_fast_extDict(source, dest, originalSize, dictStart, (size_t)dictSize);
+}
+
+
+/*=*************************************************
+* Obsolete Functions
+***************************************************/
+/* obsolete compression functions */
+int LZ4_compress_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize)
+{
+ return LZ4_compress_default(source, dest, inputSize, maxOutputSize);
+}
+int LZ4_compress(const char* src, char* dest, int srcSize)
+{
+ return LZ4_compress_default(src, dest, srcSize, LZ4_compressBound(srcSize));
+}
+int LZ4_compress_limitedOutput_withState (void* state, const char* src, char* dst, int srcSize, int dstSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, dstSize, 1);
+}
+int LZ4_compress_withState (void* state, const char* src, char* dst, int srcSize)
+{
+ return LZ4_compress_fast_extState(state, src, dst, srcSize, LZ4_compressBound(srcSize), 1);
+}
+int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_stream, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, src, dst, srcSize, dstCapacity, 1);
+}
+int LZ4_compress_continue (LZ4_stream_t* LZ4_stream, const char* source, char* dest, int inputSize)
+{
+ return LZ4_compress_fast_continue(LZ4_stream, source, dest, inputSize, LZ4_compressBound(inputSize), 1);
+}
+
+/*
+These decompression functions are deprecated and should no longer be used.
+They are only provided here for compatibility with older user programs.
+- LZ4_uncompress is totally equivalent to LZ4_decompress_fast
+- LZ4_uncompress_unknownOutputSize is totally equivalent to LZ4_decompress_safe
+*/
+int LZ4_uncompress (const char* source, char* dest, int outputSize)
+{
+ return LZ4_decompress_fast(source, dest, outputSize);
+}
+int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize)
+{
+ return LZ4_decompress_safe(source, dest, isize, maxOutputSize);
+}
+
+/* Obsolete Streaming functions */
+
+int LZ4_sizeofStreamState(void) { return LZ4_STREAMSIZE; }
+
+int LZ4_resetStreamState(void* state, char* inputBuffer)
+{
+ (void)inputBuffer;
+ LZ4_resetStream((LZ4_stream_t*)state);
+ return 0;
+}
+
+void* LZ4_create (char* inputBuffer)
+{
+ (void)inputBuffer;
+ return LZ4_createStream();
+}
+
+char* LZ4_slideInputBuffer (void* state)
+{
+ /* avoid const char * -> char * conversion warning */
+ return (char *)(uptrval)((LZ4_stream_t*)state)->internal_donotuse.dictionary;
+}
+
+#endif /* LZ4_COMMONDEFS_ONLY */ \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h
new file mode 100644
index 000000000..7ab1e483a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/lz4.h
@@ -0,0 +1,774 @@
+/*
+ * LZ4 - Fast LZ compression algorithm
+ * Header File
+ * Copyright (C) 2011-present, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 homepage : http://www.lz4.org
+ - LZ4 source repository : https://github.com/lz4/lz4
+*/
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+#ifndef LZ4_H_2983827168210
+#define LZ4_H_2983827168210
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
+
+/**
+ Introduction
+
+ LZ4 is lossless compression algorithm, providing compression speed >500 MB/s per core,
+ scalable with multi-cores CPU. It features an extremely fast decoder, with speed in
+ multiple GB/s per core, typically reaching RAM speed limits on multi-core systems.
+
+ The LZ4 compression library provides in-memory compression and decompression functions.
+ It gives full buffer control to user.
+ Compression can be done in:
+ - a single step (described as Simple Functions)
+ - a single step, reusing a context (described in Advanced Functions)
+ - unbounded multiple steps (described as Streaming compression)
+
+ lz4.h generates and decodes LZ4-compressed blocks (doc/lz4_Block_format.md).
+ Decompressing such a compressed block requires additional metadata.
+ Exact metadata depends on exact decompression function.
+ For the typical case of LZ4_decompress_safe(),
+ metadata includes block's compressed size, and maximum bound of decompressed size.
+ Each application is free to encode and pass such metadata in whichever way it wants.
+
+ lz4.h only handle blocks, it can not generate Frames.
+
+ Blocks are different from Frames (doc/lz4_Frame_format.md).
+ Frames bundle both blocks and metadata in a specified manner.
+ Embedding metadata is required for compressed data to be self-contained and portable.
+ Frame format is delivered through a companion API, declared in lz4frame.h.
+ The `lz4` CLI can only manage frames.
+*/
+
+/*^***************************************************************
+* Export parameters
+*****************************************************************/
+/*
+* LZ4_DLL_EXPORT :
+* Enable exporting of functions when building a Windows DLL
+* LZ4LIB_VISIBILITY :
+* Control library symbols visibility.
+*/
+#ifndef LZ4LIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4LIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4LIB_VISIBILITY
+# endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
+# define LZ4LIB_API __declspec(dllexport) LZ4LIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
+# define LZ4LIB_API __declspec(dllimport) LZ4LIB_VISIBILITY /* It isn't required but allows to generate better code, saving a function pointer load from the IAT and an indirect jump.*/
+#else
+# define LZ4LIB_API LZ4LIB_VISIBILITY
+#endif
+
+/*------ Version ------*/
+#define LZ4_VERSION_MAJOR 1 /* for breaking interface changes */
+#define LZ4_VERSION_MINOR 9 /* for new (non-breaking) interface capabilities */
+#define LZ4_VERSION_RELEASE 3 /* for tweaks, bug-fixes, or development */
+
+#define LZ4_VERSION_NUMBER (LZ4_VERSION_MAJOR *100*100 + LZ4_VERSION_MINOR *100 + LZ4_VERSION_RELEASE)
+
+#define LZ4_LIB_VERSION LZ4_VERSION_MAJOR.LZ4_VERSION_MINOR.LZ4_VERSION_RELEASE
+#define LZ4_QUOTE(str) #str
+#define LZ4_EXPAND_AND_QUOTE(str) LZ4_QUOTE(str)
+#define LZ4_VERSION_STRING LZ4_EXPAND_AND_QUOTE(LZ4_LIB_VERSION)
+
+LZ4LIB_API int LZ4_versionNumber (void); /**< library version number; useful to check dll version */
+LZ4LIB_API const char* LZ4_versionString (void); /**< library version string; useful to check dll version */
+
+
+/*-************************************
+* Tuning parameter
+**************************************/
+/*!
+ * LZ4_MEMORY_USAGE :
+ * Memory usage formula : N->2^N Bytes (examples : 10 -> 1KB; 12 -> 4KB ; 16 -> 64KB; 20 -> 1MB; etc.)
+ * Increasing memory usage improves compression ratio.
+ * Reduced memory usage may improve speed, thanks to better cache locality.
+ * Default value is 14, for 16KB, which nicely fits into Intel x86 L1 cache
+ */
+#ifndef LZ4_MEMORY_USAGE
+# define LZ4_MEMORY_USAGE 14
+#endif
+
+
+/*-************************************
+* Simple Functions
+**************************************/
+/*! LZ4_compress_default() :
+ * Compresses 'srcSize' bytes from buffer 'src'
+ * into already allocated 'dst' buffer of size 'dstCapacity'.
+ * Compression is guaranteed to succeed if 'dstCapacity' >= LZ4_compressBound(srcSize).
+ * It also runs faster, so it's a recommended setting.
+ * If the function cannot compress 'src' into a more limited 'dst' budget,
+ * compression stops *immediately*, and the function result is zero.
+ * In which case, 'dst' content is undefined (invalid).
+ * srcSize : max supported value is LZ4_MAX_INPUT_SIZE.
+ * dstCapacity : size of buffer 'dst' (which must be already allocated)
+ * @return : the number of bytes written into buffer 'dst' (necessarily <= dstCapacity)
+ * or 0 if compression fails
+ * Note : This function is protected against buffer overflow scenarios (never writes outside 'dst' buffer, nor read outside 'source' buffer).
+ */
+LZ4LIB_API int LZ4_compress_default(const char* src, char* dst, int srcSize, int dstCapacity);
+
+/*! LZ4_decompress_safe() :
+ * compressedSize : is the exact complete size of the compressed block.
+ * dstCapacity : is the size of destination buffer (which must be already allocated), presumed an upper bound of decompressed size.
+ * @return : the number of bytes decompressed into destination buffer (necessarily <= dstCapacity)
+ * If destination buffer is not large enough, decoding will stop and output an error code (negative value).
+ * If the source stream is detected malformed, the function will stop decoding and return a negative result.
+ * Note 1 : This function is protected against malicious data packets :
+ * it will never writes outside 'dst' buffer, nor read outside 'source' buffer,
+ * even if the compressed block is maliciously modified to order the decoder to do these actions.
+ * In such case, the decoder stops immediately, and considers the compressed block malformed.
+ * Note 2 : compressedSize and dstCapacity must be provided to the function, the compressed block does not contain them.
+ * The implementation is free to send / store / derive this information in whichever way is most beneficial.
+ * If there is a need for a different format which bundles together both compressed data and its metadata, consider looking at lz4frame.h instead.
+ */
+LZ4LIB_API int LZ4_decompress_safe (const char* src, char* dst, int compressedSize, int dstCapacity);
+
+
+/*-************************************
+* Advanced Functions
+**************************************/
+#define LZ4_MAX_INPUT_SIZE 0x7E000000 /* 2 113 929 216 bytes */
+#define LZ4_COMPRESSBOUND(isize) ((unsigned)(isize) > (unsigned)LZ4_MAX_INPUT_SIZE ? 0 : (isize) + ((isize)/255) + 16)
+
+/*! LZ4_compressBound() :
+ Provides the maximum size that LZ4 compression may output in a "worst case" scenario (input data not compressible)
+ This function is primarily useful for memory allocation purposes (destination buffer size).
+ Macro LZ4_COMPRESSBOUND() is also provided for compilation-time evaluation (stack memory allocation for example).
+ Note that LZ4_compress_default() compresses faster when dstCapacity is >= LZ4_compressBound(srcSize)
+ inputSize : max supported value is LZ4_MAX_INPUT_SIZE
+ return : maximum output size in a "worst case" scenario
+ or 0, if input size is incorrect (too large or negative)
+*/
+LZ4LIB_API int LZ4_compressBound(int inputSize);
+
+/*! LZ4_compress_fast() :
+ Same as LZ4_compress_default(), but allows selection of "acceleration" factor.
+ The larger the acceleration value, the faster the algorithm, but also the lesser the compression.
+ It's a trade-off. It can be fine tuned, with each successive value providing roughly +~3% to speed.
+ An acceleration value of "1" is the same as regular LZ4_compress_default()
+ Values <= 0 will be replaced by LZ4_ACCELERATION_DEFAULT (currently == 1, see lz4.c).
+ Values > LZ4_ACCELERATION_MAX will be replaced by LZ4_ACCELERATION_MAX (currently == 65537, see lz4.c).
+*/
+LZ4LIB_API int LZ4_compress_fast (const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+
+/*! LZ4_compress_fast_extState() :
+ * Same as LZ4_compress_fast(), using an externally allocated memory space for its state.
+ * Use LZ4_sizeofState() to know how much memory must be allocated,
+ * and allocate it on 8-bytes boundaries (using `malloc()` typically).
+ * Then, provide this buffer as `void* state` to compression function.
+ */
+LZ4LIB_API int LZ4_sizeofState(void);
+LZ4LIB_API int LZ4_compress_fast_extState (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+
+/*! LZ4_compress_destSize() :
+ * Reverse the logic : compresses as much data as possible from 'src' buffer
+ * into already allocated buffer 'dst', of size >= 'targetDestSize'.
+ * This function either compresses the entire 'src' content into 'dst' if it's large enough,
+ * or fill 'dst' buffer completely with as much data as possible from 'src'.
+ * note: acceleration parameter is fixed to "default".
+ *
+ * *srcSizePtr : will be modified to indicate how many bytes where read from 'src' to fill 'dst'.
+ * New value is necessarily <= input value.
+ * @return : Nb bytes written into 'dst' (necessarily <= targetDestSize)
+ * or 0 if compression fails.
+ *
+ * Note : from v1.8.2 to v1.9.1, this function had a bug (fixed un v1.9.2+):
+ * the produced compressed content could, in specific circumstances,
+ * require to be decompressed into a destination buffer larger
+ * by at least 1 byte than the content to decompress.
+ * If an application uses `LZ4_compress_destSize()`,
+ * it's highly recommended to update liblz4 to v1.9.2 or better.
+ * If this can't be done or ensured,
+ * the receiving decompression function should provide
+ * a dstCapacity which is > decompressedSize, by at least 1 byte.
+ * See https://github.com/lz4/lz4/issues/859 for details
+ */
+LZ4LIB_API int LZ4_compress_destSize (const char* src, char* dst, int* srcSizePtr, int targetDstSize);
+
+
+/*! LZ4_decompress_safe_partial() :
+ * Decompress an LZ4 compressed block, of size 'srcSize' at position 'src',
+ * into destination buffer 'dst' of size 'dstCapacity'.
+ * Up to 'targetOutputSize' bytes will be decoded.
+ * The function stops decoding on reaching this objective.
+ * This can be useful to boost performance
+ * whenever only the beginning of a block is required.
+ *
+ * @return : the number of bytes decoded in `dst` (necessarily <= targetOutputSize)
+ * If source stream is detected malformed, function returns a negative result.
+ *
+ * Note 1 : @return can be < targetOutputSize, if compressed block contains less data.
+ *
+ * Note 2 : targetOutputSize must be <= dstCapacity
+ *
+ * Note 3 : this function effectively stops decoding on reaching targetOutputSize,
+ * so dstCapacity is kind of redundant.
+ * This is because in older versions of this function,
+ * decoding operation would still write complete sequences.
+ * Therefore, there was no guarantee that it would stop writing at exactly targetOutputSize,
+ * it could write more bytes, though only up to dstCapacity.
+ * Some "margin" used to be required for this operation to work properly.
+ * Thankfully, this is no longer necessary.
+ * The function nonetheless keeps the same signature, in an effort to preserve API compatibility.
+ *
+ * Note 4 : If srcSize is the exact size of the block,
+ * then targetOutputSize can be any value,
+ * including larger than the block's decompressed size.
+ * The function will, at most, generate block's decompressed size.
+ *
+ * Note 5 : If srcSize is _larger_ than block's compressed size,
+ * then targetOutputSize **MUST** be <= block's decompressed size.
+ * Otherwise, *silent corruption will occur*.
+ */
+LZ4LIB_API int LZ4_decompress_safe_partial (const char* src, char* dst, int srcSize, int targetOutputSize, int dstCapacity);
+
+
+/*-*********************************************
+* Streaming Compression Functions
+***********************************************/
+typedef union LZ4_stream_u LZ4_stream_t; /* incomplete type (defined later) */
+
+LZ4LIB_API LZ4_stream_t* LZ4_createStream(void);
+LZ4LIB_API int LZ4_freeStream (LZ4_stream_t* streamPtr);
+
+/*! LZ4_resetStream_fast() : v1.9.0+
+ * Use this to prepare an LZ4_stream_t for a new chain of dependent blocks
+ * (e.g., LZ4_compress_fast_continue()).
+ *
+ * An LZ4_stream_t must be initialized once before usage.
+ * This is automatically done when created by LZ4_createStream().
+ * However, should the LZ4_stream_t be simply declared on stack (for example),
+ * it's necessary to initialize it first, using LZ4_initStream().
+ *
+ * After init, start any new stream with LZ4_resetStream_fast().
+ * A same LZ4_stream_t can be re-used multiple times consecutively
+ * and compress multiple streams,
+ * provided that it starts each new stream with LZ4_resetStream_fast().
+ *
+ * LZ4_resetStream_fast() is much faster than LZ4_initStream(),
+ * but is not compatible with memory regions containing garbage data.
+ *
+ * Note: it's only useful to call LZ4_resetStream_fast()
+ * in the context of streaming compression.
+ * The *extState* functions perform their own resets.
+ * Invoking LZ4_resetStream_fast() before is redundant, and even counterproductive.
+ */
+LZ4LIB_API void LZ4_resetStream_fast (LZ4_stream_t* streamPtr);
+
+/*! LZ4_loadDict() :
+ * Use this function to reference a static dictionary into LZ4_stream_t.
+ * The dictionary must remain available during compression.
+ * LZ4_loadDict() triggers a reset, so any previous data will be forgotten.
+ * The same dictionary will have to be loaded on decompression side for successful decoding.
+ * Dictionary are useful for better compression of small data (KB range).
+ * While LZ4 accept any input as dictionary,
+ * results are generally better when using Zstandard's Dictionary Builder.
+ * Loading a size of 0 is allowed, and is the same as reset.
+ * @return : loaded dictionary size, in bytes (necessarily <= 64 KB)
+ */
+LZ4LIB_API int LZ4_loadDict (LZ4_stream_t* streamPtr, const char* dictionary, int dictSize);
+
+/*! LZ4_compress_fast_continue() :
+ * Compress 'src' content using data from previously compressed blocks, for better compression ratio.
+ * 'dst' buffer must be already allocated.
+ * If dstCapacity >= LZ4_compressBound(srcSize), compression is guaranteed to succeed, and runs faster.
+ *
+ * @return : size of compressed block
+ * or 0 if there is an error (typically, cannot fit into 'dst').
+ *
+ * Note 1 : Each invocation to LZ4_compress_fast_continue() generates a new block.
+ * Each block has precise boundaries.
+ * Each block must be decompressed separately, calling LZ4_decompress_*() with relevant metadata.
+ * It's not possible to append blocks together and expect a single invocation of LZ4_decompress_*() to decompress them together.
+ *
+ * Note 2 : The previous 64KB of source data is __assumed__ to remain present, unmodified, at same address in memory !
+ *
+ * Note 3 : When input is structured as a double-buffer, each buffer can have any size, including < 64 KB.
+ * Make sure that buffers are separated, by at least one byte.
+ * This construction ensures that each block only depends on previous block.
+ *
+ * Note 4 : If input buffer is a ring-buffer, it can have any size, including < 64 KB.
+ *
+ * Note 5 : After an error, the stream status is undefined (invalid), it can only be reset or freed.
+ */
+LZ4LIB_API int LZ4_compress_fast_continue (LZ4_stream_t* streamPtr, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+/*! LZ4_saveDict() :
+ * If last 64KB data cannot be guaranteed to remain available at its current memory location,
+ * save it into a safer place (char* safeBuffer).
+ * This is schematically equivalent to a memcpy() followed by LZ4_loadDict(),
+ * but is much faster, because LZ4_saveDict() doesn't need to rebuild tables.
+ * @return : saved dictionary size in bytes (necessarily <= maxDictSize), or 0 if error.
+ */
+LZ4LIB_API int LZ4_saveDict (LZ4_stream_t* streamPtr, char* safeBuffer, int maxDictSize);
+
+
+/*-**********************************************
+* Streaming Decompression Functions
+* Bufferless synchronous API
+************************************************/
+typedef union LZ4_streamDecode_u LZ4_streamDecode_t; /* tracking context */
+
+/*! LZ4_createStreamDecode() and LZ4_freeStreamDecode() :
+ * creation / destruction of streaming decompression tracking context.
+ * A tracking context can be re-used multiple times.
+ */
+LZ4LIB_API LZ4_streamDecode_t* LZ4_createStreamDecode(void);
+LZ4LIB_API int LZ4_freeStreamDecode (LZ4_streamDecode_t* LZ4_stream);
+
+/*! LZ4_setStreamDecode() :
+ * An LZ4_streamDecode_t context can be allocated once and re-used multiple times.
+ * Use this function to start decompression of a new stream of blocks.
+ * A dictionary can optionally be set. Use NULL or size 0 for a reset order.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during next decompression.
+ * @return : 1 if OK, 0 if error
+ */
+LZ4LIB_API int LZ4_setStreamDecode (LZ4_streamDecode_t* LZ4_streamDecode, const char* dictionary, int dictSize);
+
+/*! LZ4_decoderRingBufferSize() : v1.8.2+
+ * Note : in a ring buffer scenario (optional),
+ * blocks are presumed decompressed next to each other
+ * up to the moment there is not enough remaining space for next block (remainingSize < maxBlockSize),
+ * at which stage it resumes from beginning of ring buffer.
+ * When setting such a ring buffer for streaming decompression,
+ * provides the minimum size of this ring buffer
+ * to be compatible with any source respecting maxBlockSize condition.
+ * @return : minimum ring buffer size,
+ * or 0 if there is an error (invalid maxBlockSize).
+ */
+LZ4LIB_API int LZ4_decoderRingBufferSize(int maxBlockSize);
+#define LZ4_DECODER_RING_BUFFER_SIZE(maxBlockSize) (65536 + 14 + (maxBlockSize)) /* for static allocation; maxBlockSize presumed valid */
+
+/*! LZ4_decompress_*_continue() :
+ * These decoding functions allow decompression of consecutive blocks in "streaming" mode.
+ * A block is an unsplittable entity, it must be presented entirely to a decompression function.
+ * Decompression functions only accepts one block at a time.
+ * The last 64KB of previously decoded data *must* remain available and unmodified at the memory position where they were decoded.
+ * If less than 64KB of data has been decoded, all the data must be present.
+ *
+ * Special : if decompression side sets a ring buffer, it must respect one of the following conditions :
+ * - Decompression buffer size is _at least_ LZ4_decoderRingBufferSize(maxBlockSize).
+ * maxBlockSize is the maximum size of any single block. It can have any value > 16 bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized.
+ * Actually, data can be produced by any source compliant with LZ4 format specification, and respecting maxBlockSize.
+ * - Synchronized mode :
+ * Decompression buffer size is _exactly_ the same as compression buffer size,
+ * and follows exactly same update rule (block boundaries at same positions),
+ * and decoding function is provided with exact decompressed size of each block (exception for last block of the stream),
+ * _then_ decoding & encoding ring buffer can have any size, including small ones ( < 64 KB).
+ * - Decompression buffer is larger than encoding buffer, by a minimum of maxBlockSize more bytes.
+ * In which case, encoding and decoding buffers do not need to be synchronized,
+ * and encoding ring buffer can have any size, including small ones ( < 64 KB).
+ *
+ * Whenever these conditions are not possible,
+ * save the last 64KB of decoded data into a safe buffer where it can't be modified during decompression,
+ * then indicate where this data is saved using LZ4_setStreamDecode(), before decompressing next block.
+*/
+LZ4LIB_API int LZ4_decompress_safe_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int srcSize, int dstCapacity);
+
+
+/*! LZ4_decompress_*_usingDict() :
+ * These decoding functions work the same as
+ * a combination of LZ4_setStreamDecode() followed by LZ4_decompress_*_continue()
+ * They are stand-alone, and don't need an LZ4_streamDecode_t structure.
+ * Dictionary is presumed stable : it must remain accessible and unmodified during decompression.
+ * Performance tip : Decompression speed can be substantially increased
+ * when dst == dictStart + dictSize.
+ */
+LZ4LIB_API int LZ4_decompress_safe_usingDict (const char* src, char* dst, int srcSize, int dstCapcity, const char* dictStart, int dictSize);
+
+#endif /* LZ4_H_2983827168210 */
+
+
+/*^*************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***************************************/
+
+/*-****************************************************************************
+ * Experimental section
+ *
+ * Symbols declared in this section must be considered unstable. Their
+ * signatures or semantics may change, or they may be removed altogether in the
+ * future. They are therefore only safe to depend on when the caller is
+ * statically linked against the library.
+ *
+ * To protect against unsafe usage, not only are the declarations guarded,
+ * the definitions are hidden by default
+ * when building LZ4 as a shared/dynamic library.
+ *
+ * In order to access these declarations,
+ * define LZ4_STATIC_LINKING_ONLY in your application
+ * before including LZ4's headers.
+ *
+ * In order to make their implementations accessible dynamically, you must
+ * define LZ4_PUBLISH_STATIC_FUNCTIONS when building the LZ4 library.
+ ******************************************************************************/
+
+#ifdef LZ4_STATIC_LINKING_ONLY
+
+#ifndef LZ4_STATIC_3504398509
+#define LZ4_STATIC_3504398509
+
+#ifdef LZ4_PUBLISH_STATIC_FUNCTIONS
+#define LZ4LIB_STATIC_API LZ4LIB_API
+#else
+#define LZ4LIB_STATIC_API
+#endif
+
+
+/*! LZ4_compress_fast_extState_fastReset() :
+ * A variant of LZ4_compress_fast_extState().
+ *
+ * Using this variant avoids an expensive initialization step.
+ * It is only safe to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStream_fast() for a definition of "correctly initialized").
+ * From a high level, the difference is that
+ * this function initializes the provided state with a call to something like LZ4_resetStream_fast()
+ * while LZ4_compress_fast_extState() starts with a call to LZ4_resetStream().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_fast_extState_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int acceleration);
+
+/*! LZ4_attach_dictionary() :
+ * This is an experimental API that allows
+ * efficient use of a static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_stream_t into a
+ * working LZ4_stream_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDict() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionaryStream may be NULL,
+ * in which case any existing dictionary stream is unset.
+ *
+ * If a dictionary is provided, it replaces any pre-existing stream history.
+ * The dictionary contents are the only history that can be referenced and
+ * logically immediately precede the data compressed in the first subsequent
+ * compression call.
+ *
+ * The dictionary will only remain attached to the working stream through the
+ * first compression call, at the end of which it is cleared. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the completion of the first compression call on the stream.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_dictionary(LZ4_stream_t* workingStream, const LZ4_stream_t* dictionaryStream);
+
+
+/*! In-place compression and decompression
+ *
+ * It's possible to have input and output sharing the same buffer,
+ * for highly contrained memory environments.
+ * In both cases, it requires input to lay at the end of the buffer,
+ * and decompression to start at beginning of the buffer.
+ * Buffer size must feature some margin, hence be larger than final size.
+ *
+ * |<------------------------buffer--------------------------------->|
+ * |<-----------compressed data--------->|
+ * |<-----------decompressed size------------------>|
+ * |<----margin---->|
+ *
+ * This technique is more useful for decompression,
+ * since decompressed size is typically larger,
+ * and margin is short.
+ *
+ * In-place decompression will work inside any buffer
+ * which size is >= LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize).
+ * This presumes that decompressedSize > compressedSize.
+ * Otherwise, it means compression actually expanded data,
+ * and it would be more efficient to store such data with a flag indicating it's not compressed.
+ * This can happen when data is not compressible (already compressed, or encrypted).
+ *
+ * For in-place compression, margin is larger, as it must be able to cope with both
+ * history preservation, requiring input data to remain unmodified up to LZ4_DISTANCE_MAX,
+ * and data expansion, which can happen when input is not compressible.
+ * As a consequence, buffer size requirements are much higher,
+ * and memory savings offered by in-place compression are more limited.
+ *
+ * There are ways to limit this cost for compression :
+ * - Reduce history size, by modifying LZ4_DISTANCE_MAX.
+ * Note that it is a compile-time constant, so all compressions will apply this limit.
+ * Lower values will reduce compression ratio, except when input_size < LZ4_DISTANCE_MAX,
+ * so it's a reasonable trick when inputs are known to be small.
+ * - Require the compressor to deliver a "maximum compressed size".
+ * This is the `dstCapacity` parameter in `LZ4_compress*()`.
+ * When this size is < LZ4_COMPRESSBOUND(inputSize), then compression can fail,
+ * in which case, the return code will be 0 (zero).
+ * The caller must be ready for these cases to happen,
+ * and typically design a backup scheme to send data uncompressed.
+ * The combination of both techniques can significantly reduce
+ * the amount of margin required for in-place compression.
+ *
+ * In-place compression can work in any buffer
+ * which size is >= (maxCompressedSize)
+ * with maxCompressedSize == LZ4_COMPRESSBOUND(srcSize) for guaranteed compression success.
+ * LZ4_COMPRESS_INPLACE_BUFFER_SIZE() depends on both maxCompressedSize and LZ4_DISTANCE_MAX,
+ * so it's possible to reduce memory requirements by playing with them.
+ */
+
+#define LZ4_DECOMPRESS_INPLACE_MARGIN(compressedSize) (((compressedSize) >> 8) + 32)
+#define LZ4_DECOMPRESS_INPLACE_BUFFER_SIZE(decompressedSize) ((decompressedSize) + LZ4_DECOMPRESS_INPLACE_MARGIN(decompressedSize)) /**< note: presumes that compressedSize < decompressedSize. note2: margin is overestimated a bit, since it could use compressedSize instead */
+
+#ifndef LZ4_DISTANCE_MAX /* history window size; can be user-defined at compile time */
+# define LZ4_DISTANCE_MAX 65535 /* set to maximum value by default */
+#endif
+
+#define LZ4_COMPRESS_INPLACE_MARGIN (LZ4_DISTANCE_MAX + 32) /* LZ4_DISTANCE_MAX can be safely replaced by srcSize when it's smaller */
+#define LZ4_COMPRESS_INPLACE_BUFFER_SIZE(maxCompressedSize) ((maxCompressedSize) + LZ4_COMPRESS_INPLACE_MARGIN) /**< maxCompressedSize is generally LZ4_COMPRESSBOUND(inputSize), but can be set to any lower value, with the risk that compression can fail (return code 0(zero)) */
+
+#endif /* LZ4_STATIC_3504398509 */
+#endif /* LZ4_STATIC_LINKING_ONLY */
+
+
+
+#ifndef LZ4_H_98237428734687
+#define LZ4_H_98237428734687
+
+/*-************************************************************
+ * Private Definitions
+ **************************************************************
+ * Do not use these definitions directly.
+ * They are only exposed to allow static allocation of `LZ4_stream_t` and `LZ4_streamDecode_t`.
+ * Accessing members will expose user code to API and/or ABI break in future versions of the library.
+ **************************************************************/
+#define LZ4_HASHLOG (LZ4_MEMORY_USAGE-2)
+#define LZ4_HASHTABLESIZE (1 << LZ4_MEMORY_USAGE)
+#define LZ4_HASH_SIZE_U32 (1 << LZ4_HASHLOG) /* required as macro for static allocation */
+
+#if defined(__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+# include <stdint.h>
+ typedef int8_t LZ4_i8;
+ typedef uint8_t LZ4_byte;
+ typedef uint16_t LZ4_u16;
+ typedef uint32_t LZ4_u32;
+#else
+ typedef signed char LZ4_i8;
+ typedef unsigned char LZ4_byte;
+ typedef unsigned short LZ4_u16;
+ typedef unsigned int LZ4_u32;
+#endif
+
+typedef struct LZ4_stream_t_internal LZ4_stream_t_internal;
+struct LZ4_stream_t_internal {
+ LZ4_u32 hashTable[LZ4_HASH_SIZE_U32];
+ LZ4_u32 currentOffset;
+ LZ4_u32 tableType;
+ const LZ4_byte* dictionary;
+ const LZ4_stream_t_internal* dictCtx;
+ LZ4_u32 dictSize;
+};
+
+typedef struct {
+ const LZ4_byte* externalDict;
+ size_t extDictSize;
+ const LZ4_byte* prefixEnd;
+ size_t prefixSize;
+} LZ4_streamDecode_t_internal;
+
+
+/*! LZ4_stream_t :
+ * Do not use below internal definitions directly !
+ * Declare or allocate an LZ4_stream_t instead.
+ * LZ4_stream_t can also be created using LZ4_createStream(), which is recommended.
+ * The structure definition can be convenient for static allocation
+ * (on stack, or as part of larger structure).
+ * Init this structure with LZ4_initStream() before first use.
+ * note : only use this definition in association with static linking !
+ * this definition is not API/ABI safe, and may change in future versions.
+ */
+#define LZ4_STREAMSIZE 16416 /* static size, for inter-version compatibility */
+#define LZ4_STREAMSIZE_VOIDP (LZ4_STREAMSIZE / sizeof(void*))
+union LZ4_stream_u {
+ void* table[LZ4_STREAMSIZE_VOIDP];
+ LZ4_stream_t_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_stream_t */
+
+
+/*! LZ4_initStream() : v1.9.0+
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is automatically done when invoking LZ4_createStream(),
+ * but it's not when the structure is simply declared on stack (for example).
+ *
+ * Use LZ4_initStream() to properly initialize a newly declared LZ4_stream_t.
+ * It can also initialize any arbitrary buffer of sufficient size,
+ * and will @return a pointer of proper type upon initialization.
+ *
+ * Note : initialization fails if size and alignment conditions are not respected.
+ * In which case, the function will @return NULL.
+ * Note2: An LZ4_stream_t structure guarantees correct alignment and size.
+ * Note3: Before v1.9.0, use LZ4_resetStream() instead
+ */
+LZ4LIB_API LZ4_stream_t* LZ4_initStream (void* buffer, size_t size);
+
+
+/*! LZ4_streamDecode_t :
+ * information structure to track an LZ4 stream during decompression.
+ * init this structure using LZ4_setStreamDecode() before first use.
+ * note : only use in association with static linking !
+ * this definition is not API/ABI safe,
+ * and may change in a future version !
+ */
+#define LZ4_STREAMDECODESIZE_U64 (4 + ((sizeof(void*)==16) ? 2 : 0) /*AS-400*/ )
+#define LZ4_STREAMDECODESIZE (LZ4_STREAMDECODESIZE_U64 * sizeof(unsigned long long))
+union LZ4_streamDecode_u {
+ unsigned long long table[LZ4_STREAMDECODESIZE_U64];
+ LZ4_streamDecode_t_internal internal_donotuse;
+} ; /* previously typedef'd to LZ4_streamDecode_t */
+
+
+
+/*-************************************
+* Obsolete Functions
+**************************************/
+
+/*! Deprecation warnings
+ *
+ * Deprecated functions make the compiler generate a warning when invoked.
+ * This is meant to invite users to update their source code.
+ * Should deprecation warnings be a problem, it is generally possible to disable them,
+ * typically with -Wno-deprecated-declarations for gcc
+ * or _CRT_SECURE_NO_WARNINGS in Visual.
+ *
+ * Another method is to define LZ4_DISABLE_DEPRECATE_WARNINGS
+ * before including the header file.
+ */
+#ifdef LZ4_DISABLE_DEPRECATE_WARNINGS
+# define LZ4_DEPRECATED(message) /* disable deprecation warnings */
+#else
+# if defined (__cplusplus) && (__cplusplus >= 201402) /* C++14 or greater */
+# define LZ4_DEPRECATED(message) [[deprecated(message)]]
+# elif defined(_MSC_VER)
+# define LZ4_DEPRECATED(message) __declspec(deprecated(message))
+# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 45))
+# define LZ4_DEPRECATED(message) __attribute__((deprecated(message)))
+# elif defined(__GNUC__) && (__GNUC__ * 10 + __GNUC_MINOR__ >= 31)
+# define LZ4_DEPRECATED(message) __attribute__((deprecated))
+# else
+# pragma message("WARNING: LZ4_DEPRECATED needs custom implementation for this compiler")
+# define LZ4_DEPRECATED(message) /* disabled */
+# endif
+#endif /* LZ4_DISABLE_DEPRECATE_WARNINGS */
+
+/*! Obsolete compression functions (since v1.7.3) */
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress (const char* src, char* dest, int srcSize);
+LZ4_DEPRECATED("use LZ4_compress_default() instead") LZ4LIB_API int LZ4_compress_limitedOutput (const char* src, char* dest, int srcSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_withState (void* state, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_extState() instead") LZ4LIB_API int LZ4_compress_limitedOutput_withState (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_fast_continue() instead") LZ4LIB_API int LZ4_compress_limitedOutput_continue (LZ4_stream_t* LZ4_streamPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/*! Obsolete decompression functions (since v1.8.0) */
+LZ4_DEPRECATED("use LZ4_decompress_fast() instead") LZ4LIB_API int LZ4_uncompress (const char* source, char* dest, int outputSize);
+LZ4_DEPRECATED("use LZ4_decompress_safe() instead") LZ4LIB_API int LZ4_uncompress_unknownOutputSize (const char* source, char* dest, int isize, int maxOutputSize);
+
+/* Obsolete streaming functions (since v1.7.0)
+ * degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, they don't
+ * actually retain any history between compression calls. The compression ratio
+ * achieved will therefore be no better than compressing each chunk
+ * independently.
+ */
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API void* LZ4_create (char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_createStream() instead") LZ4LIB_API int LZ4_sizeofStreamState(void);
+LZ4_DEPRECATED("Use LZ4_resetStream() instead") LZ4LIB_API int LZ4_resetStreamState(void* state, char* inputBuffer);
+LZ4_DEPRECATED("Use LZ4_saveDict() instead") LZ4LIB_API char* LZ4_slideInputBuffer (void* state);
+
+/*! Obsolete streaming decoding functions (since v1.7.0) */
+LZ4_DEPRECATED("use LZ4_decompress_safe_usingDict() instead") LZ4LIB_API int LZ4_decompress_safe_withPrefix64k (const char* src, char* dst, int compressedSize, int maxDstSize);
+LZ4_DEPRECATED("use LZ4_decompress_fast_usingDict() instead") LZ4LIB_API int LZ4_decompress_fast_withPrefix64k (const char* src, char* dst, int originalSize);
+
+/*! Obsolete LZ4_decompress_fast variants (since v1.9.0) :
+ * These functions used to be faster than LZ4_decompress_safe(),
+ * but this is no longer the case. They are now slower.
+ * This is because LZ4_decompress_fast() doesn't know the input size,
+ * and therefore must progress more cautiously into the input buffer to not read beyond the end of block.
+ * On top of that `LZ4_decompress_fast()` is not protected vs malformed or malicious inputs, making it a security liability.
+ * As a consequence, LZ4_decompress_fast() is strongly discouraged, and deprecated.
+ *
+ * The last remaining LZ4_decompress_fast() specificity is that
+ * it can decompress a block without knowing its compressed size.
+ * Such functionality can be achieved in a more secure manner
+ * by employing LZ4_decompress_safe_partial().
+ *
+ * Parameters:
+ * originalSize : is the uncompressed size to regenerate.
+ * `dst` must be already allocated, its size must be >= 'originalSize' bytes.
+ * @return : number of bytes read from source buffer (== compressed size).
+ * The function expects to finish at block's end exactly.
+ * If the source stream is detected malformed, the function stops decoding and returns a negative result.
+ * note : LZ4_decompress_fast*() requires originalSize. Thanks to this information, it never writes past the output buffer.
+ * However, since it doesn't know its 'src' size, it may read an unknown amount of input, past input buffer bounds.
+ * Also, since match offsets are not validated, match reads from 'src' may underflow too.
+ * These issues never happen if input (compressed) data is correct.
+ * But they may happen if input data is invalid (error or intentional tampering).
+ * As a consequence, use these functions in trusted environments with trusted data **only**.
+ */
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe() instead")
+LZ4LIB_API int LZ4_decompress_fast (const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_continue() instead")
+LZ4LIB_API int LZ4_decompress_fast_continue (LZ4_streamDecode_t* LZ4_streamDecode, const char* src, char* dst, int originalSize);
+LZ4_DEPRECATED("This function is deprecated and unsafe. Consider using LZ4_decompress_safe_usingDict() instead")
+LZ4LIB_API int LZ4_decompress_fast_usingDict (const char* src, char* dst, int originalSize, const char* dictStart, int dictSize);
+
+/*! LZ4_resetStream() :
+ * An LZ4_stream_t structure must be initialized at least once.
+ * This is done with LZ4_initStream(), or LZ4_resetStream().
+ * Consider switching to LZ4_initStream(),
+ * invoking LZ4_resetStream() will trigger deprecation warnings in the future.
+ */
+LZ4LIB_API void LZ4_resetStream (LZ4_stream_t* streamPtr);
+
+
+#endif /* LZ4_H_98237428734687 */
+
+
+#if defined (__cplusplus)
+}
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c
new file mode 100644
index 000000000..945f9f7a3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.c
@@ -0,0 +1,1899 @@
+/*
+ * LZ4 auto-framing library
+ * Copyright (C) 2011-2016, Yann Collet.
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - LZ4 homepage : http://www.lz4.org
+ * - LZ4 source repository : https://github.com/lz4/lz4
+ */
+
+/* LZ4F is a stand-alone API to create LZ4-compressed Frames
+ * in full conformance with specification v1.6.1 .
+ * This library rely upon memory management capabilities (malloc, free)
+ * provided either by <stdlib.h>,
+ * or redirected towards another library of user's choice
+ * (see Memory Routines below).
+ */
+
+
+/*-************************************
+* Compiler Options
+**************************************/
+#ifdef _MSC_VER /* Visual Studio */
+# pragma warning(disable : 4127) /* disable: C4127: conditional expression is constant */
+#endif
+
+
+/*-************************************
+* Tuning parameters
+**************************************/
+/*
+ * LZ4F_HEAPMODE :
+ * Select how default compression functions will allocate memory for their hash table,
+ * in memory stack (0:default, fastest), or in memory heap (1:requires malloc()).
+ */
+#ifndef LZ4F_HEAPMODE
+# define LZ4F_HEAPMODE 0
+#endif
+
+
+/*-************************************
+* Memory routines
+**************************************/
+/*
+ * User may redirect invocations of
+ * malloc(), calloc() and free()
+ * towards another library or solution of their choice
+ * by modifying below section.
+ */
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+#include "rd.h" /* rd_malloc, rd_calloc, rd_free */
+# define ALLOC(s) rd_malloc(s)
+# define ALLOC_AND_ZERO(s) rd_calloc(1,(s))
+# define FREEMEM(p) rd_free(p)
+#endif
+
+#include <string.h> /* memset, memcpy, memmove */
+#ifndef LZ4_SRC_INCLUDED /* avoid redefinition when sources are coalesced */
+# define MEM_INIT(p,v,s) memset((p),(v),(s))
+#endif
+
+
+/*-************************************
+* Library declarations
+**************************************/
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
+#define LZ4_STATIC_LINKING_ONLY
+#include "lz4.h"
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "lz4hc.h"
+#define XXH_STATIC_LINKING_ONLY
+#include "rdxxhash.h"
+
+
+/*-************************************
+* Debug
+**************************************/
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=1)
+# include <assert.h>
+#else
+# ifndef assert
+# define assert(condition) ((void)0)
+# endif
+#endif
+
+#define LZ4F_STATIC_ASSERT(c) { enum { LZ4F_static_assert = 1/(int)(!!(c)) }; } /* use only *after* variable declarations */
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG>=2) && !defined(DEBUGLOG)
+# include <stdio.h>
+static int g_debuglog_enable = 1;
+# define DEBUGLOG(l, ...) { \
+ if ((g_debuglog_enable) && (l<=LZ4_DEBUG)) { \
+ fprintf(stderr, __FILE__ ": "); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, " \n"); \
+ } }
+#else
+# define DEBUGLOG(l, ...) {} /* disabled */
+#endif
+
+
+/*-************************************
+* Basic Types
+**************************************/
+#if !defined (__VMS) && (defined (__cplusplus) || (defined (__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */) )
+# include <stdint.h>
+ typedef uint8_t BYTE;
+ typedef uint16_t U16;
+ typedef uint32_t U32;
+ typedef int32_t S32;
+ typedef uint64_t U64;
+#else
+ typedef unsigned char BYTE;
+ typedef unsigned short U16;
+ typedef unsigned int U32;
+ typedef signed int S32;
+ typedef unsigned long long U64;
+#endif
+
+
+/* unoptimized version; solves endianess & alignment issues */
+static U32 LZ4F_readLE32 (const void* src)
+{
+ const BYTE* const srcPtr = (const BYTE*)src;
+ U32 value32 = srcPtr[0];
+ value32 += ((U32)srcPtr[1])<< 8;
+ value32 += ((U32)srcPtr[2])<<16;
+ value32 += ((U32)srcPtr[3])<<24;
+ return value32;
+}
+
+static void LZ4F_writeLE32 (void* dst, U32 value32)
+{
+ BYTE* const dstPtr = (BYTE*)dst;
+ dstPtr[0] = (BYTE)value32;
+ dstPtr[1] = (BYTE)(value32 >> 8);
+ dstPtr[2] = (BYTE)(value32 >> 16);
+ dstPtr[3] = (BYTE)(value32 >> 24);
+}
+
+static U64 LZ4F_readLE64 (const void* src)
+{
+ const BYTE* const srcPtr = (const BYTE*)src;
+ U64 value64 = srcPtr[0];
+ value64 += ((U64)srcPtr[1]<<8);
+ value64 += ((U64)srcPtr[2]<<16);
+ value64 += ((U64)srcPtr[3]<<24);
+ value64 += ((U64)srcPtr[4]<<32);
+ value64 += ((U64)srcPtr[5]<<40);
+ value64 += ((U64)srcPtr[6]<<48);
+ value64 += ((U64)srcPtr[7]<<56);
+ return value64;
+}
+
+static void LZ4F_writeLE64 (void* dst, U64 value64)
+{
+ BYTE* const dstPtr = (BYTE*)dst;
+ dstPtr[0] = (BYTE)value64;
+ dstPtr[1] = (BYTE)(value64 >> 8);
+ dstPtr[2] = (BYTE)(value64 >> 16);
+ dstPtr[3] = (BYTE)(value64 >> 24);
+ dstPtr[4] = (BYTE)(value64 >> 32);
+ dstPtr[5] = (BYTE)(value64 >> 40);
+ dstPtr[6] = (BYTE)(value64 >> 48);
+ dstPtr[7] = (BYTE)(value64 >> 56);
+}
+
+
+/*-************************************
+* Constants
+**************************************/
+#ifndef LZ4_SRC_INCLUDED /* avoid double definition */
+# define KB *(1<<10)
+# define MB *(1<<20)
+# define GB *(1<<30)
+#endif
+
+#define _1BIT 0x01
+#define _2BITS 0x03
+#define _3BITS 0x07
+#define _4BITS 0x0F
+#define _8BITS 0xFF
+
+#define LZ4F_MAGIC_SKIPPABLE_START 0x184D2A50U
+#define LZ4F_MAGICNUMBER 0x184D2204U
+#define LZ4F_BLOCKUNCOMPRESSED_FLAG 0x80000000U
+#define LZ4F_BLOCKSIZEID_DEFAULT LZ4F_max64KB
+
+static const size_t minFHSize = LZ4F_HEADER_SIZE_MIN; /* 7 */
+static const size_t maxFHSize = LZ4F_HEADER_SIZE_MAX; /* 19 */
+static const size_t BHSize = LZ4F_BLOCK_HEADER_SIZE; /* block header : size, and compress flag */
+static const size_t BFSize = LZ4F_BLOCK_CHECKSUM_SIZE; /* block footer : checksum (optional) */
+
+
+/*-************************************
+* Structures and local types
+**************************************/
+typedef struct LZ4F_cctx_s
+{
+ LZ4F_preferences_t prefs;
+ U32 version;
+ U32 cStage;
+ const LZ4F_CDict* cdict;
+ size_t maxBlockSize;
+ size_t maxBufferSize;
+ BYTE* tmpBuff;
+ BYTE* tmpIn;
+ size_t tmpInSize;
+ U64 totalInSize;
+ XXH32_state_t xxh;
+ void* lz4CtxPtr;
+ U16 lz4CtxAlloc; /* sized for: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+ U16 lz4CtxState; /* in use as: 0 = none, 1 = lz4 ctx, 2 = lz4hc ctx */
+} LZ4F_cctx_t;
+
+
+/*-************************************
+* Error management
+**************************************/
+#define LZ4F_GENERATE_STRING(STRING) #STRING,
+static const char* LZ4F_errorStrings[] = { LZ4F_LIST_ERRORS(LZ4F_GENERATE_STRING) };
+
+
+unsigned LZ4F_isError(LZ4F_errorCode_t code)
+{
+ return (code > (LZ4F_errorCode_t)(-LZ4F_ERROR_maxCode));
+}
+
+const char* LZ4F_getErrorName(LZ4F_errorCode_t code)
+{
+ static const char* codeError = "Unspecified error code";
+ if (LZ4F_isError(code)) return LZ4F_errorStrings[-(int)(code)];
+ return codeError;
+}
+
+LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult)
+{
+ if (!LZ4F_isError(functionResult)) return LZ4F_OK_NoError;
+ return (LZ4F_errorCodes)(-(ptrdiff_t)functionResult);
+}
+
+static LZ4F_errorCode_t err0r(LZ4F_errorCodes code)
+{
+ /* A compilation error here means sizeof(ptrdiff_t) is not large enough */
+ LZ4F_STATIC_ASSERT(sizeof(ptrdiff_t) >= sizeof(size_t));
+ return (LZ4F_errorCode_t)-(ptrdiff_t)code;
+}
+
+unsigned LZ4F_getVersion(void) { return LZ4F_VERSION; }
+
+int LZ4F_compressionLevel_max(void) { return LZ4HC_CLEVEL_MAX; }
+
+size_t LZ4F_getBlockSize(unsigned blockSizeID)
+{
+ static const size_t blockSizes[4] = { 64 KB, 256 KB, 1 MB, 4 MB };
+
+ if (blockSizeID == 0) blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+ if (blockSizeID < LZ4F_max64KB || blockSizeID > LZ4F_max4MB)
+ return err0r(LZ4F_ERROR_maxBlockSize_invalid);
+ blockSizeID -= LZ4F_max64KB;
+ return blockSizes[blockSizeID];
+}
+
+/*-************************************
+* Private functions
+**************************************/
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+
+static BYTE LZ4F_headerChecksum (const void* header, size_t length)
+{
+ U32 const xxh = XXH32(header, length, 0);
+ return (BYTE)(xxh >> 8);
+}
+
+
+/*-************************************
+* Simple-pass compression functions
+**************************************/
+static LZ4F_blockSizeID_t LZ4F_optimalBSID(const LZ4F_blockSizeID_t requestedBSID,
+ const size_t srcSize)
+{
+ LZ4F_blockSizeID_t proposedBSID = LZ4F_max64KB;
+ size_t maxBlockSize = 64 KB;
+ while (requestedBSID > proposedBSID) {
+ if (srcSize <= maxBlockSize)
+ return proposedBSID;
+ proposedBSID = (LZ4F_blockSizeID_t)((int)proposedBSID + 1);
+ maxBlockSize <<= 2;
+ }
+ return requestedBSID;
+}
+
+/*! LZ4F_compressBound_internal() :
+ * Provides dstCapacity given a srcSize to guarantee operation success in worst case situations.
+ * prefsPtr is optional : if NULL is provided, preferences will be set to cover worst case scenario.
+ * @return is always the same for a srcSize and prefsPtr, so it can be relied upon to size reusable buffers.
+ * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() operations.
+ */
+static size_t LZ4F_compressBound_internal(size_t srcSize,
+ const LZ4F_preferences_t* preferencesPtr,
+ size_t alreadyBuffered)
+{
+ LZ4F_preferences_t prefsNull = LZ4F_INIT_PREFERENCES;
+ prefsNull.frameInfo.contentChecksumFlag = LZ4F_contentChecksumEnabled; /* worst case */
+ prefsNull.frameInfo.blockChecksumFlag = LZ4F_blockChecksumEnabled; /* worst case */
+ { const LZ4F_preferences_t* const prefsPtr = (preferencesPtr==NULL) ? &prefsNull : preferencesPtr;
+ U32 const flush = prefsPtr->autoFlush | (srcSize==0);
+ LZ4F_blockSizeID_t const blockID = prefsPtr->frameInfo.blockSizeID;
+ size_t const blockSize = LZ4F_getBlockSize(blockID);
+ size_t const maxBuffered = blockSize - 1;
+ size_t const bufferedSize = MIN(alreadyBuffered, maxBuffered);
+ size_t const maxSrcSize = srcSize + bufferedSize;
+ unsigned const nbFullBlocks = (unsigned)(maxSrcSize / blockSize);
+ size_t const partialBlockSize = maxSrcSize & (blockSize-1);
+ size_t const lastBlockSize = flush ? partialBlockSize : 0;
+ unsigned const nbBlocks = nbFullBlocks + (lastBlockSize>0);
+
+ size_t const blockCRCSize = BFSize * prefsPtr->frameInfo.blockChecksumFlag;
+ size_t const frameEnd = BHSize + (prefsPtr->frameInfo.contentChecksumFlag*BFSize);
+
+ return ((BHSize + blockCRCSize) * nbBlocks) +
+ (blockSize * nbFullBlocks) + lastBlockSize + frameEnd;
+ }
+}
+
+size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
+{
+ LZ4F_preferences_t prefs;
+ size_t const headerSize = maxFHSize; /* max header size, including optional fields */
+
+ if (preferencesPtr!=NULL) prefs = *preferencesPtr;
+ else MEM_INIT(&prefs, 0, sizeof(prefs));
+ prefs.autoFlush = 1;
+
+ return headerSize + LZ4F_compressBound_internal(srcSize, &prefs, 0);;
+}
+
+
+/*! LZ4F_compressFrame_usingCDict() :
+ * Compress srcBuffer using a dictionary, in a single step.
+ * cdict can be NULL, in which case, no dictionary is used.
+ * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
+ * however, it's the only way to provide a dictID, so it's not recommended.
+ * @return : number of bytes written into dstBuffer,
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressFrame_usingCDict(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ LZ4F_preferences_t prefs;
+ LZ4F_compressOptions_t options;
+ BYTE* const dstStart = (BYTE*) dstBuffer;
+ BYTE* dstPtr = dstStart;
+ BYTE* const dstEnd = dstStart + dstCapacity;
+
+ if (preferencesPtr!=NULL)
+ prefs = *preferencesPtr;
+ else
+ MEM_INIT(&prefs, 0, sizeof(prefs));
+ if (prefs.frameInfo.contentSize != 0)
+ prefs.frameInfo.contentSize = (U64)srcSize; /* auto-correct content size if selected (!=0) */
+
+ prefs.frameInfo.blockSizeID = LZ4F_optimalBSID(prefs.frameInfo.blockSizeID, srcSize);
+ prefs.autoFlush = 1;
+ if (srcSize <= LZ4F_getBlockSize(prefs.frameInfo.blockSizeID))
+ prefs.frameInfo.blockMode = LZ4F_blockIndependent; /* only one block => no need for inter-block link */
+
+ MEM_INIT(&options, 0, sizeof(options));
+ options.stableSrc = 1;
+
+ if (dstCapacity < LZ4F_compressFrameBound(srcSize, &prefs)) /* condition to guarantee success */
+ return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+
+ { size_t const headerSize = LZ4F_compressBegin_usingCDict(cctx, dstBuffer, dstCapacity, cdict, &prefs); /* write header */
+ if (LZ4F_isError(headerSize)) return headerSize;
+ dstPtr += headerSize; /* header size */ }
+
+ assert(dstEnd >= dstPtr);
+ { size_t const cSize = LZ4F_compressUpdate(cctx, dstPtr, (size_t)(dstEnd-dstPtr), srcBuffer, srcSize, &options);
+ if (LZ4F_isError(cSize)) return cSize;
+ dstPtr += cSize; }
+
+ assert(dstEnd >= dstPtr);
+ { size_t const tailSize = LZ4F_compressEnd(cctx, dstPtr, (size_t)(dstEnd-dstPtr), &options); /* flush last block, and generate suffix */
+ if (LZ4F_isError(tailSize)) return tailSize;
+ dstPtr += tailSize; }
+
+ assert(dstEnd >= dstStart);
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*! LZ4F_compressFrame() :
+ * Compress an entire srcBuffer into a valid LZ4 frame, in a single step.
+ * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ size_t result;
+#if (LZ4F_HEAPMODE)
+ LZ4F_cctx_t *cctxPtr;
+ result = LZ4F_createCompressionContext(&cctxPtr, LZ4F_VERSION);
+ if (LZ4F_isError(result)) return result;
+#else
+ LZ4F_cctx_t cctx;
+ LZ4_stream_t lz4ctx;
+ LZ4F_cctx_t *cctxPtr = &cctx;
+
+ DEBUGLOG(4, "LZ4F_compressFrame");
+ MEM_INIT(&cctx, 0, sizeof(cctx));
+ cctx.version = LZ4F_VERSION;
+ cctx.maxBufferSize = 5 MB; /* mess with real buffer size to prevent dynamic allocation; works only because autoflush==1 & stableSrc==1 */
+ if (preferencesPtr == NULL ||
+ preferencesPtr->compressionLevel < LZ4HC_CLEVEL_MIN)
+ {
+ LZ4_initStream(&lz4ctx, sizeof(lz4ctx));
+ cctxPtr->lz4CtxPtr = &lz4ctx;
+ cctxPtr->lz4CtxAlloc = 1;
+ cctxPtr->lz4CtxState = 1;
+ }
+#endif
+
+ result = LZ4F_compressFrame_usingCDict(cctxPtr, dstBuffer, dstCapacity,
+ srcBuffer, srcSize,
+ NULL, preferencesPtr);
+
+#if (LZ4F_HEAPMODE)
+ LZ4F_freeCompressionContext(cctxPtr);
+#else
+ if (preferencesPtr != NULL &&
+ preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN)
+ {
+ FREEMEM(cctxPtr->lz4CtxPtr);
+ }
+#endif
+ return result;
+}
+
+
+/*-***************************************************
+* Dictionary compression
+*****************************************************/
+
+struct LZ4F_CDict_s {
+ void* dictContent;
+ LZ4_stream_t* fastCtx;
+ LZ4_streamHC_t* HCCtx;
+}; /* typedef'd to LZ4F_CDict within lz4frame_static.h */
+
+/*! LZ4F_createCDict() :
+ * When compressing multiple messages / blocks with the same dictionary, it's recommended to load it just once.
+ * LZ4F_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ * LZ4F_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after LZ4F_CDict creation, since its content is copied within CDict
+ * @return : digested dictionary for compression, or NULL if failed */
+LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize)
+{
+ const char* dictStart = (const char*)dictBuffer;
+ LZ4F_CDict* cdict = (LZ4F_CDict*) ALLOC(sizeof(*cdict));
+ DEBUGLOG(4, "LZ4F_createCDict");
+ if (!cdict) return NULL;
+ if (dictSize > 64 KB) {
+ dictStart += dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ cdict->dictContent = ALLOC(dictSize);
+ cdict->fastCtx = LZ4_createStream();
+ cdict->HCCtx = LZ4_createStreamHC();
+ if (!cdict->dictContent || !cdict->fastCtx || !cdict->HCCtx) {
+ LZ4F_freeCDict(cdict);
+ return NULL;
+ }
+ memcpy(cdict->dictContent, dictStart, dictSize);
+ LZ4_loadDict (cdict->fastCtx, (const char*)cdict->dictContent, (int)dictSize);
+ LZ4_setCompressionLevel(cdict->HCCtx, LZ4HC_CLEVEL_DEFAULT);
+ LZ4_loadDictHC(cdict->HCCtx, (const char*)cdict->dictContent, (int)dictSize);
+ return cdict;
+}
+
+void LZ4F_freeCDict(LZ4F_CDict* cdict)
+{
+ if (cdict==NULL) return; /* support free on NULL */
+ FREEMEM(cdict->dictContent);
+ LZ4_freeStream(cdict->fastCtx);
+ LZ4_freeStreamHC(cdict->HCCtx);
+ FREEMEM(cdict);
+}
+
+
+/*-*********************************
+* Advanced compression functions
+***********************************/
+
+/*! LZ4F_createCompressionContext() :
+ * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
+ * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version and an LZ4F_preferences_t structure.
+ * The version provided MUST be LZ4F_VERSION. It is intended to track potential incompatible differences between different binaries.
+ * The function will provide a pointer to an allocated LZ4F_compressionContext_t object.
+ * If the result LZ4F_errorCode_t is not OK_NoError, there was an error during context creation.
+ * Object can release its memory using LZ4F_freeCompressionContext();
+ */
+LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** LZ4F_compressionContextPtr, unsigned version)
+{
+ LZ4F_cctx_t* const cctxPtr = (LZ4F_cctx_t*)ALLOC_AND_ZERO(sizeof(LZ4F_cctx_t));
+ if (cctxPtr==NULL) return err0r(LZ4F_ERROR_allocation_failed);
+
+ cctxPtr->version = version;
+ cctxPtr->cStage = 0; /* Next stage : init stream */
+
+ *LZ4F_compressionContextPtr = cctxPtr;
+
+ return LZ4F_OK_NoError;
+}
+
+
+LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctxPtr)
+{
+ if (cctxPtr != NULL) { /* support free on NULL */
+ FREEMEM(cctxPtr->lz4CtxPtr); /* note: LZ4_streamHC_t and LZ4_stream_t are simple POD types */
+ FREEMEM(cctxPtr->tmpBuff);
+ FREEMEM(cctxPtr);
+ }
+
+ return LZ4F_OK_NoError;
+}
+
+
+/**
+ * This function prepares the internal LZ4(HC) stream for a new compression,
+ * resetting the context and attaching the dictionary, if there is one.
+ *
+ * It needs to be called at the beginning of each independent compression
+ * stream (i.e., at the beginning of a frame in blockLinked mode, or at the
+ * beginning of each block in blockIndependent mode).
+ */
+static void LZ4F_initStream(void* ctx,
+ const LZ4F_CDict* cdict,
+ int level,
+ LZ4F_blockMode_t blockMode) {
+ if (level < LZ4HC_CLEVEL_MIN) {
+ if (cdict != NULL || blockMode == LZ4F_blockLinked) {
+ /* In these cases, we will call LZ4_compress_fast_continue(),
+ * which needs an already reset context. Otherwise, we'll call a
+ * one-shot API. The non-continued APIs internally perform their own
+ * resets at the beginning of their calls, where they know what
+ * tableType they need the context to be in. So in that case this
+ * would be misguided / wasted work. */
+ LZ4_resetStream_fast((LZ4_stream_t*)ctx);
+ }
+ LZ4_attach_dictionary((LZ4_stream_t *)ctx, cdict ? cdict->fastCtx : NULL);
+ } else {
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)ctx, level);
+ LZ4_attach_HC_dictionary((LZ4_streamHC_t *)ctx, cdict ? cdict->HCCtx : NULL);
+ }
+}
+
+
+/*! LZ4F_compressBegin_usingCDict() :
+ * init streaming compression and writes frame header into dstBuffer.
+ * dstBuffer must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * @return : number of bytes written into dstBuffer for the header
+ * or an error code (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressBegin_usingCDict(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ LZ4F_preferences_t prefNull;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ BYTE* headerStart;
+
+ if (dstCapacity < maxFHSize) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ MEM_INIT(&prefNull, 0, sizeof(prefNull));
+ if (preferencesPtr == NULL) preferencesPtr = &prefNull;
+ cctxPtr->prefs = *preferencesPtr;
+
+ /* Ctx Management */
+ { U16 const ctxTypeID = (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) ? 1 : 2;
+ if (cctxPtr->lz4CtxAlloc < ctxTypeID) {
+ FREEMEM(cctxPtr->lz4CtxPtr);
+ if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ cctxPtr->lz4CtxPtr = LZ4_createStream();
+ } else {
+ cctxPtr->lz4CtxPtr = LZ4_createStreamHC();
+ }
+ if (cctxPtr->lz4CtxPtr == NULL)
+ return err0r(LZ4F_ERROR_allocation_failed);
+ cctxPtr->lz4CtxAlloc = ctxTypeID;
+ cctxPtr->lz4CtxState = ctxTypeID;
+ } else if (cctxPtr->lz4CtxState != ctxTypeID) {
+ /* otherwise, a sufficient buffer is allocated, but we need to
+ * reset it to the correct context type */
+ if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN) {
+ LZ4_initStream((LZ4_stream_t *) cctxPtr->lz4CtxPtr, sizeof (LZ4_stream_t));
+ } else {
+ LZ4_initStreamHC((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, sizeof(LZ4_streamHC_t));
+ LZ4_setCompressionLevel((LZ4_streamHC_t *) cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel);
+ }
+ cctxPtr->lz4CtxState = ctxTypeID;
+ }
+ }
+
+ /* Buffer Management */
+ if (cctxPtr->prefs.frameInfo.blockSizeID == 0)
+ cctxPtr->prefs.frameInfo.blockSizeID = LZ4F_BLOCKSIZEID_DEFAULT;
+ cctxPtr->maxBlockSize = LZ4F_getBlockSize(cctxPtr->prefs.frameInfo.blockSizeID);
+
+ { size_t const requiredBuffSize = preferencesPtr->autoFlush ?
+ ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 64 KB : 0) : /* only needs past data up to window size */
+ cctxPtr->maxBlockSize + ((cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) ? 128 KB : 0);
+
+ if (cctxPtr->maxBufferSize < requiredBuffSize) {
+ cctxPtr->maxBufferSize = 0;
+ FREEMEM(cctxPtr->tmpBuff);
+ cctxPtr->tmpBuff = (BYTE*)ALLOC_AND_ZERO(requiredBuffSize);
+ if (cctxPtr->tmpBuff == NULL) return err0r(LZ4F_ERROR_allocation_failed);
+ cctxPtr->maxBufferSize = requiredBuffSize;
+ } }
+ cctxPtr->tmpIn = cctxPtr->tmpBuff;
+ cctxPtr->tmpInSize = 0;
+ (void)XXH32_reset(&(cctxPtr->xxh), 0);
+
+ /* context init */
+ cctxPtr->cdict = cdict;
+ if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked) {
+ /* frame init only for blockLinked : blockIndependent will be init at each block */
+ LZ4F_initStream(cctxPtr->lz4CtxPtr, cdict, cctxPtr->prefs.compressionLevel, LZ4F_blockLinked);
+ }
+ if (preferencesPtr->compressionLevel >= LZ4HC_CLEVEL_MIN) {
+ LZ4_favorDecompressionSpeed((LZ4_streamHC_t*)cctxPtr->lz4CtxPtr, (int)preferencesPtr->favorDecSpeed);
+ }
+
+ /* Magic Number */
+ LZ4F_writeLE32(dstPtr, LZ4F_MAGICNUMBER);
+ dstPtr += 4;
+ headerStart = dstPtr;
+
+ /* FLG Byte */
+ *dstPtr++ = (BYTE)(((1 & _2BITS) << 6) /* Version('01') */
+ + ((cctxPtr->prefs.frameInfo.blockMode & _1BIT ) << 5)
+ + ((cctxPtr->prefs.frameInfo.blockChecksumFlag & _1BIT ) << 4)
+ + ((unsigned)(cctxPtr->prefs.frameInfo.contentSize > 0) << 3)
+ + ((cctxPtr->prefs.frameInfo.contentChecksumFlag & _1BIT ) << 2)
+ + (cctxPtr->prefs.frameInfo.dictID > 0) );
+ /* BD Byte */
+ *dstPtr++ = (BYTE)((cctxPtr->prefs.frameInfo.blockSizeID & _3BITS) << 4);
+ /* Optional Frame content size field */
+ if (cctxPtr->prefs.frameInfo.contentSize) {
+ LZ4F_writeLE64(dstPtr, cctxPtr->prefs.frameInfo.contentSize);
+ dstPtr += 8;
+ cctxPtr->totalInSize = 0;
+ }
+ /* Optional dictionary ID field */
+ if (cctxPtr->prefs.frameInfo.dictID) {
+ LZ4F_writeLE32(dstPtr, cctxPtr->prefs.frameInfo.dictID);
+ dstPtr += 4;
+ }
+ /* Header CRC Byte */
+ *dstPtr = LZ4F_headerChecksum(headerStart, (size_t)(dstPtr - headerStart));
+ dstPtr++;
+
+ cctxPtr->cStage = 1; /* header written, now request input data block */
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*! LZ4F_compressBegin() :
+ * init streaming compression and writes frame header into dstBuffer.
+ * dstBuffer must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * preferencesPtr can be NULL, in which case default parameters are selected.
+ * @return : number of bytes written into dstBuffer for the header
+ * or an error code (can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressBegin(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_preferences_t* preferencesPtr)
+{
+ return LZ4F_compressBegin_usingCDict(cctxPtr, dstBuffer, dstCapacity,
+ NULL, preferencesPtr);
+}
+
+
+/* LZ4F_compressBound() :
+ * @return minimum capacity of dstBuffer for a given srcSize to handle worst case scenario.
+ * LZ4F_preferences_t structure is optional : if NULL, preferences will be set to cover worst case scenario.
+ * This function cannot fail.
+ */
+size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr)
+{
+ if (preferencesPtr && preferencesPtr->autoFlush) {
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, 0);
+ }
+ return LZ4F_compressBound_internal(srcSize, preferencesPtr, (size_t)-1);
+}
+
+
+typedef int (*compressFunc_t)(void* ctx, const char* src, char* dst, int srcSize, int dstSize, int level, const LZ4F_CDict* cdict);
+
+
+/*! LZ4F_makeBlock():
+ * compress a single block, add header and optional checksum.
+ * assumption : dst buffer capacity is >= BHSize + srcSize + crcSize
+ */
+static size_t LZ4F_makeBlock(void* dst,
+ const void* src, size_t srcSize,
+ compressFunc_t compress, void* lz4ctx, int level,
+ const LZ4F_CDict* cdict,
+ LZ4F_blockChecksum_t crcFlag)
+{
+ BYTE* const cSizePtr = (BYTE*)dst;
+ U32 cSize = (U32)compress(lz4ctx, (const char*)src, (char*)(cSizePtr+BHSize),
+ (int)(srcSize), (int)(srcSize-1),
+ level, cdict);
+ if (cSize == 0) { /* compression failed */
+ DEBUGLOG(5, "LZ4F_makeBlock: compression failed, creating a raw block (size %u)", (U32)srcSize);
+ cSize = (U32)srcSize;
+ LZ4F_writeLE32(cSizePtr, cSize | LZ4F_BLOCKUNCOMPRESSED_FLAG);
+ memcpy(cSizePtr+BHSize, src, srcSize);
+ } else {
+ LZ4F_writeLE32(cSizePtr, cSize);
+ }
+ if (crcFlag) {
+ U32 const crc32 = XXH32(cSizePtr+BHSize, cSize, 0); /* checksum of compressed data */
+ LZ4F_writeLE32(cSizePtr+BHSize+cSize, crc32);
+ }
+ return BHSize + cSize + ((U32)crcFlag)*BFSize;
+}
+
+
+static int LZ4F_compressBlock(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ int const acceleration = (level < 0) ? -level + 1 : 1;
+ LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
+ if (cdict) {
+ return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
+ } else {
+ return LZ4_compress_fast_extState_fastReset(ctx, src, dst, srcSize, dstCapacity, acceleration);
+ }
+}
+
+static int LZ4F_compressBlock_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ int const acceleration = (level < 0) ? -level + 1 : 1;
+ (void)cdict; /* init once at beginning of frame */
+ return LZ4_compress_fast_continue((LZ4_stream_t*)ctx, src, dst, srcSize, dstCapacity, acceleration);
+}
+
+static int LZ4F_compressBlockHC(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ LZ4F_initStream(ctx, cdict, level, LZ4F_blockIndependent);
+ if (cdict) {
+ return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
+ }
+ return LZ4_compress_HC_extStateHC_fastReset(ctx, src, dst, srcSize, dstCapacity, level);
+}
+
+static int LZ4F_compressBlockHC_continue(void* ctx, const char* src, char* dst, int srcSize, int dstCapacity, int level, const LZ4F_CDict* cdict)
+{
+ (void)level; (void)cdict; /* init once at beginning of frame */
+ return LZ4_compress_HC_continue((LZ4_streamHC_t*)ctx, src, dst, srcSize, dstCapacity);
+}
+
+static compressFunc_t LZ4F_selectCompression(LZ4F_blockMode_t blockMode, int level)
+{
+ if (level < LZ4HC_CLEVEL_MIN) {
+ if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlock;
+ return LZ4F_compressBlock_continue;
+ }
+ if (blockMode == LZ4F_blockIndependent) return LZ4F_compressBlockHC;
+ return LZ4F_compressBlockHC_continue;
+}
+
+static int LZ4F_localSaveDict(LZ4F_cctx_t* cctxPtr)
+{
+ if (cctxPtr->prefs.compressionLevel < LZ4HC_CLEVEL_MIN)
+ return LZ4_saveDict ((LZ4_stream_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
+ return LZ4_saveDictHC ((LZ4_streamHC_t*)(cctxPtr->lz4CtxPtr), (char*)(cctxPtr->tmpBuff), 64 KB);
+}
+
+typedef enum { notDone, fromTmpBuffer, fromSrcBuffer } LZ4F_lastBlockStatus;
+
+/*! LZ4F_compressUpdate() :
+ * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ * dstBuffer MUST be >= LZ4F_compressBound(srcSize, preferencesPtr).
+ * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
+ * @return : the number of bytes written into dstBuffer. It can be zero, meaning input data was just buffered.
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ */
+size_t LZ4F_compressUpdate(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+ LZ4F_compressOptions_t cOptionsNull;
+ size_t const blockSize = cctxPtr->maxBlockSize;
+ const BYTE* srcPtr = (const BYTE*)srcBuffer;
+ const BYTE* const srcEnd = srcPtr + srcSize;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ LZ4F_lastBlockStatus lastBlockCompressed = notDone;
+ compressFunc_t const compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
+
+ DEBUGLOG(4, "LZ4F_compressUpdate (srcSize=%zu)", srcSize);
+
+ if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
+ if (dstCapacity < LZ4F_compressBound_internal(srcSize, &(cctxPtr->prefs), cctxPtr->tmpInSize))
+ return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ MEM_INIT(&cOptionsNull, 0, sizeof(cOptionsNull));
+ if (compressOptionsPtr == NULL) compressOptionsPtr = &cOptionsNull;
+
+ /* complete tmp buffer */
+ if (cctxPtr->tmpInSize > 0) { /* some data already within tmp buffer */
+ size_t const sizeToCopy = blockSize - cctxPtr->tmpInSize;
+ if (sizeToCopy > srcSize) {
+ /* add src to tmpIn buffer */
+ memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, srcSize);
+ srcPtr = srcEnd;
+ cctxPtr->tmpInSize += srcSize;
+ /* still needs some CRC */
+ } else {
+ /* complete tmpIn block and then compress it */
+ lastBlockCompressed = fromTmpBuffer;
+ memcpy(cctxPtr->tmpIn + cctxPtr->tmpInSize, srcBuffer, sizeToCopy);
+ srcPtr += sizeToCopy;
+
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ cctxPtr->tmpIn, blockSize,
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+
+ if (cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) cctxPtr->tmpIn += blockSize;
+ cctxPtr->tmpInSize = 0;
+ }
+ }
+
+ while ((size_t)(srcEnd - srcPtr) >= blockSize) {
+ /* compress full blocks */
+ lastBlockCompressed = fromSrcBuffer;
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ srcPtr, blockSize,
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+ srcPtr += blockSize;
+ }
+
+ if ((cctxPtr->prefs.autoFlush) && (srcPtr < srcEnd)) {
+ /* compress remaining input < blockSize */
+ lastBlockCompressed = fromSrcBuffer;
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ srcPtr, (size_t)(srcEnd - srcPtr),
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+ srcPtr = srcEnd;
+ }
+
+ /* preserve dictionary if necessary */
+ if ((cctxPtr->prefs.frameInfo.blockMode==LZ4F_blockLinked) && (lastBlockCompressed==fromSrcBuffer)) {
+ if (compressOptionsPtr->stableSrc) {
+ cctxPtr->tmpIn = cctxPtr->tmpBuff;
+ } else {
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
+ if (realDictSize==0) return err0r(LZ4F_ERROR_GENERIC);
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+ }
+
+ /* keep tmpIn within limits */
+ if ((cctxPtr->tmpIn + blockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize) /* necessarily LZ4F_blockLinked && lastBlockCompressed==fromTmpBuffer */
+ && !(cctxPtr->prefs.autoFlush))
+ {
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+
+ /* some input data left, necessarily < blockSize */
+ if (srcPtr < srcEnd) {
+ /* fill tmp buffer */
+ size_t const sizeToCopy = (size_t)(srcEnd - srcPtr);
+ memcpy(cctxPtr->tmpIn, srcPtr, sizeToCopy);
+ cctxPtr->tmpInSize = sizeToCopy;
+ }
+
+ if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled)
+ (void)XXH32_update(&(cctxPtr->xxh), srcBuffer, srcSize);
+
+ cctxPtr->totalInSize += srcSize;
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*! LZ4F_flush() :
+ * When compressed data must be sent immediately, without waiting for a block to be filled,
+ * invoke LZ4_flush(), which will immediately compress any remaining data stored within LZ4F_cctx.
+ * The result of the function is the number of bytes written into dstBuffer.
+ * It can be zero, this means there was no data left within LZ4F_cctx.
+ * The function outputs an error code if it fails (can be tested using LZ4F_isError())
+ * LZ4F_compressOptions_t* is optional. NULL is a valid argument.
+ */
+size_t LZ4F_flush(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+ compressFunc_t compress;
+
+ if (cctxPtr->tmpInSize == 0) return 0; /* nothing to flush */
+ if (cctxPtr->cStage != 1) return err0r(LZ4F_ERROR_GENERIC);
+ if (dstCapacity < (cctxPtr->tmpInSize + BHSize + BFSize))
+ return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ (void)compressOptionsPtr; /* not yet useful */
+
+ /* select compression function */
+ compress = LZ4F_selectCompression(cctxPtr->prefs.frameInfo.blockMode, cctxPtr->prefs.compressionLevel);
+
+ /* compress tmp buffer */
+ dstPtr += LZ4F_makeBlock(dstPtr,
+ cctxPtr->tmpIn, cctxPtr->tmpInSize,
+ compress, cctxPtr->lz4CtxPtr, cctxPtr->prefs.compressionLevel,
+ cctxPtr->cdict,
+ cctxPtr->prefs.frameInfo.blockChecksumFlag);
+ assert(((void)"flush overflows dstBuffer!", (size_t)(dstPtr - dstStart) <= dstCapacity));
+
+ if (cctxPtr->prefs.frameInfo.blockMode == LZ4F_blockLinked)
+ cctxPtr->tmpIn += cctxPtr->tmpInSize;
+ cctxPtr->tmpInSize = 0;
+
+ /* keep tmpIn within limits */
+ if ((cctxPtr->tmpIn + cctxPtr->maxBlockSize) > (cctxPtr->tmpBuff + cctxPtr->maxBufferSize)) { /* necessarily LZ4F_blockLinked */
+ int const realDictSize = LZ4F_localSaveDict(cctxPtr);
+ cctxPtr->tmpIn = cctxPtr->tmpBuff + realDictSize;
+ }
+
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*! LZ4F_compressEnd() :
+ * When you want to properly finish the compressed frame, just call LZ4F_compressEnd().
+ * It will flush whatever data remained within compressionContext (like LZ4_flush())
+ * but also properly finalize the frame, with an endMark and an (optional) checksum.
+ * LZ4F_compressOptions_t structure is optional : you can provide NULL as argument.
+ * @return: the number of bytes written into dstBuffer (necessarily >= 4 (endMark size))
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ * The context can then be used again to compress a new frame, starting with LZ4F_compressBegin().
+ */
+size_t LZ4F_compressEnd(LZ4F_cctx* cctxPtr,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* compressOptionsPtr)
+{
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* dstPtr = dstStart;
+
+ size_t const flushSize = LZ4F_flush(cctxPtr, dstBuffer, dstCapacity, compressOptionsPtr);
+ DEBUGLOG(5,"LZ4F_compressEnd: dstCapacity=%u", (unsigned)dstCapacity);
+ if (LZ4F_isError(flushSize)) return flushSize;
+ dstPtr += flushSize;
+
+ assert(flushSize <= dstCapacity);
+ dstCapacity -= flushSize;
+
+ if (dstCapacity < 4) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ LZ4F_writeLE32(dstPtr, 0);
+ dstPtr += 4; /* endMark */
+
+ if (cctxPtr->prefs.frameInfo.contentChecksumFlag == LZ4F_contentChecksumEnabled) {
+ U32 const xxh = XXH32_digest(&(cctxPtr->xxh));
+ if (dstCapacity < 8) return err0r(LZ4F_ERROR_dstMaxSize_tooSmall);
+ DEBUGLOG(5,"Writing 32-bit content checksum");
+ LZ4F_writeLE32(dstPtr, xxh);
+ dstPtr+=4; /* content Checksum */
+ }
+
+ cctxPtr->cStage = 0; /* state is now re-usable (with identical preferences) */
+ cctxPtr->maxBufferSize = 0; /* reuse HC context */
+
+ if (cctxPtr->prefs.frameInfo.contentSize) {
+ if (cctxPtr->prefs.frameInfo.contentSize != cctxPtr->totalInSize)
+ return err0r(LZ4F_ERROR_frameSize_wrong);
+ }
+
+ return (size_t)(dstPtr - dstStart);
+}
+
+
+/*-***************************************************
+* Frame Decompression
+*****************************************************/
+
+typedef enum {
+ dstage_getFrameHeader=0, dstage_storeFrameHeader,
+ dstage_init,
+ dstage_getBlockHeader, dstage_storeBlockHeader,
+ dstage_copyDirect, dstage_getBlockChecksum,
+ dstage_getCBlock, dstage_storeCBlock,
+ dstage_flushOut,
+ dstage_getSuffix, dstage_storeSuffix,
+ dstage_getSFrameSize, dstage_storeSFrameSize,
+ dstage_skipSkippable
+} dStage_t;
+
+struct LZ4F_dctx_s {
+ LZ4F_frameInfo_t frameInfo;
+ U32 version;
+ dStage_t dStage;
+ U64 frameRemainingSize;
+ size_t maxBlockSize;
+ size_t maxBufferSize;
+ BYTE* tmpIn;
+ size_t tmpInSize;
+ size_t tmpInTarget;
+ BYTE* tmpOutBuffer;
+ const BYTE* dict;
+ size_t dictSize;
+ BYTE* tmpOut;
+ size_t tmpOutSize;
+ size_t tmpOutStart;
+ XXH32_state_t xxh;
+ XXH32_state_t blockChecksum;
+ BYTE header[LZ4F_HEADER_SIZE_MAX];
+}; /* typedef'd to LZ4F_dctx in lz4frame.h */
+
+
+/*! LZ4F_createDecompressionContext() :
+ * Create a decompressionContext object, which will track all decompression operations.
+ * Provides a pointer to a fully allocated and initialized LZ4F_decompressionContext object.
+ * Object can later be released using LZ4F_freeDecompressionContext().
+ * @return : if != 0, there was an error during context creation.
+ */
+LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** LZ4F_decompressionContextPtr, unsigned versionNumber)
+{
+ LZ4F_dctx* const dctx = (LZ4F_dctx*)ALLOC_AND_ZERO(sizeof(LZ4F_dctx));
+ if (dctx == NULL) { /* failed allocation */
+ *LZ4F_decompressionContextPtr = NULL;
+ return err0r(LZ4F_ERROR_allocation_failed);
+ }
+
+ dctx->version = versionNumber;
+ *LZ4F_decompressionContextPtr = dctx;
+ return LZ4F_OK_NoError;
+}
+
+LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx)
+{
+ LZ4F_errorCode_t result = LZ4F_OK_NoError;
+ if (dctx != NULL) { /* can accept NULL input, like free() */
+ result = (LZ4F_errorCode_t)dctx->dStage;
+ FREEMEM(dctx->tmpIn);
+ FREEMEM(dctx->tmpOutBuffer);
+ FREEMEM(dctx);
+ }
+ return result;
+}
+
+
+/*==--- Streaming Decompression operations ---==*/
+
+void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx)
+{
+ dctx->dStage = dstage_getFrameHeader;
+ dctx->dict = NULL;
+ dctx->dictSize = 0;
+}
+
+
+/*! LZ4F_decodeHeader() :
+ * input : `src` points at the **beginning of the frame**
+ * output : set internal values of dctx, such as
+ * dctx->frameInfo and dctx->dStage.
+ * Also allocates internal buffers.
+ * @return : nb Bytes read from src (necessarily <= srcSize)
+ * or an error code (testable with LZ4F_isError())
+ */
+static size_t LZ4F_decodeHeader(LZ4F_dctx* dctx, const void* src, size_t srcSize)
+{
+ unsigned blockMode, blockChecksumFlag, contentSizeFlag, contentChecksumFlag, dictIDFlag, blockSizeID;
+ size_t frameHeaderSize;
+ const BYTE* srcPtr = (const BYTE*)src;
+
+ DEBUGLOG(5, "LZ4F_decodeHeader");
+ /* need to decode header to get frameInfo */
+ if (srcSize < minFHSize) return err0r(LZ4F_ERROR_frameHeader_incomplete); /* minimal frame header size */
+ MEM_INIT(&(dctx->frameInfo), 0, sizeof(dctx->frameInfo));
+
+ /* special case : skippable frames */
+ if ((LZ4F_readLE32(srcPtr) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START) {
+ dctx->frameInfo.frameType = LZ4F_skippableFrame;
+ if (src == (void*)(dctx->header)) {
+ dctx->tmpInSize = srcSize;
+ dctx->tmpInTarget = 8;
+ dctx->dStage = dstage_storeSFrameSize;
+ return srcSize;
+ } else {
+ dctx->dStage = dstage_getSFrameSize;
+ return 4;
+ }
+ }
+
+ /* control magic number */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (LZ4F_readLE32(srcPtr) != LZ4F_MAGICNUMBER) {
+ DEBUGLOG(4, "frame header error : unknown magic number");
+ return err0r(LZ4F_ERROR_frameType_unknown);
+ }
+#endif
+ dctx->frameInfo.frameType = LZ4F_frame;
+
+ /* Flags */
+ { U32 const FLG = srcPtr[4];
+ U32 const version = (FLG>>6) & _2BITS;
+ blockChecksumFlag = (FLG>>4) & _1BIT;
+ blockMode = (FLG>>5) & _1BIT;
+ contentSizeFlag = (FLG>>3) & _1BIT;
+ contentChecksumFlag = (FLG>>2) & _1BIT;
+ dictIDFlag = FLG & _1BIT;
+ /* validate */
+ if (((FLG>>1)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
+ if (version != 1) return err0r(LZ4F_ERROR_headerVersion_wrong); /* Version Number, only supported value */
+ }
+
+ /* Frame Header Size */
+ frameHeaderSize = minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
+
+ if (srcSize < frameHeaderSize) {
+ /* not enough input to fully decode frame header */
+ if (srcPtr != dctx->header)
+ memcpy(dctx->header, srcPtr, srcSize);
+ dctx->tmpInSize = srcSize;
+ dctx->tmpInTarget = frameHeaderSize;
+ dctx->dStage = dstage_storeFrameHeader;
+ return srcSize;
+ }
+
+ { U32 const BD = srcPtr[5];
+ blockSizeID = (BD>>4) & _3BITS;
+ /* validate */
+ if (((BD>>7)&_1BIT) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bit */
+ if (blockSizeID < 4) return err0r(LZ4F_ERROR_maxBlockSize_invalid); /* 4-7 only supported values for the time being */
+ if (((BD>>0)&_4BITS) != 0) return err0r(LZ4F_ERROR_reservedFlag_set); /* Reserved bits */
+ }
+
+ /* check header */
+ assert(frameHeaderSize > 5);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ { BYTE const HC = LZ4F_headerChecksum(srcPtr+4, frameHeaderSize-5);
+ if (HC != srcPtr[frameHeaderSize-1])
+ return err0r(LZ4F_ERROR_headerChecksum_invalid);
+ }
+#endif
+
+ /* save */
+ dctx->frameInfo.blockMode = (LZ4F_blockMode_t)blockMode;
+ dctx->frameInfo.blockChecksumFlag = (LZ4F_blockChecksum_t)blockChecksumFlag;
+ dctx->frameInfo.contentChecksumFlag = (LZ4F_contentChecksum_t)contentChecksumFlag;
+ dctx->frameInfo.blockSizeID = (LZ4F_blockSizeID_t)blockSizeID;
+ dctx->maxBlockSize = LZ4F_getBlockSize(blockSizeID);
+ if (contentSizeFlag)
+ dctx->frameRemainingSize =
+ dctx->frameInfo.contentSize = LZ4F_readLE64(srcPtr+6);
+ if (dictIDFlag)
+ dctx->frameInfo.dictID = LZ4F_readLE32(srcPtr + frameHeaderSize - 5);
+
+ dctx->dStage = dstage_init;
+
+ return frameHeaderSize;
+}
+
+
+/*! LZ4F_headerSize() :
+ * @return : size of frame header
+ * or an error code, which can be tested using LZ4F_isError()
+ */
+size_t LZ4F_headerSize(const void* src, size_t srcSize)
+{
+ if (src == NULL) return err0r(LZ4F_ERROR_srcPtr_wrong);
+
+ /* minimal srcSize to determine header size */
+ if (srcSize < LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH)
+ return err0r(LZ4F_ERROR_frameHeader_incomplete);
+
+ /* special case : skippable frames */
+ if ((LZ4F_readLE32(src) & 0xFFFFFFF0U) == LZ4F_MAGIC_SKIPPABLE_START)
+ return 8;
+
+ /* control magic number */
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (LZ4F_readLE32(src) != LZ4F_MAGICNUMBER)
+ return err0r(LZ4F_ERROR_frameType_unknown);
+#endif
+
+ /* Frame Header Size */
+ { BYTE const FLG = ((const BYTE*)src)[4];
+ U32 const contentSizeFlag = (FLG>>3) & _1BIT;
+ U32 const dictIDFlag = FLG & _1BIT;
+ return minFHSize + (contentSizeFlag?8:0) + (dictIDFlag?4:0);
+ }
+}
+
+/*! LZ4F_getFrameInfo() :
+ * This function extracts frame parameters (max blockSize, frame checksum, etc.).
+ * Usage is optional. Objective is to provide relevant information for allocation purposes.
+ * This function works in 2 situations :
+ * - At the beginning of a new frame, in which case it will decode this information from `srcBuffer`, and start the decoding process.
+ * Amount of input data provided must be large enough to successfully decode the frame header.
+ * A header size is variable, but is guaranteed to be <= LZ4F_HEADER_SIZE_MAX bytes. It's possible to provide more input data than this minimum.
+ * - After decoding has been started. In which case, no input is read, frame parameters are extracted from dctx.
+ * The number of bytes consumed from srcBuffer will be updated within *srcSizePtr (necessarily <= original value).
+ * Decompression must resume from (srcBuffer + *srcSizePtr).
+ * @return : an hint about how many srcSize bytes LZ4F_decompress() expects for next call,
+ * or an error code which can be tested using LZ4F_isError()
+ * note 1 : in case of error, dctx is not modified. Decoding operations can resume from where they stopped.
+ * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
+ */
+LZ4F_errorCode_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+ LZ4F_frameInfo_t* frameInfoPtr,
+ const void* srcBuffer, size_t* srcSizePtr)
+{
+ LZ4F_STATIC_ASSERT(dstage_getFrameHeader < dstage_storeFrameHeader);
+ if (dctx->dStage > dstage_storeFrameHeader) {
+ /* frameInfo already decoded */
+ size_t o=0, i=0;
+ *srcSizePtr = 0;
+ *frameInfoPtr = dctx->frameInfo;
+ /* returns : recommended nb of bytes for LZ4F_decompress() */
+ return LZ4F_decompress(dctx, NULL, &o, NULL, &i, NULL);
+ } else {
+ if (dctx->dStage == dstage_storeFrameHeader) {
+ /* frame decoding already started, in the middle of header => automatic fail */
+ *srcSizePtr = 0;
+ return err0r(LZ4F_ERROR_frameDecoding_alreadyStarted);
+ } else {
+ size_t const hSize = LZ4F_headerSize(srcBuffer, *srcSizePtr);
+ if (LZ4F_isError(hSize)) { *srcSizePtr=0; return hSize; }
+ if (*srcSizePtr < hSize) {
+ *srcSizePtr=0;
+ return err0r(LZ4F_ERROR_frameHeader_incomplete);
+ }
+
+ { size_t decodeResult = LZ4F_decodeHeader(dctx, srcBuffer, hSize);
+ if (LZ4F_isError(decodeResult)) {
+ *srcSizePtr = 0;
+ } else {
+ *srcSizePtr = decodeResult;
+ decodeResult = BHSize; /* block header size */
+ }
+ *frameInfoPtr = dctx->frameInfo;
+ return decodeResult;
+ } } }
+}
+
+
+/* LZ4F_updateDict() :
+ * only used for LZ4F_blockLinked mode
+ * Condition : dstPtr != NULL
+ */
+static void LZ4F_updateDict(LZ4F_dctx* dctx,
+ const BYTE* dstPtr, size_t dstSize, const BYTE* dstBufferStart,
+ unsigned withinTmp)
+{
+ assert(dstPtr != NULL);
+ if (dctx->dictSize==0) {
+ dctx->dict = (const BYTE*)dstPtr; /* priority to prefix mode */
+ }
+ assert(dctx->dict != NULL);
+
+ if (dctx->dict + dctx->dictSize == dstPtr) { /* prefix mode, everything within dstBuffer */
+ dctx->dictSize += dstSize;
+ return;
+ }
+
+ assert(dstPtr >= dstBufferStart);
+ if ((size_t)(dstPtr - dstBufferStart) + dstSize >= 64 KB) { /* history in dstBuffer becomes large enough to become dictionary */
+ dctx->dict = (const BYTE*)dstBufferStart;
+ dctx->dictSize = (size_t)(dstPtr - dstBufferStart) + dstSize;
+ return;
+ }
+
+ assert(dstSize < 64 KB); /* if dstSize >= 64 KB, dictionary would be set into dstBuffer directly */
+
+ /* dstBuffer does not contain whole useful history (64 KB), so it must be saved within tmpOutBuffer */
+ assert(dctx->tmpOutBuffer != NULL);
+
+ if (withinTmp && (dctx->dict == dctx->tmpOutBuffer)) { /* continue history within tmpOutBuffer */
+ /* withinTmp expectation : content of [dstPtr,dstSize] is same as [dict+dictSize,dstSize], so we just extend it */
+ assert(dctx->dict + dctx->dictSize == dctx->tmpOut + dctx->tmpOutStart);
+ dctx->dictSize += dstSize;
+ return;
+ }
+
+ if (withinTmp) { /* copy relevant dict portion in front of tmpOut within tmpOutBuffer */
+ size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
+ size_t copySize = 64 KB - dctx->tmpOutSize;
+ const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
+ if (dctx->tmpOutSize > 64 KB) copySize = 0;
+ if (copySize > preserveSize) copySize = preserveSize;
+
+ memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = preserveSize + dctx->tmpOutStart + dstSize;
+ return;
+ }
+
+ if (dctx->dict == dctx->tmpOutBuffer) { /* copy dst into tmp to complete dict */
+ if (dctx->dictSize + dstSize > dctx->maxBufferSize) { /* tmp buffer not large enough */
+ size_t const preserveSize = 64 KB - dstSize;
+ memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
+ dctx->dictSize = preserveSize;
+ }
+ memcpy(dctx->tmpOutBuffer + dctx->dictSize, dstPtr, dstSize);
+ dctx->dictSize += dstSize;
+ return;
+ }
+
+ /* join dict & dest into tmp */
+ { size_t preserveSize = 64 KB - dstSize;
+ if (preserveSize > dctx->dictSize) preserveSize = dctx->dictSize;
+ memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - preserveSize, preserveSize);
+ memcpy(dctx->tmpOutBuffer + preserveSize, dstPtr, dstSize);
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = preserveSize + dstSize;
+ }
+}
+
+
+
+/*! LZ4F_decompress() :
+ * Call this function repetitively to regenerate compressed data in srcBuffer.
+ * The function will attempt to decode up to *srcSizePtr bytes from srcBuffer
+ * into dstBuffer of capacity *dstSizePtr.
+ *
+ * The number of bytes regenerated into dstBuffer will be provided within *dstSizePtr (necessarily <= original value).
+ *
+ * The number of bytes effectively read from srcBuffer will be provided within *srcSizePtr (necessarily <= original value).
+ * If number of bytes read is < number of bytes provided, then decompression operation is not complete.
+ * Remaining data will have to be presented again in a subsequent invocation.
+ *
+ * The function result is an hint of the better srcSize to use for next call to LZ4F_decompress.
+ * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
+ * Respecting the hint provides a small boost to performance, since it allows less buffer shuffling.
+ * Note that this is just a hint, and it's always possible to any srcSize value.
+ * When a frame is fully decoded, @return will be 0.
+ * If decompression failed, @return is an error code which can be tested using LZ4F_isError().
+ */
+size_t LZ4F_decompress(LZ4F_dctx* dctx,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr)
+{
+ LZ4F_decompressOptions_t optionsNull;
+ const BYTE* const srcStart = (const BYTE*)srcBuffer;
+ const BYTE* const srcEnd = srcStart + *srcSizePtr;
+ const BYTE* srcPtr = srcStart;
+ BYTE* const dstStart = (BYTE*)dstBuffer;
+ BYTE* const dstEnd = dstStart ? dstStart + *dstSizePtr : NULL;
+ BYTE* dstPtr = dstStart;
+ const BYTE* selectedIn = NULL;
+ unsigned doAnotherStage = 1;
+ size_t nextSrcSizeHint = 1;
+
+
+ DEBUGLOG(5, "LZ4F_decompress : %p,%u => %p,%u",
+ srcBuffer, (unsigned)*srcSizePtr, dstBuffer, (unsigned)*dstSizePtr);
+ if (dstBuffer == NULL) assert(*dstSizePtr == 0);
+ MEM_INIT(&optionsNull, 0, sizeof(optionsNull));
+ if (decompressOptionsPtr==NULL) decompressOptionsPtr = &optionsNull;
+ *srcSizePtr = 0;
+ *dstSizePtr = 0;
+ assert(dctx != NULL);
+
+ /* behaves as a state machine */
+
+ while (doAnotherStage) {
+
+ switch(dctx->dStage)
+ {
+
+ case dstage_getFrameHeader:
+ DEBUGLOG(6, "dstage_getFrameHeader");
+ if ((size_t)(srcEnd-srcPtr) >= maxFHSize) { /* enough to decode - shortcut */
+ size_t const hSize = LZ4F_decodeHeader(dctx, srcPtr, (size_t)(srcEnd-srcPtr)); /* will update dStage appropriately */
+ if (LZ4F_isError(hSize)) return hSize;
+ srcPtr += hSize;
+ break;
+ }
+ dctx->tmpInSize = 0;
+ if (srcEnd-srcPtr == 0) return minFHSize; /* 0-size input */
+ dctx->tmpInTarget = minFHSize; /* minimum size to decode header */
+ dctx->dStage = dstage_storeFrameHeader;
+ /* fall-through */
+
+ case dstage_storeFrameHeader:
+ DEBUGLOG(6, "dstage_storeFrameHeader");
+ { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize, (size_t)(srcEnd - srcPtr));
+ memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
+ dctx->tmpInSize += sizeToCopy;
+ srcPtr += sizeToCopy;
+ }
+ if (dctx->tmpInSize < dctx->tmpInTarget) {
+ nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize) + BHSize; /* rest of header + nextBlockHeader */
+ doAnotherStage = 0; /* not enough src data, ask for some more */
+ break;
+ }
+ { size_t const hSize = LZ4F_decodeHeader(dctx, dctx->header, dctx->tmpInTarget); /* will update dStage appropriately */
+ if (LZ4F_isError(hSize)) return hSize;
+ }
+ break;
+
+ case dstage_init:
+ DEBUGLOG(6, "dstage_init");
+ if (dctx->frameInfo.contentChecksumFlag) (void)XXH32_reset(&(dctx->xxh), 0);
+ /* internal buffers allocation */
+ { size_t const bufferNeeded = dctx->maxBlockSize
+ + ((dctx->frameInfo.blockMode==LZ4F_blockLinked) ? 128 KB : 0);
+ if (bufferNeeded > dctx->maxBufferSize) { /* tmp buffers too small */
+ dctx->maxBufferSize = 0; /* ensure allocation will be re-attempted on next entry*/
+ FREEMEM(dctx->tmpIn);
+ dctx->tmpIn = (BYTE*)ALLOC(dctx->maxBlockSize + BFSize /* block checksum */);
+ if (dctx->tmpIn == NULL)
+ return err0r(LZ4F_ERROR_allocation_failed);
+ FREEMEM(dctx->tmpOutBuffer);
+ dctx->tmpOutBuffer= (BYTE*)ALLOC(bufferNeeded);
+ if (dctx->tmpOutBuffer== NULL)
+ return err0r(LZ4F_ERROR_allocation_failed);
+ dctx->maxBufferSize = bufferNeeded;
+ } }
+ dctx->tmpInSize = 0;
+ dctx->tmpInTarget = 0;
+ dctx->tmpOut = dctx->tmpOutBuffer;
+ dctx->tmpOutStart = 0;
+ dctx->tmpOutSize = 0;
+
+ dctx->dStage = dstage_getBlockHeader;
+ /* fall-through */
+
+ case dstage_getBlockHeader:
+ if ((size_t)(srcEnd - srcPtr) >= BHSize) {
+ selectedIn = srcPtr;
+ srcPtr += BHSize;
+ } else {
+ /* not enough input to read cBlockSize field */
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_storeBlockHeader;
+ }
+
+ if (dctx->dStage == dstage_storeBlockHeader) /* can be skipped */
+ case dstage_storeBlockHeader:
+ { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
+ size_t const wantedData = BHSize - dctx->tmpInSize;
+ size_t const sizeToCopy = MIN(wantedData, remainingInput);
+ memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
+ srcPtr += sizeToCopy;
+ dctx->tmpInSize += sizeToCopy;
+
+ if (dctx->tmpInSize < BHSize) { /* not enough input for cBlockSize */
+ nextSrcSizeHint = BHSize - dctx->tmpInSize;
+ doAnotherStage = 0;
+ break;
+ }
+ selectedIn = dctx->tmpIn;
+ } /* if (dctx->dStage == dstage_storeBlockHeader) */
+
+ /* decode block header */
+ { U32 const blockHeader = LZ4F_readLE32(selectedIn);
+ size_t const nextCBlockSize = blockHeader & 0x7FFFFFFFU;
+ size_t const crcSize = dctx->frameInfo.blockChecksumFlag * BFSize;
+ if (blockHeader==0) { /* frameEnd signal, no more block */
+ DEBUGLOG(5, "end of frame");
+ dctx->dStage = dstage_getSuffix;
+ break;
+ }
+ if (nextCBlockSize > dctx->maxBlockSize) {
+ return err0r(LZ4F_ERROR_maxBlockSize_invalid);
+ }
+ if (blockHeader & LZ4F_BLOCKUNCOMPRESSED_FLAG) {
+ /* next block is uncompressed */
+ dctx->tmpInTarget = nextCBlockSize;
+ DEBUGLOG(5, "next block is uncompressed (size %u)", (U32)nextCBlockSize);
+ if (dctx->frameInfo.blockChecksumFlag) {
+ (void)XXH32_reset(&dctx->blockChecksum, 0);
+ }
+ dctx->dStage = dstage_copyDirect;
+ break;
+ }
+ /* next block is a compressed block */
+ dctx->tmpInTarget = nextCBlockSize + crcSize;
+ dctx->dStage = dstage_getCBlock;
+ if (dstPtr==dstEnd || srcPtr==srcEnd) {
+ nextSrcSizeHint = BHSize + nextCBlockSize + crcSize;
+ doAnotherStage = 0;
+ }
+ break;
+ }
+
+ case dstage_copyDirect: /* uncompressed block */
+ DEBUGLOG(6, "dstage_copyDirect");
+ { size_t sizeToCopy;
+ if (dstPtr == NULL) {
+ sizeToCopy = 0;
+ } else {
+ size_t const minBuffSize = MIN((size_t)(srcEnd-srcPtr), (size_t)(dstEnd-dstPtr));
+ sizeToCopy = MIN(dctx->tmpInTarget, minBuffSize);
+ memcpy(dstPtr, srcPtr, sizeToCopy);
+ if (dctx->frameInfo.blockChecksumFlag) {
+ (void)XXH32_update(&dctx->blockChecksum, srcPtr, sizeToCopy);
+ }
+ if (dctx->frameInfo.contentChecksumFlag)
+ (void)XXH32_update(&dctx->xxh, srcPtr, sizeToCopy);
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= sizeToCopy;
+
+ /* history management (linked blocks only)*/
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
+ LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 0);
+ } }
+
+ srcPtr += sizeToCopy;
+ dstPtr += sizeToCopy;
+ if (sizeToCopy == dctx->tmpInTarget) { /* all done */
+ if (dctx->frameInfo.blockChecksumFlag) {
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_getBlockChecksum;
+ } else
+ dctx->dStage = dstage_getBlockHeader; /* new block */
+ break;
+ }
+ dctx->tmpInTarget -= sizeToCopy; /* need to copy more */
+ }
+ nextSrcSizeHint = dctx->tmpInTarget +
+ +(dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+ + BHSize /* next header size */;
+ doAnotherStage = 0;
+ break;
+
+ /* check block checksum for recently transferred uncompressed block */
+ case dstage_getBlockChecksum:
+ DEBUGLOG(6, "dstage_getBlockChecksum");
+ { const void* crcSrc;
+ if ((srcEnd-srcPtr >= 4) && (dctx->tmpInSize==0)) {
+ crcSrc = srcPtr;
+ srcPtr += 4;
+ } else {
+ size_t const stillToCopy = 4 - dctx->tmpInSize;
+ size_t const sizeToCopy = MIN(stillToCopy, (size_t)(srcEnd-srcPtr));
+ memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
+ dctx->tmpInSize += sizeToCopy;
+ srcPtr += sizeToCopy;
+ if (dctx->tmpInSize < 4) { /* all input consumed */
+ doAnotherStage = 0;
+ break;
+ }
+ crcSrc = dctx->header;
+ }
+ { U32 const readCRC = LZ4F_readLE32(crcSrc);
+ U32 const calcCRC = XXH32_digest(&dctx->blockChecksum);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ DEBUGLOG(6, "compare block checksum");
+ if (readCRC != calcCRC) {
+ DEBUGLOG(4, "incorrect block checksum: %08X != %08X",
+ readCRC, calcCRC);
+ return err0r(LZ4F_ERROR_blockChecksum_invalid);
+ }
+#else
+ (void)readCRC;
+ (void)calcCRC;
+#endif
+ } }
+ dctx->dStage = dstage_getBlockHeader; /* new block */
+ break;
+
+ case dstage_getCBlock:
+ DEBUGLOG(6, "dstage_getCBlock");
+ if ((size_t)(srcEnd-srcPtr) < dctx->tmpInTarget) {
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_storeCBlock;
+ break;
+ }
+ /* input large enough to read full block directly */
+ selectedIn = srcPtr;
+ srcPtr += dctx->tmpInTarget;
+
+ if (0) /* always jump over next block */
+ case dstage_storeCBlock:
+ { size_t const wantedData = dctx->tmpInTarget - dctx->tmpInSize;
+ size_t const inputLeft = (size_t)(srcEnd-srcPtr);
+ size_t const sizeToCopy = MIN(wantedData, inputLeft);
+ memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
+ dctx->tmpInSize += sizeToCopy;
+ srcPtr += sizeToCopy;
+ if (dctx->tmpInSize < dctx->tmpInTarget) { /* need more input */
+ nextSrcSizeHint = (dctx->tmpInTarget - dctx->tmpInSize)
+ + (dctx->frameInfo.blockChecksumFlag ? BFSize : 0)
+ + BHSize /* next header size */;
+ doAnotherStage = 0;
+ break;
+ }
+ selectedIn = dctx->tmpIn;
+ }
+
+ /* At this stage, input is large enough to decode a block */
+ if (dctx->frameInfo.blockChecksumFlag) {
+ dctx->tmpInTarget -= 4;
+ assert(selectedIn != NULL); /* selectedIn is defined at this stage (either srcPtr, or dctx->tmpIn) */
+ { U32 const readBlockCrc = LZ4F_readLE32(selectedIn + dctx->tmpInTarget);
+ U32 const calcBlockCrc = XXH32(selectedIn, dctx->tmpInTarget, 0);
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (readBlockCrc != calcBlockCrc)
+ return err0r(LZ4F_ERROR_blockChecksum_invalid);
+#else
+ (void)readBlockCrc;
+ (void)calcBlockCrc;
+#endif
+ } }
+
+ if ((size_t)(dstEnd-dstPtr) >= dctx->maxBlockSize) {
+ const char* dict = (const char*)dctx->dict;
+ size_t dictSize = dctx->dictSize;
+ int decodedSize;
+ assert(dstPtr != NULL);
+ if (dict && dictSize > 1 GB) {
+ /* the dictSize param is an int, avoid truncation / sign issues */
+ dict += dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ /* enough capacity in `dst` to decompress directly there */
+ decodedSize = LZ4_decompress_safe_usingDict(
+ (const char*)selectedIn, (char*)dstPtr,
+ (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
+ dict, (int)dictSize);
+ if (decodedSize < 0) return err0r(LZ4F_ERROR_GENERIC); /* decompression failed */
+ if (dctx->frameInfo.contentChecksumFlag)
+ XXH32_update(&(dctx->xxh), dstPtr, (size_t)decodedSize);
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= (size_t)decodedSize;
+
+ /* dictionary management */
+ if (dctx->frameInfo.blockMode==LZ4F_blockLinked) {
+ LZ4F_updateDict(dctx, dstPtr, (size_t)decodedSize, dstStart, 0);
+ }
+
+ dstPtr += decodedSize;
+ dctx->dStage = dstage_getBlockHeader;
+ break;
+ }
+
+ /* not enough place into dst : decode into tmpOut */
+ /* ensure enough place for tmpOut */
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked) {
+ if (dctx->dict == dctx->tmpOutBuffer) {
+ if (dctx->dictSize > 128 KB) {
+ memcpy(dctx->tmpOutBuffer, dctx->dict + dctx->dictSize - 64 KB, 64 KB);
+ dctx->dictSize = 64 KB;
+ }
+ dctx->tmpOut = dctx->tmpOutBuffer + dctx->dictSize;
+ } else { /* dict not within tmp */
+ size_t const reservedDictSpace = MIN(dctx->dictSize, 64 KB);
+ dctx->tmpOut = dctx->tmpOutBuffer + reservedDictSpace;
+ } }
+
+ /* Decode block */
+ { const char* dict = (const char*)dctx->dict;
+ size_t dictSize = dctx->dictSize;
+ int decodedSize;
+ if (dict && dictSize > 1 GB) {
+ /* the dictSize param is an int, avoid truncation / sign issues */
+ dict += dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ decodedSize = LZ4_decompress_safe_usingDict(
+ (const char*)selectedIn, (char*)dctx->tmpOut,
+ (int)dctx->tmpInTarget, (int)dctx->maxBlockSize,
+ dict, (int)dictSize);
+ if (decodedSize < 0) /* decompression failed */
+ return err0r(LZ4F_ERROR_decompressionFailed);
+ if (dctx->frameInfo.contentChecksumFlag)
+ XXH32_update(&(dctx->xxh), dctx->tmpOut, (size_t)decodedSize);
+ if (dctx->frameInfo.contentSize)
+ dctx->frameRemainingSize -= (size_t)decodedSize;
+ dctx->tmpOutSize = (size_t)decodedSize;
+ dctx->tmpOutStart = 0;
+ dctx->dStage = dstage_flushOut;
+ }
+ /* fall-through */
+
+ case dstage_flushOut: /* flush decoded data from tmpOut to dstBuffer */
+ DEBUGLOG(6, "dstage_flushOut");
+ if (dstPtr != NULL) {
+ size_t const sizeToCopy = MIN(dctx->tmpOutSize - dctx->tmpOutStart, (size_t)(dstEnd-dstPtr));
+ memcpy(dstPtr, dctx->tmpOut + dctx->tmpOutStart, sizeToCopy);
+
+ /* dictionary management */
+ if (dctx->frameInfo.blockMode == LZ4F_blockLinked)
+ LZ4F_updateDict(dctx, dstPtr, sizeToCopy, dstStart, 1 /*withinTmp*/);
+
+ dctx->tmpOutStart += sizeToCopy;
+ dstPtr += sizeToCopy;
+ }
+ if (dctx->tmpOutStart == dctx->tmpOutSize) { /* all flushed */
+ dctx->dStage = dstage_getBlockHeader; /* get next block */
+ break;
+ }
+ /* could not flush everything : stop there, just request a block header */
+ doAnotherStage = 0;
+ nextSrcSizeHint = BHSize;
+ break;
+
+ case dstage_getSuffix:
+ if (dctx->frameRemainingSize)
+ return err0r(LZ4F_ERROR_frameSize_wrong); /* incorrect frame size decoded */
+ if (!dctx->frameInfo.contentChecksumFlag) { /* no checksum, frame is completed */
+ nextSrcSizeHint = 0;
+ LZ4F_resetDecompressionContext(dctx);
+ doAnotherStage = 0;
+ break;
+ }
+ if ((srcEnd - srcPtr) < 4) { /* not enough size for entire CRC */
+ dctx->tmpInSize = 0;
+ dctx->dStage = dstage_storeSuffix;
+ } else {
+ selectedIn = srcPtr;
+ srcPtr += 4;
+ }
+
+ if (dctx->dStage == dstage_storeSuffix) /* can be skipped */
+ case dstage_storeSuffix:
+ { size_t const remainingInput = (size_t)(srcEnd - srcPtr);
+ size_t const wantedData = 4 - dctx->tmpInSize;
+ size_t const sizeToCopy = MIN(wantedData, remainingInput);
+ memcpy(dctx->tmpIn + dctx->tmpInSize, srcPtr, sizeToCopy);
+ srcPtr += sizeToCopy;
+ dctx->tmpInSize += sizeToCopy;
+ if (dctx->tmpInSize < 4) { /* not enough input to read complete suffix */
+ nextSrcSizeHint = 4 - dctx->tmpInSize;
+ doAnotherStage=0;
+ break;
+ }
+ selectedIn = dctx->tmpIn;
+ } /* if (dctx->dStage == dstage_storeSuffix) */
+
+ /* case dstage_checkSuffix: */ /* no direct entry, avoid initialization risks */
+ { U32 const readCRC = LZ4F_readLE32(selectedIn);
+ U32 const resultCRC = XXH32_digest(&(dctx->xxh));
+#ifndef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
+ if (readCRC != resultCRC)
+ return err0r(LZ4F_ERROR_contentChecksum_invalid);
+#else
+ (void)readCRC;
+ (void)resultCRC;
+#endif
+ nextSrcSizeHint = 0;
+ LZ4F_resetDecompressionContext(dctx);
+ doAnotherStage = 0;
+ break;
+ }
+
+ case dstage_getSFrameSize:
+ if ((srcEnd - srcPtr) >= 4) {
+ selectedIn = srcPtr;
+ srcPtr += 4;
+ } else {
+ /* not enough input to read cBlockSize field */
+ dctx->tmpInSize = 4;
+ dctx->tmpInTarget = 8;
+ dctx->dStage = dstage_storeSFrameSize;
+ }
+
+ if (dctx->dStage == dstage_storeSFrameSize)
+ case dstage_storeSFrameSize:
+ { size_t const sizeToCopy = MIN(dctx->tmpInTarget - dctx->tmpInSize,
+ (size_t)(srcEnd - srcPtr) );
+ memcpy(dctx->header + dctx->tmpInSize, srcPtr, sizeToCopy);
+ srcPtr += sizeToCopy;
+ dctx->tmpInSize += sizeToCopy;
+ if (dctx->tmpInSize < dctx->tmpInTarget) {
+ /* not enough input to get full sBlockSize; wait for more */
+ nextSrcSizeHint = dctx->tmpInTarget - dctx->tmpInSize;
+ doAnotherStage = 0;
+ break;
+ }
+ selectedIn = dctx->header + 4;
+ } /* if (dctx->dStage == dstage_storeSFrameSize) */
+
+ /* case dstage_decodeSFrameSize: */ /* no direct entry */
+ { size_t const SFrameSize = LZ4F_readLE32(selectedIn);
+ dctx->frameInfo.contentSize = SFrameSize;
+ dctx->tmpInTarget = SFrameSize;
+ dctx->dStage = dstage_skipSkippable;
+ break;
+ }
+
+ case dstage_skipSkippable:
+ { size_t const skipSize = MIN(dctx->tmpInTarget, (size_t)(srcEnd-srcPtr));
+ srcPtr += skipSize;
+ dctx->tmpInTarget -= skipSize;
+ doAnotherStage = 0;
+ nextSrcSizeHint = dctx->tmpInTarget;
+ if (nextSrcSizeHint) break; /* still more to skip */
+ /* frame fully skipped : prepare context for a new frame */
+ LZ4F_resetDecompressionContext(dctx);
+ break;
+ }
+ } /* switch (dctx->dStage) */
+ } /* while (doAnotherStage) */
+
+ /* preserve history within tmp whenever necessary */
+ LZ4F_STATIC_ASSERT((unsigned)dstage_init == 2);
+ if ( (dctx->frameInfo.blockMode==LZ4F_blockLinked) /* next block will use up to 64KB from previous ones */
+ && (dctx->dict != dctx->tmpOutBuffer) /* dictionary is not already within tmp */
+ && (dctx->dict != NULL) /* dictionary exists */
+ && (!decompressOptionsPtr->stableDst) /* cannot rely on dst data to remain there for next call */
+ && ((unsigned)(dctx->dStage)-2 < (unsigned)(dstage_getSuffix)-2) ) /* valid stages : [init ... getSuffix[ */
+ {
+ if (dctx->dStage == dstage_flushOut) {
+ size_t const preserveSize = (size_t)(dctx->tmpOut - dctx->tmpOutBuffer);
+ size_t copySize = 64 KB - dctx->tmpOutSize;
+ const BYTE* oldDictEnd = dctx->dict + dctx->dictSize - dctx->tmpOutStart;
+ if (dctx->tmpOutSize > 64 KB) copySize = 0;
+ if (copySize > preserveSize) copySize = preserveSize;
+ assert(dctx->tmpOutBuffer != NULL);
+
+ memcpy(dctx->tmpOutBuffer + preserveSize - copySize, oldDictEnd - copySize, copySize);
+
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = preserveSize + dctx->tmpOutStart;
+ } else {
+ const BYTE* const oldDictEnd = dctx->dict + dctx->dictSize;
+ size_t const newDictSize = MIN(dctx->dictSize, 64 KB);
+
+ memcpy(dctx->tmpOutBuffer, oldDictEnd - newDictSize, newDictSize);
+
+ dctx->dict = dctx->tmpOutBuffer;
+ dctx->dictSize = newDictSize;
+ dctx->tmpOut = dctx->tmpOutBuffer + newDictSize;
+ }
+ }
+
+ *srcSizePtr = (size_t)(srcPtr - srcStart);
+ *dstSizePtr = (size_t)(dstPtr - dstStart);
+ return nextSrcSizeHint;
+}
+
+/*! LZ4F_decompress_usingDict() :
+ * Same as LZ4F_decompress(), using a predefined dictionary.
+ * Dictionary is used "in place", without any preprocessing.
+ * It must remain accessible throughout the entire frame decoding.
+ */
+size_t LZ4F_decompress_usingDict(LZ4F_dctx* dctx,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const void* dict, size_t dictSize,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr)
+{
+ if (dctx->dStage <= dstage_init) {
+ dctx->dict = (const BYTE*)dict;
+ dctx->dictSize = dictSize;
+ }
+ return LZ4F_decompress(dctx, dstBuffer, dstSizePtr,
+ srcBuffer, srcSizePtr,
+ decompressOptionsPtr);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h
new file mode 100644
index 000000000..4573317ef
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame.h
@@ -0,0 +1,623 @@
+/*
+ LZ4 auto-framing library
+ Header File
+ Copyright (C) 2011-2017, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+/* LZ4F is a stand-alone API able to create and decode LZ4 frames
+ * conformant with specification v1.6.1 in doc/lz4_Frame_format.md .
+ * Generated frames are compatible with `lz4` CLI.
+ *
+ * LZ4F also offers streaming capabilities.
+ *
+ * lz4.h is not required when using lz4frame.h,
+ * except to extract common constant such as LZ4_VERSION_NUMBER.
+ * */
+
+#ifndef LZ4F_H_09782039843
+#define LZ4F_H_09782039843
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* --- Dependency --- */
+#include <stddef.h> /* size_t */
+
+
+/**
+ Introduction
+
+ lz4frame.h implements LZ4 frame specification (doc/lz4_Frame_format.md).
+ lz4frame.h provides frame compression functions that take care
+ of encoding standard metadata alongside LZ4-compressed blocks.
+*/
+
+/*-***************************************************************
+ * Compiler specifics
+ *****************************************************************/
+/* LZ4_DLL_EXPORT :
+ * Enable exporting of functions when building a Windows DLL
+ * LZ4FLIB_VISIBILITY :
+ * Control library symbols visibility.
+ */
+#ifndef LZ4FLIB_VISIBILITY
+# if defined(__GNUC__) && (__GNUC__ >= 4)
+# define LZ4FLIB_VISIBILITY __attribute__ ((visibility ("default")))
+# else
+# define LZ4FLIB_VISIBILITY
+# endif
+#endif
+#if defined(LZ4_DLL_EXPORT) && (LZ4_DLL_EXPORT==1)
+# define LZ4FLIB_API __declspec(dllexport) LZ4FLIB_VISIBILITY
+#elif defined(LZ4_DLL_IMPORT) && (LZ4_DLL_IMPORT==1)
+# define LZ4FLIB_API __declspec(dllimport) LZ4FLIB_VISIBILITY
+#else
+# define LZ4FLIB_API LZ4FLIB_VISIBILITY
+#endif
+
+#ifdef LZ4F_DISABLE_DEPRECATE_WARNINGS
+# define LZ4F_DEPRECATE(x) x
+#else
+# if defined(_MSC_VER)
+# define LZ4F_DEPRECATE(x) x /* __declspec(deprecated) x - only works with C++ */
+# elif defined(__clang__) || (defined(__GNUC__) && (__GNUC__ >= 6))
+# define LZ4F_DEPRECATE(x) x __attribute__((deprecated))
+# else
+# define LZ4F_DEPRECATE(x) x /* no deprecation warning for this compiler */
+# endif
+#endif
+
+
+/*-************************************
+ * Error management
+ **************************************/
+typedef size_t LZ4F_errorCode_t;
+
+LZ4FLIB_API unsigned LZ4F_isError(LZ4F_errorCode_t code); /**< tells when a function result is an error code */
+LZ4FLIB_API const char* LZ4F_getErrorName(LZ4F_errorCode_t code); /**< return error code string; for debugging */
+
+
+/*-************************************
+ * Frame compression types
+ ************************************* */
+/* #define LZ4F_ENABLE_OBSOLETE_ENUMS // uncomment to enable obsolete enums */
+#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
+# define LZ4F_OBSOLETE_ENUM(x) , LZ4F_DEPRECATE(x) = LZ4F_##x
+#else
+# define LZ4F_OBSOLETE_ENUM(x)
+#endif
+
+/* The larger the block size, the (slightly) better the compression ratio,
+ * though there are diminishing returns.
+ * Larger blocks also increase memory usage on both compression and decompression sides.
+ */
+typedef enum {
+ LZ4F_default=0,
+ LZ4F_max64KB=4,
+ LZ4F_max256KB=5,
+ LZ4F_max1MB=6,
+ LZ4F_max4MB=7
+ LZ4F_OBSOLETE_ENUM(max64KB)
+ LZ4F_OBSOLETE_ENUM(max256KB)
+ LZ4F_OBSOLETE_ENUM(max1MB)
+ LZ4F_OBSOLETE_ENUM(max4MB)
+} LZ4F_blockSizeID_t;
+
+/* Linked blocks sharply reduce inefficiencies when using small blocks,
+ * they compress better.
+ * However, some LZ4 decoders are only compatible with independent blocks */
+typedef enum {
+ LZ4F_blockLinked=0,
+ LZ4F_blockIndependent
+ LZ4F_OBSOLETE_ENUM(blockLinked)
+ LZ4F_OBSOLETE_ENUM(blockIndependent)
+} LZ4F_blockMode_t;
+
+typedef enum {
+ LZ4F_noContentChecksum=0,
+ LZ4F_contentChecksumEnabled
+ LZ4F_OBSOLETE_ENUM(noContentChecksum)
+ LZ4F_OBSOLETE_ENUM(contentChecksumEnabled)
+} LZ4F_contentChecksum_t;
+
+typedef enum {
+ LZ4F_noBlockChecksum=0,
+ LZ4F_blockChecksumEnabled
+} LZ4F_blockChecksum_t;
+
+typedef enum {
+ LZ4F_frame=0,
+ LZ4F_skippableFrame
+ LZ4F_OBSOLETE_ENUM(skippableFrame)
+} LZ4F_frameType_t;
+
+#ifdef LZ4F_ENABLE_OBSOLETE_ENUMS
+typedef LZ4F_blockSizeID_t blockSizeID_t;
+typedef LZ4F_blockMode_t blockMode_t;
+typedef LZ4F_frameType_t frameType_t;
+typedef LZ4F_contentChecksum_t contentChecksum_t;
+#endif
+
+/*! LZ4F_frameInfo_t :
+ * makes it possible to set or read frame parameters.
+ * Structure must be first init to 0, using memset() or LZ4F_INIT_FRAMEINFO,
+ * setting all parameters to default.
+ * It's then possible to update selectively some parameters */
+typedef struct {
+ LZ4F_blockSizeID_t blockSizeID; /* max64KB, max256KB, max1MB, max4MB; 0 == default */
+ LZ4F_blockMode_t blockMode; /* LZ4F_blockLinked, LZ4F_blockIndependent; 0 == default */
+ LZ4F_contentChecksum_t contentChecksumFlag; /* 1: frame terminated with 32-bit checksum of decompressed data; 0: disabled (default) */
+ LZ4F_frameType_t frameType; /* read-only field : LZ4F_frame or LZ4F_skippableFrame */
+ unsigned long long contentSize; /* Size of uncompressed content ; 0 == unknown */
+ unsigned dictID; /* Dictionary ID, sent by compressor to help decoder select correct dictionary; 0 == no dictID provided */
+ LZ4F_blockChecksum_t blockChecksumFlag; /* 1: each block followed by a checksum of block's compressed data; 0: disabled (default) */
+} LZ4F_frameInfo_t;
+
+#define LZ4F_INIT_FRAMEINFO { LZ4F_default, LZ4F_blockLinked, LZ4F_noContentChecksum, LZ4F_frame, 0ULL, 0U, LZ4F_noBlockChecksum } /* v1.8.3+ */
+
+/*! LZ4F_preferences_t :
+ * makes it possible to supply advanced compression instructions to streaming interface.
+ * Structure must be first init to 0, using memset() or LZ4F_INIT_PREFERENCES,
+ * setting all parameters to default.
+ * All reserved fields must be set to zero. */
+typedef struct {
+ LZ4F_frameInfo_t frameInfo;
+ int compressionLevel; /* 0: default (fast mode); values > LZ4HC_CLEVEL_MAX count as LZ4HC_CLEVEL_MAX; values < 0 trigger "fast acceleration" */
+ unsigned autoFlush; /* 1: always flush; reduces usage of internal buffers */
+ unsigned favorDecSpeed; /* 1: parser favors decompression speed vs compression ratio. Only works for high compression modes (>= LZ4HC_CLEVEL_OPT_MIN) */ /* v1.8.2+ */
+ unsigned reserved[3]; /* must be zero for forward compatibility */
+} LZ4F_preferences_t;
+
+#define LZ4F_INIT_PREFERENCES { LZ4F_INIT_FRAMEINFO, 0, 0u, 0u, { 0u, 0u, 0u } } /* v1.8.3+ */
+
+
+/*-*********************************
+* Simple compression function
+***********************************/
+
+LZ4FLIB_API int LZ4F_compressionLevel_max(void); /* v1.8.0+ */
+
+/*! LZ4F_compressFrameBound() :
+ * Returns the maximum possible compressed size with LZ4F_compressFrame() given srcSize and preferences.
+ * `preferencesPtr` is optional. It can be replaced by NULL, in which case, the function will assume default preferences.
+ * Note : this result is only usable with LZ4F_compressFrame().
+ * It may also be used with LZ4F_compressUpdate() _if no flush() operation_ is performed.
+ */
+LZ4FLIB_API size_t LZ4F_compressFrameBound(size_t srcSize, const LZ4F_preferences_t* preferencesPtr);
+
+/*! LZ4F_compressFrame() :
+ * Compress an entire srcBuffer into a valid LZ4 frame.
+ * dstCapacity MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * The LZ4F_preferences_t structure is optional : you can provide NULL as argument. All preferences will be set to default.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError())
+ */
+LZ4FLIB_API size_t LZ4F_compressFrame(void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_preferences_t* preferencesPtr);
+
+
+/*-***********************************
+* Advanced compression functions
+*************************************/
+typedef struct LZ4F_cctx_s LZ4F_cctx; /* incomplete type */
+typedef LZ4F_cctx* LZ4F_compressionContext_t; /* for compatibility with previous API version */
+
+typedef struct {
+ unsigned stableSrc; /* 1 == src content will remain present on future calls to LZ4F_compress(); skip copying src content within tmp buffer */
+ unsigned reserved[3];
+} LZ4F_compressOptions_t;
+
+/*--- Resource Management ---*/
+
+#define LZ4F_VERSION 100 /* This number can be used to check for an incompatible API breaking change */
+LZ4FLIB_API unsigned LZ4F_getVersion(void);
+
+/*! LZ4F_createCompressionContext() :
+ * The first thing to do is to create a compressionContext object, which will be used in all compression operations.
+ * This is achieved using LZ4F_createCompressionContext(), which takes as argument a version.
+ * The version provided MUST be LZ4F_VERSION. It is intended to track potential version mismatch, notably when using DLL.
+ * The function will provide a pointer to a fully allocated LZ4F_cctx object.
+ * If @return != zero, there was an error during context creation.
+ * Object can release its memory using LZ4F_freeCompressionContext();
+ */
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_createCompressionContext(LZ4F_cctx** cctxPtr, unsigned version);
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeCompressionContext(LZ4F_cctx* cctx);
+
+
+/*---- Compression ----*/
+
+#define LZ4F_HEADER_SIZE_MIN 7 /* LZ4 Frame header size can vary, depending on selected paramaters */
+#define LZ4F_HEADER_SIZE_MAX 19
+
+/* Size in bytes of a block header in little-endian format. Highest bit indicates if block data is uncompressed */
+#define LZ4F_BLOCK_HEADER_SIZE 4
+
+/* Size in bytes of a block checksum footer in little-endian format. */
+#define LZ4F_BLOCK_CHECKSUM_SIZE 4
+
+/* Size in bytes of the content checksum. */
+#define LZ4F_CONTENT_CHECKSUM_SIZE 4
+
+/*! LZ4F_compressBegin() :
+ * will write the frame header into dstBuffer.
+ * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * `prefsPtr` is optional : you can provide NULL as argument, all preferences will then be set to default.
+ * @return : number of bytes written into dstBuffer for the header
+ * or an error code (which can be tested using LZ4F_isError())
+ */
+LZ4FLIB_API size_t LZ4F_compressBegin(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_preferences_t* prefsPtr);
+
+/*! LZ4F_compressBound() :
+ * Provides minimum dstCapacity required to guarantee success of
+ * LZ4F_compressUpdate(), given a srcSize and preferences, for a worst case scenario.
+ * When srcSize==0, LZ4F_compressBound() provides an upper bound for LZ4F_flush() and LZ4F_compressEnd() instead.
+ * Note that the result is only valid for a single invocation of LZ4F_compressUpdate().
+ * When invoking LZ4F_compressUpdate() multiple times,
+ * if the output buffer is gradually filled up instead of emptied and re-used from its start,
+ * one must check if there is enough remaining capacity before each invocation, using LZ4F_compressBound().
+ * @return is always the same for a srcSize and prefsPtr.
+ * prefsPtr is optional : when NULL is provided, preferences will be set to cover worst case scenario.
+ * tech details :
+ * @return if automatic flushing is not enabled, includes the possibility that internal buffer might already be filled by up to (blockSize-1) bytes.
+ * It also includes frame footer (ending + checksum), since it might be generated by LZ4F_compressEnd().
+ * @return doesn't include frame header, as it was already generated by LZ4F_compressBegin().
+ */
+LZ4FLIB_API size_t LZ4F_compressBound(size_t srcSize, const LZ4F_preferences_t* prefsPtr);
+
+/*! LZ4F_compressUpdate() :
+ * LZ4F_compressUpdate() can be called repetitively to compress as much data as necessary.
+ * Important rule: dstCapacity MUST be large enough to ensure operation success even in worst case situations.
+ * This value is provided by LZ4F_compressBound().
+ * If this condition is not respected, LZ4F_compress() will fail (result is an errorCode).
+ * LZ4F_compressUpdate() doesn't guarantee error recovery.
+ * When an error occurs, compression context must be freed or resized.
+ * `cOptPtr` is optional : NULL can be provided, in which case all options are set to default.
+ * @return : number of bytes written into `dstBuffer` (it can be zero, meaning input data was just buffered).
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ */
+LZ4FLIB_API size_t LZ4F_compressUpdate(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const void* srcBuffer, size_t srcSize,
+ const LZ4F_compressOptions_t* cOptPtr);
+
+/*! LZ4F_flush() :
+ * When data must be generated and sent immediately, without waiting for a block to be completely filled,
+ * it's possible to call LZ4_flush(). It will immediately compress any data buffered within cctx.
+ * `dstCapacity` must be large enough to ensure the operation will be successful.
+ * `cOptPtr` is optional : it's possible to provide NULL, all options will be set to default.
+ * @return : nb of bytes written into dstBuffer (can be zero, when there is no data stored within cctx)
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * Note : LZ4F_flush() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
+ */
+LZ4FLIB_API size_t LZ4F_flush(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* cOptPtr);
+
+/*! LZ4F_compressEnd() :
+ * To properly finish an LZ4 frame, invoke LZ4F_compressEnd().
+ * It will flush whatever data remained within `cctx` (like LZ4_flush())
+ * and properly finalize the frame, with an endMark and a checksum.
+ * `cOptPtr` is optional : NULL can be provided, in which case all options will be set to default.
+ * @return : nb of bytes written into dstBuffer, necessarily >= 4 (endMark),
+ * or an error code if it fails (which can be tested using LZ4F_isError())
+ * Note : LZ4F_compressEnd() is guaranteed to be successful when dstCapacity >= LZ4F_compressBound(0, prefsPtr).
+ * A successful call to LZ4F_compressEnd() makes `cctx` available again for another compression task.
+ */
+LZ4FLIB_API size_t LZ4F_compressEnd(LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_compressOptions_t* cOptPtr);
+
+
+/*-*********************************
+* Decompression functions
+***********************************/
+typedef struct LZ4F_dctx_s LZ4F_dctx; /* incomplete type */
+typedef LZ4F_dctx* LZ4F_decompressionContext_t; /* compatibility with previous API versions */
+
+typedef struct {
+ unsigned stableDst; /* pledges that last 64KB decompressed data will remain available unmodified. This optimization skips storage operations in tmp buffers. */
+ unsigned reserved[3]; /* must be set to zero for forward compatibility */
+} LZ4F_decompressOptions_t;
+
+
+/* Resource management */
+
+/*! LZ4F_createDecompressionContext() :
+ * Create an LZ4F_dctx object, to track all decompression operations.
+ * The version provided MUST be LZ4F_VERSION.
+ * The function provides a pointer to an allocated and initialized LZ4F_dctx object.
+ * The result is an errorCode, which can be tested using LZ4F_isError().
+ * dctx memory can be released using LZ4F_freeDecompressionContext();
+ * Result of LZ4F_freeDecompressionContext() indicates current state of decompressionContext when being released.
+ * That is, it should be == 0 if decompression has been completed fully and correctly.
+ */
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_createDecompressionContext(LZ4F_dctx** dctxPtr, unsigned version);
+LZ4FLIB_API LZ4F_errorCode_t LZ4F_freeDecompressionContext(LZ4F_dctx* dctx);
+
+
+/*-***********************************
+* Streaming decompression functions
+*************************************/
+
+#define LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH 5
+
+/*! LZ4F_headerSize() : v1.9.0+
+ * Provide the header size of a frame starting at `src`.
+ * `srcSize` must be >= LZ4F_MIN_SIZE_TO_KNOW_HEADER_LENGTH,
+ * which is enough to decode the header length.
+ * @return : size of frame header
+ * or an error code, which can be tested using LZ4F_isError()
+ * note : Frame header size is variable, but is guaranteed to be
+ * >= LZ4F_HEADER_SIZE_MIN bytes, and <= LZ4F_HEADER_SIZE_MAX bytes.
+ */
+LZ4FLIB_API size_t LZ4F_headerSize(const void* src, size_t srcSize);
+
+/*! LZ4F_getFrameInfo() :
+ * This function extracts frame parameters (max blockSize, dictID, etc.).
+ * Its usage is optional: user can call LZ4F_decompress() directly.
+ *
+ * Extracted information will fill an existing LZ4F_frameInfo_t structure.
+ * This can be useful for allocation and dictionary identification purposes.
+ *
+ * LZ4F_getFrameInfo() can work in the following situations :
+ *
+ * 1) At the beginning of a new frame, before any invocation of LZ4F_decompress().
+ * It will decode header from `srcBuffer`,
+ * consuming the header and starting the decoding process.
+ *
+ * Input size must be large enough to contain the full frame header.
+ * Frame header size can be known beforehand by LZ4F_headerSize().
+ * Frame header size is variable, but is guaranteed to be >= LZ4F_HEADER_SIZE_MIN bytes,
+ * and not more than <= LZ4F_HEADER_SIZE_MAX bytes.
+ * Hence, blindly providing LZ4F_HEADER_SIZE_MAX bytes or more will always work.
+ * It's allowed to provide more input data than the header size,
+ * LZ4F_getFrameInfo() will only consume the header.
+ *
+ * If input size is not large enough,
+ * aka if it's smaller than header size,
+ * function will fail and return an error code.
+ *
+ * 2) After decoding has been started,
+ * it's possible to invoke LZ4F_getFrameInfo() anytime
+ * to extract already decoded frame parameters stored within dctx.
+ *
+ * Note that, if decoding has barely started,
+ * and not yet read enough information to decode the header,
+ * LZ4F_getFrameInfo() will fail.
+ *
+ * The number of bytes consumed from srcBuffer will be updated in *srcSizePtr (necessarily <= original value).
+ * LZ4F_getFrameInfo() only consumes bytes when decoding has not yet started,
+ * and when decoding the header has been successful.
+ * Decompression must then resume from (srcBuffer + *srcSizePtr).
+ *
+ * @return : a hint about how many srcSize bytes LZ4F_decompress() expects for next call,
+ * or an error code which can be tested using LZ4F_isError().
+ * note 1 : in case of error, dctx is not modified. Decoding operation can resume from beginning safely.
+ * note 2 : frame parameters are *copied into* an already allocated LZ4F_frameInfo_t structure.
+ */
+LZ4FLIB_API size_t LZ4F_getFrameInfo(LZ4F_dctx* dctx,
+ LZ4F_frameInfo_t* frameInfoPtr,
+ const void* srcBuffer, size_t* srcSizePtr);
+
+/*! LZ4F_decompress() :
+ * Call this function repetitively to regenerate data compressed in `srcBuffer`.
+ *
+ * The function requires a valid dctx state.
+ * It will read up to *srcSizePtr bytes from srcBuffer,
+ * and decompress data into dstBuffer, of capacity *dstSizePtr.
+ *
+ * The nb of bytes consumed from srcBuffer will be written into *srcSizePtr (necessarily <= original value).
+ * The nb of bytes decompressed into dstBuffer will be written into *dstSizePtr (necessarily <= original value).
+ *
+ * The function does not necessarily read all input bytes, so always check value in *srcSizePtr.
+ * Unconsumed source data must be presented again in subsequent invocations.
+ *
+ * `dstBuffer` can freely change between each consecutive function invocation.
+ * `dstBuffer` content will be overwritten.
+ *
+ * @return : an hint of how many `srcSize` bytes LZ4F_decompress() expects for next call.
+ * Schematically, it's the size of the current (or remaining) compressed block + header of next block.
+ * Respecting the hint provides some small speed benefit, because it skips intermediate buffers.
+ * This is just a hint though, it's always possible to provide any srcSize.
+ *
+ * When a frame is fully decoded, @return will be 0 (no more data expected).
+ * When provided with more bytes than necessary to decode a frame,
+ * LZ4F_decompress() will stop reading exactly at end of current frame, and @return 0.
+ *
+ * If decompression failed, @return is an error code, which can be tested using LZ4F_isError().
+ * After a decompression error, the `dctx` context is not resumable.
+ * Use LZ4F_resetDecompressionContext() to return to clean state.
+ *
+ * After a frame is fully decoded, dctx can be used again to decompress another frame.
+ */
+LZ4FLIB_API size_t LZ4F_decompress(LZ4F_dctx* dctx,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const LZ4F_decompressOptions_t* dOptPtr);
+
+
+/*! LZ4F_resetDecompressionContext() : added in v1.8.0
+ * In case of an error, the context is left in "undefined" state.
+ * In which case, it's necessary to reset it, before re-using it.
+ * This method can also be used to abruptly stop any unfinished decompression,
+ * and start a new one using same context resources. */
+LZ4FLIB_API void LZ4F_resetDecompressionContext(LZ4F_dctx* dctx); /* always successful */
+
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* LZ4F_H_09782039843 */
+
+#if defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843)
+#define LZ4F_H_STATIC_09782039843
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* These declarations are not stable and may change in the future.
+ * They are therefore only safe to depend on
+ * when the caller is statically linked against the library.
+ * To access their declarations, define LZ4F_STATIC_LINKING_ONLY.
+ *
+ * By default, these symbols aren't published into shared/dynamic libraries.
+ * You can override this behavior and force them to be published
+ * by defining LZ4F_PUBLISH_STATIC_FUNCTIONS.
+ * Use at your own risk.
+ */
+#ifdef LZ4F_PUBLISH_STATIC_FUNCTIONS
+# define LZ4FLIB_STATIC_API LZ4FLIB_API
+#else
+# define LZ4FLIB_STATIC_API
+#endif
+
+
+/* --- Error List --- */
+#define LZ4F_LIST_ERRORS(ITEM) \
+ ITEM(OK_NoError) \
+ ITEM(ERROR_GENERIC) \
+ ITEM(ERROR_maxBlockSize_invalid) \
+ ITEM(ERROR_blockMode_invalid) \
+ ITEM(ERROR_contentChecksumFlag_invalid) \
+ ITEM(ERROR_compressionLevel_invalid) \
+ ITEM(ERROR_headerVersion_wrong) \
+ ITEM(ERROR_blockChecksum_invalid) \
+ ITEM(ERROR_reservedFlag_set) \
+ ITEM(ERROR_allocation_failed) \
+ ITEM(ERROR_srcSize_tooLarge) \
+ ITEM(ERROR_dstMaxSize_tooSmall) \
+ ITEM(ERROR_frameHeader_incomplete) \
+ ITEM(ERROR_frameType_unknown) \
+ ITEM(ERROR_frameSize_wrong) \
+ ITEM(ERROR_srcPtr_wrong) \
+ ITEM(ERROR_decompressionFailed) \
+ ITEM(ERROR_headerChecksum_invalid) \
+ ITEM(ERROR_contentChecksum_invalid) \
+ ITEM(ERROR_frameDecoding_alreadyStarted) \
+ ITEM(ERROR_maxCode)
+
+#define LZ4F_GENERATE_ENUM(ENUM) LZ4F_##ENUM,
+
+/* enum list is exposed, to handle specific errors */
+typedef enum { LZ4F_LIST_ERRORS(LZ4F_GENERATE_ENUM)
+ _LZ4F_dummy_error_enum_for_c89_never_used } LZ4F_errorCodes;
+
+LZ4FLIB_STATIC_API LZ4F_errorCodes LZ4F_getErrorCode(size_t functionResult);
+
+LZ4FLIB_STATIC_API size_t LZ4F_getBlockSize(unsigned);
+
+/**********************************
+ * Bulk processing dictionary API
+ *********************************/
+
+/* A Dictionary is useful for the compression of small messages (KB range).
+ * It dramatically improves compression efficiency.
+ *
+ * LZ4 can ingest any input as dictionary, though only the last 64 KB are useful.
+ * Best results are generally achieved by using Zstandard's Dictionary Builder
+ * to generate a high-quality dictionary from a set of samples.
+ *
+ * Loading a dictionary has a cost, since it involves construction of tables.
+ * The Bulk processing dictionary API makes it possible to share this cost
+ * over an arbitrary number of compression jobs, even concurrently,
+ * markedly improving compression latency for these cases.
+ *
+ * The same dictionary will have to be used on the decompression side
+ * for decoding to be successful.
+ * To help identify the correct dictionary at decoding stage,
+ * the frame header allows optional embedding of a dictID field.
+ */
+typedef struct LZ4F_CDict_s LZ4F_CDict;
+
+/*! LZ4_createCDict() :
+ * When compressing multiple messages / blocks using the same dictionary, it's recommended to load it just once.
+ * LZ4_createCDict() will create a digested dictionary, ready to start future compression operations without startup delay.
+ * LZ4_CDict can be created once and shared by multiple threads concurrently, since its usage is read-only.
+ * `dictBuffer` can be released after LZ4_CDict creation, since its content is copied within CDict */
+LZ4FLIB_STATIC_API LZ4F_CDict* LZ4F_createCDict(const void* dictBuffer, size_t dictSize);
+LZ4FLIB_STATIC_API void LZ4F_freeCDict(LZ4F_CDict* CDict);
+
+
+/*! LZ4_compressFrame_usingCDict() :
+ * Compress an entire srcBuffer into a valid LZ4 frame using a digested Dictionary.
+ * cctx must point to a context created by LZ4F_createCompressionContext().
+ * If cdict==NULL, compress without a dictionary.
+ * dstBuffer MUST be >= LZ4F_compressFrameBound(srcSize, preferencesPtr).
+ * If this condition is not respected, function will fail (@return an errorCode).
+ * The LZ4F_preferences_t structure is optional : you may provide NULL as argument,
+ * but it's not recommended, as it's the only way to provide dictID in the frame header.
+ * @return : number of bytes written into dstBuffer.
+ * or an error code if it fails (can be tested using LZ4F_isError()) */
+LZ4FLIB_STATIC_API size_t LZ4F_compressFrame_usingCDict(
+ LZ4F_cctx* cctx,
+ void* dst, size_t dstCapacity,
+ const void* src, size_t srcSize,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* preferencesPtr);
+
+
+/*! LZ4F_compressBegin_usingCDict() :
+ * Inits streaming dictionary compression, and writes the frame header into dstBuffer.
+ * dstCapacity must be >= LZ4F_HEADER_SIZE_MAX bytes.
+ * `prefsPtr` is optional : you may provide NULL as argument,
+ * however, it's the only way to provide dictID in the frame header.
+ * @return : number of bytes written into dstBuffer for the header,
+ * or an error code (which can be tested using LZ4F_isError()) */
+LZ4FLIB_STATIC_API size_t LZ4F_compressBegin_usingCDict(
+ LZ4F_cctx* cctx,
+ void* dstBuffer, size_t dstCapacity,
+ const LZ4F_CDict* cdict,
+ const LZ4F_preferences_t* prefsPtr);
+
+
+/*! LZ4F_decompress_usingDict() :
+ * Same as LZ4F_decompress(), using a predefined dictionary.
+ * Dictionary is used "in place", without any preprocessing.
+ * It must remain accessible throughout the entire frame decoding. */
+LZ4FLIB_STATIC_API size_t LZ4F_decompress_usingDict(
+ LZ4F_dctx* dctxPtr,
+ void* dstBuffer, size_t* dstSizePtr,
+ const void* srcBuffer, size_t* srcSizePtr,
+ const void* dict, size_t dictSize,
+ const LZ4F_decompressOptions_t* decompressOptionsPtr);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* defined(LZ4F_STATIC_LINKING_ONLY) && !defined(LZ4F_H_STATIC_09782039843) */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h
new file mode 100644
index 000000000..925a2c5c3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/lz4frame_static.h
@@ -0,0 +1,47 @@
+/*
+ LZ4 auto-framing library
+ Header File for static linking only
+ Copyright (C) 2011-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+
+#ifndef LZ4FRAME_STATIC_H_0398209384
+#define LZ4FRAME_STATIC_H_0398209384
+
+/* The declarations that formerly were made here have been merged into
+ * lz4frame.h, protected by the LZ4F_STATIC_LINKING_ONLY macro. Going forward,
+ * it is recommended to simply include that header directly.
+ */
+
+#define LZ4F_STATIC_LINKING_ONLY
+#include "lz4frame.h"
+
+#endif /* LZ4FRAME_STATIC_H_0398209384 */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c b/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c
new file mode 100644
index 000000000..77c9f4305
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.c
@@ -0,0 +1,1615 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Copyright (C) 2011-2017, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+/* note : lz4hc is not an independent module, it requires lz4.h/lz4.c for proper compilation */
+
+
+/* *************************************
+* Tuning Parameter
+***************************************/
+
+/*! HEAPMODE :
+ * Select how default compression function will allocate workplace memory,
+ * in stack (0:fastest), or in heap (1:requires malloc()).
+ * Since workplace is rather large, heap mode is recommended.
+ */
+#ifndef LZ4HC_HEAPMODE
+# define LZ4HC_HEAPMODE 1
+#endif
+
+
+/*=== Dependency ===*/
+#define LZ4_HC_STATIC_LINKING_ONLY
+#include "lz4hc.h"
+
+
+/*=== Common definitions ===*/
+#if defined(__GNUC__)
+# pragma GCC diagnostic ignored "-Wunused-function"
+#endif
+#if defined (__clang__)
+# pragma clang diagnostic ignored "-Wunused-function"
+#endif
+
+#define LZ4_COMMONDEFS_ONLY
+#ifndef LZ4_SRC_INCLUDED
+#include "lz4.c" /* LZ4_count, constants, mem */
+#endif
+
+
+/*=== Enums ===*/
+typedef enum { noDictCtx, usingDictCtxHc } dictCtx_directive;
+
+
+/*=== Constants ===*/
+#define OPTIMAL_ML (int)((ML_MASK-1)+MINMATCH)
+#define LZ4_OPT_NUM (1<<12)
+
+
+/*=== Macros ===*/
+#define MIN(a,b) ( (a) < (b) ? (a) : (b) )
+#define MAX(a,b) ( (a) > (b) ? (a) : (b) )
+#define HASH_FUNCTION(i) (((i) * 2654435761U) >> ((MINMATCH*8)-LZ4HC_HASH_LOG))
+#define DELTANEXTMAXD(p) chainTable[(p) & LZ4HC_MAXD_MASK] /* flexible, LZ4HC_MAXD dependent */
+#define DELTANEXTU16(table, pos) table[(U16)(pos)] /* faster */
+/* Make fields passed to, and updated by LZ4HC_encodeSequence explicit */
+#define UPDATABLE(ip, op, anchor) &ip, &op, &anchor
+
+static U32 LZ4HC_hashPtr(const void* ptr) { return HASH_FUNCTION(LZ4_read32(ptr)); }
+
+
+/**************************************
+* HC Compression
+**************************************/
+static void LZ4HC_clearTables (LZ4HC_CCtx_internal* hc4)
+{
+ MEM_INIT(hc4->hashTable, 0, sizeof(hc4->hashTable));
+ MEM_INIT(hc4->chainTable, 0xFF, sizeof(hc4->chainTable));
+}
+
+static void LZ4HC_init_internal (LZ4HC_CCtx_internal* hc4, const BYTE* start)
+{
+ uptrval startingOffset = (uptrval)(hc4->end - hc4->base);
+ if (startingOffset > 1 GB) {
+ LZ4HC_clearTables(hc4);
+ startingOffset = 0;
+ }
+ startingOffset += 64 KB;
+ hc4->nextToUpdate = (U32) startingOffset;
+ hc4->base = start - startingOffset;
+ hc4->end = start;
+ hc4->dictBase = start - startingOffset;
+ hc4->dictLimit = (U32) startingOffset;
+ hc4->lowLimit = (U32) startingOffset;
+}
+
+
+/* Update chains up to ip (excluded) */
+LZ4_FORCE_INLINE void LZ4HC_Insert (LZ4HC_CCtx_internal* hc4, const BYTE* ip)
+{
+ U16* const chainTable = hc4->chainTable;
+ U32* const hashTable = hc4->hashTable;
+ const BYTE* const base = hc4->base;
+ U32 const target = (U32)(ip - base);
+ U32 idx = hc4->nextToUpdate;
+
+ while (idx < target) {
+ U32 const h = LZ4HC_hashPtr(base+idx);
+ size_t delta = idx - hashTable[h];
+ if (delta>LZ4_DISTANCE_MAX) delta = LZ4_DISTANCE_MAX;
+ DELTANEXTU16(chainTable, idx) = (U16)delta;
+ hashTable[h] = idx;
+ idx++;
+ }
+
+ hc4->nextToUpdate = target;
+}
+
+/** LZ4HC_countBack() :
+ * @return : negative value, nb of common bytes before ip/match */
+LZ4_FORCE_INLINE
+int LZ4HC_countBack(const BYTE* const ip, const BYTE* const match,
+ const BYTE* const iMin, const BYTE* const mMin)
+{
+ int back = 0;
+ int const min = (int)MAX(iMin - ip, mMin - match);
+ assert(min <= 0);
+ assert(ip >= iMin); assert((size_t)(ip-iMin) < (1U<<31));
+ assert(match >= mMin); assert((size_t)(match - mMin) < (1U<<31));
+ while ( (back > min)
+ && (ip[back-1] == match[back-1]) )
+ back--;
+ return back;
+}
+
+#if defined(_MSC_VER)
+# define LZ4HC_rotl32(x,r) _rotl(x,r)
+#else
+# define LZ4HC_rotl32(x,r) ((x << r) | (x >> (32 - r)))
+#endif
+
+
+static U32 LZ4HC_rotatePattern(size_t const rotate, U32 const pattern)
+{
+ size_t const bitsToRotate = (rotate & (sizeof(pattern) - 1)) << 3;
+ if (bitsToRotate == 0) return pattern;
+ return LZ4HC_rotl32(pattern, (int)bitsToRotate);
+}
+
+/* LZ4HC_countPattern() :
+ * pattern32 must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!) */
+static unsigned
+LZ4HC_countPattern(const BYTE* ip, const BYTE* const iEnd, U32 const pattern32)
+{
+ const BYTE* const iStart = ip;
+ reg_t const pattern = (sizeof(pattern)==8) ?
+ (reg_t)pattern32 + (((reg_t)pattern32) << (sizeof(pattern)*4)) : pattern32;
+
+ while (likely(ip < iEnd-(sizeof(pattern)-1))) {
+ reg_t const diff = LZ4_read_ARCH(ip) ^ pattern;
+ if (!diff) { ip+=sizeof(pattern); continue; }
+ ip += LZ4_NbCommonBytes(diff);
+ return (unsigned)(ip - iStart);
+ }
+
+ if (LZ4_isLittleEndian()) {
+ reg_t patternByte = pattern;
+ while ((ip<iEnd) && (*ip == (BYTE)patternByte)) {
+ ip++; patternByte >>= 8;
+ }
+ } else { /* big endian */
+ U32 bitOffset = (sizeof(pattern)*8) - 8;
+ while (ip < iEnd) {
+ BYTE const byte = (BYTE)(pattern >> bitOffset);
+ if (*ip != byte) break;
+ ip ++; bitOffset -= 8;
+ }
+ }
+
+ return (unsigned)(ip - iStart);
+}
+
+/* LZ4HC_reverseCountPattern() :
+ * pattern must be a sample of repetitive pattern of length 1, 2 or 4 (but not 3!)
+ * read using natural platform endianess */
+static unsigned
+LZ4HC_reverseCountPattern(const BYTE* ip, const BYTE* const iLow, U32 pattern)
+{
+ const BYTE* const iStart = ip;
+
+ while (likely(ip >= iLow+4)) {
+ if (LZ4_read32(ip-4) != pattern) break;
+ ip -= 4;
+ }
+ { const BYTE* bytePtr = (const BYTE*)(&pattern) + 3; /* works for any endianess */
+ while (likely(ip>iLow)) {
+ if (ip[-1] != *bytePtr) break;
+ ip--; bytePtr--;
+ } }
+ return (unsigned)(iStart - ip);
+}
+
+/* LZ4HC_protectDictEnd() :
+ * Checks if the match is in the last 3 bytes of the dictionary, so reading the
+ * 4 byte MINMATCH would overflow.
+ * @returns true if the match index is okay.
+ */
+static int LZ4HC_protectDictEnd(U32 const dictLimit, U32 const matchIndex)
+{
+ return ((U32)((dictLimit - 1) - matchIndex) >= 3);
+}
+
+typedef enum { rep_untested, rep_not, rep_confirmed } repeat_state_e;
+typedef enum { favorCompressionRatio=0, favorDecompressionSpeed } HCfavor_e;
+
+LZ4_FORCE_INLINE int
+LZ4HC_InsertAndGetWiderMatch (
+ LZ4HC_CCtx_internal* hc4,
+ const BYTE* const ip,
+ const BYTE* const iLowLimit,
+ const BYTE* const iHighLimit,
+ int longest,
+ const BYTE** matchpos,
+ const BYTE** startpos,
+ const int maxNbAttempts,
+ const int patternAnalysis,
+ const int chainSwap,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ U16* const chainTable = hc4->chainTable;
+ U32* const HashTable = hc4->hashTable;
+ const LZ4HC_CCtx_internal * const dictCtx = hc4->dictCtx;
+ const BYTE* const base = hc4->base;
+ const U32 dictLimit = hc4->dictLimit;
+ const BYTE* const lowPrefixPtr = base + dictLimit;
+ const U32 ipIndex = (U32)(ip - base);
+ const U32 lowestMatchIndex = (hc4->lowLimit + (LZ4_DISTANCE_MAX + 1) > ipIndex) ? hc4->lowLimit : ipIndex - LZ4_DISTANCE_MAX;
+ const BYTE* const dictBase = hc4->dictBase;
+ int const lookBackLength = (int)(ip-iLowLimit);
+ int nbAttempts = maxNbAttempts;
+ U32 matchChainPos = 0;
+ U32 const pattern = LZ4_read32(ip);
+ U32 matchIndex;
+ repeat_state_e repeat = rep_untested;
+ size_t srcPatternLength = 0;
+
+ DEBUGLOG(7, "LZ4HC_InsertAndGetWiderMatch");
+ /* First Match */
+ LZ4HC_Insert(hc4, ip);
+ matchIndex = HashTable[LZ4HC_hashPtr(ip)];
+ DEBUGLOG(7, "First match at index %u / %u (lowestMatchIndex)",
+ matchIndex, lowestMatchIndex);
+
+ while ((matchIndex>=lowestMatchIndex) && (nbAttempts>0)) {
+ int matchLength=0;
+ nbAttempts--;
+ assert(matchIndex < ipIndex);
+ if (favorDecSpeed && (ipIndex - matchIndex < 8)) {
+ /* do nothing */
+ } else if (matchIndex >= dictLimit) { /* within current Prefix */
+ const BYTE* const matchPtr = base + matchIndex;
+ assert(matchPtr >= lowPrefixPtr);
+ assert(matchPtr < ip);
+ assert(longest >= 1);
+ if (LZ4_read16(iLowLimit + longest - 1) == LZ4_read16(matchPtr - lookBackLength + longest - 1)) {
+ if (LZ4_read32(matchPtr) == pattern) {
+ int const back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, lowPrefixPtr) : 0;
+ matchLength = MINMATCH + (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, iHighLimit);
+ matchLength -= back;
+ if (matchLength > longest) {
+ longest = matchLength;
+ *matchpos = matchPtr + back;
+ *startpos = ip + back;
+ } } }
+ } else { /* lowestMatchIndex <= matchIndex < dictLimit */
+ const BYTE* const matchPtr = dictBase + matchIndex;
+ if (LZ4_read32(matchPtr) == pattern) {
+ const BYTE* const dictStart = dictBase + hc4->lowLimit;
+ int back = 0;
+ const BYTE* vLimit = ip + (dictLimit - matchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ matchLength = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ if ((ip+matchLength == vLimit) && (vLimit < iHighLimit))
+ matchLength += LZ4_count(ip+matchLength, lowPrefixPtr, iHighLimit);
+ back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictStart) : 0;
+ matchLength -= back;
+ if (matchLength > longest) {
+ longest = matchLength;
+ *matchpos = base + matchIndex + back; /* virtual pos, relative to ip, to retrieve offset */
+ *startpos = ip + back;
+ } } }
+
+ if (chainSwap && matchLength==longest) { /* better match => select a better chain */
+ assert(lookBackLength==0); /* search forward only */
+ if (matchIndex + (U32)longest <= ipIndex) {
+ int const kTrigger = 4;
+ U32 distanceToNextMatch = 1;
+ int const end = longest - MINMATCH + 1;
+ int step = 1;
+ int accel = 1 << kTrigger;
+ int pos;
+ for (pos = 0; pos < end; pos += step) {
+ U32 const candidateDist = DELTANEXTU16(chainTable, matchIndex + (U32)pos);
+ step = (accel++ >> kTrigger);
+ if (candidateDist > distanceToNextMatch) {
+ distanceToNextMatch = candidateDist;
+ matchChainPos = (U32)pos;
+ accel = 1 << kTrigger;
+ }
+ }
+ if (distanceToNextMatch > 1) {
+ if (distanceToNextMatch > matchIndex) break; /* avoid overflow */
+ matchIndex -= distanceToNextMatch;
+ continue;
+ } } }
+
+ { U32 const distNextMatch = DELTANEXTU16(chainTable, matchIndex);
+ if (patternAnalysis && distNextMatch==1 && matchChainPos==0) {
+ U32 const matchCandidateIdx = matchIndex-1;
+ /* may be a repeated pattern */
+ if (repeat == rep_untested) {
+ if ( ((pattern & 0xFFFF) == (pattern >> 16))
+ & ((pattern & 0xFF) == (pattern >> 24)) ) {
+ repeat = rep_confirmed;
+ srcPatternLength = LZ4HC_countPattern(ip+sizeof(pattern), iHighLimit, pattern) + sizeof(pattern);
+ } else {
+ repeat = rep_not;
+ } }
+ if ( (repeat == rep_confirmed) && (matchCandidateIdx >= lowestMatchIndex)
+ && LZ4HC_protectDictEnd(dictLimit, matchCandidateIdx) ) {
+ const int extDict = matchCandidateIdx < dictLimit;
+ const BYTE* const matchPtr = (extDict ? dictBase : base) + matchCandidateIdx;
+ if (LZ4_read32(matchPtr) == pattern) { /* good candidate */
+ const BYTE* const dictStart = dictBase + hc4->lowLimit;
+ const BYTE* const iLimit = extDict ? dictBase + dictLimit : iHighLimit;
+ size_t forwardPatternLength = LZ4HC_countPattern(matchPtr+sizeof(pattern), iLimit, pattern) + sizeof(pattern);
+ if (extDict && matchPtr + forwardPatternLength == iLimit) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern(forwardPatternLength, pattern);
+ forwardPatternLength += LZ4HC_countPattern(lowPrefixPtr, iHighLimit, rotatedPattern);
+ }
+ { const BYTE* const lowestMatchPtr = extDict ? dictStart : lowPrefixPtr;
+ size_t backLength = LZ4HC_reverseCountPattern(matchPtr, lowestMatchPtr, pattern);
+ size_t currentSegmentLength;
+ if (!extDict && matchPtr - backLength == lowPrefixPtr && hc4->lowLimit < dictLimit) {
+ U32 const rotatedPattern = LZ4HC_rotatePattern((U32)(-(int)backLength), pattern);
+ backLength += LZ4HC_reverseCountPattern(dictBase + dictLimit, dictStart, rotatedPattern);
+ }
+ /* Limit backLength not go further than lowestMatchIndex */
+ backLength = matchCandidateIdx - MAX(matchCandidateIdx - (U32)backLength, lowestMatchIndex);
+ assert(matchCandidateIdx - backLength >= lowestMatchIndex);
+ currentSegmentLength = backLength + forwardPatternLength;
+ /* Adjust to end of pattern if the source pattern fits, otherwise the beginning of the pattern */
+ if ( (currentSegmentLength >= srcPatternLength) /* current pattern segment large enough to contain full srcPatternLength */
+ && (forwardPatternLength <= srcPatternLength) ) { /* haven't reached this position yet */
+ U32 const newMatchIndex = matchCandidateIdx + (U32)forwardPatternLength - (U32)srcPatternLength; /* best position, full pattern, might be followed by more match */
+ if (LZ4HC_protectDictEnd(dictLimit, newMatchIndex))
+ matchIndex = newMatchIndex;
+ else {
+ /* Can only happen if started in the prefix */
+ assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
+ matchIndex = dictLimit;
+ }
+ } else {
+ U32 const newMatchIndex = matchCandidateIdx - (U32)backLength; /* farthest position in current segment, will find a match of length currentSegmentLength + maybe some back */
+ if (!LZ4HC_protectDictEnd(dictLimit, newMatchIndex)) {
+ assert(newMatchIndex >= dictLimit - 3 && newMatchIndex < dictLimit && !extDict);
+ matchIndex = dictLimit;
+ } else {
+ matchIndex = newMatchIndex;
+ if (lookBackLength==0) { /* no back possible */
+ size_t const maxML = MIN(currentSegmentLength, srcPatternLength);
+ if ((size_t)longest < maxML) {
+ assert(base + matchIndex != ip);
+ if ((size_t)(ip - base) - matchIndex > LZ4_DISTANCE_MAX) break;
+ assert(maxML < 2 GB);
+ longest = (int)maxML;
+ *matchpos = base + matchIndex; /* virtual pos, relative to ip, to retrieve offset */
+ *startpos = ip;
+ }
+ { U32 const distToNextPattern = DELTANEXTU16(chainTable, matchIndex);
+ if (distToNextPattern > matchIndex) break; /* avoid overflow */
+ matchIndex -= distToNextPattern;
+ } } } } }
+ continue;
+ } }
+ } } /* PA optimization */
+
+ /* follow current chain */
+ matchIndex -= DELTANEXTU16(chainTable, matchIndex + matchChainPos);
+
+ } /* while ((matchIndex>=lowestMatchIndex) && (nbAttempts)) */
+
+ if ( dict == usingDictCtxHc
+ && nbAttempts > 0
+ && ipIndex - lowestMatchIndex < LZ4_DISTANCE_MAX) {
+ size_t const dictEndOffset = (size_t)(dictCtx->end - dictCtx->base);
+ U32 dictMatchIndex = dictCtx->hashTable[LZ4HC_hashPtr(ip)];
+ assert(dictEndOffset <= 1 GB);
+ matchIndex = dictMatchIndex + lowestMatchIndex - (U32)dictEndOffset;
+ while (ipIndex - matchIndex <= LZ4_DISTANCE_MAX && nbAttempts--) {
+ const BYTE* const matchPtr = dictCtx->base + dictMatchIndex;
+
+ if (LZ4_read32(matchPtr) == pattern) {
+ int mlt;
+ int back = 0;
+ const BYTE* vLimit = ip + (dictEndOffset - dictMatchIndex);
+ if (vLimit > iHighLimit) vLimit = iHighLimit;
+ mlt = (int)LZ4_count(ip+MINMATCH, matchPtr+MINMATCH, vLimit) + MINMATCH;
+ back = lookBackLength ? LZ4HC_countBack(ip, matchPtr, iLowLimit, dictCtx->base + dictCtx->dictLimit) : 0;
+ mlt -= back;
+ if (mlt > longest) {
+ longest = mlt;
+ *matchpos = base + matchIndex + back;
+ *startpos = ip + back;
+ } }
+
+ { U32 const nextOffset = DELTANEXTU16(dictCtx->chainTable, dictMatchIndex);
+ dictMatchIndex -= nextOffset;
+ matchIndex -= nextOffset;
+ } } }
+
+ return longest;
+}
+
+LZ4_FORCE_INLINE
+int LZ4HC_InsertAndFindBestMatch(LZ4HC_CCtx_internal* const hc4, /* Index table will be updated */
+ const BYTE* const ip, const BYTE* const iLimit,
+ const BYTE** matchpos,
+ const int maxNbAttempts,
+ const int patternAnalysis,
+ const dictCtx_directive dict)
+{
+ const BYTE* uselessPtr = ip;
+ /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+ * but this won't be the case here, as we define iLowLimit==ip,
+ * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ return LZ4HC_InsertAndGetWiderMatch(hc4, ip, ip, iLimit, MINMATCH-1, matchpos, &uselessPtr, maxNbAttempts, patternAnalysis, 0 /*chainSwap*/, dict, favorCompressionRatio);
+}
+
+/* LZ4HC_encodeSequence() :
+ * @return : 0 if ok,
+ * 1 if buffer issue detected */
+LZ4_FORCE_INLINE int LZ4HC_encodeSequence (
+ const BYTE** _ip,
+ BYTE** _op,
+ const BYTE** _anchor,
+ int matchLength,
+ const BYTE* const match,
+ limitedOutput_directive limit,
+ BYTE* oend)
+{
+#define ip (*_ip)
+#define op (*_op)
+#define anchor (*_anchor)
+
+ size_t length;
+ BYTE* const token = op++;
+
+#if defined(LZ4_DEBUG) && (LZ4_DEBUG >= 6)
+ static const BYTE* start = NULL;
+ static U32 totalCost = 0;
+ U32 const pos = (start==NULL) ? 0 : (U32)(anchor - start);
+ U32 const ll = (U32)(ip - anchor);
+ U32 const llAdd = (ll>=15) ? ((ll-15) / 255) + 1 : 0;
+ U32 const mlAdd = (matchLength>=19) ? ((matchLength-19) / 255) + 1 : 0;
+ U32 const cost = 1 + llAdd + ll + 2 + mlAdd;
+ if (start==NULL) start = anchor; /* only works for single segment */
+ /* g_debuglog_enable = (pos >= 2228) & (pos <= 2262); */
+ DEBUGLOG(6, "pos:%7u -- literals:%4u, match:%4i, offset:%5u, cost:%4u + %5u",
+ pos,
+ (U32)(ip - anchor), matchLength, (U32)(ip-match),
+ cost, totalCost);
+ totalCost += cost;
+#endif
+
+ /* Encode Literal length */
+ length = (size_t)(ip - anchor);
+ LZ4_STATIC_ASSERT(notLimited == 0);
+ /* Check output limit */
+ if (limit && ((op + (length / 255) + length + (2 + 1 + LASTLITERALS)) > oend)) {
+ DEBUGLOG(6, "Not enough room to write %i literals (%i bytes remaining)",
+ (int)length, (int)(oend - op));
+ return 1;
+ }
+ if (length >= RUN_MASK) {
+ size_t len = length - RUN_MASK;
+ *token = (RUN_MASK << ML_BITS);
+ for(; len >= 255 ; len -= 255) *op++ = 255;
+ *op++ = (BYTE)len;
+ } else {
+ *token = (BYTE)(length << ML_BITS);
+ }
+
+ /* Copy Literals */
+ LZ4_wildCopy8(op, anchor, op + length);
+ op += length;
+
+ /* Encode Offset */
+ assert( (ip - match) <= LZ4_DISTANCE_MAX ); /* note : consider providing offset as a value, rather than as a pointer difference */
+ LZ4_writeLE16(op, (U16)(ip - match)); op += 2;
+
+ /* Encode MatchLength */
+ assert(matchLength >= MINMATCH);
+ length = (size_t)matchLength - MINMATCH;
+ if (limit && (op + (length / 255) + (1 + LASTLITERALS) > oend)) {
+ DEBUGLOG(6, "Not enough room to write match length");
+ return 1; /* Check output limit */
+ }
+ if (length >= ML_MASK) {
+ *token += ML_MASK;
+ length -= ML_MASK;
+ for(; length >= 510 ; length -= 510) { *op++ = 255; *op++ = 255; }
+ if (length >= 255) { length -= 255; *op++ = 255; }
+ *op++ = (BYTE)length;
+ } else {
+ *token += (BYTE)(length);
+ }
+
+ /* Prepare next loop */
+ ip += matchLength;
+ anchor = ip;
+
+ return 0;
+}
+#undef ip
+#undef op
+#undef anchor
+
+LZ4_FORCE_INLINE int LZ4HC_compress_hashChain (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const source,
+ char* const dest,
+ int* srcSizePtr,
+ int const maxOutputSize,
+ int maxNbAttempts,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
+{
+ const int inputSize = *srcSizePtr;
+ const int patternAnalysis = (maxNbAttempts > 128); /* levels 9+ */
+
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + inputSize;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = (iend - LASTLITERALS);
+
+ BYTE* optr = (BYTE*) dest;
+ BYTE* op = (BYTE*) dest;
+ BYTE* oend = op + maxOutputSize;
+
+ int ml0, ml, ml2, ml3;
+ const BYTE* start0;
+ const BYTE* ref0;
+ const BYTE* ref = NULL;
+ const BYTE* start2 = NULL;
+ const BYTE* ref2 = NULL;
+ const BYTE* start3 = NULL;
+ const BYTE* ref3 = NULL;
+
+ /* init */
+ *srcSizePtr = 0;
+ if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+ if (inputSize < LZ4_minLength) goto _last_literals; /* Input too small, no compression (all literals) */
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ ml = LZ4HC_InsertAndFindBestMatch(ctx, ip, matchlimit, &ref, maxNbAttempts, patternAnalysis, dict);
+ if (ml<MINMATCH) { ip++; continue; }
+
+ /* saved, in case we would skip too much */
+ start0 = ip; ref0 = ref; ml0 = ml;
+
+_Search2:
+ if (ip+ml <= mflimit) {
+ ml2 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ ip + ml - 2, ip + 0, matchlimit, ml, &ref2, &start2,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else {
+ ml2 = ml;
+ }
+
+ if (ml2 == ml) { /* No better match => encode ML1 */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ continue;
+ }
+
+ if (start0 < ip) { /* first match was skipped at least once */
+ if (start2 < ip + ml0) { /* squeezing ML1 between ML0(original ML1) and ML2 */
+ ip = start0; ref = ref0; ml = ml0; /* restore initial ML1 */
+ } }
+
+ /* Here, start0==ip */
+ if ((start2 - ip) < 3) { /* First Match too small : removed */
+ ml = ml2;
+ ip = start2;
+ ref =ref2;
+ goto _Search2;
+ }
+
+_Search3:
+ /* At this stage, we have :
+ * ml2 > ml1, and
+ * ip1+3 <= ip2 (usually < ip1+ml1) */
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ int new_ml = ml;
+ if (new_ml > OPTIMAL_ML) new_ml = OPTIMAL_ML;
+ if (ip+new_ml > start2 + ml2 - MINMATCH) new_ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = new_ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ }
+ /* Now, we have start2 = ip+new_ml, with new_ml = min(ml, OPTIMAL_ML=18) */
+
+ if (start2 + ml2 <= mflimit) {
+ ml3 = LZ4HC_InsertAndGetWiderMatch(ctx,
+ start2 + ml2 - 3, start2, matchlimit, ml2, &ref3, &start3,
+ maxNbAttempts, patternAnalysis, 0, dict, favorCompressionRatio);
+ } else {
+ ml3 = ml2;
+ }
+
+ if (ml3 == ml2) { /* No better match => encode ML1 and ML2 */
+ /* ip & ref are known; Now for ml */
+ if (start2 < ip+ml) ml = (int)(start2 - ip);
+ /* Now, encode 2 sequences */
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ ip = start2;
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml2, ref2, limit, oend)) {
+ ml = ml2;
+ ref = ref2;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ if (start3 < ip+ml+3) { /* Not enough space for match 2 : remove it */
+ if (start3 >= (ip+ml)) { /* can write Seq1 immediately ==> Seq2 is removed, so Seq3 becomes Seq1 */
+ if (start2 < ip+ml) {
+ int correction = (int)(ip+ml - start2);
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ if (ml2 < MINMATCH) {
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ }
+ }
+
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+ ip = start3;
+ ref = ref3;
+ ml = ml3;
+
+ start0 = start2;
+ ref0 = ref2;
+ ml0 = ml2;
+ goto _Search2;
+ }
+
+ start2 = start3;
+ ref2 = ref3;
+ ml2 = ml3;
+ goto _Search3;
+ }
+
+ /*
+ * OK, now we have 3 ascending matches;
+ * let's write the first one ML1.
+ * ip & ref are known; Now decide ml.
+ */
+ if (start2 < ip+ml) {
+ if ((start2 - ip) < OPTIMAL_ML) {
+ int correction;
+ if (ml > OPTIMAL_ML) ml = OPTIMAL_ML;
+ if (ip + ml > start2 + ml2 - MINMATCH) ml = (int)(start2 - ip) + ml2 - MINMATCH;
+ correction = ml - (int)(start2 - ip);
+ if (correction > 0) {
+ start2 += correction;
+ ref2 += correction;
+ ml2 -= correction;
+ }
+ } else {
+ ml = (int)(start2 - ip);
+ }
+ }
+ optr = op;
+ if (LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, limit, oend)) goto _dest_overflow;
+
+ /* ML2 becomes ML1 */
+ ip = start2; ref = ref2; ml = ml2;
+
+ /* ML3 becomes ML2 */
+ start2 = start3; ref2 = ref3; ml2 = ml3;
+
+ /* let's find a new ML3 */
+ goto _Search3;
+ }
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) return 0;
+ /* adapt lastRunSize to fill 'dest' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char*)ip) - source);
+ return (int) (((char*)op)-dest);
+
+_dest_overflow:
+ if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ml and ref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing");
+ op = optr; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ml >= 0);
+ if ((size_t)ml > maxMlSize) ml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ml >= MFLIMIT) {
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ref, notLimited, oend);
+ } }
+ goto _last_literals;
+ }
+ /* compression failed */
+ return 0;
+}
+
+
+static int LZ4HC_compress_optimal( LZ4HC_CCtx_internal* ctx,
+ const char* const source, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ int const nbSearches, size_t sufficient_len,
+ const limitedOutput_directive limit, int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed);
+
+
+LZ4_FORCE_INLINE int LZ4HC_compress_generic_internal (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ const limitedOutput_directive limit,
+ const dictCtx_directive dict
+ )
+{
+ typedef enum { lz4hc, lz4opt } lz4hc_strat_e;
+ typedef struct {
+ lz4hc_strat_e strat;
+ int nbSearches;
+ U32 targetLength;
+ } cParams_t;
+ static const cParams_t clTable[LZ4HC_CLEVEL_MAX+1] = {
+ { lz4hc, 2, 16 }, /* 0, unused */
+ { lz4hc, 2, 16 }, /* 1, unused */
+ { lz4hc, 2, 16 }, /* 2, unused */
+ { lz4hc, 4, 16 }, /* 3 */
+ { lz4hc, 8, 16 }, /* 4 */
+ { lz4hc, 16, 16 }, /* 5 */
+ { lz4hc, 32, 16 }, /* 6 */
+ { lz4hc, 64, 16 }, /* 7 */
+ { lz4hc, 128, 16 }, /* 8 */
+ { lz4hc, 256, 16 }, /* 9 */
+ { lz4opt, 96, 64 }, /*10==LZ4HC_CLEVEL_OPT_MIN*/
+ { lz4opt, 512,128 }, /*11 */
+ { lz4opt,16384,LZ4_OPT_NUM }, /* 12==LZ4HC_CLEVEL_MAX */
+ };
+
+ DEBUGLOG(4, "LZ4HC_compress_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ ctx, src, *srcSizePtr, limit);
+
+ if (limit == fillOutput && dstCapacity < 1) return 0; /* Impossible to store anything */
+ if ((U32)*srcSizePtr > (U32)LZ4_MAX_INPUT_SIZE) return 0; /* Unsupported input size (too large or negative) */
+
+ ctx->end += *srcSizePtr;
+ if (cLevel < 1) cLevel = LZ4HC_CLEVEL_DEFAULT; /* note : convention is different from lz4frame, maybe something to review */
+ cLevel = MIN(LZ4HC_CLEVEL_MAX, cLevel);
+ { cParams_t const cParam = clTable[cLevel];
+ HCfavor_e const favor = ctx->favorDecSpeed ? favorDecompressionSpeed : favorCompressionRatio;
+ int result;
+
+ if (cParam.strat == lz4hc) {
+ result = LZ4HC_compress_hashChain(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, limit, dict);
+ } else {
+ assert(cParam.strat == lz4opt);
+ result = LZ4HC_compress_optimal(ctx,
+ src, dst, srcSizePtr, dstCapacity,
+ cParam.nbSearches, cParam.targetLength, limit,
+ cLevel == LZ4HC_CLEVEL_MAX, /* ultra mode */
+ dict, favor);
+ }
+ if (result <= 0) ctx->dirty = 1;
+ return result;
+ }
+}
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock);
+
+static int
+LZ4HC_compress_generic_noDictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ assert(ctx->dictCtx == NULL);
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, noDictCtx);
+}
+
+static int
+LZ4HC_compress_generic_dictCtx (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ const size_t position = (size_t)(ctx->end - ctx->base) - ctx->lowLimit;
+ assert(ctx->dictCtx != NULL);
+ if (position >= 64 KB) {
+ ctx->dictCtx = NULL;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else if (position == 0 && *srcSizePtr > 4 KB) {
+ memcpy(ctx, ctx->dictCtx, sizeof(LZ4HC_CCtx_internal));
+ LZ4HC_setExternalDict(ctx, (const BYTE *)src);
+ ctx->compressionLevel = (short)cLevel;
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_internal(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit, usingDictCtxHc);
+ }
+}
+
+static int
+LZ4HC_compress_generic (
+ LZ4HC_CCtx_internal* const ctx,
+ const char* const src,
+ char* const dst,
+ int* const srcSizePtr,
+ int const dstCapacity,
+ int cLevel,
+ limitedOutput_directive limit
+ )
+{
+ if (ctx->dictCtx == NULL) {
+ return LZ4HC_compress_generic_noDictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ } else {
+ return LZ4HC_compress_generic_dictCtx(ctx, src, dst, srcSizePtr, dstCapacity, cLevel, limit);
+ }
+}
+
+
+int LZ4_sizeofStateHC(void) { return (int)sizeof(LZ4_streamHC_t); }
+
+static size_t LZ4_streamHC_t_alignment(void)
+{
+#if LZ4_ALIGN_TEST
+ typedef struct { char c; LZ4_streamHC_t t; } t_a;
+ return sizeof(t_a) - sizeof(LZ4_streamHC_t);
+#else
+ return 1; /* effectively disabled */
+#endif
+}
+
+/* state is presumed correctly initialized,
+ * in which case its size and alignment have already been validate */
+int LZ4_compress_HC_extStateHC_fastReset (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ LZ4HC_CCtx_internal* const ctx = &((LZ4_streamHC_t*)state)->internal_donotuse;
+ if (!LZ4_isAligned(state, LZ4_streamHC_t_alignment())) return 0;
+ LZ4_resetStreamHC_fast((LZ4_streamHC_t*)state, compressionLevel);
+ LZ4HC_init_internal (ctx, (const BYTE*)src);
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, limitedOutput);
+ else
+ return LZ4HC_compress_generic (ctx, src, dst, &srcSize, dstCapacity, compressionLevel, notLimited);
+}
+
+int LZ4_compress_HC_extStateHC (void* state, const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+ LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx==NULL) return 0; /* init failure */
+ return LZ4_compress_HC_extStateHC_fastReset(state, src, dst, srcSize, dstCapacity, compressionLevel);
+}
+
+int LZ4_compress_HC(const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel)
+{
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ LZ4_streamHC_t* const statePtr = (LZ4_streamHC_t*)ALLOC(sizeof(LZ4_streamHC_t));
+#else
+ LZ4_streamHC_t state;
+ LZ4_streamHC_t* const statePtr = &state;
+#endif
+ int const cSize = LZ4_compress_HC_extStateHC(statePtr, src, dst, srcSize, dstCapacity, compressionLevel);
+#if defined(LZ4HC_HEAPMODE) && LZ4HC_HEAPMODE==1
+ FREEMEM(statePtr);
+#endif
+ return cSize;
+}
+
+/* state is presumed sized correctly (>= sizeof(LZ4_streamHC_t)) */
+int LZ4_compress_HC_destSize(void* state, const char* source, char* dest, int* sourceSizePtr, int targetDestSize, int cLevel)
+{
+ LZ4_streamHC_t* const ctx = LZ4_initStreamHC(state, sizeof(*ctx));
+ if (ctx==NULL) return 0; /* init failure */
+ LZ4HC_init_internal(&ctx->internal_donotuse, (const BYTE*) source);
+ LZ4_setCompressionLevel(ctx, cLevel);
+ return LZ4HC_compress_generic(&ctx->internal_donotuse, source, dest, sourceSizePtr, targetDestSize, cLevel, fillOutput);
+}
+
+
+
+/**************************************
+* Streaming Functions
+**************************************/
+/* allocation */
+LZ4_streamHC_t* LZ4_createStreamHC(void)
+{
+ LZ4_streamHC_t* const state =
+ (LZ4_streamHC_t*)ALLOC_AND_ZERO(sizeof(LZ4_streamHC_t));
+ if (state == NULL) return NULL;
+ LZ4_setCompressionLevel(state, LZ4HC_CLEVEL_DEFAULT);
+ return state;
+}
+
+int LZ4_freeStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr)
+{
+ DEBUGLOG(4, "LZ4_freeStreamHC(%p)", LZ4_streamHCPtr);
+ if (!LZ4_streamHCPtr) return 0; /* support free on NULL */
+ FREEMEM(LZ4_streamHCPtr);
+ return 0;
+}
+
+
+LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size)
+{
+ LZ4_streamHC_t* const LZ4_streamHCPtr = (LZ4_streamHC_t*)buffer;
+ /* if compilation fails here, LZ4_STREAMHCSIZE must be increased */
+ LZ4_STATIC_ASSERT(sizeof(LZ4HC_CCtx_internal) <= LZ4_STREAMHCSIZE);
+ DEBUGLOG(4, "LZ4_initStreamHC(%p, %u)", buffer, (unsigned)size);
+ /* check conditions */
+ if (buffer == NULL) return NULL;
+ if (size < sizeof(LZ4_streamHC_t)) return NULL;
+ if (!LZ4_isAligned(buffer, LZ4_streamHC_t_alignment())) return NULL;
+ /* init */
+ { LZ4HC_CCtx_internal* const hcstate = &(LZ4_streamHCPtr->internal_donotuse);
+ MEM_INIT(hcstate, 0, sizeof(*hcstate)); }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, LZ4HC_CLEVEL_DEFAULT);
+ return LZ4_streamHCPtr;
+}
+
+/* just a stub */
+void LZ4_resetStreamHC (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_resetStreamHC_fast (LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ DEBUGLOG(4, "LZ4_resetStreamHC_fast(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ if (LZ4_streamHCPtr->internal_donotuse.dirty) {
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ } else {
+ /* preserve end - base : can trigger clearTable's threshold */
+ LZ4_streamHCPtr->internal_donotuse.end -= (uptrval)LZ4_streamHCPtr->internal_donotuse.base;
+ LZ4_streamHCPtr->internal_donotuse.base = NULL;
+ LZ4_streamHCPtr->internal_donotuse.dictCtx = NULL;
+ }
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, compressionLevel);
+}
+
+void LZ4_setCompressionLevel(LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel)
+{
+ DEBUGLOG(5, "LZ4_setCompressionLevel(%p, %d)", LZ4_streamHCPtr, compressionLevel);
+ if (compressionLevel < 1) compressionLevel = LZ4HC_CLEVEL_DEFAULT;
+ if (compressionLevel > LZ4HC_CLEVEL_MAX) compressionLevel = LZ4HC_CLEVEL_MAX;
+ LZ4_streamHCPtr->internal_donotuse.compressionLevel = (short)compressionLevel;
+}
+
+void LZ4_favorDecompressionSpeed(LZ4_streamHC_t* LZ4_streamHCPtr, int favor)
+{
+ LZ4_streamHCPtr->internal_donotuse.favorDecSpeed = (favor!=0);
+}
+
+/* LZ4_loadDictHC() :
+ * LZ4_streamHCPtr is presumed properly initialized */
+int LZ4_loadDictHC (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* dictionary, int dictSize)
+{
+ LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(4, "LZ4_loadDictHC(ctx:%p, dict:%p, dictSize:%d)", LZ4_streamHCPtr, dictionary, dictSize);
+ assert(LZ4_streamHCPtr != NULL);
+ if (dictSize > 64 KB) {
+ dictionary += (size_t)dictSize - 64 KB;
+ dictSize = 64 KB;
+ }
+ /* need a full initialization, there are bad side-effects when using resetFast() */
+ { int const cLevel = ctxPtr->compressionLevel;
+ LZ4_initStreamHC(LZ4_streamHCPtr, sizeof(*LZ4_streamHCPtr));
+ LZ4_setCompressionLevel(LZ4_streamHCPtr, cLevel);
+ }
+ LZ4HC_init_internal (ctxPtr, (const BYTE*)dictionary);
+ ctxPtr->end = (const BYTE*)dictionary + dictSize;
+ if (dictSize >= 4) LZ4HC_Insert (ctxPtr, ctxPtr->end-3);
+ return dictSize;
+}
+
+void LZ4_attach_HC_dictionary(LZ4_streamHC_t *working_stream, const LZ4_streamHC_t *dictionary_stream) {
+ working_stream->internal_donotuse.dictCtx = dictionary_stream != NULL ? &(dictionary_stream->internal_donotuse) : NULL;
+}
+
+/* compression */
+
+static void LZ4HC_setExternalDict(LZ4HC_CCtx_internal* ctxPtr, const BYTE* newBlock)
+{
+ DEBUGLOG(4, "LZ4HC_setExternalDict(%p, %p)", ctxPtr, newBlock);
+ if (ctxPtr->end >= ctxPtr->base + ctxPtr->dictLimit + 4)
+ LZ4HC_Insert (ctxPtr, ctxPtr->end-3); /* Referencing remaining dictionary content */
+
+ /* Only one memory segment for extDict, so any previous extDict is lost at this stage */
+ ctxPtr->lowLimit = ctxPtr->dictLimit;
+ ctxPtr->dictLimit = (U32)(ctxPtr->end - ctxPtr->base);
+ ctxPtr->dictBase = ctxPtr->base;
+ ctxPtr->base = newBlock - ctxPtr->dictLimit;
+ ctxPtr->end = newBlock;
+ ctxPtr->nextToUpdate = ctxPtr->dictLimit; /* match referencing will resume from there */
+
+ /* cannot reference an extDict and a dictCtx at the same time */
+ ctxPtr->dictCtx = NULL;
+}
+
+static int
+LZ4_compressHC_continue_generic (LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int dstCapacity,
+ limitedOutput_directive limit)
+{
+ LZ4HC_CCtx_internal* const ctxPtr = &LZ4_streamHCPtr->internal_donotuse;
+ DEBUGLOG(5, "LZ4_compressHC_continue_generic(ctx=%p, src=%p, srcSize=%d, limit=%d)",
+ LZ4_streamHCPtr, src, *srcSizePtr, limit);
+ assert(ctxPtr != NULL);
+ /* auto-init if forgotten */
+ if (ctxPtr->base == NULL) LZ4HC_init_internal (ctxPtr, (const BYTE*) src);
+
+ /* Check overflow */
+ if ((size_t)(ctxPtr->end - ctxPtr->base) > 2 GB) {
+ size_t dictSize = (size_t)(ctxPtr->end - ctxPtr->base) - ctxPtr->dictLimit;
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ LZ4_loadDictHC(LZ4_streamHCPtr, (const char*)(ctxPtr->end) - dictSize, (int)dictSize);
+ }
+
+ /* Check if blocks follow each other */
+ if ((const BYTE*)src != ctxPtr->end)
+ LZ4HC_setExternalDict(ctxPtr, (const BYTE*)src);
+
+ /* Check overlapping input/dictionary space */
+ { const BYTE* sourceEnd = (const BYTE*) src + *srcSizePtr;
+ const BYTE* const dictBegin = ctxPtr->dictBase + ctxPtr->lowLimit;
+ const BYTE* const dictEnd = ctxPtr->dictBase + ctxPtr->dictLimit;
+ if ((sourceEnd > dictBegin) && ((const BYTE*)src < dictEnd)) {
+ if (sourceEnd > dictEnd) sourceEnd = dictEnd;
+ ctxPtr->lowLimit = (U32)(sourceEnd - ctxPtr->dictBase);
+ if (ctxPtr->dictLimit - ctxPtr->lowLimit < 4) ctxPtr->lowLimit = ctxPtr->dictLimit;
+ } }
+
+ return LZ4HC_compress_generic (ctxPtr, src, dst, srcSizePtr, dstCapacity, ctxPtr->compressionLevel, limit);
+}
+
+int LZ4_compress_HC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int srcSize, int dstCapacity)
+{
+ if (dstCapacity < LZ4_compressBound(srcSize))
+ return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, limitedOutput);
+ else
+ return LZ4_compressHC_continue_generic (LZ4_streamHCPtr, src, dst, &srcSize, dstCapacity, notLimited);
+}
+
+int LZ4_compress_HC_continue_destSize (LZ4_streamHC_t* LZ4_streamHCPtr, const char* src, char* dst, int* srcSizePtr, int targetDestSize)
+{
+ return LZ4_compressHC_continue_generic(LZ4_streamHCPtr, src, dst, srcSizePtr, targetDestSize, fillOutput);
+}
+
+
+
+/* LZ4_saveDictHC :
+ * save history content
+ * into a user-provided buffer
+ * which is then used to continue compression
+ */
+int LZ4_saveDictHC (LZ4_streamHC_t* LZ4_streamHCPtr, char* safeBuffer, int dictSize)
+{
+ LZ4HC_CCtx_internal* const streamPtr = &LZ4_streamHCPtr->internal_donotuse;
+ int const prefixSize = (int)(streamPtr->end - (streamPtr->base + streamPtr->dictLimit));
+ DEBUGLOG(5, "LZ4_saveDictHC(%p, %p, %d)", LZ4_streamHCPtr, safeBuffer, dictSize);
+ assert(prefixSize >= 0);
+ if (dictSize > 64 KB) dictSize = 64 KB;
+ if (dictSize < 4) dictSize = 0;
+ if (dictSize > prefixSize) dictSize = prefixSize;
+ if (safeBuffer == NULL) assert(dictSize == 0);
+ if (dictSize > 0)
+ memmove(safeBuffer, streamPtr->end - dictSize, dictSize);
+ { U32 const endIndex = (U32)(streamPtr->end - streamPtr->base);
+ streamPtr->end = (const BYTE*)safeBuffer + dictSize;
+ streamPtr->base = streamPtr->end - endIndex;
+ streamPtr->dictLimit = endIndex - (U32)dictSize;
+ streamPtr->lowLimit = endIndex - (U32)dictSize;
+ if (streamPtr->nextToUpdate < streamPtr->dictLimit)
+ streamPtr->nextToUpdate = streamPtr->dictLimit;
+ }
+ return dictSize;
+}
+
+
+/***************************************************
+* Deprecated Functions
+***************************************************/
+
+/* These functions currently generate deprecation warnings */
+
+/* Wrappers for deprecated compression functions */
+int LZ4_compressHC(const char* src, char* dst, int srcSize) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2(const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC (src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput(const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC(src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_withStateHC (void* state, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, LZ4_compressBound(srcSize), 0); }
+int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_extStateHC (state, src, dst, srcSize, maxDstSize, 0); }
+int LZ4_compressHC2_withStateHC (void* state, const char* src, char* dst, int srcSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, LZ4_compressBound(srcSize), cLevel); }
+int LZ4_compressHC2_limitedOutput_withStateHC (void* state, const char* src, char* dst, int srcSize, int maxDstSize, int cLevel) { return LZ4_compress_HC_extStateHC(state, src, dst, srcSize, maxDstSize, cLevel); }
+int LZ4_compressHC_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, LZ4_compressBound(srcSize)); }
+int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* ctx, const char* src, char* dst, int srcSize, int maxDstSize) { return LZ4_compress_HC_continue (ctx, src, dst, srcSize, maxDstSize); }
+
+
+/* Deprecated streaming functions */
+int LZ4_sizeofStreamStateHC(void) { return LZ4_STREAMHCSIZE; }
+
+/* state is presumed correctly sized, aka >= sizeof(LZ4_streamHC_t)
+ * @return : 0 on success, !=0 if error */
+int LZ4_resetStreamStateHC(void* state, char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_initStreamHC(state, sizeof(*hc4));
+ if (hc4 == NULL) return 1; /* init failed */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return 0;
+}
+
+void* LZ4_createHC (const char* inputBuffer)
+{
+ LZ4_streamHC_t* const hc4 = LZ4_createStreamHC();
+ if (hc4 == NULL) return NULL; /* not enough memory */
+ LZ4HC_init_internal (&hc4->internal_donotuse, (const BYTE*)inputBuffer);
+ return hc4;
+}
+
+int LZ4_freeHC (void* LZ4HC_Data)
+{
+ if (!LZ4HC_Data) return 0; /* support free on NULL */
+ FREEMEM(LZ4HC_Data);
+ return 0;
+}
+
+int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, 0, cLevel, notLimited);
+}
+
+int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* src, char* dst, int srcSize, int dstCapacity, int cLevel)
+{
+ return LZ4HC_compress_generic (&((LZ4_streamHC_t*)LZ4HC_Data)->internal_donotuse, src, dst, &srcSize, dstCapacity, cLevel, limitedOutput);
+}
+
+char* LZ4_slideInputBufferHC(void* LZ4HC_Data)
+{
+ LZ4_streamHC_t *ctx = (LZ4_streamHC_t*)LZ4HC_Data;
+ const BYTE *bufferStart = ctx->internal_donotuse.base + ctx->internal_donotuse.lowLimit;
+ LZ4_resetStreamHC_fast(ctx, ctx->internal_donotuse.compressionLevel);
+ /* avoid const char * -> char * conversion warning :( */
+ return (char *)(uptrval)bufferStart;
+}
+
+
+/* ================================================
+ * LZ4 Optimal parser (levels [LZ4HC_CLEVEL_OPT_MIN - LZ4HC_CLEVEL_MAX])
+ * ===============================================*/
+typedef struct {
+ int price;
+ int off;
+ int mlen;
+ int litlen;
+} LZ4HC_optimal_t;
+
+/* price in bytes */
+LZ4_FORCE_INLINE int LZ4HC_literalsPrice(int const litlen)
+{
+ int price = litlen;
+ assert(litlen >= 0);
+ if (litlen >= (int)RUN_MASK)
+ price += 1 + ((litlen-(int)RUN_MASK) / 255);
+ return price;
+}
+
+
+/* requires mlen >= MINMATCH */
+LZ4_FORCE_INLINE int LZ4HC_sequencePrice(int litlen, int mlen)
+{
+ int price = 1 + 2 ; /* token + 16-bit offset */
+ assert(litlen >= 0);
+ assert(mlen >= MINMATCH);
+
+ price += LZ4HC_literalsPrice(litlen);
+
+ if (mlen >= (int)(ML_MASK+MINMATCH))
+ price += 1 + ((mlen-(int)(ML_MASK+MINMATCH)) / 255);
+
+ return price;
+}
+
+
+typedef struct {
+ int off;
+ int len;
+} LZ4HC_match_t;
+
+LZ4_FORCE_INLINE LZ4HC_match_t
+LZ4HC_FindLongerMatch(LZ4HC_CCtx_internal* const ctx,
+ const BYTE* ip, const BYTE* const iHighLimit,
+ int minLen, int nbSearches,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ LZ4HC_match_t match = { 0 , 0 };
+ const BYTE* matchPtr = NULL;
+ /* note : LZ4HC_InsertAndGetWiderMatch() is able to modify the starting position of a match (*startpos),
+ * but this won't be the case here, as we define iLowLimit==ip,
+ * so LZ4HC_InsertAndGetWiderMatch() won't be allowed to search past ip */
+ int matchLength = LZ4HC_InsertAndGetWiderMatch(ctx, ip, ip, iHighLimit, minLen, &matchPtr, &ip, nbSearches, 1 /*patternAnalysis*/, 1 /*chainSwap*/, dict, favorDecSpeed);
+ if (matchLength <= minLen) return match;
+ if (favorDecSpeed) {
+ if ((matchLength>18) & (matchLength<=36)) matchLength=18; /* favor shortcut */
+ }
+ match.len = matchLength;
+ match.off = (int)(ip-matchPtr);
+ return match;
+}
+
+
+static int LZ4HC_compress_optimal ( LZ4HC_CCtx_internal* ctx,
+ const char* const source,
+ char* dst,
+ int* srcSizePtr,
+ int dstCapacity,
+ int const nbSearches,
+ size_t sufficient_len,
+ const limitedOutput_directive limit,
+ int const fullUpdate,
+ const dictCtx_directive dict,
+ const HCfavor_e favorDecSpeed)
+{
+ int retval = 0;
+#define TRAILING_LITERALS 3
+#ifdef LZ4HC_HEAPMODE
+ LZ4HC_optimal_t* const opt = (LZ4HC_optimal_t*)ALLOC(sizeof(LZ4HC_optimal_t) * (LZ4_OPT_NUM + TRAILING_LITERALS));
+#else
+ LZ4HC_optimal_t opt[LZ4_OPT_NUM + TRAILING_LITERALS]; /* ~64 KB, which is a bit large for stack... */
+#endif
+
+ const BYTE* ip = (const BYTE*) source;
+ const BYTE* anchor = ip;
+ const BYTE* const iend = ip + *srcSizePtr;
+ const BYTE* const mflimit = iend - MFLIMIT;
+ const BYTE* const matchlimit = iend - LASTLITERALS;
+ BYTE* op = (BYTE*) dst;
+ BYTE* opSaved = (BYTE*) dst;
+ BYTE* oend = op + dstCapacity;
+ int ovml = MINMATCH; /* overflow - last sequence */
+ const BYTE* ovref = NULL;
+
+ /* init */
+#ifdef LZ4HC_HEAPMODE
+ if (opt == NULL) goto _return_label;
+#endif
+ DEBUGLOG(5, "LZ4HC_compress_optimal(dst=%p, dstCapa=%u)", dst, (unsigned)dstCapacity);
+ *srcSizePtr = 0;
+ if (limit == fillOutput) oend -= LASTLITERALS; /* Hack for support LZ4 format restriction */
+ if (sufficient_len >= LZ4_OPT_NUM) sufficient_len = LZ4_OPT_NUM-1;
+
+ /* Main Loop */
+ while (ip <= mflimit) {
+ int const llen = (int)(ip - anchor);
+ int best_mlen, best_off;
+ int cur, last_match_pos = 0;
+
+ LZ4HC_match_t const firstMatch = LZ4HC_FindLongerMatch(ctx, ip, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
+ if (firstMatch.len==0) { ip++; continue; }
+
+ if ((size_t)firstMatch.len > sufficient_len) {
+ /* good enough solution : immediate encoding */
+ int const firstML = firstMatch.len;
+ const BYTE* const matchPos = ip - firstMatch.off;
+ opSaved = op;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), firstML, matchPos, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = firstML;
+ ovref = matchPos;
+ goto _dest_overflow;
+ }
+ continue;
+ }
+
+ /* set prices for first positions (literals) */
+ { int rPos;
+ for (rPos = 0 ; rPos < MINMATCH ; rPos++) {
+ int const cost = LZ4HC_literalsPrice(llen + rPos);
+ opt[rPos].mlen = 1;
+ opt[rPos].off = 0;
+ opt[rPos].litlen = llen + rPos;
+ opt[rPos].price = cost;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ rPos, cost, opt[rPos].litlen);
+ } }
+ /* set prices using initial match */
+ { int mlen = MINMATCH;
+ int const matchML = firstMatch.len; /* necessarily < sufficient_len < LZ4_OPT_NUM */
+ int const offset = firstMatch.off;
+ assert(matchML < LZ4_OPT_NUM);
+ for ( ; mlen <= matchML ; mlen++) {
+ int const cost = LZ4HC_sequencePrice(llen, mlen);
+ opt[mlen].mlen = mlen;
+ opt[mlen].off = offset;
+ opt[mlen].litlen = llen;
+ opt[mlen].price = cost;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i) -- initial setup",
+ mlen, cost, mlen);
+ } }
+ last_match_pos = firstMatch.len;
+ { int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+ opt[last_match_pos+addLit].mlen = 1; /* literal */
+ opt[last_match_pos+addLit].off = 0;
+ opt[last_match_pos+addLit].litlen = addLit;
+ opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i) -- initial setup",
+ last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+ } }
+
+ /* check further positions */
+ for (cur = 1; cur < last_match_pos; cur++) {
+ const BYTE* const curPtr = ip + cur;
+ LZ4HC_match_t newMatch;
+
+ if (curPtr > mflimit) break;
+ DEBUGLOG(7, "rPos:%u[%u] vs [%u]%u",
+ cur, opt[cur].price, opt[cur+1].price, cur+1);
+ if (fullUpdate) {
+ /* not useful to search here if next position has same (or lower) cost */
+ if ( (opt[cur+1].price <= opt[cur].price)
+ /* in some cases, next position has same cost, but cost rises sharply after, so a small match would still be beneficial */
+ && (opt[cur+MINMATCH].price < opt[cur].price + 3/*min seq price*/) )
+ continue;
+ } else {
+ /* not useful to search here if next position has same (or lower) cost */
+ if (opt[cur+1].price <= opt[cur].price) continue;
+ }
+
+ DEBUGLOG(7, "search at rPos:%u", cur);
+ if (fullUpdate)
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, MINMATCH-1, nbSearches, dict, favorDecSpeed);
+ else
+ /* only test matches of minimum length; slightly faster, but misses a few bytes */
+ newMatch = LZ4HC_FindLongerMatch(ctx, curPtr, matchlimit, last_match_pos - cur, nbSearches, dict, favorDecSpeed);
+ if (!newMatch.len) continue;
+
+ if ( ((size_t)newMatch.len > sufficient_len)
+ || (newMatch.len + cur >= LZ4_OPT_NUM) ) {
+ /* immediate encoding */
+ best_mlen = newMatch.len;
+ best_off = newMatch.off;
+ last_match_pos = cur + 1;
+ goto encode;
+ }
+
+ /* before match : set price with literals at beginning */
+ { int const baseLitlen = opt[cur].litlen;
+ int litlen;
+ for (litlen = 1; litlen < MINMATCH; litlen++) {
+ int const price = opt[cur].price - LZ4HC_literalsPrice(baseLitlen) + LZ4HC_literalsPrice(baseLitlen+litlen);
+ int const pos = cur + litlen;
+ if (price < opt[pos].price) {
+ opt[pos].mlen = 1; /* literal */
+ opt[pos].off = 0;
+ opt[pos].litlen = baseLitlen+litlen;
+ opt[pos].price = price;
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)",
+ pos, price, opt[pos].litlen);
+ } } }
+
+ /* set prices using match at position = cur */
+ { int const matchML = newMatch.len;
+ int ml = MINMATCH;
+
+ assert(cur + newMatch.len < LZ4_OPT_NUM);
+ for ( ; ml <= matchML ; ml++) {
+ int const pos = cur + ml;
+ int const offset = newMatch.off;
+ int price;
+ int ll;
+ DEBUGLOG(7, "testing price rPos %i (last_match_pos=%i)",
+ pos, last_match_pos);
+ if (opt[cur].mlen == 1) {
+ ll = opt[cur].litlen;
+ price = ((cur > ll) ? opt[cur - ll].price : 0)
+ + LZ4HC_sequencePrice(ll, ml);
+ } else {
+ ll = 0;
+ price = opt[cur].price + LZ4HC_sequencePrice(0, ml);
+ }
+
+ assert((U32)favorDecSpeed <= 1);
+ if (pos > last_match_pos+TRAILING_LITERALS
+ || price <= opt[pos].price - (int)favorDecSpeed) {
+ DEBUGLOG(7, "rPos:%3i => price:%3i (matchlen=%i)",
+ pos, price, ml);
+ assert(pos < LZ4_OPT_NUM);
+ if ( (ml == matchML) /* last pos of last match */
+ && (last_match_pos < pos) )
+ last_match_pos = pos;
+ opt[pos].mlen = ml;
+ opt[pos].off = offset;
+ opt[pos].litlen = ll;
+ opt[pos].price = price;
+ } } }
+ /* complete following positions with literals */
+ { int addLit;
+ for (addLit = 1; addLit <= TRAILING_LITERALS; addLit ++) {
+ opt[last_match_pos+addLit].mlen = 1; /* literal */
+ opt[last_match_pos+addLit].off = 0;
+ opt[last_match_pos+addLit].litlen = addLit;
+ opt[last_match_pos+addLit].price = opt[last_match_pos].price + LZ4HC_literalsPrice(addLit);
+ DEBUGLOG(7, "rPos:%3i => price:%3i (litlen=%i)", last_match_pos+addLit, opt[last_match_pos+addLit].price, addLit);
+ } }
+ } /* for (cur = 1; cur <= last_match_pos; cur++) */
+
+ assert(last_match_pos < LZ4_OPT_NUM + TRAILING_LITERALS);
+ best_mlen = opt[last_match_pos].mlen;
+ best_off = opt[last_match_pos].off;
+ cur = last_match_pos - best_mlen;
+
+encode: /* cur, last_match_pos, best_mlen, best_off must be set */
+ assert(cur < LZ4_OPT_NUM);
+ assert(last_match_pos >= 1); /* == 1 when only one candidate */
+ DEBUGLOG(6, "reverse traversal, looking for shortest path (last_match_pos=%i)", last_match_pos);
+ { int candidate_pos = cur;
+ int selected_matchLength = best_mlen;
+ int selected_offset = best_off;
+ while (1) { /* from end to beginning */
+ int const next_matchLength = opt[candidate_pos].mlen; /* can be 1, means literal */
+ int const next_offset = opt[candidate_pos].off;
+ DEBUGLOG(7, "pos %i: sequence length %i", candidate_pos, selected_matchLength);
+ opt[candidate_pos].mlen = selected_matchLength;
+ opt[candidate_pos].off = selected_offset;
+ selected_matchLength = next_matchLength;
+ selected_offset = next_offset;
+ if (next_matchLength > candidate_pos) break; /* last match elected, first match to encode */
+ assert(next_matchLength > 0); /* can be 1, means literal */
+ candidate_pos -= next_matchLength;
+ } }
+
+ /* encode all recorded sequences in order */
+ { int rPos = 0; /* relative position (to ip) */
+ while (rPos < last_match_pos) {
+ int const ml = opt[rPos].mlen;
+ int const offset = opt[rPos].off;
+ if (ml == 1) { ip++; rPos++; continue; } /* literal; note: can end up with several literals, in which case, skip them */
+ rPos += ml;
+ assert(ml >= MINMATCH);
+ assert((offset >= 1) && (offset <= LZ4_DISTANCE_MAX));
+ opSaved = op;
+ if ( LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ml, ip - offset, limit, oend) ) { /* updates ip, op and anchor */
+ ovml = ml;
+ ovref = ip - offset;
+ goto _dest_overflow;
+ } } }
+ } /* while (ip <= mflimit) */
+
+_last_literals:
+ /* Encode Last Literals */
+ { size_t lastRunSize = (size_t)(iend - anchor); /* literals */
+ size_t llAdd = (lastRunSize + 255 - RUN_MASK) / 255;
+ size_t const totalSize = 1 + llAdd + lastRunSize;
+ if (limit == fillOutput) oend += LASTLITERALS; /* restore correct value */
+ if (limit && (op + totalSize > oend)) {
+ if (limit == limitedOutput) { /* Check output limit */
+ retval = 0;
+ goto _return_label;
+ }
+ /* adapt lastRunSize to fill 'dst' */
+ lastRunSize = (size_t)(oend - op) - 1 /*token*/;
+ llAdd = (lastRunSize + 256 - RUN_MASK) / 256;
+ lastRunSize -= llAdd;
+ }
+ DEBUGLOG(6, "Final literal run : %i literals", (int)lastRunSize);
+ ip = anchor + lastRunSize; /* can be != iend if limit==fillOutput */
+
+ if (lastRunSize >= RUN_MASK) {
+ size_t accumulator = lastRunSize - RUN_MASK;
+ *op++ = (RUN_MASK << ML_BITS);
+ for(; accumulator >= 255 ; accumulator -= 255) *op++ = 255;
+ *op++ = (BYTE) accumulator;
+ } else {
+ *op++ = (BYTE)(lastRunSize << ML_BITS);
+ }
+ memcpy(op, anchor, lastRunSize);
+ op += lastRunSize;
+ }
+
+ /* End */
+ *srcSizePtr = (int) (((const char*)ip) - source);
+ retval = (int) ((char*)op-dst);
+ goto _return_label;
+
+_dest_overflow:
+if (limit == fillOutput) {
+ /* Assumption : ip, anchor, ovml and ovref must be set correctly */
+ size_t const ll = (size_t)(ip - anchor);
+ size_t const ll_addbytes = (ll + 240) / 255;
+ size_t const ll_totalCost = 1 + ll_addbytes + ll;
+ BYTE* const maxLitPos = oend - 3; /* 2 for offset, 1 for token */
+ DEBUGLOG(6, "Last sequence overflowing (only %i bytes remaining)", (int)(oend-1-opSaved));
+ op = opSaved; /* restore correct out pointer */
+ if (op + ll_totalCost <= maxLitPos) {
+ /* ll validated; now adjust match length */
+ size_t const bytesLeftForMl = (size_t)(maxLitPos - (op+ll_totalCost));
+ size_t const maxMlSize = MINMATCH + (ML_MASK-1) + (bytesLeftForMl * 255);
+ assert(maxMlSize < INT_MAX); assert(ovml >= 0);
+ if ((size_t)ovml > maxMlSize) ovml = (int)maxMlSize;
+ if ((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1 + ovml >= MFLIMIT) {
+ DEBUGLOG(6, "Space to end : %i + ml (%i)", (int)((oend + LASTLITERALS) - (op + ll_totalCost + 2) - 1), ovml);
+ DEBUGLOG(6, "Before : ip = %p, anchor = %p", ip, anchor);
+ LZ4HC_encodeSequence(UPDATABLE(ip, op, anchor), ovml, ovref, notLimited, oend);
+ DEBUGLOG(6, "After : ip = %p, anchor = %p", ip, anchor);
+ } }
+ goto _last_literals;
+}
+_return_label:
+#ifdef LZ4HC_HEAPMODE
+ FREEMEM(opt);
+#endif
+ return retval;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h b/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h
new file mode 100644
index 000000000..3d441fb6f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/lz4hc.h
@@ -0,0 +1,413 @@
+/*
+ LZ4 HC - High Compression Mode of LZ4
+ Header File
+ Copyright (C) 2011-2017, Yann Collet.
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - LZ4 source repository : https://github.com/lz4/lz4
+ - LZ4 public forum : https://groups.google.com/forum/#!forum/lz4c
+*/
+#ifndef LZ4_HC_H_19834876238432
+#define LZ4_HC_H_19834876238432
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/* --- Dependency --- */
+/* note : lz4hc requires lz4.h/lz4.c for compilation */
+#include "lz4.h" /* stddef, LZ4LIB_API, LZ4_DEPRECATED */
+
+
+/* --- Useful constants --- */
+#define LZ4HC_CLEVEL_MIN 3
+#define LZ4HC_CLEVEL_DEFAULT 9
+#define LZ4HC_CLEVEL_OPT_MIN 10
+#define LZ4HC_CLEVEL_MAX 12
+
+
+/*-************************************
+ * Block Compression
+ **************************************/
+/*! LZ4_compress_HC() :
+ * Compress data from `src` into `dst`, using the powerful but slower "HC" algorithm.
+ * `dst` must be already allocated.
+ * Compression is guaranteed to succeed if `dstCapacity >= LZ4_compressBound(srcSize)` (see "lz4.h")
+ * Max supported `srcSize` value is LZ4_MAX_INPUT_SIZE (see "lz4.h")
+ * `compressionLevel` : any value between 1 and LZ4HC_CLEVEL_MAX will work.
+ * Values > LZ4HC_CLEVEL_MAX behave the same as LZ4HC_CLEVEL_MAX.
+ * @return : the number of bytes written into 'dst'
+ * or 0 if compression fails.
+ */
+LZ4LIB_API int LZ4_compress_HC (const char* src, char* dst, int srcSize, int dstCapacity, int compressionLevel);
+
+
+/* Note :
+ * Decompression functions are provided within "lz4.h" (BSD license)
+ */
+
+
+/*! LZ4_compress_HC_extStateHC() :
+ * Same as LZ4_compress_HC(), but using an externally allocated memory segment for `state`.
+ * `state` size is provided by LZ4_sizeofStateHC().
+ * Memory segment must be aligned on 8-bytes boundaries (which a normal malloc() should do properly).
+ */
+LZ4LIB_API int LZ4_sizeofStateHC(void);
+LZ4LIB_API int LZ4_compress_HC_extStateHC(void* stateHC, const char* src, char* dst, int srcSize, int maxDstSize, int compressionLevel);
+
+
+/*! LZ4_compress_HC_destSize() : v1.9.0+
+ * Will compress as much data as possible from `src`
+ * to fit into `targetDstSize` budget.
+ * Result is provided in 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr is updated to indicate how much bytes were read from `src`
+ */
+LZ4LIB_API int LZ4_compress_HC_destSize(void* stateHC,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize,
+ int compressionLevel);
+
+
+/*-************************************
+ * Streaming Compression
+ * Bufferless synchronous API
+ **************************************/
+ typedef union LZ4_streamHC_u LZ4_streamHC_t; /* incomplete type (defined later) */
+
+/*! LZ4_createStreamHC() and LZ4_freeStreamHC() :
+ * These functions create and release memory for LZ4 HC streaming state.
+ * Newly created states are automatically initialized.
+ * A same state can be used multiple times consecutively,
+ * starting with LZ4_resetStreamHC_fast() to start a new stream of blocks.
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_createStreamHC(void);
+LZ4LIB_API int LZ4_freeStreamHC (LZ4_streamHC_t* streamHCPtr);
+
+/*
+ These functions compress data in successive blocks of any size,
+ using previous blocks as dictionary, to improve compression ratio.
+ One key assumption is that previous blocks (up to 64 KB) remain read-accessible while compressing next blocks.
+ There is an exception for ring buffers, which can be smaller than 64 KB.
+ Ring-buffer scenario is automatically detected and handled within LZ4_compress_HC_continue().
+
+ Before starting compression, state must be allocated and properly initialized.
+ LZ4_createStreamHC() does both, though compression level is set to LZ4HC_CLEVEL_DEFAULT.
+
+ Selecting the compression level can be done with LZ4_resetStreamHC_fast() (starts a new stream)
+ or LZ4_setCompressionLevel() (anytime, between blocks in the same stream) (experimental).
+ LZ4_resetStreamHC_fast() only works on states which have been properly initialized at least once,
+ which is automatically the case when state is created using LZ4_createStreamHC().
+
+ After reset, a first "fictional block" can be designated as initial dictionary,
+ using LZ4_loadDictHC() (Optional).
+
+ Invoke LZ4_compress_HC_continue() to compress each successive block.
+ The number of blocks is unlimited.
+ Previous input blocks, including initial dictionary when present,
+ must remain accessible and unmodified during compression.
+
+ It's allowed to update compression level anytime between blocks,
+ using LZ4_setCompressionLevel() (experimental).
+
+ 'dst' buffer should be sized to handle worst case scenarios
+ (see LZ4_compressBound(), it ensures compression success).
+ In case of failure, the API does not guarantee recovery,
+ so the state _must_ be reset.
+ To ensure compression success
+ whenever `dst` buffer size cannot be made >= LZ4_compressBound(),
+ consider using LZ4_compress_HC_continue_destSize().
+
+ Whenever previous input blocks can't be preserved unmodified in-place during compression of next blocks,
+ it's possible to copy the last blocks into a more stable memory space, using LZ4_saveDictHC().
+ Return value of LZ4_saveDictHC() is the size of dictionary effectively saved into 'safeBuffer' (<= 64 KB)
+
+ After completing a streaming compression,
+ it's possible to start a new stream of blocks, using the same LZ4_streamHC_t state,
+ just by resetting it, using LZ4_resetStreamHC_fast().
+*/
+
+LZ4LIB_API void LZ4_resetStreamHC_fast(LZ4_streamHC_t* streamHCPtr, int compressionLevel); /* v1.9.0+ */
+LZ4LIB_API int LZ4_loadDictHC (LZ4_streamHC_t* streamHCPtr, const char* dictionary, int dictSize);
+
+LZ4LIB_API int LZ4_compress_HC_continue (LZ4_streamHC_t* streamHCPtr,
+ const char* src, char* dst,
+ int srcSize, int maxDstSize);
+
+/*! LZ4_compress_HC_continue_destSize() : v1.9.0+
+ * Similar to LZ4_compress_HC_continue(),
+ * but will read as much data as possible from `src`
+ * to fit into `targetDstSize` budget.
+ * Result is provided into 2 parts :
+ * @return : the number of bytes written into 'dst' (necessarily <= targetDstSize)
+ * or 0 if compression fails.
+ * `srcSizePtr` : on success, *srcSizePtr will be updated to indicate how much bytes were read from `src`.
+ * Note that this function may not consume the entire input.
+ */
+LZ4LIB_API int LZ4_compress_HC_continue_destSize(LZ4_streamHC_t* LZ4_streamHCPtr,
+ const char* src, char* dst,
+ int* srcSizePtr, int targetDstSize);
+
+LZ4LIB_API int LZ4_saveDictHC (LZ4_streamHC_t* streamHCPtr, char* safeBuffer, int maxDictSize);
+
+
+
+/*^**********************************************
+ * !!!!!! STATIC LINKING ONLY !!!!!!
+ ***********************************************/
+
+/*-******************************************************************
+ * PRIVATE DEFINITIONS :
+ * Do not use these definitions directly.
+ * They are merely exposed to allow static allocation of `LZ4_streamHC_t`.
+ * Declare an `LZ4_streamHC_t` directly, rather than any type below.
+ * Even then, only do so in the context of static linking, as definitions may change between versions.
+ ********************************************************************/
+
+#define LZ4HC_DICTIONARY_LOGSIZE 16
+#define LZ4HC_MAXD (1<<LZ4HC_DICTIONARY_LOGSIZE)
+#define LZ4HC_MAXD_MASK (LZ4HC_MAXD - 1)
+
+#define LZ4HC_HASH_LOG 15
+#define LZ4HC_HASHTABLESIZE (1 << LZ4HC_HASH_LOG)
+#define LZ4HC_HASH_MASK (LZ4HC_HASHTABLESIZE - 1)
+
+
+typedef struct LZ4HC_CCtx_internal LZ4HC_CCtx_internal;
+struct LZ4HC_CCtx_internal
+{
+ LZ4_u32 hashTable[LZ4HC_HASHTABLESIZE];
+ LZ4_u16 chainTable[LZ4HC_MAXD];
+ const LZ4_byte* end; /* next block here to continue on current prefix */
+ const LZ4_byte* base; /* All index relative to this position */
+ const LZ4_byte* dictBase; /* alternate base for extDict */
+ LZ4_u32 dictLimit; /* below that point, need extDict */
+ LZ4_u32 lowLimit; /* below that point, no more dict */
+ LZ4_u32 nextToUpdate; /* index from which to continue dictionary update */
+ short compressionLevel;
+ LZ4_i8 favorDecSpeed; /* favor decompression speed if this flag set,
+ otherwise, favor compression ratio */
+ LZ4_i8 dirty; /* stream has to be fully reset if this flag is set */
+ const LZ4HC_CCtx_internal* dictCtx;
+};
+
+
+/* Do not use these definitions directly !
+ * Declare or allocate an LZ4_streamHC_t instead.
+ */
+#define LZ4_STREAMHCSIZE 262200 /* static size, for inter-version compatibility */
+#define LZ4_STREAMHCSIZE_VOIDP (LZ4_STREAMHCSIZE / sizeof(void*))
+union LZ4_streamHC_u {
+ void* table[LZ4_STREAMHCSIZE_VOIDP];
+ LZ4HC_CCtx_internal internal_donotuse;
+}; /* previously typedef'd to LZ4_streamHC_t */
+
+/* LZ4_streamHC_t :
+ * This structure allows static allocation of LZ4 HC streaming state.
+ * This can be used to allocate statically, on state, or as part of a larger structure.
+ *
+ * Such state **must** be initialized using LZ4_initStreamHC() before first use.
+ *
+ * Note that invoking LZ4_initStreamHC() is not required when
+ * the state was created using LZ4_createStreamHC() (which is recommended).
+ * Using the normal builder, a newly created state is automatically initialized.
+ *
+ * Static allocation shall only be used in combination with static linking.
+ */
+
+/* LZ4_initStreamHC() : v1.9.0+
+ * Required before first use of a statically allocated LZ4_streamHC_t.
+ * Before v1.9.0 : use LZ4_resetStreamHC() instead
+ */
+LZ4LIB_API LZ4_streamHC_t* LZ4_initStreamHC (void* buffer, size_t size);
+
+
+/*-************************************
+* Deprecated Functions
+**************************************/
+/* see lz4.h LZ4_DISABLE_DEPRECATE_WARNINGS to turn off deprecation warnings */
+
+/* deprecated compression functions */
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC (const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput (const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2 (const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput(const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_withStateHC (void* state, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_withStateHC (void* state, const char* source, char* dest, int inputSize, int maxOutputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_withStateHC (void* state, const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_extStateHC() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_withStateHC(void* state, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC_limitedOutput_continue (LZ4_streamHC_t* LZ4_streamHCPtr, const char* source, char* dest, int inputSize, int maxOutputSize);
+
+/* Obsolete streaming functions; degraded functionality; do not use!
+ *
+ * In order to perform streaming compression, these functions depended on data
+ * that is no longer tracked in the state. They have been preserved as well as
+ * possible: using them will still produce a correct output. However, use of
+ * LZ4_slideInputBufferHC() will truncate the history of the stream, rather
+ * than preserve a window-sized chunk of history.
+ */
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API void* LZ4_createHC (const char* inputBuffer);
+LZ4_DEPRECATED("use LZ4_saveDictHC() instead") LZ4LIB_API char* LZ4_slideInputBufferHC (void* LZ4HC_Data);
+LZ4_DEPRECATED("use LZ4_freeStreamHC() instead") LZ4LIB_API int LZ4_freeHC (void* LZ4HC_Data);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_compress_HC_continue() instead") LZ4LIB_API int LZ4_compressHC2_limitedOutput_continue (void* LZ4HC_Data, const char* source, char* dest, int inputSize, int maxOutputSize, int compressionLevel);
+LZ4_DEPRECATED("use LZ4_createStreamHC() instead") LZ4LIB_API int LZ4_sizeofStreamStateHC(void);
+LZ4_DEPRECATED("use LZ4_initStreamHC() instead") LZ4LIB_API int LZ4_resetStreamStateHC(void* state, char* inputBuffer);
+
+
+/* LZ4_resetStreamHC() is now replaced by LZ4_initStreamHC().
+ * The intention is to emphasize the difference with LZ4_resetStreamHC_fast(),
+ * which is now the recommended function to start a new stream of blocks,
+ * but cannot be used to initialize a memory segment containing arbitrary garbage data.
+ *
+ * It is recommended to switch to LZ4_initStreamHC().
+ * LZ4_resetStreamHC() will generate deprecation warnings in a future version.
+ */
+LZ4LIB_API void LZ4_resetStreamHC (LZ4_streamHC_t* streamHCPtr, int compressionLevel);
+
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* LZ4_HC_H_19834876238432 */
+
+
+/*-**************************************************
+ * !!!!! STATIC LINKING ONLY !!!!!
+ * Following definitions are considered experimental.
+ * They should not be linked from DLL,
+ * as there is no guarantee of API stability yet.
+ * Prototypes will be promoted to "stable" status
+ * after successfull usage in real-life scenarios.
+ ***************************************************/
+#ifdef LZ4_HC_STATIC_LINKING_ONLY /* protection macro */
+#ifndef LZ4_HC_SLO_098092834
+#define LZ4_HC_SLO_098092834
+
+#define LZ4_STATIC_LINKING_ONLY /* LZ4LIB_STATIC_API */
+#include "lz4.h"
+
+#if defined (__cplusplus)
+extern "C" {
+#endif
+
+/*! LZ4_setCompressionLevel() : v1.8.0+ (experimental)
+ * It's possible to change compression level
+ * between successive invocations of LZ4_compress_HC_continue*()
+ * for dynamic adaptation.
+ */
+LZ4LIB_STATIC_API void LZ4_setCompressionLevel(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_favorDecompressionSpeed() : v1.8.2+ (experimental)
+ * Opt. Parser will favor decompression speed over compression ratio.
+ * Only applicable to levels >= LZ4HC_CLEVEL_OPT_MIN.
+ */
+LZ4LIB_STATIC_API void LZ4_favorDecompressionSpeed(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int favor);
+
+/*! LZ4_resetStreamHC_fast() : v1.9.0+
+ * When an LZ4_streamHC_t is known to be in a internally coherent state,
+ * it can often be prepared for a new compression with almost no work, only
+ * sometimes falling back to the full, expensive reset that is always required
+ * when the stream is in an indeterminate state (i.e., the reset performed by
+ * LZ4_resetStreamHC()).
+ *
+ * LZ4_streamHCs are guaranteed to be in a valid state when:
+ * - returned from LZ4_createStreamHC()
+ * - reset by LZ4_resetStreamHC()
+ * - memset(stream, 0, sizeof(LZ4_streamHC_t))
+ * - the stream was in a valid state and was reset by LZ4_resetStreamHC_fast()
+ * - the stream was in a valid state and was then used in any compression call
+ * that returned success
+ * - the stream was in an indeterminate state and was used in a compression
+ * call that fully reset the state (LZ4_compress_HC_extStateHC()) and that
+ * returned success
+ *
+ * Note:
+ * A stream that was last used in a compression call that returned an error
+ * may be passed to this function. However, it will be fully reset, which will
+ * clear any existing history and settings from the context.
+ */
+LZ4LIB_STATIC_API void LZ4_resetStreamHC_fast(
+ LZ4_streamHC_t* LZ4_streamHCPtr, int compressionLevel);
+
+/*! LZ4_compress_HC_extStateHC_fastReset() :
+ * A variant of LZ4_compress_HC_extStateHC().
+ *
+ * Using this variant avoids an expensive initialization step. It is only safe
+ * to call if the state buffer is known to be correctly initialized already
+ * (see above comment on LZ4_resetStreamHC_fast() for a definition of
+ * "correctly initialized"). From a high level, the difference is that this
+ * function initializes the provided state with a call to
+ * LZ4_resetStreamHC_fast() while LZ4_compress_HC_extStateHC() starts with a
+ * call to LZ4_resetStreamHC().
+ */
+LZ4LIB_STATIC_API int LZ4_compress_HC_extStateHC_fastReset (
+ void* state,
+ const char* src, char* dst,
+ int srcSize, int dstCapacity,
+ int compressionLevel);
+
+/*! LZ4_attach_HC_dictionary() :
+ * This is an experimental API that allows for the efficient use of a
+ * static dictionary many times.
+ *
+ * Rather than re-loading the dictionary buffer into a working context before
+ * each compression, or copying a pre-loaded dictionary's LZ4_streamHC_t into a
+ * working LZ4_streamHC_t, this function introduces a no-copy setup mechanism,
+ * in which the working stream references the dictionary stream in-place.
+ *
+ * Several assumptions are made about the state of the dictionary stream.
+ * Currently, only streams which have been prepared by LZ4_loadDictHC() should
+ * be expected to work.
+ *
+ * Alternatively, the provided dictionary stream pointer may be NULL, in which
+ * case any existing dictionary stream is unset.
+ *
+ * A dictionary should only be attached to a stream without any history (i.e.,
+ * a stream that has just been reset).
+ *
+ * The dictionary will remain attached to the working stream only for the
+ * current stream session. Calls to LZ4_resetStreamHC(_fast) will remove the
+ * dictionary context association from the working stream. The dictionary
+ * stream (and source buffer) must remain in-place / accessible / unchanged
+ * through the lifetime of the stream session.
+ */
+LZ4LIB_STATIC_API void LZ4_attach_HC_dictionary(
+ LZ4_streamHC_t *working_stream,
+ const LZ4_streamHC_t *dictionary_stream);
+
+#if defined (__cplusplus)
+}
+#endif
+
+#endif /* LZ4_HC_SLO_098092834 */
+#endif /* LZ4_HC_STATIC_LINKING_ONLY */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/queue.h b/fluent-bit/lib/librdkafka-2.1.0/src/queue.h
new file mode 100644
index 000000000..d1ba14833
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/queue.h
@@ -0,0 +1,850 @@
+/* $NetBSD: queue.h,v 1.68 2014/11/19 08:10:01 uebayasi Exp $ */
+
+/*
+ * Copyright (c) 1991, 1993
+ * The Regents of the University of California. All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ * 3. Neither the name of the University nor the names of its contributors
+ * may be used to endorse or promote products derived from this software
+ * without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
+ * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
+ * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
+ * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
+ * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+ * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
+ * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
+ * SUCH DAMAGE.
+ *
+ * @(#)queue.h 8.5 (Berkeley) 8/20/94
+ */
+
+#ifndef _SYS_QUEUE_H_
+#define _SYS_QUEUE_H_
+
+/*
+ * This file defines five types of data structures: singly-linked lists,
+ * lists, simple queues, tail queues, and circular queues.
+ *
+ * A singly-linked list is headed by a single forward pointer. The
+ * elements are singly linked for minimum space and pointer manipulation
+ * overhead at the expense of O(n) removal for arbitrary elements. New
+ * elements can be added to the list after an existing element or at the
+ * head of the list. Elements being removed from the head of the list
+ * should use the explicit macro for this purpose for optimum
+ * efficiency. A singly-linked list may only be traversed in the forward
+ * direction. Singly-linked lists are ideal for applications with large
+ * datasets and few or no removals or for implementing a LIFO queue.
+ *
+ * A list is headed by a single forward pointer (or an array of forward
+ * pointers for a hash table header). The elements are doubly linked
+ * so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before
+ * or after an existing element or at the head of the list. A list
+ * may only be traversed in the forward direction.
+ *
+ * A simple queue is headed by a pair of pointers, one the head of the
+ * list and the other to the tail of the list. The elements are singly
+ * linked to save space, so elements can only be removed from the
+ * head of the list. New elements can be added to the list after
+ * an existing element, at the head of the list, or at the end of the
+ * list. A simple queue may only be traversed in the forward direction.
+ *
+ * A tail queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or
+ * after an existing element, at the head of the list, or at the end of
+ * the list. A tail queue may be traversed in either direction.
+ *
+ * A circle queue is headed by a pair of pointers, one to the head of the
+ * list and the other to the tail of the list. The elements are doubly
+ * linked so that an arbitrary element can be removed without a need to
+ * traverse the list. New elements can be added to the list before or after
+ * an existing element, at the head of the list, or at the end of the list.
+ * A circle queue may be traversed in either direction, but has a more
+ * complex end of list detection.
+ *
+ * For details on the use of these macros, see the queue(3) manual page.
+ */
+
+/*
+ * Include the definition of NULL only on NetBSD because sys/null.h
+ * is not available elsewhere. This conditional makes the header
+ * portable and it can simply be dropped verbatim into any system.
+ * The caveat is that on other systems some other header
+ * must provide NULL before the macros can be used.
+ */
+#ifdef __NetBSD__
+#include <sys/null.h>
+#endif
+
+#if defined(QUEUEDEBUG)
+# if defined(_KERNEL)
+# define QUEUEDEBUG_ABORT(...) panic(__VA_ARGS__)
+# else
+# include <err.h>
+# define QUEUEDEBUG_ABORT(...) err(1, __VA_ARGS__)
+# endif
+#endif
+
+/*
+ * Singly-linked List definitions.
+ */
+#define SLIST_HEAD(name, type) \
+struct name { \
+ struct type *slh_first; /* first element */ \
+}
+
+#define SLIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define SLIST_ENTRY(type) \
+struct { \
+ struct type *sle_next; /* next element */ \
+}
+
+/*
+ * Singly-linked List access methods.
+ */
+#define SLIST_FIRST(head) ((head)->slh_first)
+#define SLIST_END(head) NULL
+#define SLIST_EMPTY(head) ((head)->slh_first == NULL)
+#define SLIST_NEXT(elm, field) ((elm)->field.sle_next)
+
+#define SLIST_FOREACH(var, head, field) \
+ for((var) = (head)->slh_first; \
+ (var) != SLIST_END(head); \
+ (var) = (var)->field.sle_next)
+
+#define SLIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = SLIST_FIRST((head)); \
+ (var) != SLIST_END(head) && \
+ ((tvar) = SLIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+/*
+ * Singly-linked List functions.
+ */
+#define SLIST_INIT(head) do { \
+ (head)->slh_first = SLIST_END(head); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_INSERT_AFTER(slistelm, elm, field) do { \
+ (elm)->field.sle_next = (slistelm)->field.sle_next; \
+ (slistelm)->field.sle_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_INSERT_HEAD(head, elm, field) do { \
+ (elm)->field.sle_next = (head)->slh_first; \
+ (head)->slh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_REMOVE_AFTER(slistelm, field) do { \
+ (slistelm)->field.sle_next = \
+ SLIST_NEXT(SLIST_NEXT((slistelm), field), field); \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_REMOVE_HEAD(head, field) do { \
+ (head)->slh_first = (head)->slh_first->field.sle_next; \
+} while (/*CONSTCOND*/0)
+
+#define SLIST_REMOVE(head, elm, type, field) do { \
+ if ((head)->slh_first == (elm)) { \
+ SLIST_REMOVE_HEAD((head), field); \
+ } \
+ else { \
+ struct type *curelm = (head)->slh_first; \
+ while(curelm->field.sle_next != (elm)) \
+ curelm = curelm->field.sle_next; \
+ curelm->field.sle_next = \
+ curelm->field.sle_next->field.sle_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+
+/*
+ * List definitions.
+ */
+#define LIST_HEAD(name, type) \
+struct name { \
+ struct type *lh_first; /* first element */ \
+}
+
+#define LIST_HEAD_INITIALIZER(head) \
+ { NULL }
+
+#define LIST_ENTRY(type) \
+struct { \
+ struct type *le_next; /* next element */ \
+ struct type **le_prev; /* address of previous next element */ \
+}
+
+/*
+ * List access methods.
+ */
+#define LIST_FIRST(head) ((head)->lh_first)
+#define LIST_END(head) NULL
+#define LIST_EMPTY(head) ((head)->lh_first == LIST_END(head))
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); \
+ (var) != LIST_END(head); \
+ (var) = ((var)->field.le_next))
+
+#define LIST_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = LIST_FIRST((head)); \
+ (var) != LIST_END(head) && \
+ ((tvar) = LIST_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define LIST_MOVE(head1, head2) do { \
+ LIST_INIT((head2)); \
+ if (!LIST_EMPTY((head1))) { \
+ (head2)->lh_first = (head1)->lh_first; \
+ LIST_INIT((head1)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+/*
+ * List functions.
+ */
+#if defined(QUEUEDEBUG)
+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field) \
+ if ((head)->lh_first && \
+ (head)->lh_first->field.le_prev != &(head)->lh_first) \
+ QUEUEDEBUG_ABORT("LIST_INSERT_HEAD %p %s:%d", (head), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_LIST_OP(elm, field) \
+ if ((elm)->field.le_next && \
+ (elm)->field.le_next->field.le_prev != \
+ &(elm)->field.le_next) \
+ QUEUEDEBUG_ABORT("LIST_* forw %p %s:%d", (elm), \
+ __FILE__, __LINE__); \
+ if (*(elm)->field.le_prev != (elm)) \
+ QUEUEDEBUG_ABORT("LIST_* back %p %s:%d", (elm), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field) \
+ (elm)->field.le_next = (void *)1L; \
+ (elm)->field.le_prev = (void *)1L;
+#else
+#define QUEUEDEBUG_LIST_INSERT_HEAD(head, elm, field)
+#define QUEUEDEBUG_LIST_OP(elm, field)
+#define QUEUEDEBUG_LIST_POSTREMOVE(elm, field)
+#endif
+
+#define LIST_INIT(head) do { \
+ (head)->lh_first = LIST_END(head); \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_AFTER(listelm, elm, field) do { \
+ QUEUEDEBUG_LIST_OP((listelm), field) \
+ if (((elm)->field.le_next = (listelm)->field.le_next) != \
+ LIST_END(head)) \
+ (listelm)->field.le_next->field.le_prev = \
+ &(elm)->field.le_next; \
+ (listelm)->field.le_next = (elm); \
+ (elm)->field.le_prev = &(listelm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_BEFORE(listelm, elm, field) do { \
+ QUEUEDEBUG_LIST_OP((listelm), field) \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_INSERT_HEAD(head, elm, field) do { \
+ QUEUEDEBUG_LIST_INSERT_HEAD((head), (elm), field) \
+ if (((elm)->field.le_next = (head)->lh_first) != LIST_END(head))\
+ (head)->lh_first->field.le_prev = &(elm)->field.le_next;\
+ (head)->lh_first = (elm); \
+ (elm)->field.le_prev = &(head)->lh_first; \
+} while (/*CONSTCOND*/0)
+
+#define LIST_REMOVE(elm, field) do { \
+ QUEUEDEBUG_LIST_OP((elm), field) \
+ if ((elm)->field.le_next != NULL) \
+ (elm)->field.le_next->field.le_prev = \
+ (elm)->field.le_prev; \
+ *(elm)->field.le_prev = (elm)->field.le_next; \
+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
+} while (/*CONSTCOND*/0)
+
+#define LIST_REPLACE(elm, elm2, field) do { \
+ if (((elm2)->field.le_next = (elm)->field.le_next) != NULL) \
+ (elm2)->field.le_next->field.le_prev = \
+ &(elm2)->field.le_next; \
+ (elm2)->field.le_prev = (elm)->field.le_prev; \
+ *(elm2)->field.le_prev = (elm2); \
+ QUEUEDEBUG_LIST_POSTREMOVE((elm), field) \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Simple queue definitions.
+ */
+#define SIMPLEQ_HEAD(name, type) \
+struct name { \
+ struct type *sqh_first; /* first element */ \
+ struct type **sqh_last; /* addr of last next element */ \
+}
+
+#define SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+
+#define SIMPLEQ_ENTRY(type) \
+struct { \
+ struct type *sqe_next; /* next element */ \
+}
+
+/*
+ * Simple queue access methods.
+ */
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#define SIMPLEQ_END(head) NULL
+#define SIMPLEQ_EMPTY(head) ((head)->sqh_first == SIMPLEQ_END(head))
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->sqh_first); \
+ (var) != SIMPLEQ_END(head); \
+ (var) = ((var)->field.sqe_next))
+
+#define SIMPLEQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->sqh_first); \
+ (var) != SIMPLEQ_END(head) && \
+ ((next = ((var)->field.sqe_next)), 1); \
+ (var) = (next))
+
+/*
+ * Simple queue functions.
+ */
+#define SIMPLEQ_INIT(head) do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == NULL)\
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_REMOVE_AFTER(head, elm, field) do { \
+ if (((elm)->field.sqe_next = (elm)->field.sqe_next->field.sqe_next) \
+ == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->sqh_first == (elm)) { \
+ SIMPLEQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->sqh_first; \
+ while (curelm->field.sqe_next != (elm)) \
+ curelm = curelm->field.sqe_next; \
+ if ((curelm->field.sqe_next = \
+ curelm->field.sqe_next->field.sqe_next) == NULL) \
+ (head)->sqh_last = &(curelm)->field.sqe_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_CONCAT(head1, head2) do { \
+ if (!SIMPLEQ_EMPTY((head2))) { \
+ *(head1)->sqh_last = (head2)->sqh_first; \
+ (head1)->sqh_last = (head2)->sqh_last; \
+ SIMPLEQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define SIMPLEQ_LAST(head, type, field) \
+ (SIMPLEQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->sqh_last) - offsetof(struct type, field))))
+
+/*
+ * Tail queue definitions.
+ */
+#define _TAILQ_HEAD(name, type, qual) \
+struct name { \
+ qual type *tqh_first; /* first element */ \
+ qual type *qual *tqh_last; /* addr of last next element */ \
+}
+#define TAILQ_HEAD(name, type) _TAILQ_HEAD(name, struct type,)
+
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { TAILQ_END(head), &(head).tqh_first }
+
+#define _TAILQ_ENTRY(type, qual) \
+struct { \
+ qual type *tqe_next; /* next element */ \
+ qual type *qual *tqe_prev; /* address of previous next element */\
+}
+#define TAILQ_ENTRY(type) _TAILQ_ENTRY(struct type,)
+
+/*
+ * Tail queue access methods.
+ */
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#define TAILQ_END(head) (NULL)
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#define TAILQ_EMPTY(head) (TAILQ_FIRST(head) == TAILQ_END(head))
+
+
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); \
+ (var) != TAILQ_END(head); \
+ (var) = ((var)->field.tqe_next))
+
+#define TAILQ_FOREACH_SAFE(var, head, field, next) \
+ for ((var) = ((head)->tqh_first); \
+ (var) != TAILQ_END(head) && \
+ ((next) = TAILQ_NEXT(var, field), 1); (var) = (next))
+
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last));\
+ (var) != TAILQ_END(head); \
+ (var) = (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+
+#define TAILQ_FOREACH_REVERSE_SAFE(var, head, headname, field, prev) \
+ for ((var) = TAILQ_LAST((head), headname); \
+ (var) != TAILQ_END(head) && \
+ ((prev) = TAILQ_PREV((var), headname, field), 1); (var) = (prev))
+
+/*
+ * Tail queue functions.
+ */
+#if defined(QUEUEDEBUG)
+#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field) \
+ if ((head)->tqh_first && \
+ (head)->tqh_first->field.tqe_prev != &(head)->tqh_first) \
+ QUEUEDEBUG_ABORT("TAILQ_INSERT_HEAD %p %s:%d", (head), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field) \
+ if (*(head)->tqh_last != NULL) \
+ QUEUEDEBUG_ABORT("TAILQ_INSERT_TAIL %p %s:%d", (head), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_TAILQ_OP(elm, field) \
+ if ((elm)->field.tqe_next && \
+ (elm)->field.tqe_next->field.tqe_prev != \
+ &(elm)->field.tqe_next) \
+ QUEUEDEBUG_ABORT("TAILQ_* forw %p %s:%d", (elm), \
+ __FILE__, __LINE__); \
+ if (*(elm)->field.tqe_prev != (elm)) \
+ QUEUEDEBUG_ABORT("TAILQ_* back %p %s:%d", (elm), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field) \
+ if ((elm)->field.tqe_next == NULL && \
+ (head)->tqh_last != &(elm)->field.tqe_next) \
+ QUEUEDEBUG_ABORT("TAILQ_PREREMOVE head %p elm %p %s:%d",\
+ (head), (elm), __FILE__, __LINE__);
+#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field) \
+ (elm)->field.tqe_next = (void *)1L; \
+ (elm)->field.tqe_prev = (void *)1L;
+#else
+#define QUEUEDEBUG_TAILQ_INSERT_HEAD(head, elm, field)
+#define QUEUEDEBUG_TAILQ_INSERT_TAIL(head, elm, field)
+#define QUEUEDEBUG_TAILQ_OP(elm, field)
+#define QUEUEDEBUG_TAILQ_PREREMOVE(head, elm, field)
+#define QUEUEDEBUG_TAILQ_POSTREMOVE(elm, field)
+#endif
+
+#define TAILQ_INIT(head) do { \
+ (head)->tqh_first = TAILQ_END(head); \
+ (head)->tqh_last = &(head)->tqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_HEAD(head, elm, field) do { \
+ QUEUEDEBUG_TAILQ_INSERT_HEAD((head), (elm), field) \
+ if (((elm)->field.tqe_next = (head)->tqh_first) != TAILQ_END(head))\
+ (head)->tqh_first->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (head)->tqh_first = (elm); \
+ (elm)->field.tqe_prev = &(head)->tqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_TAIL(head, elm, field) do { \
+ QUEUEDEBUG_TAILQ_INSERT_TAIL((head), (elm), field) \
+ (elm)->field.tqe_next = TAILQ_END(head); \
+ (elm)->field.tqe_prev = (head)->tqh_last; \
+ *(head)->tqh_last = (elm); \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ QUEUEDEBUG_TAILQ_OP((listelm), field) \
+ if (((elm)->field.tqe_next = (listelm)->field.tqe_next) != \
+ TAILQ_END(head)) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ &(elm)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm)->field.tqe_next; \
+ (listelm)->field.tqe_next = (elm); \
+ (elm)->field.tqe_prev = &(listelm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) do { \
+ QUEUEDEBUG_TAILQ_OP((listelm), field) \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_REMOVE(head, elm, field) do { \
+ QUEUEDEBUG_TAILQ_PREREMOVE((head), (elm), field) \
+ QUEUEDEBUG_TAILQ_OP((elm), field) \
+ if (((elm)->field.tqe_next) != TAILQ_END(head)) \
+ (elm)->field.tqe_next->field.tqe_prev = \
+ (elm)->field.tqe_prev; \
+ else \
+ (head)->tqh_last = (elm)->field.tqe_prev; \
+ *(elm)->field.tqe_prev = (elm)->field.tqe_next; \
+ QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_REPLACE(head, elm, elm2, field) do { \
+ if (((elm2)->field.tqe_next = (elm)->field.tqe_next) != \
+ TAILQ_END(head)) \
+ (elm2)->field.tqe_next->field.tqe_prev = \
+ &(elm2)->field.tqe_next; \
+ else \
+ (head)->tqh_last = &(elm2)->field.tqe_next; \
+ (elm2)->field.tqe_prev = (elm)->field.tqe_prev; \
+ *(elm2)->field.tqe_prev = (elm2); \
+ QUEUEDEBUG_TAILQ_POSTREMOVE((elm), field); \
+} while (/*CONSTCOND*/0)
+
+#define TAILQ_CONCAT(head1, head2, field) do { \
+ if (!TAILQ_EMPTY(head2)) { \
+ *(head1)->tqh_last = (head2)->tqh_first; \
+ (head2)->tqh_first->field.tqe_prev = (head1)->tqh_last; \
+ (head1)->tqh_last = (head2)->tqh_last; \
+ TAILQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+/*
+ * Singly-linked Tail queue declarations.
+ */
+#define STAILQ_HEAD(name, type) \
+struct name { \
+ struct type *stqh_first; /* first element */ \
+ struct type **stqh_last; /* addr of last next element */ \
+}
+
+#define STAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).stqh_first }
+
+#define STAILQ_ENTRY(type) \
+struct { \
+ struct type *stqe_next; /* next element */ \
+}
+
+/*
+ * Singly-linked Tail queue access methods.
+ */
+#define STAILQ_FIRST(head) ((head)->stqh_first)
+#define STAILQ_END(head) NULL
+#define STAILQ_NEXT(elm, field) ((elm)->field.stqe_next)
+#define STAILQ_EMPTY(head) (STAILQ_FIRST(head) == STAILQ_END(head))
+
+/*
+ * Singly-linked Tail queue functions.
+ */
+#define STAILQ_INIT(head) do { \
+ (head)->stqh_first = NULL; \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_HEAD(head, elm, field) do { \
+ if (((elm)->field.stqe_next = (head)->stqh_first) == NULL) \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+ (head)->stqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_TAIL(head, elm, field) do { \
+ (elm)->field.stqe_next = NULL; \
+ *(head)->stqh_last = (elm); \
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ if (((elm)->field.stqe_next = (listelm)->field.stqe_next) == NULL)\
+ (head)->stqh_last = &(elm)->field.stqe_next; \
+ (listelm)->field.stqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_REMOVE_HEAD(head, field) do { \
+ if (((head)->stqh_first = (head)->stqh_first->field.stqe_next) == NULL) \
+ (head)->stqh_last = &(head)->stqh_first; \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_REMOVE(head, elm, type, field) do { \
+ if ((head)->stqh_first == (elm)) { \
+ STAILQ_REMOVE_HEAD((head), field); \
+ } else { \
+ struct type *curelm = (head)->stqh_first; \
+ while (curelm->field.stqe_next != (elm)) \
+ curelm = curelm->field.stqe_next; \
+ if ((curelm->field.stqe_next = \
+ curelm->field.stqe_next->field.stqe_next) == NULL) \
+ (head)->stqh_last = &(curelm)->field.stqe_next; \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->stqh_first); \
+ (var); \
+ (var) = ((var)->field.stqe_next))
+
+#define STAILQ_FOREACH_SAFE(var, head, field, tvar) \
+ for ((var) = STAILQ_FIRST((head)); \
+ (var) && ((tvar) = STAILQ_NEXT((var), field), 1); \
+ (var) = (tvar))
+
+#define STAILQ_CONCAT(head1, head2) do { \
+ if (!STAILQ_EMPTY((head2))) { \
+ *(head1)->stqh_last = (head2)->stqh_first; \
+ (head1)->stqh_last = (head2)->stqh_last; \
+ STAILQ_INIT((head2)); \
+ } \
+} while (/*CONSTCOND*/0)
+
+#define STAILQ_LAST(head, type, field) \
+ (STAILQ_EMPTY((head)) ? \
+ NULL : \
+ ((struct type *)(void *) \
+ ((char *)((head)->stqh_last) - offsetof(struct type, field))))
+
+
+#ifndef _KERNEL
+/*
+ * Circular queue definitions. Do not use. We still keep the macros
+ * for compatibility but because of pointer aliasing issues their use
+ * is discouraged!
+ */
+
+/*
+ * __launder_type(): We use this ugly hack to work around the the compiler
+ * noticing that two types may not alias each other and elide tests in code.
+ * We hit this in the CIRCLEQ macros when comparing 'struct name *' and
+ * 'struct type *' (see CIRCLEQ_HEAD()). Modern compilers (such as GCC
+ * 4.8) declare these comparisons as always false, causing the code to
+ * not run as designed.
+ *
+ * This hack is only to be used for comparisons and thus can be fully const.
+ * Do not use for assignment.
+ *
+ * If we ever choose to change the ABI of the CIRCLEQ macros, we could fix
+ * this by changing the head/tail sentinal values, but see the note above
+ * this one.
+ */
+#ifdef _MSC_VER
+#define __launder_type(x) ((const void *)(x))
+#else
+static inline const void * __launder_type(const void *);
+static inline const void *
+__launder_type(const void *__x)
+{
+ __asm __volatile("" : "+r" (__x));
+ return __x;
+}
+#endif
+
+#if defined(QUEUEDEBUG)
+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field) \
+ if ((head)->cqh_first != CIRCLEQ_ENDC(head) && \
+ (head)->cqh_first->field.cqe_prev != CIRCLEQ_ENDC(head)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ head forw %p %s:%d", (head), \
+ __FILE__, __LINE__); \
+ if ((head)->cqh_last != CIRCLEQ_ENDC(head) && \
+ (head)->cqh_last->field.cqe_next != CIRCLEQ_ENDC(head)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ head back %p %s:%d", (head), \
+ __FILE__, __LINE__);
+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field) \
+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) { \
+ if ((head)->cqh_last != (elm)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm last %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
+ } else { \
+ if ((elm)->field.cqe_next->field.cqe_prev != (elm)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm forw %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
+ } \
+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) { \
+ if ((head)->cqh_first != (elm)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm first %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
+ } else { \
+ if ((elm)->field.cqe_prev->field.cqe_next != (elm)) \
+ QUEUEDEBUG_ABORT("CIRCLEQ elm prev %p %s:%d", \
+ (elm), __FILE__, __LINE__); \
+ }
+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field) \
+ (elm)->field.cqe_next = (void *)1L; \
+ (elm)->field.cqe_prev = (void *)1L;
+#else
+#define QUEUEDEBUG_CIRCLEQ_HEAD(head, field)
+#define QUEUEDEBUG_CIRCLEQ_ELM(head, elm, field)
+#define QUEUEDEBUG_CIRCLEQ_POSTREMOVE(elm, field)
+#endif
+
+#define CIRCLEQ_HEAD(name, type) \
+struct name { \
+ struct type *cqh_first; /* first element */ \
+ struct type *cqh_last; /* last element */ \
+}
+
+#define CIRCLEQ_HEAD_INITIALIZER(head) \
+ { CIRCLEQ_END(&head), CIRCLEQ_END(&head) }
+
+#define CIRCLEQ_ENTRY(type) \
+struct { \
+ struct type *cqe_next; /* next element */ \
+ struct type *cqe_prev; /* previous element */ \
+}
+
+/*
+ * Circular queue functions.
+ */
+#define CIRCLEQ_INIT(head) do { \
+ (head)->cqh_first = CIRCLEQ_END(head); \
+ (head)->cqh_last = CIRCLEQ_END(head); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_AFTER(head, listelm, elm, field) do { \
+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
+ QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
+ (elm)->field.cqe_next = (listelm)->field.cqe_next; \
+ (elm)->field.cqe_prev = (listelm); \
+ if ((listelm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (listelm)->field.cqe_next->field.cqe_prev = (elm); \
+ (listelm)->field.cqe_next = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_BEFORE(head, listelm, elm, field) do { \
+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
+ QUEUEDEBUG_CIRCLEQ_ELM((head), (listelm), field) \
+ (elm)->field.cqe_next = (listelm); \
+ (elm)->field.cqe_prev = (listelm)->field.cqe_prev; \
+ if ((listelm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (listelm)->field.cqe_prev->field.cqe_next = (elm); \
+ (listelm)->field.cqe_prev = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_HEAD(head, elm, field) do { \
+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
+ (elm)->field.cqe_next = (head)->cqh_first; \
+ (elm)->field.cqe_prev = CIRCLEQ_END(head); \
+ if ((head)->cqh_last == CIRCLEQ_ENDC(head)) \
+ (head)->cqh_last = (elm); \
+ else \
+ (head)->cqh_first->field.cqe_prev = (elm); \
+ (head)->cqh_first = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_INSERT_TAIL(head, elm, field) do { \
+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
+ (elm)->field.cqe_next = CIRCLEQ_END(head); \
+ (elm)->field.cqe_prev = (head)->cqh_last; \
+ if ((head)->cqh_first == CIRCLEQ_ENDC(head)) \
+ (head)->cqh_first = (elm); \
+ else \
+ (head)->cqh_last->field.cqe_next = (elm); \
+ (head)->cqh_last = (elm); \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_REMOVE(head, elm, field) do { \
+ QUEUEDEBUG_CIRCLEQ_HEAD((head), field) \
+ QUEUEDEBUG_CIRCLEQ_ELM((head), (elm), field) \
+ if ((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
+ (head)->cqh_last = (elm)->field.cqe_prev; \
+ else \
+ (elm)->field.cqe_next->field.cqe_prev = \
+ (elm)->field.cqe_prev; \
+ if ((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
+ (head)->cqh_first = (elm)->field.cqe_next; \
+ else \
+ (elm)->field.cqe_prev->field.cqe_next = \
+ (elm)->field.cqe_next; \
+ QUEUEDEBUG_CIRCLEQ_POSTREMOVE((elm), field) \
+} while (/*CONSTCOND*/0)
+
+#define CIRCLEQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->cqh_first); \
+ (var) != CIRCLEQ_ENDC(head); \
+ (var) = ((var)->field.cqe_next))
+
+#define CIRCLEQ_FOREACH_REVERSE(var, head, field) \
+ for ((var) = ((head)->cqh_last); \
+ (var) != CIRCLEQ_ENDC(head); \
+ (var) = ((var)->field.cqe_prev))
+
+/*
+ * Circular queue access methods.
+ */
+#define CIRCLEQ_FIRST(head) ((head)->cqh_first)
+#define CIRCLEQ_LAST(head) ((head)->cqh_last)
+/* For comparisons */
+#define CIRCLEQ_ENDC(head) (__launder_type(head))
+/* For assignments */
+#define CIRCLEQ_END(head) ((void *)(head))
+#define CIRCLEQ_NEXT(elm, field) ((elm)->field.cqe_next)
+#define CIRCLEQ_PREV(elm, field) ((elm)->field.cqe_prev)
+#define CIRCLEQ_EMPTY(head) \
+ (CIRCLEQ_FIRST(head) == CIRCLEQ_ENDC(head))
+
+#define CIRCLEQ_LOOP_NEXT(head, elm, field) \
+ (((elm)->field.cqe_next == CIRCLEQ_ENDC(head)) \
+ ? ((head)->cqh_first) \
+ : (elm->field.cqe_next))
+#define CIRCLEQ_LOOP_PREV(head, elm, field) \
+ (((elm)->field.cqe_prev == CIRCLEQ_ENDC(head)) \
+ ? ((head)->cqh_last) \
+ : (elm->field.cqe_prev))
+#endif /* !_KERNEL */
+
+#endif /* !_SYS_QUEUE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rd.h b/fluent-bit/lib/librdkafka-2.1.0/src/rd.h
new file mode 100644
index 000000000..670605de4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rd.h
@@ -0,0 +1,436 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RD_H_
+#define _RD_H_
+
+#ifndef _WIN32
+#ifndef _GNU_SOURCE
+#define _GNU_SOURCE /* for strndup() */
+#endif
+
+#if defined(__APPLE__) && !defined(_DARWIN_C_SOURCE)
+#define _DARWIN_C_SOURCE /* for strlcpy, pthread_setname_np, etc */
+#endif
+
+#define __need_IOV_MAX
+#ifndef _POSIX_C_SOURCE
+#define _POSIX_C_SOURCE 200809L /* for timespec on solaris */
+#endif
+#endif
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <time.h>
+#include <assert.h>
+#include <limits.h>
+
+#include "tinycthread.h"
+#include "rdsysqueue.h"
+
+#ifdef _WIN32
+/* Visual Studio */
+#include "win32_config.h"
+#else
+/* POSIX / UNIX based systems */
+#include "../config.h" /* mklove output */
+#endif
+
+#ifdef _WIN32
+/* Win32/Visual Studio */
+#include "rdwin32.h"
+
+#else
+/* POSIX / UNIX based systems */
+#include "rdposix.h"
+#endif
+
+#include "rdtypes.h"
+
+#if WITH_SYSLOG
+#include <syslog.h>
+#else
+#define LOG_EMERG 0
+#define LOG_ALERT 1
+#define LOG_CRIT 2
+#define LOG_ERR 3
+#define LOG_WARNING 4
+#define LOG_NOTICE 5
+#define LOG_INFO 6
+#define LOG_DEBUG 7
+#endif
+
+
+/* Debug assert, only enabled with --enable-devel */
+#if ENABLE_DEVEL == 1
+#define rd_dassert(cond) rd_assert(cond)
+#else
+#define rd_dassert(cond) \
+ do { \
+ } while (0)
+#endif
+
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+/** Function attribute to indicate that a sentinel NULL is required at the
+ * end of the va-arg input list. */
+#define RD_SENTINEL __attribute__((__sentinel__))
+#else
+#define RD_SENTINEL
+#endif
+
+
+/** Assert if reached */
+#define RD_NOTREACHED() rd_assert(!*"/* NOTREACHED */ violated")
+
+/** Assert if reached */
+#define RD_BUG(...) \
+ do { \
+ fprintf(stderr, \
+ "INTERNAL ERROR: librdkafka %s:%d: ", __FUNCTION__, \
+ __LINE__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ rd_assert(!*"INTERNAL ERROR IN LIBRDKAFKA"); \
+ } while (0)
+
+
+
+/**
+ * Allocator wrappers.
+ * We serve under the premise that if a (small) memory
+ * allocation fails all hope is lost and the application
+ * will fail anyway, so no need to handle it handsomely.
+ */
+static RD_INLINE RD_UNUSED void *rd_calloc(size_t num, size_t sz) {
+ void *p = calloc(num, sz);
+ rd_assert(p);
+ return p;
+}
+
+static RD_INLINE RD_UNUSED void *rd_malloc(size_t sz) {
+ void *p = malloc(sz);
+ rd_assert(p);
+ return p;
+}
+
+static RD_INLINE RD_UNUSED void *rd_realloc(void *ptr, size_t sz) {
+ void *p = realloc(ptr, sz);
+ rd_assert(p);
+ return p;
+}
+
+static RD_INLINE RD_UNUSED void rd_free(void *ptr) {
+ free(ptr);
+}
+
+static RD_INLINE RD_UNUSED char *rd_strdup(const char *s) {
+#ifndef _WIN32
+ char *n = strdup(s);
+#else
+ char *n = _strdup(s);
+#endif
+ rd_assert(n);
+ return n;
+}
+
+static RD_INLINE RD_UNUSED char *rd_strndup(const char *s, size_t len) {
+#if HAVE_STRNDUP
+ char *n = strndup(s, len);
+ rd_assert(n);
+#else
+ char *n = (char *)rd_malloc(len + 1);
+ rd_assert(n);
+ memcpy(n, s, len);
+ n[len] = '\0';
+#endif
+ return n;
+}
+
+
+
+/*
+ * Portability
+ */
+
+#ifdef strndupa
+#define rd_strndupa(DESTPTR, PTR, LEN) (*(DESTPTR) = strndupa(PTR, LEN))
+#else
+#define rd_strndupa(DESTPTR, PTR, LEN) \
+ do { \
+ const char *_src = (PTR); \
+ size_t _srclen = (LEN); \
+ char *_dst = rd_alloca(_srclen + 1); \
+ memcpy(_dst, _src, _srclen); \
+ _dst[_srclen] = '\0'; \
+ *(DESTPTR) = _dst; \
+ } while (0)
+#endif
+
+#ifdef strdupa
+#define rd_strdupa(DESTPTR, PTR) (*(DESTPTR) = strdupa(PTR))
+#else
+#define rd_strdupa(DESTPTR, PTR) \
+ do { \
+ const char *_src1 = (PTR); \
+ size_t _srclen1 = strlen(_src1); \
+ rd_strndupa(DESTPTR, _src1, _srclen1); \
+ } while (0)
+#endif
+
+#ifndef IOV_MAX
+#ifdef __APPLE__
+/* Some versions of MacOSX dont have IOV_MAX */
+#define IOV_MAX 1024
+#elif defined(_WIN32) || defined(__GNU__)
+/* There is no IOV_MAX on MSVC or GNU but it is used internally in librdkafka */
+#define IOV_MAX 1024
+#else
+#error "IOV_MAX not defined"
+#endif
+#endif
+
+
+/* Round/align X upwards to STRIDE, which must be power of 2. */
+#define RD_ROUNDUP(X, STRIDE) (((X) + ((STRIDE)-1)) & ~(STRIDE - 1))
+
+#define RD_ARRAY_SIZE(A) (sizeof((A)) / sizeof(*(A)))
+#define RD_ARRAYSIZE(A) RD_ARRAY_SIZE(A)
+#define RD_SIZEOF(TYPE, MEMBER) sizeof(((TYPE *)NULL)->MEMBER)
+#define RD_OFFSETOF(TYPE, MEMBER) ((size_t) & (((TYPE *)NULL)->MEMBER))
+
+/**
+ * Returns the 'I'th array element from static sized array 'A'
+ * or NULL if 'I' is out of range.
+ * var-args is an optional prefix to provide the correct return type.
+ */
+#define RD_ARRAY_ELEM(A, I, ...) \
+ ((unsigned int)(I) < RD_ARRAY_SIZE(A) ? __VA_ARGS__(A)[(I)] : NULL)
+
+
+#define RD_STRINGIFY(X) #X
+
+
+
+#define RD_MIN(a, b) ((a) < (b) ? (a) : (b))
+#define RD_MAX(a, b) ((a) > (b) ? (a) : (b))
+
+
+/**
+ * Cap an integer (of any type) to reside within the defined limit.
+ */
+#define RD_INT_CAP(val, low, hi) \
+ ((val) < (low) ? low : ((val) > (hi) ? (hi) : (val)))
+
+
+
+/**
+ * Allocate 'size' bytes, copy 'src', return pointer to new memory.
+ *
+ * Use rd_free() to free the returned pointer.
+ */
+static RD_INLINE RD_UNUSED void *rd_memdup(const void *src, size_t size) {
+ void *dst = rd_malloc(size);
+ memcpy(dst, src, size);
+ return dst;
+}
+
+/**
+ * @brief Memset &OBJ to 0, does automatic sizeof(OBJ).
+ */
+#define RD_MEMZERO(OBJ) memset(&(OBJ), 0, sizeof(OBJ))
+
+
+/**
+ * Generic refcnt interface
+ */
+
+#if !HAVE_ATOMICS_32
+#define RD_REFCNT_USE_LOCKS 1
+#endif
+
+#ifdef RD_REFCNT_USE_LOCKS
+typedef struct rd_refcnt_t {
+ mtx_t lock;
+ int v;
+} rd_refcnt_t;
+#else
+typedef rd_atomic32_t rd_refcnt_t;
+#endif
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_init(rd_refcnt_t *R, int v) {
+ int r;
+ mtx_init(&R->lock, mtx_plain);
+ mtx_lock(&R->lock);
+ r = R->v = v;
+ mtx_unlock(&R->lock);
+ return r;
+}
+#else
+#define rd_refcnt_init(R, v) rd_atomic32_init(R, v)
+#endif
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED void rd_refcnt_destroy(rd_refcnt_t *R) {
+ mtx_lock(&R->lock);
+ rd_assert(R->v == 0);
+ mtx_unlock(&R->lock);
+
+ mtx_destroy(&R->lock);
+}
+#else
+#define rd_refcnt_destroy(R) \
+ do { \
+ } while (0)
+#endif
+
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_set(rd_refcnt_t *R, int v) {
+ int r;
+ mtx_lock(&R->lock);
+ r = R->v = v;
+ mtx_unlock(&R->lock);
+ return r;
+}
+#else
+#define rd_refcnt_set(R, v) rd_atomic32_set(R, v)
+#endif
+
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_add0(rd_refcnt_t *R) {
+ int r;
+ mtx_lock(&R->lock);
+ r = ++(R->v);
+ mtx_unlock(&R->lock);
+ return r;
+}
+#else
+#define rd_refcnt_add0(R) rd_atomic32_add(R, 1)
+#endif
+
+static RD_INLINE RD_UNUSED int rd_refcnt_sub0(rd_refcnt_t *R) {
+ int r;
+#ifdef RD_REFCNT_USE_LOCKS
+ mtx_lock(&R->lock);
+ r = --(R->v);
+ mtx_unlock(&R->lock);
+#else
+ r = rd_atomic32_sub(R, 1);
+#endif
+ if (r < 0)
+ rd_assert(!*"refcnt sub-zero");
+ return r;
+}
+
+#ifdef RD_REFCNT_USE_LOCKS
+static RD_INLINE RD_UNUSED int rd_refcnt_get(rd_refcnt_t *R) {
+ int r;
+ mtx_lock(&R->lock);
+ r = R->v;
+ mtx_unlock(&R->lock);
+ return r;
+}
+#else
+#define rd_refcnt_get(R) rd_atomic32_get(R)
+#endif
+
+/**
+ * A wrapper for decreasing refcount and calling a destroy function
+ * when refcnt reaches 0.
+ */
+#define rd_refcnt_destroywrapper(REFCNT, DESTROY_CALL) \
+ do { \
+ if (rd_refcnt_sub(REFCNT) > 0) \
+ break; \
+ DESTROY_CALL; \
+ } while (0)
+
+
+#define rd_refcnt_destroywrapper2(REFCNT, WHAT, DESTROY_CALL) \
+ do { \
+ if (rd_refcnt_sub2(REFCNT, WHAT) > 0) \
+ break; \
+ DESTROY_CALL; \
+ } while (0)
+
+#if ENABLE_REFCNT_DEBUG
+#define rd_refcnt_add_fl(FUNC, LINE, R) \
+ (fprintf(stderr, "REFCNT DEBUG: %-35s %d +1: %16p: %s:%d\n", #R, \
+ rd_refcnt_get(R), (R), (FUNC), (LINE)), \
+ rd_refcnt_add0(R))
+
+#define rd_refcnt_add(R) rd_refcnt_add_fl(__FUNCTION__, __LINE__, (R))
+
+#define rd_refcnt_add2(R, WHAT) \
+ do { \
+ fprintf(stderr, \
+ "REFCNT DEBUG: %-35s %d +1: %16p: %16s: %s:%d\n", #R, \
+ rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \
+ rd_refcnt_add0(R); \
+ } while (0)
+
+#define rd_refcnt_sub2(R, WHAT) \
+ (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %16s: %s:%d\n", #R, \
+ rd_refcnt_get(R), (R), WHAT, __FUNCTION__, __LINE__), \
+ rd_refcnt_sub0(R))
+
+#define rd_refcnt_sub(R) \
+ (fprintf(stderr, "REFCNT DEBUG: %-35s %d -1: %16p: %s:%d\n", #R, \
+ rd_refcnt_get(R), (R), __FUNCTION__, __LINE__), \
+ rd_refcnt_sub0(R))
+
+#else
+#define rd_refcnt_add_fl(FUNC, LINE, R) rd_refcnt_add0(R)
+#define rd_refcnt_add(R) rd_refcnt_add0(R)
+#define rd_refcnt_sub(R) rd_refcnt_sub0(R)
+#endif
+
+
+
+#define RD_IF_FREE(PTR, FUNC) \
+ do { \
+ if ((PTR)) \
+ FUNC(PTR); \
+ } while (0)
+
+
+/**
+ * @brief Utility types to hold memory,size tuple.
+ */
+
+typedef struct rd_chariov_s {
+ char *ptr;
+ size_t size;
+} rd_chariov_t;
+
+#endif /* _RD_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c
new file mode 100644
index 000000000..092406233
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.c
@@ -0,0 +1,255 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+
+#include "rd.h"
+#include "rdaddr.h"
+#include "rdrand.h"
+
+#ifdef _WIN32
+#include <ws2tcpip.h>
+#endif
+
+const char *rd_sockaddr2str(const void *addr, int flags) {
+ const rd_sockaddr_inx_t *a = (const rd_sockaddr_inx_t *)addr;
+ static RD_TLS char ret[32][256];
+ static RD_TLS int reti = 0;
+ char portstr[32];
+ int of = 0;
+ int niflags = NI_NUMERICSERV;
+ int r;
+
+ reti = (reti + 1) % 32;
+
+ switch (a->sinx_family) {
+ case AF_INET:
+ case AF_INET6:
+ if (flags & RD_SOCKADDR2STR_F_FAMILY)
+ of += rd_snprintf(&ret[reti][of],
+ sizeof(ret[reti]) - of, "ipv%i#",
+ a->sinx_family == AF_INET ? 4 : 6);
+
+ if ((flags & RD_SOCKADDR2STR_F_PORT) &&
+ a->sinx_family == AF_INET6)
+ ret[reti][of++] = '[';
+
+ if (!(flags & RD_SOCKADDR2STR_F_RESOLVE))
+ niflags |= NI_NUMERICHOST;
+
+ retry:
+ if ((r = getnameinfo(
+ (const struct sockaddr *)a, RD_SOCKADDR_INX_LEN(a),
+
+ ret[reti] + of, sizeof(ret[reti]) - of,
+
+ (flags & RD_SOCKADDR2STR_F_PORT) ? portstr : NULL,
+
+ (flags & RD_SOCKADDR2STR_F_PORT) ? sizeof(portstr) : 0,
+
+ niflags))) {
+
+ if (r == EAI_AGAIN && !(niflags & NI_NUMERICHOST)) {
+ /* If unable to resolve name, retry without
+ * name resolution. */
+ niflags |= NI_NUMERICHOST;
+ goto retry;
+ }
+ break;
+ }
+
+
+ if (flags & RD_SOCKADDR2STR_F_PORT) {
+ size_t len = strlen(ret[reti]);
+ rd_snprintf(
+ ret[reti] + len, sizeof(ret[reti]) - len, "%s:%s",
+ a->sinx_family == AF_INET6 ? "]" : "", portstr);
+ }
+
+ return ret[reti];
+ }
+
+
+ /* Error-case */
+ rd_snprintf(ret[reti], sizeof(ret[reti]), "<unsupported:%s>",
+ rd_family2str(a->sinx_family));
+
+ return ret[reti];
+}
+
+
+const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc) {
+ static RD_TLS char snode[256];
+ static RD_TLS char ssvc[64];
+ const char *t;
+ const char *svct = NULL;
+ size_t nodelen = 0;
+
+ *snode = '\0';
+ *ssvc = '\0';
+
+ if (*nodesvc == '[') {
+ /* "[host]".. (enveloped node name) */
+ if (!(t = strchr(nodesvc, ']')))
+ return "Missing close-']'";
+ nodesvc++;
+ nodelen = t - nodesvc;
+ svct = t + 1;
+
+ } else if (*nodesvc == ':' && *(nodesvc + 1) != ':') {
+ /* ":".. (port only) */
+ nodelen = 0;
+ svct = nodesvc;
+ }
+
+ if ((svct = strrchr(svct ? svct : nodesvc, ':')) &&
+ (*(svct - 1) != ':') && *(++svct)) {
+ /* Optional ":service" definition. */
+ if (strlen(svct) >= sizeof(ssvc))
+ return "Service name too long";
+ strcpy(ssvc, svct);
+ if (!nodelen)
+ nodelen = svct - nodesvc - 1;
+
+ } else if (!nodelen)
+ nodelen = strlen(nodesvc);
+
+ if (nodelen) {
+ /* Truncate nodename if necessary. */
+ nodelen = RD_MIN(nodelen, sizeof(snode) - 1);
+ memcpy(snode, nodesvc, nodelen);
+ snode[nodelen] = '\0';
+ }
+
+ *node = snode;
+ *svc = ssvc;
+
+ return NULL;
+}
+
+
+
+rd_sockaddr_list_t *
+rd_getaddrinfo(const char *nodesvc,
+ const char *defsvc,
+ int flags,
+ int family,
+ int socktype,
+ int protocol,
+ int (*resolve_cb)(const char *node,
+ const char *service,
+ const struct addrinfo *hints,
+ struct addrinfo **res,
+ void *opaque),
+ void *opaque,
+ const char **errstr) {
+ struct addrinfo hints;
+ memset(&hints, 0, sizeof(hints));
+ hints.ai_family = family;
+ hints.ai_socktype = socktype;
+ hints.ai_protocol = protocol;
+ hints.ai_flags = flags;
+
+ struct addrinfo *ais, *ai;
+ char *node, *svc;
+ int r;
+ int cnt = 0;
+ rd_sockaddr_list_t *rsal;
+
+ if ((*errstr = rd_addrinfo_prepare(nodesvc, &node, &svc))) {
+ errno = EINVAL;
+ return NULL;
+ }
+
+ if (*svc)
+ defsvc = svc;
+
+ if (resolve_cb) {
+ r = resolve_cb(node, defsvc, &hints, &ais, opaque);
+ } else {
+ r = getaddrinfo(node, defsvc, &hints, &ais);
+ }
+
+ if (r) {
+#ifdef EAI_SYSTEM
+ if (r == EAI_SYSTEM)
+#else
+ if (0)
+#endif
+ *errstr = rd_strerror(errno);
+ else {
+#ifdef _WIN32
+ *errstr = gai_strerrorA(r);
+#else
+ *errstr = gai_strerror(r);
+#endif
+ errno = EFAULT;
+ }
+ return NULL;
+ }
+
+ /* Count number of addresses */
+ for (ai = ais; ai != NULL; ai = ai->ai_next)
+ cnt++;
+
+ if (cnt == 0) {
+ /* unlikely? */
+ if (resolve_cb)
+ resolve_cb(NULL, NULL, NULL, &ais, opaque);
+ else
+ freeaddrinfo(ais);
+ errno = ENOENT;
+ *errstr = "No addresses";
+ return NULL;
+ }
+
+
+ rsal = rd_calloc(1, sizeof(*rsal) + (sizeof(*rsal->rsal_addr) * cnt));
+
+ for (ai = ais; ai != NULL; ai = ai->ai_next)
+ memcpy(&rsal->rsal_addr[rsal->rsal_cnt++], ai->ai_addr,
+ ai->ai_addrlen);
+
+ if (resolve_cb)
+ resolve_cb(NULL, NULL, NULL, &ais, opaque);
+ else
+ freeaddrinfo(ais);
+
+ /* Shuffle address list for proper round-robin */
+ if (!(flags & RD_AI_NOSHUFFLE))
+ rd_array_shuffle(rsal->rsal_addr, rsal->rsal_cnt,
+ sizeof(*rsal->rsal_addr));
+
+ return rsal;
+}
+
+
+
+void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal) {
+ rd_free(rsal);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h
new file mode 100644
index 000000000..c8574d019
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdaddr.h
@@ -0,0 +1,203 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDADDR_H_
+#define _RDADDR_H_
+
+#ifndef _WIN32
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <netdb.h>
+#else
+#define WIN32_MEAN_AND_LEAN
+#include <winsock2.h>
+#include <ws2ipdef.h>
+#endif
+
+#if defined(__FreeBSD__) || defined(_AIX) || defined(__OpenBSD__)
+#include <sys/socket.h>
+#endif
+
+/**
+ * rd_sockaddr_inx_t is a union for either ipv4 or ipv6 sockaddrs.
+ * It provides conveniant abstraction of AF_INET* agnostic operations.
+ */
+typedef union {
+ struct sockaddr_in in;
+ struct sockaddr_in6 in6;
+} rd_sockaddr_inx_t;
+#define sinx_family in.sin_family
+#define sinx_addr in.sin_addr
+#define RD_SOCKADDR_INX_LEN(sinx) \
+ ((sinx)->sinx_family == AF_INET \
+ ? sizeof(struct sockaddr_in) \
+ : (sinx)->sinx_family == AF_INET6 ? sizeof(struct sockaddr_in6) \
+ : sizeof(rd_sockaddr_inx_t))
+#define RD_SOCKADDR_INX_PORT(sinx) \
+ ((sinx)->sinx_family == AF_INET \
+ ? (sinx)->in.sin_port \
+ : (sinx)->sinx_family == AF_INET6 ? (sinx)->in6.sin6_port : 0)
+
+#define RD_SOCKADDR_INX_PORT_SET(sinx, port) \
+ do { \
+ if ((sinx)->sinx_family == AF_INET) \
+ (sinx)->in.sin_port = port; \
+ else if ((sinx)->sinx_family == AF_INET6) \
+ (sinx)->in6.sin6_port = port; \
+ } while (0)
+
+
+
+/**
+ * Returns a thread-local temporary string (may be called up to 32 times
+ * without buffer wrapping) containing the human string representation
+ * of the sockaddr (which should be AF_INET or AF_INET6 at this point).
+ * If the RD_SOCKADDR2STR_F_PORT is provided the port number will be
+ * appended to the string.
+ * IPv6 address enveloping ("[addr]:port") will also be performed
+ * if .._F_PORT is set.
+ */
+#define RD_SOCKADDR2STR_F_PORT 0x1 /* Append the port. */
+#define RD_SOCKADDR2STR_F_RESOLVE \
+ 0x2 /* Try to resolve address to hostname. \
+ */
+#define RD_SOCKADDR2STR_F_FAMILY 0x4 /* Prepend address family. */
+#define RD_SOCKADDR2STR_F_NICE /* Nice and friendly output */ \
+ (RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_RESOLVE)
+const char *rd_sockaddr2str(const void *addr, int flags);
+
+
+/**
+ * Splits a node:service definition up into their node and svc counterparts
+ * suitable for passing to getaddrinfo().
+ * Returns NULL on success (and temporarily available pointers in '*node'
+ * and '*svc') or error string on failure.
+ *
+ * Thread-safe but returned buffers in '*node' and '*svc' are only
+ * usable until the next call to rd_addrinfo_prepare() in the same thread.
+ */
+const char *rd_addrinfo_prepare(const char *nodesvc, char **node, char **svc);
+
+
+
+typedef struct rd_sockaddr_list_s {
+ int rsal_cnt;
+ int rsal_curr;
+ rd_sockaddr_inx_t rsal_addr[];
+} rd_sockaddr_list_t;
+
+
+/**
+ * Returns the next address from a sockaddr list and updates
+ * the current-index to point to it.
+ *
+ * Typical usage is for round-robin connection attempts or similar:
+ * while (1) {
+ * rd_sockaddr_inx_t *sinx = rd_sockaddr_list_next(my_server_list);
+ * if (do_connect((struct sockaddr *)sinx) == -1) {
+ * sleep(1);
+ * continue;
+ * }
+ * ...
+ * }
+ *
+ */
+
+static RD_INLINE rd_sockaddr_inx_t *
+rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) RD_UNUSED;
+static RD_INLINE rd_sockaddr_inx_t *
+rd_sockaddr_list_next(rd_sockaddr_list_t *rsal) {
+ rsal->rsal_curr = (rsal->rsal_curr + 1) % rsal->rsal_cnt;
+ return &rsal->rsal_addr[rsal->rsal_curr];
+}
+
+
+#define RD_SOCKADDR_LIST_FOREACH(sinx, rsal) \
+ for ((sinx) = &(rsal)->rsal_addr[0]; \
+ (sinx) < &(rsal)->rsal_addr[(rsal)->rsal_len]; (sinx)++)
+
+/**
+ * Wrapper for getaddrinfo(3) that performs these additional tasks:
+ * - Input is a combined "<node>[:<svc>]" string, with support for
+ * IPv6 enveloping ("[addr]:port").
+ * - Returns a rd_sockaddr_list_t which must be freed with
+ * rd_sockaddr_list_destroy() when done with it.
+ * - Automatically shuffles the returned address list to provide
+ * round-robin (unless RD_AI_NOSHUFFLE is provided in 'flags').
+ *
+ * Thread-safe.
+ */
+#define RD_AI_NOSHUFFLE \
+ 0x10000000 /* Dont shuffle returned address list. \
+ * FIXME: Guessing non-used bits like this \
+ * is a bad idea. */
+
+struct addrinfo;
+
+rd_sockaddr_list_t *
+rd_getaddrinfo(const char *nodesvc,
+ const char *defsvc,
+ int flags,
+ int family,
+ int socktype,
+ int protocol,
+ int (*resolve_cb)(const char *node,
+ const char *service,
+ const struct addrinfo *hints,
+ struct addrinfo **res,
+ void *opaque),
+ void *opaque,
+ const char **errstr);
+
+
+
+/**
+ * Frees a sockaddr list.
+ *
+ * Thread-safe.
+ */
+void rd_sockaddr_list_destroy(rd_sockaddr_list_t *rsal);
+
+
+
+/**
+ * Returns the human readable name of a socket family.
+ */
+static const char *rd_family2str(int af) RD_UNUSED;
+static const char *rd_family2str(int af) {
+ switch (af) {
+ case AF_INET:
+ return "inet";
+ case AF_INET6:
+ return "inet6";
+ default:
+ return "af?";
+ };
+}
+
+#endif /* _RDADDR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h
new file mode 100644
index 000000000..aa7d3d770
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdatomic.h
@@ -0,0 +1,226 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2014-2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDATOMIC_H_
+#define _RDATOMIC_H_
+
+#include "tinycthread.h"
+
+typedef struct {
+ int32_t val;
+#if !defined(_WIN32) && !HAVE_ATOMICS_32
+ mtx_t lock;
+#endif
+} rd_atomic32_t;
+
+typedef struct {
+ int64_t val;
+#if !defined(_WIN32) && !HAVE_ATOMICS_64
+ mtx_t lock;
+#endif
+} rd_atomic64_t;
+
+
+static RD_INLINE RD_UNUSED void rd_atomic32_init(rd_atomic32_t *ra, int32_t v) {
+ ra->val = v;
+#if !defined(_WIN32) && !HAVE_ATOMICS_32
+ mtx_init(&ra->lock, mtx_plain);
+#endif
+}
+
+
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_add(rd_atomic32_t *ra,
+ int32_t v) {
+#ifdef __SUNPRO_C
+ return atomic_add_32_nv(&ra->val, v);
+#elif defined(_WIN32)
+ return InterlockedAdd((LONG *)&ra->val, v);
+#elif !HAVE_ATOMICS_32
+ int32_t r;
+ mtx_lock(&ra->lock);
+ ra->val += v;
+ r = ra->val;
+ mtx_unlock(&ra->lock);
+ return r;
+#else
+ return ATOMIC_OP32(add, fetch, &ra->val, v);
+#endif
+}
+
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_sub(rd_atomic32_t *ra,
+ int32_t v) {
+#ifdef __SUNPRO_C
+ return atomic_add_32_nv(&ra->val, -v);
+#elif defined(_WIN32)
+ return InterlockedAdd((LONG *)&ra->val, -v);
+#elif !HAVE_ATOMICS_32
+ int32_t r;
+ mtx_lock(&ra->lock);
+ ra->val -= v;
+ r = ra->val;
+ mtx_unlock(&ra->lock);
+ return r;
+#else
+ return ATOMIC_OP32(sub, fetch, &ra->val, v);
+#endif
+}
+
+/**
+ * @warning The returned value is the nominal value and will be outdated
+ * by the time the application reads it.
+ * It should not be used for exact arithmetics, any correlation
+ * with other data is unsynchronized, meaning that two atomics,
+ * or one atomic and a mutex-protected piece of data have no
+ * common synchronization and can't be relied on.
+ */
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_get(rd_atomic32_t *ra) {
+#if defined(_WIN32) || defined(__SUNPRO_C)
+ return ra->val;
+#elif !HAVE_ATOMICS_32
+ int32_t r;
+ mtx_lock(&ra->lock);
+ r = ra->val;
+ mtx_unlock(&ra->lock);
+ return r;
+#else
+ return ATOMIC_OP32(fetch, add, &ra->val, 0);
+#endif
+}
+
+static RD_INLINE int32_t RD_UNUSED rd_atomic32_set(rd_atomic32_t *ra,
+ int32_t v) {
+#ifdef _WIN32
+ return InterlockedExchange((LONG *)&ra->val, v);
+#elif !HAVE_ATOMICS_32
+ int32_t r;
+ mtx_lock(&ra->lock);
+ r = ra->val = v;
+ mtx_unlock(&ra->lock);
+ return r;
+#elif HAVE_ATOMICS_32_ATOMIC
+ __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
+ return v;
+#elif HAVE_ATOMICS_32_SYNC
+ (void)__sync_lock_test_and_set(&ra->val, v);
+ return v;
+#else
+ return ra->val = v; // FIXME
+#endif
+}
+
+
+
+static RD_INLINE RD_UNUSED void rd_atomic64_init(rd_atomic64_t *ra, int64_t v) {
+ ra->val = v;
+#if !defined(_WIN32) && !HAVE_ATOMICS_64
+ mtx_init(&ra->lock, mtx_plain);
+#endif
+}
+
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_add(rd_atomic64_t *ra,
+ int64_t v) {
+#ifdef __SUNPRO_C
+ return atomic_add_64_nv(&ra->val, v);
+#elif defined(_WIN32)
+ return InterlockedAdd64(&ra->val, v);
+#elif !HAVE_ATOMICS_64
+ int64_t r;
+ mtx_lock(&ra->lock);
+ ra->val += v;
+ r = ra->val;
+ mtx_unlock(&ra->lock);
+ return r;
+#else
+ return ATOMIC_OP64(add, fetch, &ra->val, v);
+#endif
+}
+
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_sub(rd_atomic64_t *ra,
+ int64_t v) {
+#ifdef __SUNPRO_C
+ return atomic_add_64_nv(&ra->val, -v);
+#elif defined(_WIN32)
+ return InterlockedAdd64(&ra->val, -v);
+#elif !HAVE_ATOMICS_64
+ int64_t r;
+ mtx_lock(&ra->lock);
+ ra->val -= v;
+ r = ra->val;
+ mtx_unlock(&ra->lock);
+ return r;
+#else
+ return ATOMIC_OP64(sub, fetch, &ra->val, v);
+#endif
+}
+
+/**
+ * @warning The returned value is the nominal value and will be outdated
+ * by the time the application reads it.
+ * It should not be used for exact arithmetics, any correlation
+ * with other data is unsynchronized, meaning that two atomics,
+ * or one atomic and a mutex-protected piece of data have no
+ * common synchronization and can't be relied on.
+ * Use with care.
+ */
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_get(rd_atomic64_t *ra) {
+#if defined(_WIN32) || defined(__SUNPRO_C)
+ return InterlockedCompareExchange64(&ra->val, 0, 0);
+#elif !HAVE_ATOMICS_64
+ int64_t r;
+ mtx_lock(&ra->lock);
+ r = ra->val;
+ mtx_unlock(&ra->lock);
+ return r;
+#else
+ return ATOMIC_OP64(fetch, add, &ra->val, 0);
+#endif
+}
+
+
+static RD_INLINE int64_t RD_UNUSED rd_atomic64_set(rd_atomic64_t *ra,
+ int64_t v) {
+#ifdef _WIN32
+ return InterlockedExchange64(&ra->val, v);
+#elif !HAVE_ATOMICS_64
+ int64_t r;
+ mtx_lock(&ra->lock);
+ ra->val = v;
+ r = ra->val;
+ mtx_unlock(&ra->lock);
+ return r;
+#elif HAVE_ATOMICS_64_ATOMIC
+ __atomic_store_n(&ra->val, v, __ATOMIC_SEQ_CST);
+ return v;
+#elif HAVE_ATOMICS_64_SYNC
+ (void)__sync_lock_test_and_set(&ra->val, v);
+ return v;
+#else
+ return ra->val = v; // FIXME
+#endif
+}
+
+#endif /* _RDATOMIC_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h
new file mode 100644
index 000000000..a170e8da5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdavg.h
@@ -0,0 +1,259 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDAVG_H_
+#define _RDAVG_H_
+
+
+#if WITH_HDRHISTOGRAM
+#include "rdhdrhistogram.h"
+#endif
+
+typedef struct rd_avg_s {
+ struct {
+ int64_t maxv;
+ int64_t minv;
+ int64_t avg;
+ int64_t sum;
+ int cnt;
+ rd_ts_t start;
+ } ra_v;
+ mtx_t ra_lock;
+ int ra_enabled;
+ enum { RD_AVG_GAUGE,
+ RD_AVG_COUNTER,
+ } ra_type;
+#if WITH_HDRHISTOGRAM
+ rd_hdr_histogram_t *ra_hdr;
+#endif
+ /* Histogram results, calculated for dst in rollover().
+ * Will be all zeroes if histograms are not supported. */
+ struct {
+ /* Quantiles */
+ int64_t p50;
+ int64_t p75;
+ int64_t p90;
+ int64_t p95;
+ int64_t p99;
+ int64_t p99_99;
+
+ int64_t oor; /**< Values out of range */
+ int32_t hdrsize; /**< hdr.allocatedSize */
+ double stddev;
+ double mean;
+ } ra_hist;
+} rd_avg_t;
+
+
+/**
+ * @brief Add value \p v to averager \p ra.
+ */
+static RD_UNUSED void rd_avg_add(rd_avg_t *ra, int64_t v) {
+ mtx_lock(&ra->ra_lock);
+ if (!ra->ra_enabled) {
+ mtx_unlock(&ra->ra_lock);
+ return;
+ }
+ if (v > ra->ra_v.maxv)
+ ra->ra_v.maxv = v;
+ if (ra->ra_v.minv == 0 || v < ra->ra_v.minv)
+ ra->ra_v.minv = v;
+ ra->ra_v.sum += v;
+ ra->ra_v.cnt++;
+#if WITH_HDRHISTOGRAM
+ rd_hdr_histogram_record(ra->ra_hdr, v);
+#endif
+ mtx_unlock(&ra->ra_lock);
+}
+
+
+/**
+ * @brief Calculate the average
+ */
+static RD_UNUSED void rd_avg_calc(rd_avg_t *ra, rd_ts_t now) {
+ if (ra->ra_type == RD_AVG_GAUGE) {
+ if (ra->ra_v.cnt)
+ ra->ra_v.avg = ra->ra_v.sum / ra->ra_v.cnt;
+ else
+ ra->ra_v.avg = 0;
+ } else {
+ rd_ts_t elapsed = now - ra->ra_v.start;
+
+ if (elapsed)
+ ra->ra_v.avg = (ra->ra_v.sum * 1000000llu) / elapsed;
+ else
+ ra->ra_v.avg = 0;
+
+ ra->ra_v.start = elapsed;
+ }
+}
+
+
+/**
+ * @returns the quantile \q for \p ra, or 0 if histograms are not supported
+ * in this build.
+ *
+ * @remark ra will be not locked by this function.
+ */
+static RD_UNUSED int64_t rd_avg_quantile(const rd_avg_t *ra, double q) {
+#if WITH_HDRHISTOGRAM
+ return rd_hdr_histogram_quantile(ra->ra_hdr, q);
+#else
+ return 0;
+#endif
+}
+
+/**
+ * @brief Rolls over statistics in \p src and stores the average in \p dst.
+ * \p src is cleared and ready to be reused.
+ *
+ * Caller must free avg internal members by calling rd_avg_destroy()
+ * on the \p dst.
+ */
+static RD_UNUSED void rd_avg_rollover(rd_avg_t *dst, rd_avg_t *src) {
+ rd_ts_t now;
+
+ mtx_lock(&src->ra_lock);
+ if (!src->ra_enabled) {
+ memset(dst, 0, sizeof(*dst));
+ dst->ra_type = src->ra_type;
+ mtx_unlock(&src->ra_lock);
+ return;
+ }
+
+ mtx_init(&dst->ra_lock, mtx_plain);
+ dst->ra_type = src->ra_type;
+ dst->ra_v = src->ra_v;
+#if WITH_HDRHISTOGRAM
+ dst->ra_hdr = NULL;
+
+ dst->ra_hist.stddev = rd_hdr_histogram_stddev(src->ra_hdr);
+ dst->ra_hist.mean = rd_hdr_histogram_mean(src->ra_hdr);
+ dst->ra_hist.oor = src->ra_hdr->outOfRangeCount;
+ dst->ra_hist.hdrsize = src->ra_hdr->allocatedSize;
+ dst->ra_hist.p50 = rd_hdr_histogram_quantile(src->ra_hdr, 50.0);
+ dst->ra_hist.p75 = rd_hdr_histogram_quantile(src->ra_hdr, 75.0);
+ dst->ra_hist.p90 = rd_hdr_histogram_quantile(src->ra_hdr, 90.0);
+ dst->ra_hist.p95 = rd_hdr_histogram_quantile(src->ra_hdr, 95.0);
+ dst->ra_hist.p99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.0);
+ dst->ra_hist.p99_99 = rd_hdr_histogram_quantile(src->ra_hdr, 99.99);
+#else
+ memset(&dst->ra_hist, 0, sizeof(dst->ra_hist));
+#endif
+ memset(&src->ra_v, 0, sizeof(src->ra_v));
+
+ now = rd_clock();
+ src->ra_v.start = now;
+
+#if WITH_HDRHISTOGRAM
+ /* Adapt histogram span to fit future out of range entries
+ * from this period. */
+ if (src->ra_hdr->totalCount > 0) {
+ int64_t vmin = src->ra_hdr->lowestTrackableValue;
+ int64_t vmax = src->ra_hdr->highestTrackableValue;
+ int64_t mindiff, maxdiff;
+
+ mindiff = src->ra_hdr->lowestTrackableValue -
+ src->ra_hdr->lowestOutOfRange;
+
+ if (mindiff > 0) {
+ /* There were low out of range values, grow lower
+ * span to fit lowest out of range value + 20%. */
+ vmin = src->ra_hdr->lowestOutOfRange +
+ (int64_t)((double)mindiff * 0.2);
+ }
+
+ maxdiff = src->ra_hdr->highestOutOfRange -
+ src->ra_hdr->highestTrackableValue;
+
+ if (maxdiff > 0) {
+ /* There were high out of range values, grow higher
+ * span to fit highest out of range value + 20%. */
+ vmax = src->ra_hdr->highestOutOfRange +
+ (int64_t)((double)maxdiff * 0.2);
+ }
+
+ if (vmin == src->ra_hdr->lowestTrackableValue &&
+ vmax == src->ra_hdr->highestTrackableValue) {
+ /* No change in min,max, use existing hdr */
+ rd_hdr_histogram_reset(src->ra_hdr);
+
+ } else {
+ int sigfigs = (int)src->ra_hdr->significantFigures;
+ /* Create new hdr for adapted range */
+ rd_hdr_histogram_destroy(src->ra_hdr);
+ src->ra_hdr = rd_hdr_histogram_new(vmin, vmax, sigfigs);
+ }
+
+ } else {
+ /* No records, no need to reset. */
+ }
+#endif
+
+ mtx_unlock(&src->ra_lock);
+
+ rd_avg_calc(dst, now);
+}
+
+
+/**
+ * Initialize an averager
+ */
+static RD_UNUSED void rd_avg_init(rd_avg_t *ra,
+ int type,
+ int64_t exp_min,
+ int64_t exp_max,
+ int sigfigs,
+ int enable) {
+ memset(ra, 0, sizeof(*ra));
+ mtx_init(&ra->ra_lock, 0);
+ ra->ra_enabled = enable;
+ if (!enable)
+ return;
+ ra->ra_type = type;
+ ra->ra_v.start = rd_clock();
+#if WITH_HDRHISTOGRAM
+ /* Start off the histogram with expected min,max span,
+ * we'll adapt the size on each rollover. */
+ ra->ra_hdr = rd_hdr_histogram_new(exp_min, exp_max, sigfigs);
+#endif
+}
+
+
+/**
+ * Destroy averager
+ */
+static RD_UNUSED void rd_avg_destroy(rd_avg_t *ra) {
+#if WITH_HDRHISTOGRAM
+ if (ra->ra_hdr)
+ rd_hdr_histogram_destroy(ra->ra_hdr);
+#endif
+ mtx_destroy(&ra->ra_lock);
+}
+
+#endif /* _RDAVG_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c
new file mode 100644
index 000000000..f25251de8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.c
@@ -0,0 +1,210 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdavl.h"
+
+/*
+ * AVL tree.
+ * Inspired by Ian Piumarta's tree.h implementation.
+ */
+
+#define RD_AVL_NODE_HEIGHT(ran) ((ran) ? (ran)->ran_height : 0)
+
+#define RD_AVL_NODE_DELTA(ran) \
+ (RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_LEFT]) - \
+ RD_AVL_NODE_HEIGHT((ran)->ran_p[RD_AVL_RIGHT]))
+
+#define RD_DELTA_MAX 1
+
+
+static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran);
+
+static rd_avl_node_t *rd_avl_rotate(rd_avl_node_t *ran, rd_avl_dir_t dir) {
+ rd_avl_node_t *n;
+ static const rd_avl_dir_t odirmap[] = {/* opposite direction map */
+ [RD_AVL_RIGHT] = RD_AVL_LEFT,
+ [RD_AVL_LEFT] = RD_AVL_RIGHT};
+ const int odir = odirmap[dir];
+
+ n = ran->ran_p[odir];
+ ran->ran_p[odir] = n->ran_p[dir];
+ n->ran_p[dir] = rd_avl_balance_node(ran);
+
+ return rd_avl_balance_node(n);
+}
+
+static rd_avl_node_t *rd_avl_balance_node(rd_avl_node_t *ran) {
+ const int d = RD_AVL_NODE_DELTA(ran);
+ int h;
+
+ if (d < -RD_DELTA_MAX) {
+ if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_RIGHT]) > 0)
+ ran->ran_p[RD_AVL_RIGHT] = rd_avl_rotate(
+ ran->ran_p[RD_AVL_RIGHT], RD_AVL_RIGHT);
+ return rd_avl_rotate(ran, RD_AVL_LEFT);
+
+ } else if (d > RD_DELTA_MAX) {
+ if (RD_AVL_NODE_DELTA(ran->ran_p[RD_AVL_LEFT]) < 0)
+ ran->ran_p[RD_AVL_LEFT] =
+ rd_avl_rotate(ran->ran_p[RD_AVL_LEFT], RD_AVL_LEFT);
+
+ return rd_avl_rotate(ran, RD_AVL_RIGHT);
+ }
+
+ ran->ran_height = 0;
+
+ if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_LEFT])) > ran->ran_height)
+ ran->ran_height = h;
+
+ if ((h = RD_AVL_NODE_HEIGHT(ran->ran_p[RD_AVL_RIGHT])) >
+ ran->ran_height)
+ ran->ran_height = h;
+
+ ran->ran_height++;
+
+ return ran;
+}
+
+rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl,
+ rd_avl_node_t *parent,
+ rd_avl_node_t *ran,
+ rd_avl_node_t **existing) {
+ rd_avl_dir_t dir;
+ int r;
+
+ if (!parent)
+ return ran;
+
+ if ((r = ravl->ravl_cmp(ran->ran_elm, parent->ran_elm)) == 0) {
+ /* Replace existing node with new one. */
+ ran->ran_p[RD_AVL_LEFT] = parent->ran_p[RD_AVL_LEFT];
+ ran->ran_p[RD_AVL_RIGHT] = parent->ran_p[RD_AVL_RIGHT];
+ ran->ran_height = parent->ran_height;
+ *existing = parent;
+ return ran;
+ }
+
+ if (r < 0)
+ dir = RD_AVL_LEFT;
+ else
+ dir = RD_AVL_RIGHT;
+
+ parent->ran_p[dir] =
+ rd_avl_insert_node(ravl, parent->ran_p[dir], ran, existing);
+ return rd_avl_balance_node(parent);
+}
+
+
+static rd_avl_node_t *
+rd_avl_move(rd_avl_node_t *dst, rd_avl_node_t *src, rd_avl_dir_t dir) {
+
+ if (!dst)
+ return src;
+
+ dst->ran_p[dir] = rd_avl_move(dst->ran_p[dir], src, dir);
+
+ return rd_avl_balance_node(dst);
+}
+
+static rd_avl_node_t *rd_avl_remove_node0(rd_avl_node_t *ran) {
+ rd_avl_node_t *tmp;
+
+ tmp = rd_avl_move(ran->ran_p[RD_AVL_LEFT], ran->ran_p[RD_AVL_RIGHT],
+ RD_AVL_RIGHT);
+
+ ran->ran_p[RD_AVL_LEFT] = ran->ran_p[RD_AVL_RIGHT] = NULL;
+ return tmp;
+}
+
+
+rd_avl_node_t *
+rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm) {
+ rd_avl_dir_t dir;
+ int r;
+
+ if (!parent)
+ return NULL;
+
+
+ if ((r = ravl->ravl_cmp(elm, parent->ran_elm)) == 0)
+ return rd_avl_remove_node0(parent);
+ else if (r < 0)
+ dir = RD_AVL_LEFT;
+ else /* > 0 */
+ dir = RD_AVL_RIGHT;
+
+ parent->ran_p[dir] = rd_avl_remove_elm0(ravl, parent->ran_p[dir], elm);
+
+ return rd_avl_balance_node(parent);
+}
+
+
+
+rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl,
+ const rd_avl_node_t *begin,
+ const void *elm) {
+ int r;
+
+ if (!begin)
+ return NULL;
+ else if (!(r = ravl->ravl_cmp(elm, begin->ran_elm)))
+ return (rd_avl_node_t *)begin;
+ else if (r < 0)
+ return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_LEFT], elm);
+ else /* r > 0 */
+ return rd_avl_find_node(ravl, begin->ran_p[RD_AVL_RIGHT], elm);
+}
+
+
+
+void rd_avl_destroy(rd_avl_t *ravl) {
+ if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+ rwlock_destroy(&ravl->ravl_rwlock);
+
+ if (ravl->ravl_flags & RD_AVL_F_OWNER)
+ rd_free(ravl);
+}
+
+rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags) {
+
+ if (!ravl) {
+ ravl = rd_calloc(1, sizeof(*ravl));
+ flags |= RD_AVL_F_OWNER;
+ } else {
+ memset(ravl, 0, sizeof(*ravl));
+ }
+
+ ravl->ravl_flags = flags;
+ ravl->ravl_cmp = cmp;
+
+ if (flags & RD_AVL_F_LOCKS)
+ rwlock_init(&ravl->ravl_rwlock);
+
+ return ravl;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h
new file mode 100644
index 000000000..f3e539242
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdavl.h
@@ -0,0 +1,250 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+ * AVL tree.
+ * Inspired by Ian Piumarta's tree.h implementation.
+ */
+
+#ifndef _RDAVL_H_
+#define _RDAVL_H_
+
+#include "tinycthread.h"
+
+
+typedef enum {
+ RD_AVL_LEFT,
+ RD_AVL_RIGHT,
+} rd_avl_dir_t;
+
+/**
+ * AVL tree node.
+ * Add 'rd_avl_node_t ..' as field to your element's struct and
+ * provide it as the 'field' argument in the API below.
+ */
+typedef struct rd_avl_node_s {
+ struct rd_avl_node_s *ran_p[2]; /* RD_AVL_LEFT and RD_AVL_RIGHT */
+ int ran_height; /* Sub-tree height */
+ void *ran_elm; /* Backpointer to the containing
+ * element. This could be considered
+ * costly but is convenient for the
+ * caller: RAM is cheap,
+ * development time isn't*/
+} rd_avl_node_t;
+
+
+
+/**
+ * Per-AVL application-provided element comparator.
+ */
+typedef int (*rd_avl_cmp_t)(const void *, const void *);
+
+
+/**
+ * AVL tree
+ */
+typedef struct rd_avl_s {
+ rd_avl_node_t *ravl_root; /* Root node */
+ rd_avl_cmp_t ravl_cmp; /* Comparator */
+ int ravl_flags; /* Flags */
+#define RD_AVL_F_LOCKS 0x1 /* Enable thread-safeness */
+#define RD_AVL_F_OWNER 0x2 /* internal: rd_avl_init() allocated ravl */
+ rwlock_t ravl_rwlock; /* Mutex when .._F_LOCKS is set. */
+} rd_avl_t;
+
+
+
+/**
+ *
+ *
+ * Public API
+ *
+ *
+ */
+
+/**
+ * Insert 'elm' into AVL tree.
+ * In case of collision the previous entry is overwritten by the
+ * new one and the previous element is returned, else NULL.
+ */
+#define RD_AVL_INSERT(ravl, elm, field) rd_avl_insert(ravl, elm, &(elm)->field)
+
+
+/**
+ * Remove element by matching value 'elm' using compare function.
+ */
+#define RD_AVL_REMOVE_ELM(ravl, elm) rd_avl_remove_elm(ravl, elm)
+
+/**
+ * Search for (by value using compare function) and return matching elm.
+ */
+#define RD_AVL_FIND(ravl, elm) rd_avl_find(ravl, elm, 1)
+
+
+/**
+ * Search (by value using compare function) for and return matching elm.
+ * Same as RD_AVL_FIND_NL() but assumes 'ravl' ís already locked
+ * by 'rd_avl_*lock()'.
+ *
+ * NOTE: rd_avl_wrlock() must be held.
+ */
+#define RD_AVL_FIND_NL(ravl, elm) \
+ rd_avl_find_node(ravl, (ravl)->ravl_root, elm, 0)
+
+
+/**
+ * Search (by value using compare function) for elm and return its AVL node.
+ *
+ * NOTE: rd_avl_wrlock() must be held.
+ */
+#define RD_AVL_FIND_NODE_NL(ravl, elm) rd_avl_find(ravl, elm, 0)
+
+
+/**
+ * Changes the element pointer for an existing AVL node in the tree.
+ * The new element must be identical (according to the comparator)
+ * to the previous element.
+ *
+ * NOTE: rd_avl_wrlock() must be held.
+ */
+#define RD_AVL_ELM_SET_NL(ran, elm) ((ran)->ran_elm = (elm))
+
+/**
+ * Returns the current element pointer for an existing AVL node in the tree
+ *
+ * NOTE: rd_avl_*lock() must be held.
+ */
+#define RD_AVL_ELM_GET_NL(ran) ((ran)->ran_elm)
+
+
+
+/**
+ * Destroy previously initialized (by rd_avl_init()) AVL tree.
+ */
+void rd_avl_destroy(rd_avl_t *ravl);
+
+/**
+ * Initialize (and optionally allocate if 'ravl' is NULL) AVL tree.
+ * 'cmp' is the comparison function that takes two const pointers
+ * pointing to the elements being compared (rather than the avl_nodes).
+ * 'flags' is zero or more of the RD_AVL_F_.. flags.
+ *
+ * For thread-safe AVL trees supply RD_AVL_F_LOCKS in 'flags'.
+ */
+rd_avl_t *rd_avl_init(rd_avl_t *ravl, rd_avl_cmp_t cmp, int flags);
+
+
+/**
+ * 'ravl' locking functions.
+ * Locking is performed automatically for all methods except for
+ * those with the "_NL"/"_nl" suffix ("not locked") which expects
+ * either read or write lock to be held.
+ *
+ * rdavl utilizes rwlocks to allow multiple concurrent read threads.
+ */
+static RD_INLINE RD_UNUSED void rd_avl_rdlock(rd_avl_t *ravl) {
+ if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+ rwlock_rdlock(&ravl->ravl_rwlock);
+}
+
+static RD_INLINE RD_UNUSED void rd_avl_wrlock(rd_avl_t *ravl) {
+ if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+ rwlock_wrlock(&ravl->ravl_rwlock);
+}
+
+static RD_INLINE RD_UNUSED void rd_avl_rdunlock(rd_avl_t *ravl) {
+ if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+ rwlock_rdunlock(&ravl->ravl_rwlock);
+}
+
+static RD_INLINE RD_UNUSED void rd_avl_wrunlock(rd_avl_t *ravl) {
+ if (ravl->ravl_flags & RD_AVL_F_LOCKS)
+ rwlock_wrunlock(&ravl->ravl_rwlock);
+}
+
+
+
+/**
+ * Private API, dont use directly.
+ */
+
+rd_avl_node_t *rd_avl_insert_node(rd_avl_t *ravl,
+ rd_avl_node_t *parent,
+ rd_avl_node_t *ran,
+ rd_avl_node_t **existing);
+
+static RD_UNUSED void *
+rd_avl_insert(rd_avl_t *ravl, void *elm, rd_avl_node_t *ran) {
+ rd_avl_node_t *existing = NULL;
+
+ memset(ran, 0, sizeof(*ran));
+ ran->ran_elm = elm;
+
+ rd_avl_wrlock(ravl);
+ ravl->ravl_root =
+ rd_avl_insert_node(ravl, ravl->ravl_root, ran, &existing);
+ rd_avl_wrunlock(ravl);
+
+ return existing ? existing->ran_elm : NULL;
+}
+
+rd_avl_node_t *
+rd_avl_remove_elm0(rd_avl_t *ravl, rd_avl_node_t *parent, const void *elm);
+
+static RD_INLINE RD_UNUSED void rd_avl_remove_elm(rd_avl_t *ravl,
+ const void *elm) {
+ rd_avl_wrlock(ravl);
+ ravl->ravl_root = rd_avl_remove_elm0(ravl, ravl->ravl_root, elm);
+ rd_avl_wrunlock(ravl);
+}
+
+
+rd_avl_node_t *rd_avl_find_node(const rd_avl_t *ravl,
+ const rd_avl_node_t *begin,
+ const void *elm);
+
+
+static RD_INLINE RD_UNUSED void *
+rd_avl_find(rd_avl_t *ravl, const void *elm, int dolock) {
+ const rd_avl_node_t *ran;
+ void *ret;
+
+ if (dolock)
+ rd_avl_rdlock(ravl);
+
+ ran = rd_avl_find_node(ravl, ravl->ravl_root, elm);
+ ret = ran ? ran->ran_elm : NULL;
+
+ if (dolock)
+ rd_avl_rdunlock(ravl);
+
+ return ret;
+}
+
+#endif /* _RDAVL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c
new file mode 100644
index 000000000..1392cf7b1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.c
@@ -0,0 +1,1880 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rd.h"
+#include "rdbuf.h"
+#include "rdunittest.h"
+#include "rdlog.h"
+#include "rdcrc32.h"
+#include "crc32c.h"
+
+
+static size_t
+rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p);
+
+
+/**
+ * @brief Destroy the segment and free its payload.
+ *
+ * @remark Will NOT unlink from buffer.
+ */
+static void rd_segment_destroy(rd_segment_t *seg) {
+ /* Free payload */
+ if (seg->seg_free && seg->seg_p)
+ seg->seg_free(seg->seg_p);
+
+ if (seg->seg_flags & RD_SEGMENT_F_FREE)
+ rd_free(seg);
+}
+
+/**
+ * @brief Initialize segment with absolute offset, backing memory pointer,
+ * and backing memory size.
+ * @remark The segment is NOT linked.
+ */
+static void rd_segment_init(rd_segment_t *seg, void *mem, size_t size) {
+ memset(seg, 0, sizeof(*seg));
+ seg->seg_p = mem;
+ seg->seg_size = size;
+}
+
+
+/**
+ * @brief Append segment to buffer
+ *
+ * @remark Will set the buffer position to the new \p seg if no existing wpos.
+ * @remark Will set the segment seg_absof to the current length of the buffer.
+ */
+static rd_segment_t *rd_buf_append_segment(rd_buf_t *rbuf, rd_segment_t *seg) {
+ TAILQ_INSERT_TAIL(&rbuf->rbuf_segments, seg, seg_link);
+ rbuf->rbuf_segment_cnt++;
+ seg->seg_absof = rbuf->rbuf_len;
+ rbuf->rbuf_len += seg->seg_of;
+ rbuf->rbuf_size += seg->seg_size;
+
+ /* Update writable position */
+ if (!rbuf->rbuf_wpos)
+ rbuf->rbuf_wpos = seg;
+ else
+ rd_buf_get_writable0(rbuf, NULL, NULL);
+
+ return seg;
+}
+
+
+
+/**
+ * @brief Attempt to allocate \p size bytes from the buffers extra buffers.
+ * @returns the allocated pointer which MUST NOT be freed, or NULL if
+ * not enough memory.
+ * @remark the returned pointer is memory-aligned to be safe.
+ */
+static void *extra_alloc(rd_buf_t *rbuf, size_t size) {
+ size_t of = RD_ROUNDUP(rbuf->rbuf_extra_len, 8); /* FIXME: 32-bit */
+ void *p;
+
+ if (of + size > rbuf->rbuf_extra_size)
+ return NULL;
+
+ p = rbuf->rbuf_extra + of; /* Aligned pointer */
+
+ rbuf->rbuf_extra_len = of + size;
+
+ return p;
+}
+
+
+
+/**
+ * @brief Get a pre-allocated segment if available, or allocate a new
+ * segment with the extra amount of \p size bytes allocated for payload.
+ *
+ * Will not append the segment to the buffer.
+ */
+static rd_segment_t *rd_buf_alloc_segment0(rd_buf_t *rbuf, size_t size) {
+ rd_segment_t *seg;
+
+ /* See if there is enough room in the extra buffer for
+ * allocating the segment header and the buffer,
+ * or just the segment header, else fall back to malloc. */
+ if ((seg = extra_alloc(rbuf, sizeof(*seg) + size))) {
+ rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size);
+
+ } else if ((seg = extra_alloc(rbuf, sizeof(*seg)))) {
+ rd_segment_init(seg, size > 0 ? rd_malloc(size) : NULL, size);
+ if (size > 0)
+ seg->seg_free = rd_free;
+
+ } else if ((seg = rd_malloc(sizeof(*seg) + size))) {
+ rd_segment_init(seg, size > 0 ? seg + 1 : NULL, size);
+ seg->seg_flags |= RD_SEGMENT_F_FREE;
+
+ } else
+ rd_assert(!*"segment allocation failure");
+
+ return seg;
+}
+
+/**
+ * @brief Allocate between \p min_size .. \p max_size of backing memory
+ * and add it as a new segment to the buffer.
+ *
+ * The buffer position is updated to point to the new segment.
+ *
+ * The segment will be over-allocated if permitted by max_size
+ * (max_size == 0 or max_size > min_size).
+ */
+static rd_segment_t *
+rd_buf_alloc_segment(rd_buf_t *rbuf, size_t min_size, size_t max_size) {
+ rd_segment_t *seg;
+
+ /* Over-allocate if allowed. */
+ if (min_size != max_size || max_size == 0)
+ max_size = RD_MAX(sizeof(*seg) * 4,
+ RD_MAX(min_size * 2, rbuf->rbuf_size / 2));
+
+ seg = rd_buf_alloc_segment0(rbuf, max_size);
+
+ rd_buf_append_segment(rbuf, seg);
+
+ return seg;
+}
+
+
+/**
+ * @brief Ensures that \p size bytes will be available
+ * for writing and the position will be updated to point to the
+ * start of this contiguous block.
+ */
+void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size) {
+ rd_segment_t *seg = rbuf->rbuf_wpos;
+
+ if (seg) {
+ void *p;
+ size_t remains = rd_segment_write_remains(seg, &p);
+
+ if (remains >= size)
+ return; /* Existing segment has enough space. */
+
+ /* Future optimization:
+ * If existing segment has enough remaining space to warrant
+ * a split, do it, before allocating a new one. */
+ }
+
+ /* Allocate new segment */
+ rbuf->rbuf_wpos = rd_buf_alloc_segment(rbuf, size, size);
+}
+
+/**
+ * @brief Ensures that at least \p size bytes will be available for
+ * a future write.
+ *
+ * Typically used prior to a call to rd_buf_get_write_iov()
+ */
+void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size) {
+ size_t remains;
+ while ((remains = rd_buf_write_remains(rbuf)) < min_size)
+ rd_buf_alloc_segment(rbuf, min_size - remains,
+ max_size ? max_size - remains : 0);
+}
+
+
+/**
+ * @returns the segment at absolute offset \p absof, or NULL if out of range.
+ *
+ * @remark \p hint is an optional segment where to start looking, such as
+ * the current write or read position.
+ */
+rd_segment_t *rd_buf_get_segment_at_offset(const rd_buf_t *rbuf,
+ const rd_segment_t *hint,
+ size_t absof) {
+ const rd_segment_t *seg = hint;
+
+ if (unlikely(absof >= rbuf->rbuf_len))
+ return NULL;
+
+ /* Only use current write position if possible and if it helps */
+ if (!seg || absof < seg->seg_absof)
+ seg = TAILQ_FIRST(&rbuf->rbuf_segments);
+
+ do {
+ if (absof >= seg->seg_absof &&
+ absof < seg->seg_absof + seg->seg_of) {
+ rd_dassert(seg->seg_absof <= rd_buf_len(rbuf));
+ return (rd_segment_t *)seg;
+ }
+ } while ((seg = TAILQ_NEXT(seg, seg_link)));
+
+ return NULL;
+}
+
+
+/**
+ * @brief Split segment \p seg at absolute offset \p absof, appending
+ * a new segment after \p seg with its memory pointing to the
+ * memory starting at \p absof.
+ * \p seg 's memory will be shorted to the \p absof.
+ *
+ * The new segment is NOT appended to the buffer.
+ *
+ * @warning MUST ONLY be used on the LAST segment
+ *
+ * @warning if a segment is inserted between these two splitted parts
+ * it is imperative that the later segment's absof is corrected.
+ *
+ * @remark The seg_free callback is retained on the original \p seg
+ * and is not copied to the new segment, but flags are copied.
+ */
+static rd_segment_t *
+rd_segment_split(rd_buf_t *rbuf, rd_segment_t *seg, size_t absof) {
+ rd_segment_t *newseg;
+ size_t relof;
+
+ rd_assert(seg == rbuf->rbuf_wpos);
+ rd_assert(absof >= seg->seg_absof &&
+ absof <= seg->seg_absof + seg->seg_of);
+
+ relof = absof - seg->seg_absof;
+
+ newseg = rd_buf_alloc_segment0(rbuf, 0);
+
+ /* Add later part of split bytes to new segment */
+ newseg->seg_p = seg->seg_p + relof;
+ newseg->seg_of = seg->seg_of - relof;
+ newseg->seg_size = seg->seg_size - relof;
+ newseg->seg_absof = SIZE_MAX; /* Invalid */
+ newseg->seg_flags |= seg->seg_flags;
+
+ /* Remove earlier part of split bytes from previous segment */
+ seg->seg_of = relof;
+ seg->seg_size = relof;
+
+ /* newseg's length will be added to rbuf_len in append_segment(),
+ * so shave it off here from seg's perspective. */
+ rbuf->rbuf_len -= newseg->seg_of;
+ rbuf->rbuf_size -= newseg->seg_size;
+
+ return newseg;
+}
+
+
+
+/**
+ * @brief Unlink and destroy a segment, updating the \p rbuf
+ * with the decrease in length and capacity.
+ */
+static void rd_buf_destroy_segment(rd_buf_t *rbuf, rd_segment_t *seg) {
+ rd_assert(rbuf->rbuf_segment_cnt > 0 && rbuf->rbuf_len >= seg->seg_of &&
+ rbuf->rbuf_size >= seg->seg_size);
+
+ TAILQ_REMOVE(&rbuf->rbuf_segments, seg, seg_link);
+ rbuf->rbuf_segment_cnt--;
+ rbuf->rbuf_len -= seg->seg_of;
+ rbuf->rbuf_size -= seg->seg_size;
+ if (rbuf->rbuf_wpos == seg)
+ rbuf->rbuf_wpos = NULL;
+
+ rd_segment_destroy(seg);
+}
+
+
+/**
+ * @brief Free memory associated with the \p rbuf, but not the rbuf itself.
+ * Segments will be destroyed.
+ */
+void rd_buf_destroy(rd_buf_t *rbuf) {
+ rd_segment_t *seg, *tmp;
+
+#if ENABLE_DEVEL
+ /* FIXME */
+ if (rbuf->rbuf_len > 0 && 0) {
+ size_t overalloc = rbuf->rbuf_size - rbuf->rbuf_len;
+ float fill_grade =
+ (float)rbuf->rbuf_len / (float)rbuf->rbuf_size;
+
+ printf("fill grade: %.2f%% (%" PRIusz
+ " bytes over-allocated)\n",
+ fill_grade * 100.0f, overalloc);
+ }
+#endif
+
+
+ TAILQ_FOREACH_SAFE(seg, &rbuf->rbuf_segments, seg_link, tmp) {
+ rd_segment_destroy(seg);
+ }
+
+ if (rbuf->rbuf_extra)
+ rd_free(rbuf->rbuf_extra);
+}
+
+
+/**
+ * @brief Same as rd_buf_destroy() but also frees the \p rbuf itself.
+ */
+void rd_buf_destroy_free(rd_buf_t *rbuf) {
+ rd_buf_destroy(rbuf);
+ rd_free(rbuf);
+}
+
+/**
+ * @brief Initialize buffer, pre-allocating \p fixed_seg_cnt segments
+ * where the first segment will have a \p buf_size of backing memory.
+ *
+ * The caller may rearrange the backing memory as it see fits.
+ */
+void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size) {
+ size_t totalloc = 0;
+
+ memset(rbuf, 0, sizeof(*rbuf));
+ TAILQ_INIT(&rbuf->rbuf_segments);
+
+ if (!fixed_seg_cnt) {
+ assert(!buf_size);
+ return;
+ }
+
+ /* Pre-allocate memory for a fixed set of segments that are known
+ * before-hand, to minimize the number of extra allocations
+ * needed for well-known layouts (such as headers, etc) */
+ totalloc += RD_ROUNDUP(sizeof(rd_segment_t), 8) * fixed_seg_cnt;
+
+ /* Pre-allocate extra space for the backing buffer. */
+ totalloc += buf_size;
+
+ rbuf->rbuf_extra_size = totalloc;
+ rbuf->rbuf_extra = rd_malloc(rbuf->rbuf_extra_size);
+}
+
+
+/**
+ * @brief Allocates a buffer object and initializes it.
+ * @sa rd_buf_init()
+ */
+rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size) {
+ rd_buf_t *rbuf = rd_malloc(sizeof(*rbuf));
+ rd_buf_init(rbuf, fixed_seg_cnt, buf_size);
+ return rbuf;
+}
+
+
+/**
+ * @brief Convenience writer iterator interface.
+ *
+ * After writing to \p p the caller must update the written length
+ * by calling rd_buf_write(rbuf, NULL, written_length)
+ *
+ * @returns the number of contiguous writable bytes in segment
+ * and sets \p *p to point to the start of the memory region.
+ */
+static size_t
+rd_buf_get_writable0(rd_buf_t *rbuf, rd_segment_t **segp, void **p) {
+ rd_segment_t *seg;
+
+ for (seg = rbuf->rbuf_wpos; seg; seg = TAILQ_NEXT(seg, seg_link)) {
+ size_t len = rd_segment_write_remains(seg, p);
+
+ /* Even though the write offset hasn't changed we
+ * avoid future segment scans by adjusting the
+ * wpos here to the first writable segment. */
+ rbuf->rbuf_wpos = seg;
+ if (segp)
+ *segp = seg;
+
+ if (unlikely(len == 0))
+ continue;
+
+ /* Also adjust absof if the segment was allocated
+ * before the previous segment's memory was exhausted
+ * and thus now might have a lower absolute offset
+ * than the previos segment's now higher relative offset. */
+ if (seg->seg_of == 0 && seg->seg_absof < rbuf->rbuf_len)
+ seg->seg_absof = rbuf->rbuf_len;
+
+ return len;
+ }
+
+ return 0;
+}
+
+size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p) {
+ rd_segment_t *seg;
+ return rd_buf_get_writable0(rbuf, &seg, p);
+}
+
+
+
+/**
+ * @brief Write \p payload of \p size bytes to current position
+ * in buffer. A new segment will be allocated and appended
+ * if needed.
+ *
+ * @returns the write position where payload was written (pre-write).
+ * Returning the pre-positition allows write_update() to later
+ * update the same location, effectively making write()s
+ * also a place-holder mechanism.
+ *
+ * @remark If \p payload is NULL only the write position is updated,
+ * in this mode it is required for the buffer to have enough
+ * memory for the NULL write (as it would otherwise cause
+ * uninitialized memory in any new segments allocated from this
+ * function).
+ */
+size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size) {
+ size_t remains = size;
+ size_t initial_absof;
+ const char *psrc = (const char *)payload;
+
+ initial_absof = rbuf->rbuf_len;
+
+ /* Ensure enough space by pre-allocating segments. */
+ rd_buf_write_ensure(rbuf, size, 0);
+
+ while (remains > 0) {
+ void *p = NULL;
+ rd_segment_t *seg = NULL;
+ size_t segremains = rd_buf_get_writable0(rbuf, &seg, &p);
+ size_t wlen = RD_MIN(remains, segremains);
+
+ rd_dassert(seg == rbuf->rbuf_wpos);
+ rd_dassert(wlen > 0);
+ rd_dassert(seg->seg_p + seg->seg_of <= (char *)p &&
+ (char *)p < seg->seg_p + seg->seg_size);
+
+ if (payload) {
+ memcpy(p, psrc, wlen);
+ psrc += wlen;
+ }
+
+ seg->seg_of += wlen;
+ rbuf->rbuf_len += wlen;
+ remains -= wlen;
+ }
+
+ rd_assert(remains == 0);
+
+ return initial_absof;
+}
+
+
+
+/**
+ * @brief Write \p slice to \p rbuf
+ *
+ * @remark The slice position will be updated.
+ *
+ * @returns the number of bytes witten (always slice length)
+ */
+size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice) {
+ const void *p;
+ size_t rlen;
+ size_t sum = 0;
+
+ while ((rlen = rd_slice_reader(slice, &p))) {
+ size_t r;
+ r = rd_buf_write(rbuf, p, rlen);
+ rd_dassert(r != 0);
+ sum += r;
+ }
+
+ return sum;
+}
+
+
+
+/**
+ * @brief Write \p payload of \p size at absolute offset \p absof
+ * WITHOUT updating the total buffer length.
+ *
+ * This is used to update a previously written region, such
+ * as updating the header length.
+ *
+ * @returns the number of bytes written, which may be less than \p size
+ * if the update spans multiple segments.
+ */
+static size_t rd_segment_write_update(rd_segment_t *seg,
+ size_t absof,
+ const void *payload,
+ size_t size) {
+ size_t relof;
+ size_t wlen;
+
+ rd_dassert(absof >= seg->seg_absof);
+ relof = absof - seg->seg_absof;
+ rd_assert(relof <= seg->seg_of);
+ wlen = RD_MIN(size, seg->seg_of - relof);
+ rd_dassert(relof + wlen <= seg->seg_of);
+
+ memcpy(seg->seg_p + relof, payload, wlen);
+
+ return wlen;
+}
+
+
+
+/**
+ * @brief Write \p payload of \p size at absolute offset \p absof
+ * WITHOUT updating the total buffer length.
+ *
+ * This is used to update a previously written region, such
+ * as updating the header length.
+ */
+size_t rd_buf_write_update(rd_buf_t *rbuf,
+ size_t absof,
+ const void *payload,
+ size_t size) {
+ rd_segment_t *seg;
+ const char *psrc = (const char *)payload;
+ size_t of;
+
+ /* Find segment for offset */
+ seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof);
+ rd_assert(seg && *"invalid absolute offset");
+
+ for (of = 0; of < size; seg = TAILQ_NEXT(seg, seg_link)) {
+ rd_assert(seg->seg_absof <= rd_buf_len(rbuf));
+ size_t wlen = rd_segment_write_update(seg, absof + of,
+ psrc + of, size - of);
+ of += wlen;
+ }
+
+ rd_dassert(of == size);
+
+ return of;
+}
+
+
+
+/**
+ * @brief Push reference memory segment to current write position.
+ */
+void rd_buf_push0(rd_buf_t *rbuf,
+ const void *payload,
+ size_t size,
+ void (*free_cb)(void *),
+ rd_bool_t writable) {
+ rd_segment_t *prevseg, *seg, *tailseg = NULL;
+
+ if ((prevseg = rbuf->rbuf_wpos) &&
+ rd_segment_write_remains(prevseg, NULL) > 0) {
+ /* If the current segment still has room in it split it
+ * and insert the pushed segment in the middle (below). */
+ tailseg = rd_segment_split(
+ rbuf, prevseg, prevseg->seg_absof + prevseg->seg_of);
+ }
+
+ seg = rd_buf_alloc_segment0(rbuf, 0);
+ seg->seg_p = (char *)payload;
+ seg->seg_size = size;
+ seg->seg_of = size;
+ seg->seg_free = free_cb;
+ if (!writable)
+ seg->seg_flags |= RD_SEGMENT_F_RDONLY;
+
+ rd_buf_append_segment(rbuf, seg);
+
+ if (tailseg)
+ rd_buf_append_segment(rbuf, tailseg);
+}
+
+
+
+/**
+ * @brief Erase \p size bytes at \p absof from buffer.
+ *
+ * @returns the number of bytes erased.
+ *
+ * @remark This is costly since it forces a memory move.
+ */
+size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size) {
+ rd_segment_t *seg, *next = NULL;
+ size_t of;
+
+ /* Find segment for offset */
+ seg = rd_buf_get_segment_at_offset(rbuf, NULL, absof);
+
+ /* Adjust segments until size is exhausted, then continue scanning to
+ * update the absolute offset. */
+ for (of = 0; seg && of < size; seg = next) {
+ /* Example:
+ * seg_absof = 10
+ * seg_of = 7
+ * absof = 12
+ * of = 1
+ * size = 4
+ *
+ * rof = 3 relative segment offset where to erase
+ * eraseremains = 3 remaining bytes to erase
+ * toerase = 3 available bytes to erase in segment
+ * segremains = 1 remaining bytes in segment after to
+ * the right of the erased part, i.e.,
+ * the memory that needs to be moved to the
+ * left.
+ */
+ /** Relative offset in segment for the absolute offset */
+ size_t rof = (absof + of) - seg->seg_absof;
+ /** How much remains to be erased */
+ size_t eraseremains = size - of;
+ /** How much can be erased from this segment */
+ size_t toerase = RD_MIN(seg->seg_of - rof, eraseremains);
+ /** How much remains in the segment after the erased part */
+ size_t segremains = seg->seg_of - (rof + toerase);
+
+ next = TAILQ_NEXT(seg, seg_link);
+
+ seg->seg_absof -= of;
+
+ if (unlikely(toerase == 0))
+ continue;
+
+ if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY)))
+ RD_BUG("rd_buf_erase() called on read-only segment");
+
+ if (likely(segremains > 0))
+ memmove(seg->seg_p + rof, seg->seg_p + rof + toerase,
+ segremains);
+
+ seg->seg_of -= toerase;
+ rbuf->rbuf_len -= toerase;
+
+ of += toerase;
+
+ /* If segment is now empty, remove it */
+ if (seg->seg_of == 0)
+ rd_buf_destroy_segment(rbuf, seg);
+ }
+
+ /* Update absolute offset of remaining segments */
+ for (seg = next; seg; seg = TAILQ_NEXT(seg, seg_link)) {
+ rd_assert(seg->seg_absof >= of);
+ seg->seg_absof -= of;
+ }
+
+ rbuf->rbuf_erased += of;
+
+ return of;
+}
+
+
+
+/**
+ * @brief Do a write-seek, updating the write position to the given
+ * absolute \p absof.
+ *
+ * @warning Any sub-sequent segments will be destroyed.
+ *
+ * @returns -1 if the offset is out of bounds, else 0.
+ */
+int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof) {
+ rd_segment_t *seg, *next;
+ size_t relof;
+
+ seg = rd_buf_get_segment_at_offset(rbuf, rbuf->rbuf_wpos, absof);
+ if (unlikely(!seg))
+ return -1;
+
+ relof = absof - seg->seg_absof;
+ if (unlikely(relof > seg->seg_of))
+ return -1;
+
+ /* Destroy sub-sequent segments in reverse order so that
+ * destroy_segment() length checks are correct.
+ * Will decrement rbuf_len et.al. */
+ for (next = TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head);
+ next != seg;) {
+ rd_segment_t *this = next;
+ next = TAILQ_PREV(this, rd_segment_head, seg_link);
+ rd_buf_destroy_segment(rbuf, this);
+ }
+
+ /* Update relative write offset */
+ seg->seg_of = relof;
+ rbuf->rbuf_wpos = seg;
+ rbuf->rbuf_len = seg->seg_absof + seg->seg_of;
+
+ rd_assert(rbuf->rbuf_len == absof);
+
+ return 0;
+}
+
+
+/**
+ * @brief Set up the iovecs in \p iovs (of size \p iov_max) with the writable
+ * segments from the buffer's current write position.
+ *
+ * @param iovcntp will be set to the number of populated \p iovs[]
+ * @param size_max limits the total number of bytes made available.
+ * Note: this value may be overshot with the size of one
+ * segment.
+ *
+ * @returns the total number of bytes in the represented segments.
+ *
+ * @remark the write position will NOT be updated.
+ */
+size_t rd_buf_get_write_iov(const rd_buf_t *rbuf,
+ struct iovec *iovs,
+ size_t *iovcntp,
+ size_t iov_max,
+ size_t size_max) {
+ const rd_segment_t *seg;
+ size_t iovcnt = 0;
+ size_t sum = 0;
+
+ for (seg = rbuf->rbuf_wpos; seg && iovcnt < iov_max && sum < size_max;
+ seg = TAILQ_NEXT(seg, seg_link)) {
+ size_t len;
+ void *p;
+
+ len = rd_segment_write_remains(seg, &p);
+ if (unlikely(len == 0))
+ continue;
+
+ iovs[iovcnt].iov_base = p;
+ iovs[iovcnt++].iov_len = len;
+
+ sum += len;
+ }
+
+ *iovcntp = iovcnt;
+
+ return sum;
+}
+
+
+
+/**
+ * @name Slice reader interface
+ *
+ * @{
+ */
+
+/**
+ * @brief Initialize a new slice of \p size bytes starting at \p seg with
+ * relative offset \p rof.
+ *
+ * @returns 0 on success or -1 if there is not at least \p size bytes available
+ * in the buffer.
+ */
+int rd_slice_init_seg(rd_slice_t *slice,
+ const rd_buf_t *rbuf,
+ const rd_segment_t *seg,
+ size_t rof,
+ size_t size) {
+ /* Verify that \p size bytes are indeed available in the buffer. */
+ if (unlikely(rbuf->rbuf_len < (seg->seg_absof + rof + size)))
+ return -1;
+
+ slice->buf = rbuf;
+ slice->seg = seg;
+ slice->rof = rof;
+ slice->start = seg->seg_absof + rof;
+ slice->end = slice->start + size;
+
+ rd_assert(seg->seg_absof + rof >= slice->start &&
+ seg->seg_absof + rof <= slice->end);
+
+ rd_assert(slice->end <= rd_buf_len(rbuf));
+
+ return 0;
+}
+
+/**
+ * @brief Initialize new slice of \p size bytes starting at offset \p absof
+ *
+ * @returns 0 on success or -1 if there is not at least \p size bytes available
+ * in the buffer.
+ */
+int rd_slice_init(rd_slice_t *slice,
+ const rd_buf_t *rbuf,
+ size_t absof,
+ size_t size) {
+ const rd_segment_t *seg =
+ rd_buf_get_segment_at_offset(rbuf, NULL, absof);
+ if (unlikely(!seg))
+ return -1;
+
+ return rd_slice_init_seg(slice, rbuf, seg, absof - seg->seg_absof,
+ size);
+}
+
+/**
+ * @brief Initialize new slice covering the full buffer \p rbuf
+ */
+void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf) {
+ int r = rd_slice_init(slice, rbuf, 0, rd_buf_len(rbuf));
+ rd_assert(r == 0);
+}
+
+
+
+/**
+ * @sa rd_slice_reader() rd_slice_peeker()
+ */
+size_t rd_slice_reader0(rd_slice_t *slice, const void **p, int update_pos) {
+ size_t rof = slice->rof;
+ size_t rlen;
+ const rd_segment_t *seg;
+
+ /* Find segment with non-zero payload */
+ for (seg = slice->seg;
+ seg && seg->seg_absof + rof < slice->end && seg->seg_of == rof;
+ seg = TAILQ_NEXT(seg, seg_link))
+ rof = 0;
+
+ if (unlikely(!seg || seg->seg_absof + rof >= slice->end))
+ return 0;
+
+ *p = (const void *)(seg->seg_p + rof);
+ rlen = RD_MIN(seg->seg_of - rof, rd_slice_remains(slice));
+
+ if (update_pos) {
+ if (slice->seg != seg) {
+ rd_assert(seg->seg_absof + rof >= slice->start &&
+ seg->seg_absof + rof + rlen <= slice->end);
+ slice->seg = seg;
+ slice->rof = rlen;
+ } else {
+ slice->rof += rlen;
+ }
+ }
+
+ return rlen;
+}
+
+
+/**
+ * @brief Convenience reader iterator interface.
+ *
+ * Call repeatedly from while loop until it returns 0.
+ *
+ * @param slice slice to read from, position will be updated.
+ * @param p will be set to the start of \p *rlenp contiguous bytes of memory
+ * @param rlenp will be set to the number of bytes available in \p p
+ *
+ * @returns the number of bytes read, or 0 if slice is empty.
+ */
+size_t rd_slice_reader(rd_slice_t *slice, const void **p) {
+ return rd_slice_reader0(slice, p, 1 /*update_pos*/);
+}
+
+/**
+ * @brief Identical to rd_slice_reader() but does NOT update the read position
+ */
+size_t rd_slice_peeker(const rd_slice_t *slice, const void **p) {
+ return rd_slice_reader0((rd_slice_t *)slice, p, 0 /*dont update_pos*/);
+}
+
+
+
+/**
+ * @brief Read \p size bytes from current read position,
+ * advancing the read offset by the number of bytes copied to \p dst.
+ *
+ * If there are less than \p size remaining in the buffer
+ * then 0 is returned and no bytes are copied.
+ *
+ * @returns \p size, or 0 if \p size bytes are not available in buffer.
+ *
+ * @remark This performs a complete read, no partitial reads.
+ *
+ * @remark If \p dst is NULL only the read position is updated.
+ */
+size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size) {
+ size_t remains = size;
+ char *d = (char *)dst; /* Possibly NULL */
+ size_t rlen;
+ const void *p;
+ size_t orig_end = slice->end;
+
+ if (unlikely(rd_slice_remains(slice) < size))
+ return 0;
+
+ /* Temporarily shrink slice to offset + \p size */
+ slice->end = rd_slice_abs_offset(slice) + size;
+
+ while ((rlen = rd_slice_reader(slice, &p))) {
+ rd_dassert(remains >= rlen);
+ if (dst) {
+ memcpy(d, p, rlen);
+ d += rlen;
+ }
+ remains -= rlen;
+ }
+
+ rd_dassert(remains == 0);
+
+ /* Restore original size */
+ slice->end = orig_end;
+
+ return size;
+}
+
+
+/**
+ * @brief Read \p size bytes from absolute slice offset \p offset
+ * and store in \p dst, without updating the slice read position.
+ *
+ * @returns \p size if the offset and size was within the slice, else 0.
+ */
+size_t
+rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size) {
+ rd_slice_t sub = *slice;
+
+ if (unlikely(rd_slice_seek(&sub, offset) == -1))
+ return 0;
+
+ return rd_slice_read(&sub, dst, size);
+}
+
+
+/**
+ * @brief Read a varint-encoded unsigned integer from \p slice,
+ * storing the decoded number in \p nump on success (return value > 0).
+ *
+ * @returns the number of bytes read on success or 0 in case of
+ * buffer underflow.
+ */
+size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump) {
+ uint64_t num = 0;
+ int shift = 0;
+ size_t rof = slice->rof;
+ const rd_segment_t *seg;
+
+ /* Traverse segments, byte for byte, until varint is decoded
+ * or no more segments available (underflow). */
+ for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) {
+ for (; rof < seg->seg_of; rof++) {
+ unsigned char oct;
+
+ if (unlikely(seg->seg_absof + rof >= slice->end))
+ return 0; /* Underflow */
+
+ oct = *(const unsigned char *)(seg->seg_p + rof);
+
+ num |= (uint64_t)(oct & 0x7f) << shift;
+ shift += 7;
+
+ if (!(oct & 0x80)) {
+ /* Done: no more bytes expected */
+ *nump = num;
+
+ /* Update slice's read pointer and offset */
+ if (slice->seg != seg)
+ slice->seg = seg;
+ slice->rof = rof + 1; /* including the +1 byte
+ * that was just read */
+
+ return shift / 7;
+ }
+ }
+
+ rof = 0;
+ }
+
+ return 0; /* Underflow */
+}
+
+
+/**
+ * @returns a pointer to \p size contiguous bytes at the current read offset.
+ * If there isn't \p size contiguous bytes available NULL will
+ * be returned.
+ *
+ * @remark The read position is updated to point past \p size.
+ */
+const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size) {
+ void *p;
+
+ if (unlikely(rd_slice_remains(slice) < size ||
+ slice->rof + size > slice->seg->seg_of))
+ return NULL;
+
+ p = slice->seg->seg_p + slice->rof;
+
+ rd_slice_read(slice, NULL, size);
+
+ return p;
+}
+
+
+
+/**
+ * @brief Sets the slice's read position. The offset is the slice offset,
+ * not buffer offset.
+ *
+ * @returns 0 if offset was within range, else -1 in which case the position
+ * is not changed.
+ */
+int rd_slice_seek(rd_slice_t *slice, size_t offset) {
+ const rd_segment_t *seg;
+ size_t absof = slice->start + offset;
+
+ if (unlikely(absof >= slice->end))
+ return -1;
+
+ seg = rd_buf_get_segment_at_offset(slice->buf, slice->seg, absof);
+ rd_assert(seg);
+
+ slice->seg = seg;
+ slice->rof = absof - seg->seg_absof;
+ rd_assert(seg->seg_absof + slice->rof >= slice->start &&
+ seg->seg_absof + slice->rof <= slice->end);
+
+ return 0;
+}
+
+
+/**
+ * @brief Narrow the current slice to \p size, saving
+ * the original slice state info \p save_slice.
+ *
+ * Use rd_slice_widen() to restore the saved slice
+ * with the read count updated from the narrowed slice.
+ *
+ * This is useful for reading a sub-slice of a larger slice
+ * without having to pass the lesser length around.
+ *
+ * @returns 1 if enough underlying slice buffer memory is available, else 0.
+ */
+int rd_slice_narrow(rd_slice_t *slice, rd_slice_t *save_slice, size_t size) {
+ if (unlikely(slice->start + size > slice->end))
+ return 0;
+ *save_slice = *slice;
+ slice->end = slice->start + size;
+ rd_assert(rd_slice_abs_offset(slice) <= slice->end);
+ return 1;
+}
+
+/**
+ * @brief Same as rd_slice_narrow() but using a relative size \p relsize
+ * from the current read position.
+ */
+int rd_slice_narrow_relative(rd_slice_t *slice,
+ rd_slice_t *save_slice,
+ size_t relsize) {
+ return rd_slice_narrow(slice, save_slice,
+ rd_slice_offset(slice) + relsize);
+}
+
+
+/**
+ * @brief Restore the original \p save_slice size from a previous call to
+ * rd_slice_narrow(), while keeping the updated read pointer from
+ * \p slice.
+ */
+void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice) {
+ slice->end = save_slice->end;
+}
+
+
+/**
+ * @brief Copy the original slice \p orig to \p new_slice and adjust
+ * the new slice length to \p size.
+ *
+ * This is a side-effect free form of rd_slice_narrow() which is not to
+ * be used with rd_slice_widen().
+ *
+ * @returns 1 if enough underlying slice buffer memory is available, else 0.
+ */
+int rd_slice_narrow_copy(const rd_slice_t *orig,
+ rd_slice_t *new_slice,
+ size_t size) {
+ if (unlikely(orig->start + size > orig->end))
+ return 0;
+ *new_slice = *orig;
+ new_slice->end = orig->start + size;
+ rd_assert(rd_slice_abs_offset(new_slice) <= new_slice->end);
+ return 1;
+}
+
+/**
+ * @brief Same as rd_slice_narrow_copy() but with a relative size from
+ * the current read position.
+ */
+int rd_slice_narrow_copy_relative(const rd_slice_t *orig,
+ rd_slice_t *new_slice,
+ size_t relsize) {
+ return rd_slice_narrow_copy(orig, new_slice,
+ rd_slice_offset(orig) + relsize);
+}
+
+
+
+/**
+ * @brief Set up the iovec \p iovs (of size \p iov_max) with the readable
+ * segments from the slice's current read position.
+ *
+ * @param iovcntp will be set to the number of populated \p iovs[]
+ * @param size_max limits the total number of bytes made available.
+ * Note: this value may be overshot with the size of one
+ * segment.
+ *
+ * @returns the total number of bytes in the represented segments.
+ *
+ * @remark will NOT update the read position.
+ */
+size_t rd_slice_get_iov(const rd_slice_t *slice,
+ struct iovec *iovs,
+ size_t *iovcntp,
+ size_t iov_max,
+ size_t size_max) {
+ const void *p;
+ size_t rlen;
+ size_t iovcnt = 0;
+ size_t sum = 0;
+ rd_slice_t copy = *slice; /* Use a copy of the slice so we dont
+ * update the position for the caller. */
+
+ while (sum < size_max && iovcnt < iov_max &&
+ (rlen = rd_slice_reader(&copy, &p))) {
+ iovs[iovcnt].iov_base = (void *)p;
+ iovs[iovcnt++].iov_len = rlen;
+
+ sum += rlen;
+ }
+
+ *iovcntp = iovcnt;
+
+ return sum;
+}
+
+
+
+/**
+ * @brief CRC32 calculation of slice.
+ *
+ * @returns the calculated CRC
+ *
+ * @remark the slice's position is updated.
+ */
+uint32_t rd_slice_crc32(rd_slice_t *slice) {
+ rd_crc32_t crc;
+ const void *p;
+ size_t rlen;
+
+ crc = rd_crc32_init();
+
+ while ((rlen = rd_slice_reader(slice, &p)))
+ crc = rd_crc32_update(crc, p, rlen);
+
+ return (uint32_t)rd_crc32_finalize(crc);
+}
+
+/**
+ * @brief Compute CRC-32C of segments starting at at buffer position \p absof,
+ * also supporting the case where the position/offset is not at the
+ * start of the first segment.
+ *
+ * @remark the slice's position is updated.
+ */
+uint32_t rd_slice_crc32c(rd_slice_t *slice) {
+ const void *p;
+ size_t rlen;
+ uint32_t crc = 0;
+
+ while ((rlen = rd_slice_reader(slice, &p)))
+ crc = rd_crc32c(crc, (const char *)p, rlen);
+
+ return crc;
+}
+
+
+
+/**
+ * @name Debugging dumpers
+ *
+ *
+ */
+
+static void rd_segment_dump(const rd_segment_t *seg,
+ const char *ind,
+ size_t relof,
+ int do_hexdump) {
+ fprintf(stderr,
+ "%s((rd_segment_t *)%p): "
+ "p %p, of %" PRIusz
+ ", "
+ "absof %" PRIusz ", size %" PRIusz ", free %p, flags 0x%x\n",
+ ind, seg, seg->seg_p, seg->seg_of, seg->seg_absof,
+ seg->seg_size, seg->seg_free, seg->seg_flags);
+ rd_assert(relof <= seg->seg_of);
+ if (do_hexdump)
+ rd_hexdump(stderr, "segment", seg->seg_p + relof,
+ seg->seg_of - relof);
+}
+
+void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump) {
+ const rd_segment_t *seg;
+
+ fprintf(stderr,
+ "((rd_buf_t *)%p):\n"
+ " len %" PRIusz " size %" PRIusz ", %" PRIusz "/%" PRIusz
+ " extra memory used\n",
+ rbuf, rbuf->rbuf_len, rbuf->rbuf_size, rbuf->rbuf_extra_len,
+ rbuf->rbuf_extra_size);
+
+ if (rbuf->rbuf_wpos) {
+ fprintf(stderr, " wpos:\n");
+ rd_segment_dump(rbuf->rbuf_wpos, " ", 0, 0);
+ }
+
+ if (rbuf->rbuf_segment_cnt > 0) {
+ size_t segcnt = 0;
+
+ fprintf(stderr, " %" PRIusz " linked segments:\n",
+ rbuf->rbuf_segment_cnt);
+ TAILQ_FOREACH(seg, &rbuf->rbuf_segments, seg_link) {
+ rd_segment_dump(seg, " ", 0, do_hexdump);
+ segcnt++;
+ rd_assert(segcnt <= rbuf->rbuf_segment_cnt);
+ }
+ }
+}
+
+void rd_slice_dump(const rd_slice_t *slice, int do_hexdump) {
+ const rd_segment_t *seg;
+ size_t relof;
+
+ fprintf(stderr,
+ "((rd_slice_t *)%p):\n"
+ " buf %p (len %" PRIusz "), seg %p (absof %" PRIusz
+ "), "
+ "rof %" PRIusz ", start %" PRIusz ", end %" PRIusz
+ ", size %" PRIusz ", offset %" PRIusz "\n",
+ slice, slice->buf, rd_buf_len(slice->buf), slice->seg,
+ slice->seg ? slice->seg->seg_absof : 0, slice->rof,
+ slice->start, slice->end, rd_slice_size(slice),
+ rd_slice_offset(slice));
+ relof = slice->rof;
+
+ for (seg = slice->seg; seg; seg = TAILQ_NEXT(seg, seg_link)) {
+ rd_segment_dump(seg, " ", relof, do_hexdump);
+ relof = 0;
+ }
+}
+
+
+/**
+ * @name Unit-tests
+ *
+ *
+ *
+ */
+
+
+/**
+ * @brief Basic write+read test
+ */
+static int do_unittest_write_read(void) {
+ rd_buf_t b;
+ char ones[1024];
+ char twos[1024];
+ char threes[1024];
+ char fiftyfives[100]; /* 0x55 indicates "untouched" memory */
+ char buf[1024 * 3];
+ rd_slice_t slice;
+ size_t r, pos;
+
+ memset(ones, 0x1, sizeof(ones));
+ memset(twos, 0x2, sizeof(twos));
+ memset(threes, 0x3, sizeof(threes));
+ memset(fiftyfives, 0x55, sizeof(fiftyfives));
+ memset(buf, 0x55, sizeof(buf));
+
+ rd_buf_init(&b, 2, 1000);
+
+ /*
+ * Verify write
+ */
+ r = rd_buf_write(&b, ones, 200);
+ RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r);
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos);
+
+ r = rd_buf_write(&b, twos, 800);
+ RD_UT_ASSERT(r == 200, "write() returned position %" PRIusz, r);
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 200 + 800, "pos() returned position %" PRIusz, pos);
+
+ /* Buffer grows here */
+ r = rd_buf_write(&b, threes, 1);
+ RD_UT_ASSERT(pos == 200 + 800, "write() returned position %" PRIusz, r);
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 200 + 800 + 1, "pos() returned position %" PRIusz,
+ pos);
+
+ /*
+ * Verify read
+ */
+ /* Get full slice. */
+ rd_slice_init_full(&slice, &b);
+
+ r = rd_slice_read(&slice, buf, 200 + 800 + 2);
+ RD_UT_ASSERT(r == 0,
+ "read() > remaining should have failed, gave %" PRIusz, r);
+ r = rd_slice_read(&slice, buf, 200 + 800 + 1);
+ RD_UT_ASSERT(r == 200 + 800 + 1,
+ "read() returned %" PRIusz " (%" PRIusz " remains)", r,
+ rd_slice_remains(&slice));
+
+ RD_UT_ASSERT(!memcmp(buf, ones, 200), "verify ones");
+ RD_UT_ASSERT(!memcmp(buf + 200, twos, 800), "verify twos");
+ RD_UT_ASSERT(!memcmp(buf + 200 + 800, threes, 1), "verify threes");
+ RD_UT_ASSERT(!memcmp(buf + 200 + 800 + 1, fiftyfives, 100),
+ "verify 55s");
+
+ rd_buf_destroy(&b);
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Helper read verifier, not a unit-test itself.
+ */
+#define do_unittest_read_verify(b, absof, len, verify) \
+ do { \
+ int __fail = do_unittest_read_verify0(b, absof, len, verify); \
+ RD_UT_ASSERT(!__fail, \
+ "read_verify(absof=%" PRIusz ",len=%" PRIusz \
+ ") " \
+ "failed", \
+ (size_t)absof, (size_t)len); \
+ } while (0)
+
+static int do_unittest_read_verify0(const rd_buf_t *b,
+ size_t absof,
+ size_t len,
+ const char *verify) {
+ rd_slice_t slice, sub;
+ char buf[1024];
+ size_t half;
+ size_t r;
+ int i;
+
+ rd_assert(sizeof(buf) >= len);
+
+ /* Get reader slice */
+ i = rd_slice_init(&slice, b, absof, len);
+ RD_UT_ASSERT(i == 0, "slice_init() failed: %d", i);
+
+ r = rd_slice_read(&slice, buf, len);
+ RD_UT_ASSERT(r == len,
+ "read() returned %" PRIusz " expected %" PRIusz
+ " (%" PRIusz " remains)",
+ r, len, rd_slice_remains(&slice));
+
+ RD_UT_ASSERT(!memcmp(buf, verify, len), "verify");
+
+ r = rd_slice_offset(&slice);
+ RD_UT_ASSERT(r == len, "offset() returned %" PRIusz ", not %" PRIusz, r,
+ len);
+
+ half = len / 2;
+ i = rd_slice_seek(&slice, half);
+ RD_UT_ASSERT(i == 0, "seek(%" PRIusz ") returned %d", half, i);
+ r = rd_slice_offset(&slice);
+ RD_UT_ASSERT(r == half, "offset() returned %" PRIusz ", not %" PRIusz,
+ r, half);
+
+ /* Get a sub-slice covering the later half. */
+ sub = rd_slice_pos(&slice);
+ r = rd_slice_offset(&sub);
+ RD_UT_ASSERT(r == 0, "sub: offset() returned %" PRIusz ", not %" PRIusz,
+ r, (size_t)0);
+ r = rd_slice_size(&sub);
+ RD_UT_ASSERT(r == half,
+ "sub: size() returned %" PRIusz ", not %" PRIusz, r, half);
+ r = rd_slice_remains(&sub);
+ RD_UT_ASSERT(r == half,
+ "sub: remains() returned %" PRIusz ", not %" PRIusz, r,
+ half);
+
+ /* Read half */
+ r = rd_slice_read(&sub, buf, half);
+ RD_UT_ASSERT(r == half,
+ "sub read() returned %" PRIusz " expected %" PRIusz
+ " (%" PRIusz " remains)",
+ r, len, rd_slice_remains(&sub));
+
+ RD_UT_ASSERT(!memcmp(buf, verify, len), "verify");
+
+ r = rd_slice_offset(&sub);
+ RD_UT_ASSERT(r == rd_slice_size(&sub),
+ "sub offset() returned %" PRIusz ", not %" PRIusz, r,
+ rd_slice_size(&sub));
+ r = rd_slice_remains(&sub);
+ RD_UT_ASSERT(r == 0,
+ "sub: remains() returned %" PRIusz ", not %" PRIusz, r,
+ (size_t)0);
+
+ return 0;
+}
+
+
+/**
+ * @brief write_seek() and split() test
+ */
+static int do_unittest_write_split_seek(void) {
+ rd_buf_t b;
+ char ones[1024];
+ char twos[1024];
+ char threes[1024];
+ char fiftyfives[100]; /* 0x55 indicates "untouched" memory */
+ char buf[1024 * 3];
+ size_t r, pos;
+ rd_segment_t *seg, *newseg;
+
+ memset(ones, 0x1, sizeof(ones));
+ memset(twos, 0x2, sizeof(twos));
+ memset(threes, 0x3, sizeof(threes));
+ memset(fiftyfives, 0x55, sizeof(fiftyfives));
+ memset(buf, 0x55, sizeof(buf));
+
+ rd_buf_init(&b, 0, 0);
+
+ /*
+ * Verify write
+ */
+ r = rd_buf_write(&b, ones, 400);
+ RD_UT_ASSERT(r == 0, "write() returned position %" PRIusz, r);
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 400, "pos() returned position %" PRIusz, pos);
+
+ do_unittest_read_verify(&b, 0, 400, ones);
+
+ /*
+ * Seek and re-write
+ */
+ r = rd_buf_write_seek(&b, 200);
+ RD_UT_ASSERT(r == 0, "seek() failed");
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 200, "pos() returned position %" PRIusz, pos);
+
+ r = rd_buf_write(&b, twos, 100);
+ RD_UT_ASSERT(pos == 200, "write() returned position %" PRIusz, r);
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos);
+
+ do_unittest_read_verify(&b, 0, 200, ones);
+ do_unittest_read_verify(&b, 200, 100, twos);
+
+ /* Make sure read() did not modify the write position. */
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos);
+
+ /* Split buffer, write position is now at split where writes
+ * are not allowed (mid buffer). */
+ seg = rd_buf_get_segment_at_offset(&b, NULL, 50);
+ RD_UT_ASSERT(seg->seg_of != 0, "assumed mid-segment");
+ newseg = rd_segment_split(&b, seg, 50);
+ rd_buf_append_segment(&b, newseg);
+ seg = rd_buf_get_segment_at_offset(&b, NULL, 50);
+ RD_UT_ASSERT(seg != NULL, "seg");
+ RD_UT_ASSERT(seg == newseg, "newseg %p, seg %p", newseg, seg);
+ RD_UT_ASSERT(seg->seg_of > 0,
+ "assumed beginning of segment, got %" PRIusz, seg->seg_of);
+
+ pos = rd_buf_write_pos(&b);
+ RD_UT_ASSERT(pos == 200 + 100, "pos() returned position %" PRIusz, pos);
+
+ /* Re-verify that nothing changed */
+ do_unittest_read_verify(&b, 0, 200, ones);
+ do_unittest_read_verify(&b, 200, 100, twos);
+
+ /* Do a write seek at buffer boundary, sub-sequent buffers should
+ * be destroyed. */
+ r = rd_buf_write_seek(&b, 50);
+ RD_UT_ASSERT(r == 0, "seek() failed");
+ do_unittest_read_verify(&b, 0, 50, ones);
+
+ rd_buf_destroy(&b);
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief Unittest to verify payload is correctly written and read.
+ * Each written u32 word is the running CRC of the word count.
+ */
+static int do_unittest_write_read_payload_correctness(void) {
+ uint32_t crc;
+ uint32_t write_crc, read_crc;
+ const int seed = 12345;
+ rd_buf_t b;
+ const size_t max_cnt = 20000;
+ rd_slice_t slice;
+ size_t r;
+ size_t i;
+ int pass;
+
+ crc = rd_crc32_init();
+ crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed));
+
+ rd_buf_init(&b, 0, 0);
+ for (i = 0; i < max_cnt; i++) {
+ crc = rd_crc32_update(crc, (void *)&i, sizeof(i));
+ rd_buf_write(&b, &crc, sizeof(crc));
+ }
+
+ write_crc = rd_crc32_finalize(crc);
+
+ r = rd_buf_len(&b);
+ RD_UT_ASSERT(r == max_cnt * sizeof(crc),
+ "expected length %" PRIusz ", not %" PRIusz, r,
+ max_cnt * sizeof(crc));
+
+ /*
+ * Now verify the contents with a reader.
+ */
+ rd_slice_init_full(&slice, &b);
+
+ r = rd_slice_remains(&slice);
+ RD_UT_ASSERT(r == rd_buf_len(&b),
+ "slice remains %" PRIusz ", should be %" PRIusz, r,
+ rd_buf_len(&b));
+
+ for (pass = 0; pass < 2; pass++) {
+ /* Two passes:
+ * - pass 1: using peek()
+ * - pass 2: using read()
+ */
+ const char *pass_str = pass == 0 ? "peek" : "read";
+
+ crc = rd_crc32_init();
+ crc = rd_crc32_update(crc, (void *)&seed, sizeof(seed));
+
+ for (i = 0; i < max_cnt; i++) {
+ uint32_t buf_crc;
+
+ crc = rd_crc32_update(crc, (void *)&i, sizeof(i));
+
+ if (pass == 0)
+ r = rd_slice_peek(&slice, i * sizeof(buf_crc),
+ &buf_crc, sizeof(buf_crc));
+ else
+ r = rd_slice_read(&slice, &buf_crc,
+ sizeof(buf_crc));
+ RD_UT_ASSERT(r == sizeof(buf_crc),
+ "%s() at #%" PRIusz
+ " failed: "
+ "r is %" PRIusz " not %" PRIusz,
+ pass_str, i, r, sizeof(buf_crc));
+ RD_UT_ASSERT(buf_crc == crc,
+ "%s: invalid crc at #%" PRIusz
+ ": expected %" PRIu32 ", read %" PRIu32,
+ pass_str, i, crc, buf_crc);
+ }
+
+ read_crc = rd_crc32_finalize(crc);
+
+ RD_UT_ASSERT(read_crc == write_crc,
+ "%s: finalized read crc %" PRIu32
+ " != write crc %" PRIu32,
+ pass_str, read_crc, write_crc);
+ }
+
+ r = rd_slice_remains(&slice);
+ RD_UT_ASSERT(r == 0, "slice remains %" PRIusz ", should be %" PRIusz, r,
+ (size_t)0);
+
+ rd_buf_destroy(&b);
+
+ RD_UT_PASS();
+}
+
+#define do_unittest_iov_verify(...) \
+ do { \
+ int __fail = do_unittest_iov_verify0(__VA_ARGS__); \
+ RD_UT_ASSERT(!__fail, "iov_verify() failed"); \
+ } while (0)
+static int
+do_unittest_iov_verify0(rd_buf_t *b, size_t exp_iovcnt, size_t exp_totsize) {
+#define MY_IOV_MAX 16
+ struct iovec iov[MY_IOV_MAX];
+ size_t iovcnt;
+ size_t i;
+ size_t totsize, sum;
+
+ rd_assert(exp_iovcnt <= MY_IOV_MAX);
+
+ totsize =
+ rd_buf_get_write_iov(b, iov, &iovcnt, MY_IOV_MAX, exp_totsize);
+ RD_UT_ASSERT(totsize >= exp_totsize,
+ "iov total size %" PRIusz " expected >= %" PRIusz, totsize,
+ exp_totsize);
+ RD_UT_ASSERT(iovcnt >= exp_iovcnt && iovcnt <= MY_IOV_MAX,
+ "iovcnt %" PRIusz ", expected %" PRIusz
+ " < x <= MY_IOV_MAX",
+ iovcnt, exp_iovcnt);
+
+ sum = 0;
+ for (i = 0; i < iovcnt; i++) {
+ RD_UT_ASSERT(iov[i].iov_base,
+ "iov #%" PRIusz " iov_base not set", i);
+ RD_UT_ASSERT(iov[i].iov_len,
+ "iov #%" PRIusz " iov_len %" PRIusz
+ " out of range",
+ i, iov[i].iov_len);
+ sum += iov[i].iov_len;
+ RD_UT_ASSERT(sum <= totsize,
+ "sum %" PRIusz " > totsize %" PRIusz, sum,
+ totsize);
+ }
+
+ RD_UT_ASSERT(sum == totsize, "sum %" PRIusz " != totsize %" PRIusz, sum,
+ totsize);
+
+ return 0;
+}
+
+
+/**
+ * @brief Verify that buffer to iovec conversion works.
+ */
+static int do_unittest_write_iov(void) {
+ rd_buf_t b;
+
+ rd_buf_init(&b, 0, 0);
+ rd_buf_write_ensure(&b, 100, 100);
+
+ do_unittest_iov_verify(&b, 1, 100);
+
+ /* Add a secondary buffer */
+ rd_buf_write_ensure(&b, 30000, 0);
+
+ do_unittest_iov_verify(&b, 2, 100 + 30000);
+
+
+ rd_buf_destroy(&b);
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief Verify that erasing parts of the buffer works.
+ */
+static int do_unittest_erase(void) {
+ static const struct {
+ const char *segs[4];
+ const char *writes[4];
+ struct {
+ size_t of;
+ size_t size;
+ size_t retsize;
+ } erasures[4];
+
+ const char *expect;
+ } in[] = {/* 12|3|45
+ * x x xx */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{1, 4, 4}},
+ .expect = "1",
+ },
+ /* 12|3|45
+ * xx */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{0, 2, 2}},
+ .expect = "345",
+ },
+ /* 12|3|45
+ * xx */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{3, 2, 2}},
+ .expect = "123",
+ },
+ /* 12|3|45
+ * x
+ * 1 |3|45
+ * x
+ * 1 | 45
+ * x */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{1, 1, 1}, {1, 1, 1}, {2, 1, 1}},
+ .expect = "14",
+ },
+ /* 12|3|45
+ * xxxxxxx */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{0, 5, 5}},
+ .expect = "",
+ },
+ /* 12|3|45
+ * x */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{0, 1, 1}},
+ .expect = "2345",
+ },
+ /* 12|3|45
+ * x */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{4, 1, 1}},
+ .expect = "1234",
+ },
+ /* 12|3|45
+ * x */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{5, 10, 0}},
+ .expect = "12345",
+ },
+ /* 12|3|45
+ * xxx */
+ {
+ .segs = {"12", "3", "45"},
+ .erasures = {{4, 3, 1}, {4, 3, 0}, {4, 3, 0}},
+ .expect = "1234",
+ },
+ /* 1
+ * xxx */
+ {
+ .segs = {"1"},
+ .erasures = {{0, 3, 1}},
+ .expect = "",
+ },
+ /* 123456
+ * xxxxxx */
+ {
+ .segs = {"123456"},
+ .erasures = {{0, 6, 6}},
+ .expect = "",
+ },
+ /* 123456789a
+ * xxx */
+ {
+ .segs = {"123456789a"},
+ .erasures = {{4, 3, 3}},
+ .expect = "123489a",
+ },
+ /* 1234|5678
+ * x xx */
+ {.segs = {"1234", "5678"},
+ .erasures = {{3, 3, 3}},
+ .writes = {"9abc"},
+ .expect = "123789abc"},
+
+ {.expect = NULL}};
+ int i;
+
+ for (i = 0; in[i].expect; i++) {
+ rd_buf_t b;
+ rd_slice_t s;
+ size_t expsz = strlen(in[i].expect);
+ char *out;
+ int j;
+ size_t r;
+ int r2;
+
+ rd_buf_init(&b, 0, 0);
+
+ /* Write segments to buffer */
+ for (j = 0; in[i].segs[j]; j++)
+ rd_buf_push_writable(&b, rd_strdup(in[i].segs[j]),
+ strlen(in[i].segs[j]), rd_free);
+
+ /* Perform erasures */
+ for (j = 0; in[i].erasures[j].retsize; j++) {
+ r = rd_buf_erase(&b, in[i].erasures[j].of,
+ in[i].erasures[j].size);
+ RD_UT_ASSERT(r == in[i].erasures[j].retsize,
+ "expected retsize %" PRIusz
+ " for i=%d,j=%d"
+ ", not %" PRIusz,
+ in[i].erasures[j].retsize, i, j, r);
+ }
+
+ /* Perform writes */
+ for (j = 0; in[i].writes[j]; j++)
+ rd_buf_write(&b, in[i].writes[j],
+ strlen(in[i].writes[j]));
+
+ RD_UT_ASSERT(expsz == rd_buf_len(&b),
+ "expected buffer to be %" PRIusz
+ " bytes, not "
+ "%" PRIusz " for i=%d",
+ expsz, rd_buf_len(&b), i);
+
+ /* Read back and verify */
+ r2 = rd_slice_init(&s, &b, 0, rd_buf_len(&b));
+ RD_UT_ASSERT((r2 == -1 && rd_buf_len(&b) == 0) ||
+ (r2 == 0 && rd_buf_len(&b) > 0),
+ "slice_init(%" PRIusz ") returned %d for i=%d",
+ rd_buf_len(&b), r2, i);
+ if (r2 == -1)
+ continue; /* Empty buffer */
+
+ RD_UT_ASSERT(expsz == rd_slice_size(&s),
+ "expected slice to be %" PRIusz
+ " bytes, not %" PRIusz " for i=%d",
+ expsz, rd_slice_size(&s), i);
+
+ out = rd_malloc(expsz);
+
+ r = rd_slice_read(&s, out, expsz);
+ RD_UT_ASSERT(r == expsz,
+ "expected to read %" PRIusz " bytes, not %" PRIusz
+ " for i=%d",
+ expsz, r, i);
+
+ RD_UT_ASSERT(!memcmp(out, in[i].expect, expsz),
+ "Expected \"%.*s\", not \"%.*s\" for i=%d",
+ (int)expsz, in[i].expect, (int)r, out, i);
+
+ rd_free(out);
+
+ RD_UT_ASSERT(rd_slice_remains(&s) == 0,
+ "expected no remaining bytes in slice, but got "
+ "%" PRIusz " for i=%d",
+ rd_slice_remains(&s), i);
+
+ rd_buf_destroy(&b);
+ }
+
+
+ RD_UT_PASS();
+}
+
+
+int unittest_rdbuf(void) {
+ int fails = 0;
+
+ fails += do_unittest_write_read();
+ fails += do_unittest_write_split_seek();
+ fails += do_unittest_write_read_payload_correctness();
+ fails += do_unittest_write_iov();
+ fails += do_unittest_erase();
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h
new file mode 100644
index 000000000..1ef30e4a9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdbuf.h
@@ -0,0 +1,373 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDBUF_H
+#define _RDBUF_H
+
+#ifndef _WIN32
+/* for struct iovec */
+#include <sys/socket.h>
+#include <sys/types.h>
+#endif
+
+#include "rdsysqueue.h"
+
+
+/**
+ * @name Generic byte buffers
+ *
+ * @{
+ *
+ * A buffer is a list of segments, each segment having a memory pointer,
+ * write offset, and capacity.
+ *
+ * The main buffer and segment structure is tailored for append-writing
+ * or append-pushing foreign memory.
+ *
+ * Updates of previously written memory regions are possible through the
+ * use of write_update() that takes an absolute offset.
+ *
+ * The write position is part of the buffer and segment structures, while
+ * read is a separate object (rd_slice_t) that does not affect the buffer.
+ */
+
+
+/**
+ * @brief Buffer segment
+ */
+typedef struct rd_segment_s {
+ TAILQ_ENTRY(rd_segment_s) seg_link; /*<< rbuf_segments Link */
+ char *seg_p; /**< Backing-store memory */
+ size_t seg_of; /**< Current relative write-position
+ * (length of payload in this segment) */
+ size_t seg_size; /**< Allocated size of seg_p */
+ size_t seg_absof; /**< Absolute offset of this segment's
+ * beginning in the grand rd_buf_t */
+ void (*seg_free)(void *p); /**< Optional free function for seg_p */
+ int seg_flags; /**< Segment flags */
+#define RD_SEGMENT_F_RDONLY 0x1 /**< Read-only segment */
+#define RD_SEGMENT_F_FREE \
+ 0x2 /**< Free segment on destroy, \
+ * e.g, not a fixed segment. */
+} rd_segment_t;
+
+
+
+TAILQ_HEAD(rd_segment_head, rd_segment_s);
+
+/**
+ * @brief Buffer, containing a list of segments.
+ */
+typedef struct rd_buf_s {
+ struct rd_segment_head rbuf_segments; /**< TAILQ list of segments */
+ size_t rbuf_segment_cnt; /**< Number of segments */
+
+ rd_segment_t *rbuf_wpos; /**< Current write position seg */
+ size_t rbuf_len; /**< Current (written) length */
+ size_t rbuf_erased; /**< Total number of bytes
+ * erased from segments.
+ * This amount is taken into
+ * account when checking for
+ * writable space which is
+ * always at the end of the
+ * buffer and thus can't make
+ * use of the erased parts. */
+ size_t rbuf_size; /**< Total allocated size of
+ * all segments. */
+
+ char *rbuf_extra; /* Extra memory allocated for
+ * use by segment structs,
+ * buffer memory, etc. */
+ size_t rbuf_extra_len; /* Current extra memory used */
+ size_t rbuf_extra_size; /* Total size of extra memory */
+} rd_buf_t;
+
+
+
+/**
+ * @brief A read-only slice of a buffer.
+ */
+typedef struct rd_slice_s {
+ const rd_buf_t *buf; /**< Pointer to buffer */
+ const rd_segment_t *seg; /**< Current read position segment.
+ * Will point to NULL when end of
+ * slice is reached. */
+ size_t rof; /**< Relative read offset in segment */
+ size_t start; /**< Slice start offset in buffer */
+ size_t end; /**< Slice end offset in buffer+1 */
+} rd_slice_t;
+
+
+
+/**
+ * @returns the current write position (absolute offset)
+ */
+static RD_INLINE RD_UNUSED size_t rd_buf_write_pos(const rd_buf_t *rbuf) {
+ const rd_segment_t *seg = rbuf->rbuf_wpos;
+
+ if (unlikely(!seg)) {
+#if ENABLE_DEVEL
+ rd_assert(rbuf->rbuf_len == 0);
+#endif
+ return 0;
+ }
+#if ENABLE_DEVEL
+ rd_assert(seg->seg_absof + seg->seg_of == rbuf->rbuf_len);
+#endif
+ return seg->seg_absof + seg->seg_of;
+}
+
+
+/**
+ * @returns the number of bytes available for writing (before growing).
+ */
+static RD_INLINE RD_UNUSED size_t rd_buf_write_remains(const rd_buf_t *rbuf) {
+ return rbuf->rbuf_size - (rbuf->rbuf_len + rbuf->rbuf_erased);
+}
+
+
+
+/**
+ * @returns the number of bytes remaining to write to the given segment,
+ * and sets the \p *p pointer (unless NULL) to the start of
+ * the contiguous memory.
+ */
+static RD_INLINE RD_UNUSED size_t
+rd_segment_write_remains(const rd_segment_t *seg, void **p) {
+ if (unlikely((seg->seg_flags & RD_SEGMENT_F_RDONLY)))
+ return 0;
+ if (p)
+ *p = (void *)(seg->seg_p + seg->seg_of);
+ return seg->seg_size - seg->seg_of;
+}
+
+
+
+/**
+ * @returns the last segment for the buffer.
+ */
+static RD_INLINE RD_UNUSED rd_segment_t *rd_buf_last(const rd_buf_t *rbuf) {
+ return TAILQ_LAST(&rbuf->rbuf_segments, rd_segment_head);
+}
+
+
+/**
+ * @returns the total written buffer length
+ */
+static RD_INLINE RD_UNUSED size_t rd_buf_len(const rd_buf_t *rbuf) {
+ return rbuf->rbuf_len;
+}
+
+
+int rd_buf_write_seek(rd_buf_t *rbuf, size_t absof);
+
+
+size_t rd_buf_write(rd_buf_t *rbuf, const void *payload, size_t size);
+size_t rd_buf_write_slice(rd_buf_t *rbuf, rd_slice_t *slice);
+size_t rd_buf_write_update(rd_buf_t *rbuf,
+ size_t absof,
+ const void *payload,
+ size_t size);
+void rd_buf_push0(rd_buf_t *rbuf,
+ const void *payload,
+ size_t size,
+ void (*free_cb)(void *),
+ rd_bool_t writable);
+#define rd_buf_push(rbuf, payload, size, free_cb) \
+ rd_buf_push0(rbuf, payload, size, free_cb, rd_false /*not-writable*/)
+#define rd_buf_push_writable(rbuf, payload, size, free_cb) \
+ rd_buf_push0(rbuf, payload, size, free_cb, rd_true /*writable*/)
+
+size_t rd_buf_erase(rd_buf_t *rbuf, size_t absof, size_t size);
+
+size_t rd_buf_get_writable(rd_buf_t *rbuf, void **p);
+
+void rd_buf_write_ensure_contig(rd_buf_t *rbuf, size_t size);
+
+void rd_buf_write_ensure(rd_buf_t *rbuf, size_t min_size, size_t max_size);
+
+size_t rd_buf_get_write_iov(const rd_buf_t *rbuf,
+ struct iovec *iovs,
+ size_t *iovcntp,
+ size_t iov_max,
+ size_t size_max);
+
+void rd_buf_init(rd_buf_t *rbuf, size_t fixed_seg_cnt, size_t buf_size);
+rd_buf_t *rd_buf_new(size_t fixed_seg_cnt, size_t buf_size);
+
+void rd_buf_destroy(rd_buf_t *rbuf);
+void rd_buf_destroy_free(rd_buf_t *rbuf);
+
+void rd_buf_dump(const rd_buf_t *rbuf, int do_hexdump);
+
+int unittest_rdbuf(void);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Buffer reads operate on slices of an rd_buf_t and does not
+ * modify the underlying rd_buf_t itself.
+ *
+ * @warning A slice will not be valid/safe after the buffer or
+ * segments have been modified by a buf write operation
+ * (write, update, write_seek, etc).
+ * @{
+ */
+
+
+/**
+ * @returns the remaining length in the slice
+ */
+#define rd_slice_remains(slice) ((slice)->end - rd_slice_abs_offset(slice))
+
+/**
+ * @returns the total size of the slice, regardless of current position.
+ */
+#define rd_slice_size(slice) ((slice)->end - (slice)->start)
+
+/**
+ * @returns the read position in the slice as a new slice.
+ */
+static RD_INLINE RD_UNUSED rd_slice_t rd_slice_pos(const rd_slice_t *slice) {
+ rd_slice_t newslice = *slice;
+
+ if (!slice->seg)
+ return newslice;
+
+ newslice.start = slice->seg->seg_absof + slice->rof;
+
+ return newslice;
+}
+
+/**
+ * @returns the read position as an absolute buffer byte offset.
+ * @remark this is the buffer offset, not the slice's local offset.
+ */
+static RD_INLINE RD_UNUSED size_t rd_slice_abs_offset(const rd_slice_t *slice) {
+ if (unlikely(!slice->seg)) /* reader has reached the end */
+ return slice->end;
+
+ return slice->seg->seg_absof + slice->rof;
+}
+
+/**
+ * @returns the read position as a byte offset.
+ * @remark this is the slice-local offset, not the backing buffer's offset.
+ */
+static RD_INLINE RD_UNUSED size_t rd_slice_offset(const rd_slice_t *slice) {
+ if (unlikely(!slice->seg)) /* reader has reached the end */
+ return rd_slice_size(slice);
+
+ return (slice->seg->seg_absof + slice->rof) - slice->start;
+}
+
+
+
+int rd_slice_init_seg(rd_slice_t *slice,
+ const rd_buf_t *rbuf,
+ const rd_segment_t *seg,
+ size_t rof,
+ size_t size);
+int rd_slice_init(rd_slice_t *slice,
+ const rd_buf_t *rbuf,
+ size_t absof,
+ size_t size);
+void rd_slice_init_full(rd_slice_t *slice, const rd_buf_t *rbuf);
+
+size_t rd_slice_reader(rd_slice_t *slice, const void **p);
+size_t rd_slice_peeker(const rd_slice_t *slice, const void **p);
+
+size_t rd_slice_read(rd_slice_t *slice, void *dst, size_t size);
+size_t
+rd_slice_peek(const rd_slice_t *slice, size_t offset, void *dst, size_t size);
+
+size_t rd_slice_read_uvarint(rd_slice_t *slice, uint64_t *nump);
+
+/**
+ * @brief Read a zig-zag varint-encoded signed integer from \p slice,
+ * storing the decoded number in \p nump on success (return value > 0).
+ *
+ * @returns the number of bytes read on success or 0 in case of
+ * buffer underflow.
+ */
+static RD_UNUSED RD_INLINE size_t rd_slice_read_varint(rd_slice_t *slice,
+ int64_t *nump) {
+ size_t r;
+ uint64_t unum;
+
+ r = rd_slice_read_uvarint(slice, &unum);
+ if (likely(r > 0)) {
+ /* Zig-zag decoding */
+ *nump = (int64_t)((unum >> 1) ^ -(int64_t)(unum & 1));
+ }
+
+ return r;
+}
+
+
+
+const void *rd_slice_ensure_contig(rd_slice_t *slice, size_t size);
+
+int rd_slice_seek(rd_slice_t *slice, size_t offset);
+
+size_t rd_slice_get_iov(const rd_slice_t *slice,
+ struct iovec *iovs,
+ size_t *iovcntp,
+ size_t iov_max,
+ size_t size_max);
+
+
+uint32_t rd_slice_crc32(rd_slice_t *slice);
+uint32_t rd_slice_crc32c(rd_slice_t *slice);
+
+
+int rd_slice_narrow(rd_slice_t *slice,
+ rd_slice_t *save_slice,
+ size_t size) RD_WARN_UNUSED_RESULT;
+int rd_slice_narrow_relative(rd_slice_t *slice,
+ rd_slice_t *save_slice,
+ size_t relsize) RD_WARN_UNUSED_RESULT;
+void rd_slice_widen(rd_slice_t *slice, const rd_slice_t *save_slice);
+int rd_slice_narrow_copy(const rd_slice_t *orig,
+ rd_slice_t *new_slice,
+ size_t size) RD_WARN_UNUSED_RESULT;
+int rd_slice_narrow_copy_relative(const rd_slice_t *orig,
+ rd_slice_t *new_slice,
+ size_t relsize) RD_WARN_UNUSED_RESULT;
+
+void rd_slice_dump(const rd_slice_t *slice, int do_hexdump);
+
+
+/**@}*/
+
+
+
+#endif /* _RDBUF_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c
new file mode 100644
index 000000000..2a6e126c1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.c
@@ -0,0 +1,114 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * \file rdcrc32.c
+ * Functions and types for CRC checks.
+ *
+ *
+ *
+ * Generated on Tue May 8 17:37:04 2012,
+ * by pycrc v0.7.10, http://www.tty1.net/pycrc/
+ * using the configuration:
+ * Width = 32
+ * Poly = 0x04c11db7
+ * XorIn = 0xffffffff
+ * ReflectIn = True
+ * XorOut = 0xffffffff
+ * ReflectOut = True
+ * Algorithm = table-driven
+ *****************************************************************************/
+#include "rdcrc32.h" /* include the header file generated with pycrc */
+#include <stdlib.h>
+#include <stdint.h>
+
+/**
+ * Static table used for the table_driven implementation.
+ *****************************************************************************/
+const rd_crc32_t crc_table[256] = {
+ 0x00000000, 0x77073096, 0xee0e612c, 0x990951ba, 0x076dc419, 0x706af48f,
+ 0xe963a535, 0x9e6495a3, 0x0edb8832, 0x79dcb8a4, 0xe0d5e91e, 0x97d2d988,
+ 0x09b64c2b, 0x7eb17cbd, 0xe7b82d07, 0x90bf1d91, 0x1db71064, 0x6ab020f2,
+ 0xf3b97148, 0x84be41de, 0x1adad47d, 0x6ddde4eb, 0xf4d4b551, 0x83d385c7,
+ 0x136c9856, 0x646ba8c0, 0xfd62f97a, 0x8a65c9ec, 0x14015c4f, 0x63066cd9,
+ 0xfa0f3d63, 0x8d080df5, 0x3b6e20c8, 0x4c69105e, 0xd56041e4, 0xa2677172,
+ 0x3c03e4d1, 0x4b04d447, 0xd20d85fd, 0xa50ab56b, 0x35b5a8fa, 0x42b2986c,
+ 0xdbbbc9d6, 0xacbcf940, 0x32d86ce3, 0x45df5c75, 0xdcd60dcf, 0xabd13d59,
+ 0x26d930ac, 0x51de003a, 0xc8d75180, 0xbfd06116, 0x21b4f4b5, 0x56b3c423,
+ 0xcfba9599, 0xb8bda50f, 0x2802b89e, 0x5f058808, 0xc60cd9b2, 0xb10be924,
+ 0x2f6f7c87, 0x58684c11, 0xc1611dab, 0xb6662d3d, 0x76dc4190, 0x01db7106,
+ 0x98d220bc, 0xefd5102a, 0x71b18589, 0x06b6b51f, 0x9fbfe4a5, 0xe8b8d433,
+ 0x7807c9a2, 0x0f00f934, 0x9609a88e, 0xe10e9818, 0x7f6a0dbb, 0x086d3d2d,
+ 0x91646c97, 0xe6635c01, 0x6b6b51f4, 0x1c6c6162, 0x856530d8, 0xf262004e,
+ 0x6c0695ed, 0x1b01a57b, 0x8208f4c1, 0xf50fc457, 0x65b0d9c6, 0x12b7e950,
+ 0x8bbeb8ea, 0xfcb9887c, 0x62dd1ddf, 0x15da2d49, 0x8cd37cf3, 0xfbd44c65,
+ 0x4db26158, 0x3ab551ce, 0xa3bc0074, 0xd4bb30e2, 0x4adfa541, 0x3dd895d7,
+ 0xa4d1c46d, 0xd3d6f4fb, 0x4369e96a, 0x346ed9fc, 0xad678846, 0xda60b8d0,
+ 0x44042d73, 0x33031de5, 0xaa0a4c5f, 0xdd0d7cc9, 0x5005713c, 0x270241aa,
+ 0xbe0b1010, 0xc90c2086, 0x5768b525, 0x206f85b3, 0xb966d409, 0xce61e49f,
+ 0x5edef90e, 0x29d9c998, 0xb0d09822, 0xc7d7a8b4, 0x59b33d17, 0x2eb40d81,
+ 0xb7bd5c3b, 0xc0ba6cad, 0xedb88320, 0x9abfb3b6, 0x03b6e20c, 0x74b1d29a,
+ 0xead54739, 0x9dd277af, 0x04db2615, 0x73dc1683, 0xe3630b12, 0x94643b84,
+ 0x0d6d6a3e, 0x7a6a5aa8, 0xe40ecf0b, 0x9309ff9d, 0x0a00ae27, 0x7d079eb1,
+ 0xf00f9344, 0x8708a3d2, 0x1e01f268, 0x6906c2fe, 0xf762575d, 0x806567cb,
+ 0x196c3671, 0x6e6b06e7, 0xfed41b76, 0x89d32be0, 0x10da7a5a, 0x67dd4acc,
+ 0xf9b9df6f, 0x8ebeeff9, 0x17b7be43, 0x60b08ed5, 0xd6d6a3e8, 0xa1d1937e,
+ 0x38d8c2c4, 0x4fdff252, 0xd1bb67f1, 0xa6bc5767, 0x3fb506dd, 0x48b2364b,
+ 0xd80d2bda, 0xaf0a1b4c, 0x36034af6, 0x41047a60, 0xdf60efc3, 0xa867df55,
+ 0x316e8eef, 0x4669be79, 0xcb61b38c, 0xbc66831a, 0x256fd2a0, 0x5268e236,
+ 0xcc0c7795, 0xbb0b4703, 0x220216b9, 0x5505262f, 0xc5ba3bbe, 0xb2bd0b28,
+ 0x2bb45a92, 0x5cb36a04, 0xc2d7ffa7, 0xb5d0cf31, 0x2cd99e8b, 0x5bdeae1d,
+ 0x9b64c2b0, 0xec63f226, 0x756aa39c, 0x026d930a, 0x9c0906a9, 0xeb0e363f,
+ 0x72076785, 0x05005713, 0x95bf4a82, 0xe2b87a14, 0x7bb12bae, 0x0cb61b38,
+ 0x92d28e9b, 0xe5d5be0d, 0x7cdcefb7, 0x0bdbdf21, 0x86d3d2d4, 0xf1d4e242,
+ 0x68ddb3f8, 0x1fda836e, 0x81be16cd, 0xf6b9265b, 0x6fb077e1, 0x18b74777,
+ 0x88085ae6, 0xff0f6a70, 0x66063bca, 0x11010b5c, 0x8f659eff, 0xf862ae69,
+ 0x616bffd3, 0x166ccf45, 0xa00ae278, 0xd70dd2ee, 0x4e048354, 0x3903b3c2,
+ 0xa7672661, 0xd06016f7, 0x4969474d, 0x3e6e77db, 0xaed16a4a, 0xd9d65adc,
+ 0x40df0b66, 0x37d83bf0, 0xa9bcae53, 0xdebb9ec5, 0x47b2cf7f, 0x30b5ffe9,
+ 0xbdbdf21c, 0xcabac28a, 0x53b39330, 0x24b4a3a6, 0xbad03605, 0xcdd70693,
+ 0x54de5729, 0x23d967bf, 0xb3667a2e, 0xc4614ab8, 0x5d681b02, 0x2a6f2b94,
+ 0xb40bbe37, 0xc30c8ea1, 0x5a05df1b, 0x2d02ef8d};
+
+/**
+ * Reflect all bits of a \a data word of \a data_len bytes.
+ *
+ * \param data The data word to be reflected.
+ * \param data_len The width of \a data expressed in number of bits.
+ * \return The reflected data.
+ *****************************************************************************/
+rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len) {
+ unsigned int i;
+ rd_crc32_t ret;
+
+ ret = data & 0x01;
+ for (i = 1; i < data_len; i++) {
+ data >>= 1;
+ ret = (ret << 1) | (data & 0x01);
+ }
+ return ret;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h
new file mode 100644
index 000000000..c3195fca6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdcrc32.h
@@ -0,0 +1,170 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+/**
+ * \file rdcrc32.h
+ * Functions and types for CRC checks.
+ *
+ * Generated on Tue May 8 17:36:59 2012,
+ * by pycrc v0.7.10, http://www.tty1.net/pycrc/
+ *
+ * NOTE: Contains librd modifications:
+ * - rd_crc32() helper.
+ * - __RDCRC32___H__ define (was missing the '32' part).
+ *
+ * using the configuration:
+ * Width = 32
+ * Poly = 0x04c11db7
+ * XorIn = 0xffffffff
+ * ReflectIn = True
+ * XorOut = 0xffffffff
+ * ReflectOut = True
+ * Algorithm = table-driven
+ *****************************************************************************/
+#ifndef __RDCRC32___H__
+#define __RDCRC32___H__
+
+#include "rd.h"
+
+#include <stdlib.h>
+#include <stdint.h>
+
+#if WITH_ZLIB
+#include <zlib.h>
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+
+/**
+ * The definition of the used algorithm.
+ *****************************************************************************/
+#define CRC_ALGO_TABLE_DRIVEN 1
+
+
+/**
+ * The type of the CRC values.
+ *
+ * This type must be big enough to contain at least 32 bits.
+ *****************************************************************************/
+typedef uint32_t rd_crc32_t;
+
+#if !WITH_ZLIB
+extern const rd_crc32_t crc_table[256];
+#endif
+
+
+/**
+ * Reflect all bits of a \a data word of \a data_len bytes.
+ *
+ * \param data The data word to be reflected.
+ * \param data_len The width of \a data expressed in number of bits.
+ * \return The reflected data.
+ *****************************************************************************/
+rd_crc32_t rd_crc32_reflect(rd_crc32_t data, size_t data_len);
+
+
+/**
+ * Calculate the initial crc value.
+ *
+ * \return The initial crc value.
+ *****************************************************************************/
+static RD_INLINE rd_crc32_t rd_crc32_init(void) {
+#if WITH_ZLIB
+ return crc32(0, NULL, 0);
+#else
+ return 0xffffffff;
+#endif
+}
+
+
+/**
+ * Update the crc value with new data.
+ *
+ * \param crc The current crc value.
+ * \param data Pointer to a buffer of \a data_len bytes.
+ * \param data_len Number of bytes in the \a data buffer.
+ * \return The updated crc value.
+ *****************************************************************************/
+/**
+ * Update the crc value with new data.
+ *
+ * \param crc The current crc value.
+ * \param data Pointer to a buffer of \a data_len bytes.
+ * \param data_len Number of bytes in the \a data buffer.
+ * \return The updated crc value.
+ *****************************************************************************/
+static RD_INLINE RD_UNUSED rd_crc32_t rd_crc32_update(rd_crc32_t crc,
+ const unsigned char *data,
+ size_t data_len) {
+#if WITH_ZLIB
+ rd_assert(data_len <= UINT_MAX);
+ return crc32(crc, data, (uInt)data_len);
+#else
+ unsigned int tbl_idx;
+
+ while (data_len--) {
+ tbl_idx = (crc ^ *data) & 0xff;
+ crc = (crc_table[tbl_idx] ^ (crc >> 8)) & 0xffffffff;
+
+ data++;
+ }
+ return crc & 0xffffffff;
+#endif
+}
+
+
+/**
+ * Calculate the final crc value.
+ *
+ * \param crc The current crc value.
+ * \return The final crc value.
+ *****************************************************************************/
+static RD_INLINE rd_crc32_t rd_crc32_finalize(rd_crc32_t crc) {
+#if WITH_ZLIB
+ return crc;
+#else
+ return crc ^ 0xffffffff;
+#endif
+}
+
+
+/**
+ * Wrapper for performing CRC32 on the provided buffer.
+ */
+static RD_INLINE rd_crc32_t rd_crc32(const char *data, size_t data_len) {
+ return rd_crc32_finalize(rd_crc32_update(
+ rd_crc32_init(), (const unsigned char *)data, data_len));
+}
+
+#ifdef __cplusplus
+} /* closing brace for extern "C" */
+#endif
+
+#endif /* __RDCRC32___H__ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c
new file mode 100644
index 000000000..785e28c48
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rddl.c
@@ -0,0 +1,179 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rddl.h"
+
+#if WITH_LIBDL
+#include <dlfcn.h>
+
+#elif defined(_WIN32)
+
+#else
+#error "Dynamic library loading not supported on this platform"
+#endif
+
+
+
+/**
+ * @brief Latest thread-local dl error, normalized to suit our logging.
+ * @returns a newly allocated string that must be freed
+ */
+static char *rd_dl_error(void) {
+#if WITH_LIBDL
+ char *errstr;
+ char *s;
+ errstr = dlerror();
+ if (!errstr)
+ return rd_strdup("No error returned from dlerror()");
+
+ errstr = rd_strdup(errstr);
+ /* Change newlines to separators. */
+ while ((s = strchr(errstr, '\n')))
+ *s = '.';
+
+ return errstr;
+
+#elif defined(_WIN32)
+ char buf[1024];
+ rd_strerror_w32(GetLastError(), buf, sizeof(buf));
+ return rd_strdup(buf);
+#endif
+}
+
+/**
+ * @brief Attempt to load library \p path.
+ * @returns the library handle (platform dependent, thus opaque) on success,
+ * else NULL.
+ */
+static rd_dl_hnd_t *
+rd_dl_open0(const char *path, char *errstr, size_t errstr_size) {
+ void *handle;
+ const char *loadfunc;
+#if WITH_LIBDL
+ loadfunc = "dlopen()";
+ handle = dlopen(path, RTLD_NOW | RTLD_LOCAL);
+#elif defined(_WIN32)
+ loadfunc = "LoadLibrary()";
+ handle = (void *)LoadLibraryA(path);
+#endif
+ if (!handle) {
+ char *dlerrstr = rd_dl_error();
+ rd_snprintf(errstr, errstr_size, "%s failed: %s", loadfunc,
+ dlerrstr);
+ rd_free(dlerrstr);
+ }
+ return (rd_dl_hnd_t *)handle;
+}
+
+
+/**
+ * @brief Attempt to load library \p path, possibly with a filename extension
+ * which will be automatically resolved depending on platform.
+ * @returns the library handle (platform dependent, thus opaque) on success,
+ * else NULL.
+ */
+rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size) {
+ rd_dl_hnd_t *handle;
+ char *extpath;
+ size_t pathlen;
+ const char *td, *fname;
+ const char *solib_ext = SOLIB_EXT;
+
+ /* Try original path first. */
+ handle = rd_dl_open0(path, errstr, errstr_size);
+ if (handle)
+ return handle;
+
+ /* Original path not found, see if we can append the solib_ext
+ * filename extension. */
+
+ /* Get filename and filename extension.
+ * We can't rely on basename(3) since it is not portable */
+ fname = strrchr(path, '/');
+#ifdef _WIN32
+ td = strrchr(path, '\\');
+ if (td > fname)
+ fname = td;
+#endif
+ if (!fname)
+ fname = path;
+
+ td = strrchr(fname, '.');
+
+ /* If there is a filename extension ('.' within the last characters)
+ * then bail out, we will not append an extension in this case. */
+ if (td && td >= fname + strlen(fname) - strlen(SOLIB_EXT))
+ return NULL;
+
+ /* Append platform-specific library extension. */
+ pathlen = strlen(path);
+ extpath = rd_alloca(pathlen + strlen(solib_ext) + 1);
+ memcpy(extpath, path, pathlen);
+ memcpy(extpath + pathlen, solib_ext, strlen(solib_ext) + 1);
+
+ /* Try again with extension */
+ return rd_dl_open0(extpath, errstr, errstr_size);
+}
+
+
+/**
+ * @brief Close handle previously returned by rd_dl_open()
+ * @remark errors are ignored (what can we do anyway?)
+ */
+void rd_dl_close(rd_dl_hnd_t *handle) {
+#if WITH_LIBDL
+ dlclose((void *)handle);
+#elif defined(_WIN32)
+ FreeLibrary((HMODULE)handle);
+#endif
+}
+
+/**
+ * @brief look up address of \p symbol in library handle \p handle
+ * @returns the function pointer on success or NULL on error.
+ */
+void *rd_dl_sym(rd_dl_hnd_t *handle,
+ const char *symbol,
+ char *errstr,
+ size_t errstr_size) {
+ void *func;
+#if WITH_LIBDL
+ func = dlsym((void *)handle, symbol);
+#elif defined(_WIN32)
+ func = GetProcAddress((HMODULE)handle, symbol);
+#endif
+ if (!func) {
+ char *dlerrstr = rd_dl_error();
+ rd_snprintf(errstr, errstr_size,
+ "Failed to load symbol \"%s\": %s", symbol,
+ dlerrstr);
+ rd_free(dlerrstr);
+ }
+ return func;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h
new file mode 100644
index 000000000..eaf6eb6d5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rddl.h
@@ -0,0 +1,43 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDDL_H
+#define _RDDL_H
+
+#include <sys/types.h>
+
+typedef void rd_dl_hnd_t;
+
+rd_dl_hnd_t *rd_dl_open(const char *path, char *errstr, size_t errstr_size);
+void rd_dl_close(rd_dl_hnd_t *handle);
+void *rd_dl_sym(rd_dl_hnd_t *handle,
+ const char *symbol,
+ char *errstr,
+ size_t errstr_size);
+
+#endif /* _RDDL_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h
new file mode 100644
index 000000000..613d44bfa
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdendian.h
@@ -0,0 +1,174 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDENDIAN_H_
+#define _RDENDIAN_H_
+
+/**
+ * Provides portable endian-swapping macros/functions.
+ *
+ * be64toh()
+ * htobe64()
+ * be32toh()
+ * htobe32()
+ * be16toh()
+ * htobe16()
+ * le64toh()
+ */
+
+#ifdef __FreeBSD__
+#include <sys/endian.h>
+#elif defined __GLIBC__
+#include <endian.h>
+#ifndef be64toh
+/* Support older glibc (<2.9) which lack be64toh */
+#include <byteswap.h>
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define be16toh(x) (x)
+#define be32toh(x) (x)
+#define be64toh(x) (x)
+#define le64toh(x) __bswap_64(x)
+#define le32toh(x) __bswap_32(x)
+#else
+#define be16toh(x) __bswap_16(x)
+#define be32toh(x) __bswap_32(x)
+#define be64toh(x) __bswap_64(x)
+#define le64toh(x) (x)
+#define le32toh(x) (x)
+#endif
+#endif
+
+#elif defined __CYGWIN__
+#include <endian.h>
+#elif defined __BSD__
+#include <sys/endian.h>
+#elif defined __sun
+#include <sys/byteorder.h>
+#include <sys/isa_defs.h>
+#define __LITTLE_ENDIAN 1234
+#define __BIG_ENDIAN 4321
+#ifdef _BIG_ENDIAN
+#define __BYTE_ORDER __BIG_ENDIAN
+#define be64toh(x) (x)
+#define be32toh(x) (x)
+#define be16toh(x) (x)
+#define le16toh(x) ((uint16_t)BSWAP_16(x))
+#define le32toh(x) BSWAP_32(x)
+#define le64toh(x) BSWAP_64(x)
+#else
+#define __BYTE_ORDER __LITTLE_ENDIAN
+#define be64toh(x) BSWAP_64(x)
+#define be32toh(x) ntohl(x)
+#define be16toh(x) ntohs(x)
+#define le16toh(x) (x)
+#define le32toh(x) (x)
+#define le64toh(x) (x)
+#define htole16(x) (x)
+#define htole64(x) (x)
+#endif /* __sun */
+
+#elif defined __APPLE__
+#include <machine/endian.h>
+#include <libkern/OSByteOrder.h>
+#if __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
+#define be64toh(x) (x)
+#define be32toh(x) (x)
+#define be16toh(x) (x)
+#define le16toh(x) OSSwapInt16(x)
+#define le32toh(x) OSSwapInt32(x)
+#define le64toh(x) OSSwapInt64(x)
+#else
+#define be64toh(x) OSSwapInt64(x)
+#define be32toh(x) OSSwapInt32(x)
+#define be16toh(x) OSSwapInt16(x)
+#define le16toh(x) (x)
+#define le32toh(x) (x)
+#define le64toh(x) (x)
+#endif
+
+#elif defined(_WIN32)
+#include <intrin.h>
+
+#define be64toh(x) _byteswap_uint64(x)
+#define be32toh(x) _byteswap_ulong(x)
+#define be16toh(x) _byteswap_ushort(x)
+#define le16toh(x) (x)
+#define le32toh(x) (x)
+#define le64toh(x) (x)
+
+#elif defined _AIX /* AIX is always big endian */
+#define be64toh(x) (x)
+#define be32toh(x) (x)
+#define be16toh(x) (x)
+#define le32toh(x) \
+ ((((x)&0xff) << 24) | (((x)&0xff00) << 8) | (((x)&0xff0000) >> 8) | \
+ (((x)&0xff000000) >> 24))
+#define le64toh(x) \
+ ((((x)&0x00000000000000ffL) << 56) | \
+ (((x)&0x000000000000ff00L) << 40) | \
+ (((x)&0x0000000000ff0000L) << 24) | \
+ (((x)&0x00000000ff000000L) << 8) | (((x)&0x000000ff00000000L) >> 8) | \
+ (((x)&0x0000ff0000000000L) >> 24) | \
+ (((x)&0x00ff000000000000L) >> 40) | \
+ (((x)&0xff00000000000000L) >> 56))
+#else
+#include <endian.h>
+#endif
+
+
+
+/*
+ * On Solaris, be64toh is a function, not a macro, so there's no need to error
+ * if it's not defined.
+ */
+#if !defined(__sun) && !defined(be64toh)
+#error Missing definition for be64toh
+#endif
+
+#ifndef be32toh
+#define be32toh(x) ntohl(x)
+#endif
+
+#ifndef be16toh
+#define be16toh(x) ntohs(x)
+#endif
+
+#ifndef htobe64
+#define htobe64(x) be64toh(x)
+#endif
+#ifndef htobe32
+#define htobe32(x) be32toh(x)
+#endif
+#ifndef htobe16
+#define htobe16(x) be16toh(x)
+#endif
+
+#ifndef htole32
+#define htole32(x) le32toh(x)
+#endif
+
+#endif /* _RDENDIAN_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h
new file mode 100644
index 000000000..310045f0e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdfloat.h
@@ -0,0 +1,67 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#pragma once
+
+#include <math.h>
+
+/**
+ * rd_dbl_eq0(a,b,prec)
+ * Check two doubles for equality with the specified precision.
+ * Use this instead of != and == for all floats/doubles.
+ * More info:
+ * http://docs.sun.com/source/806-3568/ncg_goldberg.html
+ */
+static RD_INLINE RD_UNUSED int rd_dbl_eq0(double a, double b, double prec) {
+ return fabs(a - b) < prec;
+}
+
+/* A default 'good' double-equality precision value.
+ * This rather timid epsilon value is useful for tenths, hundreths,
+ * and thousands parts, but not anything more precis than that.
+ * If a higher precision is needed, use dbl_eq0 and dbl_eq0 directly
+ * and specify your own precision. */
+#define RD_DBL_EPSILON 0.00001
+
+/**
+ * rd_dbl_eq(a,b)
+ * Same as rd_dbl_eq0() above but with a predefined 'good' precision.
+ */
+#define rd_dbl_eq(a, b) rd_dbl_eq0(a, b, RD_DBL_EPSILON)
+
+/**
+ * rd_dbl_ne(a,b)
+ * Same as rd_dbl_eq() above but with reversed logic: not-equal.
+ */
+#define rd_dbl_ne(a, b) (!rd_dbl_eq0(a, b, RD_DBL_EPSILON))
+
+/**
+ * rd_dbl_zero(a)
+ * Checks if the double `a' is zero (or close enough).
+ */
+#define rd_dbl_zero(a) rd_dbl_eq0(a, 0.0, RD_DBL_EPSILON)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c
new file mode 100644
index 000000000..e951ec59f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.c
@@ -0,0 +1,113 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdunittest.h"
+#include "rdfnv1a.h"
+
+
+/* FNV-1a by Glenn Fowler, Landon Curt Noll, and Kiem-Phong Vo
+ *
+ * Based on http://www.isthe.com/chongo/src/fnv/hash_32a.c
+ * with librdkafka modifications to match the Sarama default Producer
+ * implementation, as seen here:
+ * https://github.com/Shopify/sarama/blob/master/partitioner.go#L203 Note that
+ * this implementation is only compatible with Sarama's default
+ * NewHashPartitioner and not NewReferenceHashPartitioner.
+ */
+uint32_t rd_fnv1a(const void *key, size_t len) {
+ const uint32_t prime = 0x01000193; // 16777619
+ const uint32_t offset = 0x811C9DC5; // 2166136261
+ size_t i;
+ int32_t h = offset;
+
+ const unsigned char *data = (const unsigned char *)key;
+
+ for (i = 0; i < len; i++) {
+ h ^= data[i];
+ h *= prime;
+ }
+
+ /* Take absolute value to match the Sarama NewHashPartitioner
+ * implementation */
+ if (h < 0) {
+ h = -h;
+ }
+
+ return (uint32_t)h;
+}
+
+
+/**
+ * @brief Unittest for rd_fnv1a()
+ */
+int unittest_fnv1a(void) {
+ const char *short_unaligned = "1234";
+ const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs";
+ const char *keysToTest[] = {
+ "kafka",
+ "giberish123456789",
+ short_unaligned,
+ short_unaligned + 1,
+ short_unaligned + 2,
+ short_unaligned + 3,
+ unaligned,
+ unaligned + 1,
+ unaligned + 2,
+ unaligned + 3,
+ "",
+ NULL,
+ };
+
+ // Acquired via https://play.golang.org/p/vWIhw3zJINA
+ const int32_t golang_hashfnv_results[] = {
+ 0xd33c4e1, // kafka
+ 0x77a58295, // giberish123456789
+ 0x23bdd03, // short_unaligned
+ 0x2dea3cd2, // short_unaligned+1
+ 0x740fa83e, // short_unaligned+2
+ 0x310ca263, // short_unaligned+3
+ 0x65cbd69c, // unaligned
+ 0x6e49c79a, // unaligned+1
+ 0x69eed356, // unaligned+2
+ 0x6abcc023, // unaligned+3
+ 0x7ee3623b, // ""
+ 0x7ee3623b, // NULL
+ };
+
+ size_t i;
+ for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) {
+ uint32_t h = rd_fnv1a(
+ keysToTest[i], keysToTest[i] ? strlen(keysToTest[i]) : 0);
+ RD_UT_ASSERT((int32_t)h == golang_hashfnv_results[i],
+ "Calculated FNV-1a hash 0x%x for \"%s\", "
+ "expected 0x%x",
+ h, keysToTest[i], golang_hashfnv_results[i]);
+ }
+ RD_UT_PASS();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h
new file mode 100644
index 000000000..8df66b0d6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdfnv1a.h
@@ -0,0 +1,35 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RDFNV1A___H__
+#define __RDFNV1A___H__
+
+uint32_t rd_fnv1a(const void *key, size_t len);
+int unittest_fnv1a(void);
+
+#endif // __RDFNV1A___H__
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c
new file mode 100644
index 000000000..794bd9cc1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.c
@@ -0,0 +1,120 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdgz.h"
+
+#include <zlib.h>
+
+
+#define RD_GZ_CHUNK 262144
+
+void *rd_gz_decompress(const void *compressed,
+ int compressed_len,
+ uint64_t *decompressed_lenp) {
+ int pass = 1;
+ char *decompressed = NULL;
+
+ /* First pass (1): calculate decompressed size.
+ * (pass-1 is skipped if *decompressed_lenp is
+ * non-zero).
+ * Second pass (2): perform actual decompression.
+ */
+
+ if (*decompressed_lenp != 0LLU)
+ pass++;
+
+ for (; pass <= 2; pass++) {
+ z_stream strm = RD_ZERO_INIT;
+ char buf[512];
+ char *p;
+ int len;
+ int r;
+
+ if ((r = inflateInit2(&strm, 15 + 32)) != Z_OK)
+ goto fail;
+
+ strm.next_in = (void *)compressed;
+ strm.avail_in = compressed_len;
+
+ if (pass == 1) {
+ /* Use dummy output buffer */
+ p = buf;
+ len = sizeof(buf);
+ } else {
+ /* Use real output buffer */
+ p = decompressed;
+ len = (int)*decompressed_lenp;
+ }
+
+ do {
+ strm.next_out = (unsigned char *)p;
+ strm.avail_out = len;
+
+ r = inflate(&strm, Z_NO_FLUSH);
+ switch (r) {
+ case Z_STREAM_ERROR:
+ case Z_NEED_DICT:
+ case Z_DATA_ERROR:
+ case Z_MEM_ERROR:
+ inflateEnd(&strm);
+ goto fail;
+ }
+
+ if (pass == 2) {
+ /* Advance output pointer (in pass 2). */
+ p += len - strm.avail_out;
+ len -= len - strm.avail_out;
+ }
+
+ } while (strm.avail_out == 0 && r != Z_STREAM_END);
+
+
+ if (pass == 1) {
+ *decompressed_lenp = strm.total_out;
+ if (!(decompressed = rd_malloc(
+ (size_t)(*decompressed_lenp) + 1))) {
+ inflateEnd(&strm);
+ return NULL;
+ }
+ /* For convenience of the caller we nul-terminate
+ * the buffer. If it happens to be a string there
+ * is no need for extra copies. */
+ decompressed[*decompressed_lenp] = '\0';
+ }
+
+ inflateEnd(&strm);
+ }
+
+ return decompressed;
+
+fail:
+ if (decompressed)
+ rd_free(decompressed);
+ return NULL;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h
new file mode 100644
index 000000000..10d661cb3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdgz.h
@@ -0,0 +1,46 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDGZ_H_
+#define _RDGZ_H_
+
+/**
+ * Simple gzip decompression returning the inflated data
+ * in a malloced buffer.
+ * '*decompressed_lenp' must be 0 if the length of the uncompressed data
+ * is not known in which case it will be calculated.
+ * The returned buffer is nul-terminated (the actual allocated length
+ * is '*decompressed_lenp'+1.
+ *
+ * The decompressed length is returned in '*decompressed_lenp'.
+ */
+void *rd_gz_decompress(const void *compressed,
+ int compressed_len,
+ uint64_t *decompressed_lenp);
+
+#endif /* _RDGZ_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c
new file mode 100644
index 000000000..3f2b6758b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.c
@@ -0,0 +1,721 @@
+/*
+ * This license covers this C port of
+ * Coda Hale's Golang HdrHistogram https://github.com/codahale/hdrhistogram
+ * at revision 3a0bb77429bd3a61596f5e8a3172445844342120
+ *
+ * ----------------------------------------------------------------------------
+ *
+ * The MIT License (MIT)
+ *
+ * Copyright (c) 2014 Coda Hale
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to deal
+ * in the Software without restriction, including without limitation the rights
+ * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ * copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+ * THE SOFTWARE.
+ */
+
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Minimal C Hdr_Histogram based on Coda Hale's Golang implementation.
+ * https://github.com/codahale/hdr_histogram
+ *
+ *
+ * A Histogram is a lossy data structure used to record the distribution of
+ * non-normally distributed data (like latency) with a high degree of accuracy
+ * and a bounded degree of precision.
+ *
+ *
+ */
+
+#include "rd.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <math.h>
+
+#include "rdhdrhistogram.h"
+#include "rdunittest.h"
+#include "rdfloat.h"
+
+void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr) {
+ rd_free(hdr);
+}
+
+rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue,
+ int64_t maxValue,
+ int significantFigures) {
+ rd_hdr_histogram_t *hdr;
+ int64_t largestValueWithSingleUnitResolution;
+ int32_t subBucketCountMagnitude;
+ int32_t subBucketHalfCountMagnitude;
+ int32_t unitMagnitude;
+ int32_t subBucketCount;
+ int32_t subBucketHalfCount;
+ int64_t subBucketMask;
+ int64_t smallestUntrackableValue;
+ int32_t bucketsNeeded = 1;
+ int32_t bucketCount;
+ int32_t countsLen;
+
+ if (significantFigures < 1 || significantFigures > 5)
+ return NULL;
+
+ largestValueWithSingleUnitResolution =
+ (int64_t)(2.0 * pow(10.0, (double)significantFigures));
+
+ subBucketCountMagnitude =
+ (int32_t)ceil(log2((double)largestValueWithSingleUnitResolution));
+
+ subBucketHalfCountMagnitude = RD_MAX(subBucketCountMagnitude, 1) - 1;
+
+ unitMagnitude = (int32_t)RD_MAX(floor(log2((double)minValue)), 0);
+
+ subBucketCount =
+ (int32_t)pow(2, (double)subBucketHalfCountMagnitude + 1.0);
+
+ subBucketHalfCount = subBucketCount / 2;
+
+ subBucketMask = (int64_t)(subBucketCount - 1) << unitMagnitude;
+
+ /* Determine exponent range needed to support the trackable
+ * value with no overflow: */
+ smallestUntrackableValue = (int64_t)subBucketCount << unitMagnitude;
+ while (smallestUntrackableValue < maxValue) {
+ smallestUntrackableValue <<= 1;
+ bucketsNeeded++;
+ }
+
+ bucketCount = bucketsNeeded;
+ countsLen = (bucketCount + 1) * (subBucketCount / 2);
+ hdr = rd_calloc(1, sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen));
+ hdr->counts = (int64_t *)(hdr + 1);
+ hdr->allocatedSize = sizeof(*hdr) + (sizeof(*hdr->counts) * countsLen);
+
+ hdr->lowestTrackableValue = minValue;
+ hdr->highestTrackableValue = maxValue;
+ hdr->unitMagnitude = unitMagnitude;
+ hdr->significantFigures = significantFigures;
+ hdr->subBucketHalfCountMagnitude = subBucketHalfCountMagnitude;
+ hdr->subBucketHalfCount = subBucketHalfCount;
+ hdr->subBucketMask = subBucketMask;
+ hdr->subBucketCount = subBucketCount;
+ hdr->bucketCount = bucketCount;
+ hdr->countsLen = countsLen;
+ hdr->totalCount = 0;
+ hdr->lowestOutOfRange = minValue;
+ hdr->highestOutOfRange = maxValue;
+
+ return hdr;
+}
+
+/**
+ * @brief Deletes all recorded values and resets histogram.
+ */
+void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr) {
+ int32_t i;
+ hdr->totalCount = 0;
+ for (i = 0; i < hdr->countsLen; i++)
+ hdr->counts[i] = 0;
+}
+
+
+
+static RD_INLINE int32_t rd_hdr_countsIndex(const rd_hdr_histogram_t *hdr,
+ int32_t bucketIdx,
+ int32_t subBucketIdx) {
+ int32_t bucketBaseIdx = (bucketIdx + 1)
+ << hdr->subBucketHalfCountMagnitude;
+ int32_t offsetInBucket = subBucketIdx - hdr->subBucketHalfCount;
+ return bucketBaseIdx + offsetInBucket;
+}
+
+static RD_INLINE int64_t rd_hdr_getCountAtIndex(const rd_hdr_histogram_t *hdr,
+ int32_t bucketIdx,
+ int32_t subBucketIdx) {
+ return hdr->counts[rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx)];
+}
+
+
+static RD_INLINE int64_t bitLen(int64_t x) {
+ int64_t n = 0;
+ for (; x >= 0x8000; x >>= 16)
+ n += 16;
+ if (x >= 0x80) {
+ x >>= 8;
+ n += 8;
+ }
+ if (x >= 0x8) {
+ x >>= 4;
+ n += 4;
+ }
+ if (x >= 0x2) {
+ x >>= 2;
+ n += 2;
+ }
+ if (x >= 0x1)
+ n++;
+ return n;
+}
+
+
+static RD_INLINE int32_t rd_hdr_getBucketIndex(const rd_hdr_histogram_t *hdr,
+ int64_t v) {
+ int64_t pow2Ceiling = bitLen(v | hdr->subBucketMask);
+ return (int32_t)(pow2Ceiling - (int64_t)hdr->unitMagnitude -
+ (int64_t)(hdr->subBucketHalfCountMagnitude + 1));
+}
+
+static RD_INLINE int32_t rd_hdr_getSubBucketIdx(const rd_hdr_histogram_t *hdr,
+ int64_t v,
+ int32_t idx) {
+ return (int32_t)(v >> ((int64_t)idx + (int64_t)hdr->unitMagnitude));
+}
+
+static RD_INLINE int64_t rd_hdr_valueFromIndex(const rd_hdr_histogram_t *hdr,
+ int32_t bucketIdx,
+ int32_t subBucketIdx) {
+ return (int64_t)subBucketIdx
+ << ((int64_t)bucketIdx + hdr->unitMagnitude);
+}
+
+static RD_INLINE int64_t
+rd_hdr_sizeOfEquivalentValueRange(const rd_hdr_histogram_t *hdr, int64_t v) {
+ int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
+ int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
+ int32_t adjustedBucket = bucketIdx;
+ if (unlikely(subBucketIdx >= hdr->subBucketCount))
+ adjustedBucket++;
+ return (int64_t)1 << (hdr->unitMagnitude + (int64_t)adjustedBucket);
+}
+
+static RD_INLINE int64_t
+rd_hdr_lowestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
+ int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
+ int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
+ return rd_hdr_valueFromIndex(hdr, bucketIdx, subBucketIdx);
+}
+
+
+static RD_INLINE int64_t
+rd_hdr_nextNonEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
+ return rd_hdr_lowestEquivalentValue(hdr, v) +
+ rd_hdr_sizeOfEquivalentValueRange(hdr, v);
+}
+
+
+static RD_INLINE int64_t
+rd_hdr_highestEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
+ return rd_hdr_nextNonEquivalentValue(hdr, v) - 1;
+}
+
+static RD_INLINE int64_t
+rd_hdr_medianEquivalentValue(const rd_hdr_histogram_t *hdr, int64_t v) {
+ return rd_hdr_lowestEquivalentValue(hdr, v) +
+ (rd_hdr_sizeOfEquivalentValueRange(hdr, v) >> 1);
+}
+
+
+static RD_INLINE int32_t rd_hdr_countsIndexFor(const rd_hdr_histogram_t *hdr,
+ int64_t v) {
+ int32_t bucketIdx = rd_hdr_getBucketIndex(hdr, v);
+ int32_t subBucketIdx = rd_hdr_getSubBucketIdx(hdr, v, bucketIdx);
+ return rd_hdr_countsIndex(hdr, bucketIdx, subBucketIdx);
+}
+
+
+
+typedef struct rd_hdr_iter_s {
+ const rd_hdr_histogram_t *hdr;
+ int bucketIdx;
+ int subBucketIdx;
+ int64_t countAtIdx;
+ int64_t countToIdx;
+ int64_t valueFromIdx;
+ int64_t highestEquivalentValue;
+} rd_hdr_iter_t;
+
+#define RD_HDR_ITER_INIT(hdr) \
+ { .hdr = hdr, .subBucketIdx = -1 }
+
+static int rd_hdr_iter_next(rd_hdr_iter_t *it) {
+ const rd_hdr_histogram_t *hdr = it->hdr;
+
+ if (unlikely(it->countToIdx >= hdr->totalCount))
+ return 0;
+
+ it->subBucketIdx++;
+ if (unlikely(it->subBucketIdx >= hdr->subBucketCount)) {
+ it->subBucketIdx = hdr->subBucketHalfCount;
+ it->bucketIdx++;
+ }
+
+ if (unlikely(it->bucketIdx >= hdr->bucketCount))
+ return 0;
+
+ it->countAtIdx =
+ rd_hdr_getCountAtIndex(hdr, it->bucketIdx, it->subBucketIdx);
+ it->countToIdx += it->countAtIdx;
+ it->valueFromIdx =
+ rd_hdr_valueFromIndex(hdr, it->bucketIdx, it->subBucketIdx);
+ it->highestEquivalentValue =
+ rd_hdr_highestEquivalentValue(hdr, it->valueFromIdx);
+
+ return 1;
+}
+
+
+double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr) {
+ double mean;
+ double geometricDevTotal = 0.0;
+ rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
+
+ if (hdr->totalCount == 0)
+ return 0;
+
+ mean = rd_hdr_histogram_mean(hdr);
+
+
+ while (rd_hdr_iter_next(&it)) {
+ double dev;
+
+ if (it.countAtIdx == 0)
+ continue;
+
+ dev =
+ (double)rd_hdr_medianEquivalentValue(hdr, it.valueFromIdx) -
+ mean;
+ geometricDevTotal += (dev * dev) * (double)it.countAtIdx;
+ }
+
+ return sqrt(geometricDevTotal / (double)hdr->totalCount);
+}
+
+
+/**
+ * @returns the approximate maximum recorded value.
+ */
+int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr) {
+ int64_t vmax = 0;
+ rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
+
+ while (rd_hdr_iter_next(&it)) {
+ if (it.countAtIdx != 0)
+ vmax = it.highestEquivalentValue;
+ }
+ return rd_hdr_highestEquivalentValue(hdr, vmax);
+}
+
+/**
+ * @returns the approximate minimum recorded value.
+ */
+int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr) {
+ int64_t vmin = 0;
+ rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
+
+ while (rd_hdr_iter_next(&it)) {
+ if (it.countAtIdx != 0 && vmin == 0) {
+ vmin = it.highestEquivalentValue;
+ break;
+ }
+ }
+ return rd_hdr_lowestEquivalentValue(hdr, vmin);
+}
+
+/**
+ * @returns the approximate arithmetic mean of the recorded values.
+ */
+double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr) {
+ int64_t total = 0;
+ rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
+
+ if (hdr->totalCount == 0)
+ return 0.0;
+
+ while (rd_hdr_iter_next(&it)) {
+ if (it.countAtIdx != 0)
+ total += it.countAtIdx * rd_hdr_medianEquivalentValue(
+ hdr, it.valueFromIdx);
+ }
+ return (double)total / (double)hdr->totalCount;
+}
+
+
+
+/**
+ * @brief Records the given value.
+ *
+ * @returns 1 if value was recorded or 0 if value is out of range.
+ */
+
+int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v) {
+ int32_t idx = rd_hdr_countsIndexFor(hdr, v);
+
+ if (idx < 0 || hdr->countsLen <= idx) {
+ hdr->outOfRangeCount++;
+ if (v > hdr->highestOutOfRange)
+ hdr->highestOutOfRange = v;
+ if (v < hdr->lowestOutOfRange)
+ hdr->lowestOutOfRange = v;
+ return 0;
+ }
+
+ hdr->counts[idx]++;
+ hdr->totalCount++;
+
+ return 1;
+}
+
+
+/**
+ * @returns the recorded value at the given quantile (0..100).
+ */
+int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q) {
+ int64_t total = 0;
+ int64_t countAtPercentile;
+ rd_hdr_iter_t it = RD_HDR_ITER_INIT(hdr);
+
+ if (q > 100.0)
+ q = 100.0;
+
+ countAtPercentile =
+ (int64_t)(((q / 100.0) * (double)hdr->totalCount) + 0.5);
+
+ while (rd_hdr_iter_next(&it)) {
+ total += it.countAtIdx;
+ if (total >= countAtPercentile)
+ return rd_hdr_highestEquivalentValue(hdr,
+ it.valueFromIdx);
+ }
+
+ return 0;
+}
+
+
+
+/**
+ * @name Unit tests
+ * @{
+ *
+ *
+ *
+ */
+
+/**
+ * @returns 0 on success or 1 on failure.
+ */
+static int ut_high_sigfig(void) {
+ rd_hdr_histogram_t *hdr;
+ const int64_t input[] = {
+ 459876, 669187, 711612, 816326, 931423,
+ 1033197, 1131895, 2477317, 3964974, 12718782,
+ };
+ size_t i;
+ int64_t v;
+ const int64_t exp = 1048575;
+
+ hdr = rd_hdr_histogram_new(459876, 12718782, 5);
+ for (i = 0; i < RD_ARRAYSIZE(input); i++) {
+ /* Ignore errors (some should fail) */
+ rd_hdr_histogram_record(hdr, input[i]);
+ }
+
+ v = rd_hdr_histogram_quantile(hdr, 50);
+ RD_UT_ASSERT(v == exp, "Median is %" PRId64 ", expected %" PRId64, v,
+ exp);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+static int ut_quantile(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
+ size_t i;
+ const struct {
+ double q;
+ int64_t v;
+ } exp[] = {
+ {50, 500223}, {75, 750079}, {90, 900095}, {95, 950271},
+ {99, 990207}, {99.9, 999423}, {99.99, 999935},
+ };
+
+ for (i = 0; i < 1000000; i++) {
+ int r = rd_hdr_histogram_record(hdr, (int64_t)i);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
+ }
+
+ for (i = 0; i < RD_ARRAYSIZE(exp); i++) {
+ int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q);
+ RD_UT_ASSERT(v == exp[i].v,
+ "P%.2f is %" PRId64 ", expected %" PRId64,
+ exp[i].q, v, exp[i].v);
+ }
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+static int ut_mean(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
+ size_t i;
+ const double exp = 500000.013312;
+ double v;
+
+ for (i = 0; i < 1000000; i++) {
+ int r = rd_hdr_histogram_record(hdr, (int64_t)i);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
+ }
+
+ v = rd_hdr_histogram_mean(hdr);
+ RD_UT_ASSERT(rd_dbl_eq0(v, exp, 0.0000001), "Mean is %f, expected %f",
+ v, exp);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+
+static int ut_stddev(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
+ size_t i;
+ const double exp = 288675.140368;
+ const double epsilon = 0.000001;
+ double v;
+
+ for (i = 0; i < 1000000; i++) {
+ int r = rd_hdr_histogram_record(hdr, (int64_t)i);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", (int64_t)i);
+ }
+
+ v = rd_hdr_histogram_stddev(hdr);
+ RD_UT_ASSERT(rd_dbl_eq0(v, exp, epsilon),
+ "StdDev is %.6f, expected %.6f: diff %.6f vs epsilon %.6f",
+ v, exp, fabs(v - exp), epsilon);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+static int ut_totalcount(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
+ int64_t i;
+
+ for (i = 0; i < 1000000; i++) {
+ int64_t v;
+ int r = rd_hdr_histogram_record(hdr, i);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
+
+ v = hdr->totalCount;
+ RD_UT_ASSERT(v == i + 1,
+ "total_count is %" PRId64 ", expected %" PRId64, v,
+ i + 1);
+ }
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+
+static int ut_max(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
+ int64_t i, v;
+ const int64_t exp = 1000447;
+
+ for (i = 0; i < 1000000; i++) {
+ int r = rd_hdr_histogram_record(hdr, i);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
+ }
+
+ v = rd_hdr_histogram_max(hdr);
+ RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+static int ut_min(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
+ int64_t i, v;
+ const int64_t exp = 0;
+
+ for (i = 0; i < 1000000; i++) {
+ int r = rd_hdr_histogram_record(hdr, i);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
+ }
+
+ v = rd_hdr_histogram_min(hdr);
+ RD_UT_ASSERT(v == exp, "Min is %" PRId64 ", expected %" PRId64, v, exp);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+static int ut_reset(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10000000, 3);
+ int64_t i, v;
+ const int64_t exp = 0;
+
+ for (i = 0; i < 1000000; i++) {
+ int r = rd_hdr_histogram_record(hdr, i);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", i);
+ }
+
+ rd_hdr_histogram_reset(hdr);
+
+ v = rd_hdr_histogram_max(hdr);
+ RD_UT_ASSERT(v == exp, "Max is %" PRId64 ", expected %" PRId64, v, exp);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+
+static int ut_nan(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 100000, 3);
+ double v;
+
+ v = rd_hdr_histogram_mean(hdr);
+ RD_UT_ASSERT(!isnan(v), "Mean is %f, expected NaN", v);
+ v = rd_hdr_histogram_stddev(hdr);
+ RD_UT_ASSERT(!isnan(v), "StdDev is %f, expected NaN", v);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+
+static int ut_sigfigs(void) {
+ int sigfigs;
+
+ for (sigfigs = 1; sigfigs <= 5; sigfigs++) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(1, 10, sigfigs);
+ RD_UT_ASSERT(hdr->significantFigures == sigfigs,
+ "Significant figures is %" PRId64 ", expected %d",
+ hdr->significantFigures, sigfigs);
+ rd_hdr_histogram_destroy(hdr);
+ }
+
+ RD_UT_PASS();
+}
+
+static int ut_minmax_trackable(void) {
+ const int64_t minval = 2;
+ const int64_t maxval = 11;
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(minval, maxval, 3);
+
+ RD_UT_ASSERT(hdr->lowestTrackableValue == minval,
+ "lowestTrackableValue is %" PRId64 ", expected %" PRId64,
+ hdr->lowestTrackableValue, minval);
+ RD_UT_ASSERT(hdr->highestTrackableValue == maxval,
+ "highestTrackableValue is %" PRId64 ", expected %" PRId64,
+ hdr->highestTrackableValue, maxval);
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+
+static int ut_unitmagnitude_overflow(void) {
+ rd_hdr_histogram_t *hdr = rd_hdr_histogram_new(0, 200, 4);
+ int r = rd_hdr_histogram_record(hdr, 11);
+ RD_UT_ASSERT(r, "record(11) failed\n");
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+static int ut_subbucketmask_overflow(void) {
+ rd_hdr_histogram_t *hdr;
+ const int64_t input[] = {(int64_t)1e8, (int64_t)2e7, (int64_t)3e7};
+ const struct {
+ double q;
+ int64_t v;
+ } exp[] = {
+ {50, 33554431},
+ {83.33, 33554431},
+ {83.34, 100663295},
+ {99, 100663295},
+ };
+ size_t i;
+
+ hdr = rd_hdr_histogram_new((int64_t)2e7, (int64_t)1e8, 5);
+
+ for (i = 0; i < RD_ARRAYSIZE(input); i++) {
+ /* Ignore errors (some should fail) */
+ int r = rd_hdr_histogram_record(hdr, input[i]);
+ RD_UT_ASSERT(r, "record(%" PRId64 ") failed\n", input[i]);
+ }
+
+ for (i = 0; i < RD_ARRAYSIZE(exp); i++) {
+ int64_t v = rd_hdr_histogram_quantile(hdr, exp[i].q);
+ RD_UT_ASSERT(v == exp[i].v,
+ "P%.2f is %" PRId64 ", expected %" PRId64,
+ exp[i].q, v, exp[i].v);
+ }
+
+ rd_hdr_histogram_destroy(hdr);
+ RD_UT_PASS();
+}
+
+
+int unittest_rdhdrhistogram(void) {
+ int fails = 0;
+
+ fails += ut_high_sigfig();
+ fails += ut_quantile();
+ fails += ut_mean();
+ fails += ut_stddev();
+ fails += ut_totalcount();
+ fails += ut_max();
+ fails += ut_min();
+ fails += ut_reset();
+ fails += ut_nan();
+ fails += ut_sigfigs();
+ fails += ut_minmax_trackable();
+ fails += ut_unitmagnitude_overflow();
+ fails += ut_subbucketmask_overflow();
+
+ return fails;
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h
new file mode 100644
index 000000000..868614b7b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdhdrhistogram.h
@@ -0,0 +1,87 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDHDR_HISTOGRAM_H_
+#define _RDHDR_HISTOGRAM_H_
+
+#include <inttypes.h>
+
+
+typedef struct rd_hdr_histogram_s {
+ int64_t lowestTrackableValue;
+ int64_t highestTrackableValue;
+ int64_t unitMagnitude;
+ int64_t significantFigures;
+ int32_t subBucketHalfCountMagnitude;
+ int32_t subBucketHalfCount;
+ int64_t subBucketMask;
+ int32_t subBucketCount;
+ int32_t bucketCount;
+ int32_t countsLen;
+ int64_t totalCount;
+ int64_t *counts;
+ int64_t outOfRangeCount; /**< Number of rejected records due to
+ * value being out of range. */
+ int64_t lowestOutOfRange; /**< Lowest value that was out of range.
+ * Initialized to lowestTrackableValue */
+ int64_t highestOutOfRange; /**< Highest value that was out of range.
+ * Initialized to highestTrackableValue */
+ int32_t allocatedSize; /**< Allocated size of histogram, for
+ * sigfigs tuning. */
+} rd_hdr_histogram_t;
+
+
+#endif /* !_RDHDR_HISTOGRAM_H_ */
+
+
+void rd_hdr_histogram_destroy(rd_hdr_histogram_t *hdr);
+
+/**
+ * @brief Create a new Hdr_Histogram.
+ *
+ * @param significant_figures must be between 1..5
+ *
+ * @returns a newly allocated histogram, or NULL on error.
+ *
+ * @sa rd_hdr_histogram_destroy()
+ */
+rd_hdr_histogram_t *rd_hdr_histogram_new(int64_t minValue,
+ int64_t maxValue,
+ int significantFigures);
+
+void rd_hdr_histogram_reset(rd_hdr_histogram_t *hdr);
+
+int rd_hdr_histogram_record(rd_hdr_histogram_t *hdr, int64_t v);
+
+double rd_hdr_histogram_stddev(rd_hdr_histogram_t *hdr);
+double rd_hdr_histogram_mean(const rd_hdr_histogram_t *hdr);
+int64_t rd_hdr_histogram_max(const rd_hdr_histogram_t *hdr);
+int64_t rd_hdr_histogram_min(const rd_hdr_histogram_t *hdr);
+int64_t rd_hdr_histogram_quantile(const rd_hdr_histogram_t *hdr, double q);
+
+
+int unittest_rdhdrhistogram(void);
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c
new file mode 100644
index 000000000..7457a7fbe
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.c
@@ -0,0 +1,511 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2021 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name HTTP client
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdunittest.h"
+
+#include <stdarg.h>
+
+#include <curl/curl.h>
+#include "rdhttp.h"
+
+/** Maximum response size, increase as necessary. */
+#define RD_HTTP_RESPONSE_SIZE_MAX 1024 * 1024 * 500 /* 500kb */
+
+
+void rd_http_error_destroy(rd_http_error_t *herr) {
+ rd_free(herr);
+}
+
+static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...)
+ RD_FORMAT(printf, 2, 3);
+static rd_http_error_t *rd_http_error_new(int code, const char *fmt, ...) {
+ size_t len = 0;
+ rd_http_error_t *herr;
+ va_list ap;
+
+ va_start(ap, fmt);
+
+ if (fmt && *fmt) {
+ va_list ap2;
+ va_copy(ap2, ap);
+ len = rd_vsnprintf(NULL, 0, fmt, ap2);
+ va_end(ap2);
+ }
+
+ /* Use single allocation for both herr and the error string */
+ herr = rd_malloc(sizeof(*herr) + len + 1);
+ herr->code = code;
+ herr->errstr = herr->data;
+
+ if (len > 0)
+ rd_vsnprintf(herr->errstr, len + 1, fmt, ap);
+ else
+ herr->errstr[0] = '\0';
+
+ va_end(ap);
+
+ return herr;
+}
+
+/**
+ * @brief Same as rd_http_error_new() but reads the error string from the
+ * provided buffer.
+ */
+static rd_http_error_t *rd_http_error_new_from_buf(int code,
+ const rd_buf_t *rbuf) {
+ rd_http_error_t *herr;
+ rd_slice_t slice;
+ size_t len = rd_buf_len(rbuf);
+
+ if (len == 0)
+ return rd_http_error_new(
+ code, "Server did not provide an error string");
+
+
+ /* Use single allocation for both herr and the error string */
+ herr = rd_malloc(sizeof(*herr) + len + 1);
+ herr->code = code;
+ herr->errstr = herr->data;
+ rd_slice_init_full(&slice, rbuf);
+ rd_slice_read(&slice, herr->errstr, len);
+ herr->errstr[len] = '\0';
+
+ return herr;
+}
+
+void rd_http_req_destroy(rd_http_req_t *hreq) {
+ RD_IF_FREE(hreq->hreq_curl, curl_easy_cleanup);
+ RD_IF_FREE(hreq->hreq_buf, rd_buf_destroy_free);
+}
+
+
+/**
+ * @brief Curl writefunction. Writes the bytes passed from curl
+ * to the hreq's buffer.
+ */
+static size_t
+rd_http_req_write_cb(char *ptr, size_t size, size_t nmemb, void *userdata) {
+ rd_http_req_t *hreq = (rd_http_req_t *)userdata;
+
+ if (unlikely(rd_buf_len(hreq->hreq_buf) + nmemb >
+ RD_HTTP_RESPONSE_SIZE_MAX))
+ return 0; /* FIXME: Set some overflow flag or rely on curl? */
+
+ rd_buf_write(hreq->hreq_buf, ptr, nmemb);
+
+ return nmemb;
+}
+
+rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url) {
+
+ memset(hreq, 0, sizeof(*hreq));
+
+ hreq->hreq_curl = curl_easy_init();
+ if (!hreq->hreq_curl)
+ return rd_http_error_new(-1, "Failed to create curl handle");
+
+ hreq->hreq_buf = rd_buf_new(1, 1024);
+
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_URL, url);
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_PROTOCOLS,
+ CURLPROTO_HTTP | CURLPROTO_HTTPS);
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_MAXREDIRS, 16);
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_TIMEOUT, 30);
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_ERRORBUFFER,
+ hreq->hreq_curl_errstr);
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_NOSIGNAL, 1);
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEFUNCTION,
+ rd_http_req_write_cb);
+ curl_easy_setopt(hreq->hreq_curl, CURLOPT_WRITEDATA, (void *)hreq);
+
+ return NULL;
+}
+
+/**
+ * @brief Synchronously (blockingly) perform the HTTP operation.
+ */
+rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq) {
+ CURLcode res;
+ long code = 0;
+
+ res = curl_easy_perform(hreq->hreq_curl);
+ if (unlikely(res != CURLE_OK))
+ return rd_http_error_new(-1, "%s", hreq->hreq_curl_errstr);
+
+ curl_easy_getinfo(hreq->hreq_curl, CURLINFO_RESPONSE_CODE, &code);
+ hreq->hreq_code = (int)code;
+ if (hreq->hreq_code >= 400)
+ return rd_http_error_new_from_buf(hreq->hreq_code,
+ hreq->hreq_buf);
+
+ return NULL;
+}
+
+
+int rd_http_req_get_code(const rd_http_req_t *hreq) {
+ return hreq->hreq_code;
+}
+
+const char *rd_http_req_get_content_type(rd_http_req_t *hreq) {
+ const char *content_type = NULL;
+
+ if (curl_easy_getinfo(hreq->hreq_curl, CURLINFO_CONTENT_TYPE,
+ &content_type))
+ return NULL;
+
+ return content_type;
+}
+
+
+/**
+ * @brief Perform a blocking HTTP(S) request to \p url.
+ *
+ * Returns the response (even if there's a HTTP error code returned)
+ * in \p *rbufp.
+ *
+ * Returns NULL on success (HTTP response code < 400), or an error
+ * object on transport or HTTP error - this error object must be destroyed
+ * by calling rd_http_error_destroy(). In case of HTTP error the \p *rbufp
+ * may be filled with the error response.
+ */
+rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp) {
+ rd_http_req_t hreq;
+ rd_http_error_t *herr;
+
+ *rbufp = NULL;
+
+ herr = rd_http_req_init(&hreq, url);
+ if (unlikely(herr != NULL))
+ return herr;
+
+ herr = rd_http_req_perform_sync(&hreq);
+ if (herr) {
+ rd_http_req_destroy(&hreq);
+ return herr;
+ }
+
+ *rbufp = hreq.hreq_buf;
+ hreq.hreq_buf = NULL;
+
+ return NULL;
+}
+
+
+/**
+ * @brief Extract the JSON object from \p hreq and return it in \p *jsonp.
+ *
+ * @returns Returns NULL on success, or an JSON parsing error - this
+ * error object must be destroyed by calling rd_http_error_destroy().
+ */
+rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp) {
+ size_t len;
+ char *raw_json;
+ const char *end = NULL;
+ rd_slice_t slice;
+ rd_http_error_t *herr = NULL;
+
+ /* cJSON requires the entire input to parse in contiguous memory. */
+ rd_slice_init_full(&slice, hreq->hreq_buf);
+ len = rd_buf_len(hreq->hreq_buf);
+
+ raw_json = rd_malloc(len + 1);
+ rd_slice_read(&slice, raw_json, len);
+ raw_json[len] = '\0';
+
+ /* Parse JSON */
+ *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0);
+
+ if (!*jsonp)
+ herr = rd_http_error_new(hreq->hreq_code,
+ "Failed to parse JSON response "
+ "at %" PRIusz "/%" PRIusz,
+ (size_t)(end - raw_json), len);
+ rd_free(raw_json);
+ return herr;
+}
+
+
+/**
+ * @brief Check if the error returned from HTTP(S) is temporary or not.
+ *
+ * @returns If the \p error_code is temporary, return rd_true,
+ * otherwise return rd_false.
+ *
+ * @locality Any thread.
+ */
+static rd_bool_t rd_http_is_failure_temporary(int error_code) {
+ switch (error_code) {
+ case 408: /**< Request timeout */
+ case 425: /**< Too early */
+ case 500: /**< Internal server error */
+ case 502: /**< Bad gateway */
+ case 503: /**< Service unavailable */
+ case 504: /**< Gateway timeout */
+ return rd_true;
+
+ default:
+ return rd_false;
+ }
+}
+
+
+/**
+ * @brief Perform a blocking HTTP(S) request to \p url with
+ * HTTP(S) headers and data with \p timeout_s.
+ * If the HTTP(S) request fails, will retry another \p retries times
+ * with multiplying backoff \p retry_ms.
+ *
+ * @returns The result will be returned in \p *jsonp.
+ * Returns NULL on success (HTTP response code < 400), or an error
+ * object on transport, HTTP error or a JSON parsing error - this
+ * error object must be destroyed by calling rd_http_error_destroy().
+ *
+ * @locality Any thread.
+ */
+rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
+ const char *url,
+ const struct curl_slist *headers,
+ const char *post_fields,
+ size_t post_fields_size,
+ int timeout_s,
+ int retries,
+ int retry_ms,
+ cJSON **jsonp) {
+ rd_http_error_t *herr;
+ rd_http_req_t hreq;
+ int i;
+ size_t len;
+ const char *content_type;
+
+ herr = rd_http_req_init(&hreq, url);
+ if (unlikely(herr != NULL))
+ return herr;
+
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_HTTPHEADER, headers);
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_TIMEOUT, timeout_s);
+
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDSIZE,
+ post_fields_size);
+ curl_easy_setopt(hreq.hreq_curl, CURLOPT_POSTFIELDS, post_fields);
+
+ for (i = 0; i <= retries; i++) {
+ if (rd_kafka_terminating(rk)) {
+ rd_http_req_destroy(&hreq);
+ return rd_http_error_new(-1, "Terminating");
+ }
+
+ herr = rd_http_req_perform_sync(&hreq);
+ len = rd_buf_len(hreq.hreq_buf);
+
+ if (!herr) {
+ if (len > 0)
+ break; /* Success */
+ /* Empty response */
+ rd_http_req_destroy(&hreq);
+ return NULL;
+ }
+ /* Retry if HTTP(S) request returns temporary error and there
+ * are remaining retries, else fail. */
+ if (i == retries || !rd_http_is_failure_temporary(herr->code)) {
+ rd_http_req_destroy(&hreq);
+ return herr;
+ }
+
+ /* Retry */
+ rd_http_error_destroy(herr);
+ rd_usleep(retry_ms * 1000 * (i + 1), &rk->rk_terminate);
+ }
+
+ content_type = rd_http_req_get_content_type(&hreq);
+
+ if (!content_type || rd_strncasecmp(content_type, "application/json",
+ strlen("application/json"))) {
+ if (!herr)
+ herr = rd_http_error_new(
+ hreq.hreq_code, "Response is not JSON encoded: %s",
+ content_type ? content_type : "(n/a)");
+ rd_http_req_destroy(&hreq);
+ return herr;
+ }
+
+ herr = rd_http_parse_json(&hreq, jsonp);
+
+ rd_http_req_destroy(&hreq);
+
+ return herr;
+}
+
+
+/**
+ * @brief Same as rd_http_get() but requires a JSON response.
+ * The response is parsed and a JSON object is returned in \p *jsonp.
+ *
+ * Same error semantics as rd_http_get().
+ */
+rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp) {
+ rd_http_req_t hreq;
+ rd_http_error_t *herr;
+ rd_slice_t slice;
+ size_t len;
+ const char *content_type;
+ char *raw_json;
+ const char *end;
+
+ *jsonp = NULL;
+
+ herr = rd_http_req_init(&hreq, url);
+ if (unlikely(herr != NULL))
+ return herr;
+
+ // FIXME: send Accept: json.. header?
+
+ herr = rd_http_req_perform_sync(&hreq);
+ len = rd_buf_len(hreq.hreq_buf);
+ if (herr && len == 0) {
+ rd_http_req_destroy(&hreq);
+ return herr;
+ }
+
+ if (len == 0) {
+ /* Empty response: create empty JSON object */
+ *jsonp = cJSON_CreateObject();
+ rd_http_req_destroy(&hreq);
+ return NULL;
+ }
+
+ content_type = rd_http_req_get_content_type(&hreq);
+
+ if (!content_type || rd_strncasecmp(content_type, "application/json",
+ strlen("application/json"))) {
+ if (!herr)
+ herr = rd_http_error_new(
+ hreq.hreq_code, "Response is not JSON encoded: %s",
+ content_type ? content_type : "(n/a)");
+ rd_http_req_destroy(&hreq);
+ return herr;
+ }
+
+ /* cJSON requires the entire input to parse in contiguous memory. */
+ rd_slice_init_full(&slice, hreq.hreq_buf);
+ raw_json = rd_malloc(len + 1);
+ rd_slice_read(&slice, raw_json, len);
+ raw_json[len] = '\0';
+
+ /* Parse JSON */
+ end = NULL;
+ *jsonp = cJSON_ParseWithOpts(raw_json, &end, 0);
+ if (!*jsonp && !herr)
+ herr = rd_http_error_new(hreq.hreq_code,
+ "Failed to parse JSON response "
+ "at %" PRIusz "/%" PRIusz,
+ (size_t)(end - raw_json), len);
+
+ rd_free(raw_json);
+ rd_http_req_destroy(&hreq);
+
+ return herr;
+}
+
+
+void rd_http_global_init(void) {
+ curl_global_init(CURL_GLOBAL_DEFAULT);
+}
+
+
+/**
+ * @brief Unittest. Requires a (local) webserver to be set with env var
+ * RD_UT_HTTP_URL=http://localhost:1234/some-path
+ *
+ * This server must return a JSON object or array containing at least one
+ * object on the main URL with a 2xx response code,
+ * and 4xx response on $RD_UT_HTTP_URL/error (with whatever type of body).
+ */
+
+int unittest_http(void) {
+ const char *base_url = rd_getenv("RD_UT_HTTP_URL", NULL);
+ char *error_url;
+ size_t error_url_size;
+ cJSON *json, *jval;
+ rd_http_error_t *herr;
+ rd_bool_t empty;
+
+ if (!base_url || !*base_url)
+ RD_UT_SKIP("RD_UT_HTTP_URL environment variable not set");
+
+ RD_UT_BEGIN();
+
+ error_url_size = strlen(base_url) + strlen("/error") + 1;
+ error_url = rd_alloca(error_url_size);
+ rd_snprintf(error_url, error_url_size, "%s/error", base_url);
+
+ /* Try the base url first, parse its JSON and extract a key-value. */
+ json = NULL;
+ herr = rd_http_get_json(base_url, &json);
+ RD_UT_ASSERT(!herr, "Expected get_json(%s) to succeed, got: %s",
+ base_url, herr->errstr);
+
+ empty = rd_true;
+ cJSON_ArrayForEach(jval, json) {
+ empty = rd_false;
+ break;
+ }
+ RD_UT_ASSERT(!empty, "Expected non-empty JSON response from %s",
+ base_url);
+ RD_UT_SAY(
+ "URL %s returned no error and a non-empty "
+ "JSON object/array as expected",
+ base_url);
+ cJSON_Delete(json);
+
+
+ /* Try the error URL, verify error code. */
+ json = NULL;
+ herr = rd_http_get_json(error_url, &json);
+ RD_UT_ASSERT(herr != NULL, "Expected get_json(%s) to fail", error_url);
+ RD_UT_ASSERT(herr->code >= 400,
+ "Expected get_json(%s) error code >= "
+ "400, got %d",
+ error_url, herr->code);
+ RD_UT_SAY(
+ "Error URL %s returned code %d, errstr \"%s\" "
+ "and %s JSON object as expected",
+ error_url, herr->code, herr->errstr, json ? "a" : "no");
+ /* Check if there's a JSON document returned */
+ if (json)
+ cJSON_Delete(json);
+ rd_http_error_destroy(herr);
+
+ RD_UT_PASS();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h
new file mode 100644
index 000000000..80512e5ac
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdhttp.h
@@ -0,0 +1,83 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2021 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDHTTP_H_
+#define _RDHTTP_H_
+
+#define CJSON_HIDE_SYMBOLS
+#include "cJSON.h"
+
+
+typedef struct rd_http_error_s {
+ int code;
+ char *errstr;
+ char data[1]; /**< This is where the error string begins. */
+} rd_http_error_t;
+
+void rd_http_error_destroy(rd_http_error_t *herr);
+
+rd_http_error_t *rd_http_get(const char *url, rd_buf_t **rbufp);
+rd_http_error_t *rd_http_get_json(const char *url, cJSON **jsonp);
+
+void rd_http_global_init(void);
+
+
+
+#ifdef LIBCURL_VERSION
+/* Advanced API that exposes the underlying CURL handle.
+ * Requires caller to have included curl.h prior to this file. */
+
+
+typedef struct rd_http_req_s {
+ CURL *hreq_curl; /**< CURL handle */
+ rd_buf_t *hreq_buf; /**< Response buffer */
+ int hreq_code; /**< HTTP response code */
+ char hreq_curl_errstr[CURL_ERROR_SIZE]; /**< Error string for curl to
+ * write to. */
+} rd_http_req_t;
+
+rd_http_error_t *rd_http_req_init(rd_http_req_t *hreq, const char *url);
+rd_http_error_t *rd_http_req_perform_sync(rd_http_req_t *hreq);
+rd_http_error_t *rd_http_parse_json(rd_http_req_t *hreq, cJSON **jsonp);
+rd_http_error_t *rd_http_post_expect_json(rd_kafka_t *rk,
+ const char *url,
+ const struct curl_slist *headers,
+ const char *data_to_token,
+ size_t data_to_token_size,
+ int timeout_s,
+ int retry,
+ int retry_ms,
+ cJSON **jsonp);
+void rd_http_req_destroy(rd_http_req_t *hreq);
+
+#endif
+
+
+
+#endif /* _RDHTTP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h
new file mode 100644
index 000000000..428337646
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdinterval.h
@@ -0,0 +1,159 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDINTERVAL_H_
+#define _RDINTERVAL_H_
+
+#include "rd.h"
+
+typedef struct rd_interval_s {
+ rd_ts_t ri_ts_last; /* last interval timestamp */
+ rd_ts_t ri_fixed; /* fixed interval if provided interval is 0 */
+ int ri_backoff; /* back off the next interval by this much */
+} rd_interval_t;
+
+
+static RD_INLINE RD_UNUSED void rd_interval_init(rd_interval_t *ri) {
+ memset(ri, 0, sizeof(*ri));
+}
+
+
+
+/**
+ * Returns the number of microseconds the interval has been over-shot.
+ * If the return value is >0 (i.e., time for next intervalled something) then
+ * the time interval is updated to the current time.
+ *
+ * The current time can be provided in 'now', or if this is set to 0 the time
+ * will be gathered automatically.
+ *
+ * If 'interval_us' is set to 0 the fixed interval will be used, see
+ * 'rd_interval_fixed()'.
+ *
+ * If this is the first time rd_interval() is called after an _init() or
+ * _reset() or the \p immediate parameter is true, then a positive value
+ * will be returned immediately even though the initial interval has not
+ * passed.
+ */
+#define rd_interval(ri, interval_us, now) rd_interval0(ri, interval_us, now, 0)
+#define rd_interval_immediate(ri, interval_us, now) \
+ rd_interval0(ri, interval_us, now, 1)
+static RD_INLINE RD_UNUSED rd_ts_t rd_interval0(rd_interval_t *ri,
+ rd_ts_t interval_us,
+ rd_ts_t now,
+ int immediate) {
+ rd_ts_t diff;
+
+ if (!now)
+ now = rd_clock();
+ if (!interval_us)
+ interval_us = ri->ri_fixed;
+
+ if (ri->ri_ts_last || !immediate) {
+ diff = now - (ri->ri_ts_last + interval_us + ri->ri_backoff);
+ } else
+ diff = 1;
+ if (unlikely(diff > 0)) {
+ ri->ri_ts_last = now;
+ ri->ri_backoff = 0;
+ }
+
+ return diff;
+}
+
+
+/**
+ * Reset the interval to zero, i.e., the next call to rd_interval()
+ * will be immediate.
+ */
+static RD_INLINE RD_UNUSED void rd_interval_reset(rd_interval_t *ri) {
+ ri->ri_ts_last = 0;
+ ri->ri_backoff = 0;
+}
+
+/**
+ * Reset the interval to 'now'. If now is 0, the time will be gathered
+ * automatically.
+ */
+static RD_INLINE RD_UNUSED void rd_interval_reset_to_now(rd_interval_t *ri,
+ rd_ts_t now) {
+ if (!now)
+ now = rd_clock();
+
+ ri->ri_ts_last = now;
+ ri->ri_backoff = 0;
+}
+
+/**
+ * Back off the next interval by `backoff_us` microseconds.
+ */
+static RD_INLINE RD_UNUSED void rd_interval_backoff(rd_interval_t *ri,
+ int backoff_us) {
+ ri->ri_backoff = backoff_us;
+}
+
+/**
+ * Expedite (speed up) the next interval by `expedite_us` microseconds.
+ * If `expedite_us` is 0 the interval will be set to trigger
+ * immedately on the next rd_interval() call.
+ */
+static RD_INLINE RD_UNUSED void rd_interval_expedite(rd_interval_t *ri,
+ int expedite_us) {
+ if (!expedite_us)
+ ri->ri_ts_last = 0;
+ else
+ ri->ri_backoff = -expedite_us;
+}
+
+/**
+ * Specifies a fixed interval to use if rd_interval() is called with
+ * `interval_us` set to 0.
+ */
+static RD_INLINE RD_UNUSED void rd_interval_fixed(rd_interval_t *ri,
+ rd_ts_t fixed_us) {
+ ri->ri_fixed = fixed_us;
+}
+
+/**
+ * Disables the interval (until rd_interval_init()/reset() is called).
+ * A disabled interval will never return a positive value from
+ * rd_interval().
+ */
+static RD_INLINE RD_UNUSED void rd_interval_disable(rd_interval_t *ri) {
+ /* Set last beat to a large value a long time in the future. */
+ ri->ri_ts_last = 6000000000000000000LL; /* in about 190000 years */
+}
+
+/**
+ * Returns true if the interval is disabled.
+ */
+static RD_INLINE RD_UNUSED int rd_interval_disabled(const rd_interval_t *ri) {
+ return ri->ri_ts_last == 6000000000000000000LL;
+}
+
+#endif /* _RDINTERVAL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c
new file mode 100644
index 000000000..b254748eb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.c
@@ -0,0 +1,5026 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#define _GNU_SOURCE
+#include <errno.h>
+#include <string.h>
+#include <stdarg.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <sys/stat.h>
+#if !_WIN32
+#include <sys/types.h>
+#include <dirent.h>
+#endif
+
+#include "rdkafka_int.h"
+#include "rdkafka_msg.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_cgrp.h"
+#include "rdkafka_assignor.h"
+#include "rdkafka_request.h"
+#include "rdkafka_event.h"
+#include "rdkafka_error.h"
+#include "rdkafka_sasl.h"
+#include "rdkafka_interceptor.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_sasl_oauthbearer.h"
+#if WITH_OAUTHBEARER_OIDC
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+#endif
+#if WITH_SSL
+#include "rdkafka_ssl.h"
+#endif
+
+#include "rdtime.h"
+#include "crc32c.h"
+#include "rdunittest.h"
+
+#ifdef _WIN32
+#include <sys/types.h>
+#include <sys/timeb.h>
+#endif
+
+#define CJSON_HIDE_SYMBOLS
+#include "cJSON.h"
+
+#if WITH_CURL
+#include "rdhttp.h"
+#endif
+
+
+static once_flag rd_kafka_global_init_once = ONCE_FLAG_INIT;
+static once_flag rd_kafka_global_srand_once = ONCE_FLAG_INIT;
+
+/**
+ * @brief Global counter+lock for all active librdkafka instances
+ */
+mtx_t rd_kafka_global_lock;
+int rd_kafka_global_cnt;
+
+
+/**
+ * Last API error code, per thread.
+ * Shared among all rd_kafka_t instances.
+ */
+rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code;
+
+
+/**
+ * Current number of threads created by rdkafka.
+ * This is used in regression tests.
+ */
+rd_atomic32_t rd_kafka_thread_cnt_curr;
+int rd_kafka_thread_cnt(void) {
+ return rd_atomic32_get(&rd_kafka_thread_cnt_curr);
+}
+
+/**
+ * Current thread's log name (TLS)
+ */
+char RD_TLS rd_kafka_thread_name[64] = "app";
+
+void rd_kafka_set_thread_name(const char *fmt, ...) {
+ va_list ap;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(rd_kafka_thread_name, sizeof(rd_kafka_thread_name), fmt,
+ ap);
+ va_end(ap);
+}
+
+/**
+ * @brief Current thread's system name (TLS)
+ *
+ * Note the name must be 15 characters or less, because it is passed to
+ * pthread_setname_np on Linux which imposes this limit.
+ */
+static char RD_TLS rd_kafka_thread_sysname[16] = "app";
+
+void rd_kafka_set_thread_sysname(const char *fmt, ...) {
+ va_list ap;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(rd_kafka_thread_sysname, sizeof(rd_kafka_thread_sysname),
+ fmt, ap);
+ va_end(ap);
+
+ thrd_setname(rd_kafka_thread_sysname);
+}
+
+static void rd_kafka_global_init0(void) {
+ cJSON_Hooks json_hooks = {.malloc_fn = rd_malloc, .free_fn = rd_free};
+
+ mtx_init(&rd_kafka_global_lock, mtx_plain);
+#if ENABLE_DEVEL
+ rd_atomic32_init(&rd_kafka_op_cnt, 0);
+#endif
+ rd_crc32c_global_init();
+#if WITH_SSL
+ /* The configuration interface might need to use
+ * OpenSSL to parse keys, prior to any rd_kafka_t
+ * object has been created. */
+ rd_kafka_ssl_init();
+#endif
+
+ cJSON_InitHooks(&json_hooks);
+
+#if WITH_CURL
+ rd_http_global_init();
+#endif
+}
+
+/**
+ * @brief Initialize once per process
+ */
+void rd_kafka_global_init(void) {
+ call_once(&rd_kafka_global_init_once, rd_kafka_global_init0);
+}
+
+
+/**
+ * @brief Seed the PRNG with current_time.milliseconds
+ */
+static void rd_kafka_global_srand(void) {
+ struct timeval tv;
+
+ rd_gettimeofday(&tv, NULL);
+
+ srand((unsigned int)(tv.tv_usec / 1000));
+}
+
+
+/**
+ * @returns the current number of active librdkafka instances
+ */
+static int rd_kafka_global_cnt_get(void) {
+ int r;
+ mtx_lock(&rd_kafka_global_lock);
+ r = rd_kafka_global_cnt;
+ mtx_unlock(&rd_kafka_global_lock);
+ return r;
+}
+
+
+/**
+ * @brief Increase counter for active librdkafka instances.
+ * If this is the first instance the global constructors will be called, if any.
+ */
+static void rd_kafka_global_cnt_incr(void) {
+ mtx_lock(&rd_kafka_global_lock);
+ rd_kafka_global_cnt++;
+ if (rd_kafka_global_cnt == 1) {
+ rd_kafka_transport_init();
+#if WITH_SSL
+ rd_kafka_ssl_init();
+#endif
+ rd_kafka_sasl_global_init();
+ }
+ mtx_unlock(&rd_kafka_global_lock);
+}
+
+/**
+ * @brief Decrease counter for active librdkafka instances.
+ * If this counter reaches 0 the global destructors will be called, if any.
+ */
+static void rd_kafka_global_cnt_decr(void) {
+ mtx_lock(&rd_kafka_global_lock);
+ rd_kafka_assert(NULL, rd_kafka_global_cnt > 0);
+ rd_kafka_global_cnt--;
+ if (rd_kafka_global_cnt == 0) {
+ rd_kafka_sasl_global_term();
+#if WITH_SSL
+ rd_kafka_ssl_term();
+#endif
+ }
+ mtx_unlock(&rd_kafka_global_lock);
+}
+
+
+/**
+ * Wait for all rd_kafka_t objects to be destroyed.
+ * Returns 0 if all kafka objects are now destroyed, or -1 if the
+ * timeout was reached.
+ */
+int rd_kafka_wait_destroyed(int timeout_ms) {
+ rd_ts_t timeout = rd_clock() + (timeout_ms * 1000);
+
+ while (rd_kafka_thread_cnt() > 0 || rd_kafka_global_cnt_get() > 0) {
+ if (rd_clock() >= timeout) {
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT,
+ ETIMEDOUT);
+ return -1;
+ }
+ rd_usleep(25000, NULL); /* 25ms */
+ }
+
+ return 0;
+}
+
+static void rd_kafka_log_buf(const rd_kafka_conf_t *conf,
+ const rd_kafka_t *rk,
+ int level,
+ int ctx,
+ const char *fac,
+ const char *buf) {
+ if (level > conf->log_level)
+ return;
+ else if (rk && conf->log_queue) {
+ rd_kafka_op_t *rko;
+
+ if (!rk->rk_logq)
+ return; /* Terminating */
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_LOG);
+ rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_MEDIUM);
+ rko->rko_u.log.level = level;
+ rd_strlcpy(rko->rko_u.log.fac, fac, sizeof(rko->rko_u.log.fac));
+ rko->rko_u.log.str = rd_strdup(buf);
+ rko->rko_u.log.ctx = ctx;
+ rd_kafka_q_enq(rk->rk_logq, rko);
+
+ } else if (conf->log_cb) {
+ conf->log_cb(rk, level, fac, buf);
+ }
+}
+
+/**
+ * @brief Logger
+ *
+ * @remark conf must be set, but rk may be NULL
+ */
+void rd_kafka_log0(const rd_kafka_conf_t *conf,
+ const rd_kafka_t *rk,
+ const char *extra,
+ int level,
+ int ctx,
+ const char *fac,
+ const char *fmt,
+ ...) {
+ char buf[2048];
+ va_list ap;
+ unsigned int elen = 0;
+ unsigned int of = 0;
+
+ if (level > conf->log_level)
+ return;
+
+ if (conf->log_thread_name) {
+ elen = rd_snprintf(buf, sizeof(buf),
+ "[thrd:%s]: ", rd_kafka_thread_name);
+ if (unlikely(elen >= sizeof(buf)))
+ elen = sizeof(buf);
+ of = elen;
+ }
+
+ if (extra) {
+ elen = rd_snprintf(buf + of, sizeof(buf) - of, "%s: ", extra);
+ if (unlikely(elen >= sizeof(buf) - of))
+ elen = sizeof(buf) - of;
+ of += elen;
+ }
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap);
+ va_end(ap);
+
+ rd_kafka_log_buf(conf, rk, level, ctx, fac, buf);
+}
+
+rd_kafka_resp_err_t
+rd_kafka_oauthbearer_set_token(rd_kafka_t *rk,
+ const char *token_value,
+ int64_t md_lifetime_ms,
+ const char *md_principal_name,
+ const char **extensions,
+ size_t extension_size,
+ char *errstr,
+ size_t errstr_size) {
+#if WITH_SASL_OAUTHBEARER
+ return rd_kafka_oauthbearer_set_token0(
+ rk, token_value, md_lifetime_ms, md_principal_name, extensions,
+ extension_size, errstr, errstr_size);
+#else
+ rd_snprintf(errstr, errstr_size,
+ "librdkafka not built with SASL OAUTHBEARER support");
+ return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+#endif
+}
+
+rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
+ const char *errstr) {
+#if WITH_SASL_OAUTHBEARER
+ return rd_kafka_oauthbearer_set_token_failure0(rk, errstr);
+#else
+ return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+#endif
+}
+
+void rd_kafka_log_print(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf) {
+ int secs, msecs;
+ struct timeval tv;
+ rd_gettimeofday(&tv, NULL);
+ secs = (int)tv.tv_sec;
+ msecs = (int)(tv.tv_usec / 1000);
+ fprintf(stderr, "%%%i|%u.%03u|%s|%s| %s\n", level, secs, msecs, fac,
+ rk ? rk->rk_name : "", buf);
+}
+
+void rd_kafka_log_syslog(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf) {
+#if WITH_SYSLOG
+ static int initialized = 0;
+
+ if (!initialized)
+ openlog("rdkafka", LOG_PID | LOG_CONS, LOG_USER);
+
+ syslog(level, "%s: %s: %s", fac, rk ? rk->rk_name : "", buf);
+#else
+ rd_assert(!*"syslog support not enabled in this build");
+#endif
+}
+
+void rd_kafka_set_logger(rd_kafka_t *rk,
+ void (*func)(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf)) {
+#if !WITH_SYSLOG
+ if (func == rd_kafka_log_syslog)
+ rd_assert(!*"syslog support not enabled in this build");
+#endif
+ rk->rk_conf.log_cb = func;
+}
+
+void rd_kafka_set_log_level(rd_kafka_t *rk, int level) {
+ rk->rk_conf.log_level = level;
+}
+
+
+
+static const char *rd_kafka_type2str(rd_kafka_type_t type) {
+ static const char *types[] = {
+ [RD_KAFKA_PRODUCER] = "producer",
+ [RD_KAFKA_CONSUMER] = "consumer",
+ };
+ return types[type];
+}
+
+#define _ERR_DESC(ENUM, DESC) \
+ [ENUM - RD_KAFKA_RESP_ERR__BEGIN] = {ENUM, &(#ENUM)[18] /*pfx*/, DESC}
+
+static const struct rd_kafka_err_desc rd_kafka_err_descs[] = {
+ _ERR_DESC(RD_KAFKA_RESP_ERR__BEGIN, NULL),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_MSG, "Local: Bad message format"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__BAD_COMPRESSION,
+ "Local: Invalid compressed data"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__DESTROY, "Local: Broker handle destroyed"),
+ _ERR_DESC(
+ RD_KAFKA_RESP_ERR__FAIL,
+ "Local: Communication failure with broker"), // FIXME: too specific
+ _ERR_DESC(RD_KAFKA_RESP_ERR__TRANSPORT, "Local: Broker transport failure"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
+ "Local: Critical system resource failure"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__RESOLVE, "Local: Host resolution failure"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__MSG_TIMED_OUT, "Local: Message timed out"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__PARTITION_EOF, "Broker: No more messages"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION, "Local: Unknown partition"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__FS, "Local: File or filesystem error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC, "Local: Unknown topic"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
+ "Local: All broker connections are down"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Local: Invalid argument or configuration"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT, "Local: Timed out"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__QUEUE_FULL, "Local: Queue full"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__ISR_INSUFF, "Local: ISR count insufficient"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__NODE_UPDATE, "Local: Broker node update"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__SSL, "Local: SSL error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_COORD, "Local: Waiting for coordinator"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP, "Local: Unknown group"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__IN_PROGRESS, "Local: Operation in progress"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
+ "Local: Previous operation in progress"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION,
+ "Local: Existing subscription"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, "Local: Assign partitions"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, "Local: Revoke partitions"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__CONFLICT, "Local: Conflicting use"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__STATE, "Local: Erroneous state"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL, "Local: Unknown protocol"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED, "Local: Not implemented"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__AUTHENTICATION,
+ "Local: Authentication failure"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__NO_OFFSET, "Local: No offset stored"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__OUTDATED, "Local: Outdated"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, "Local: Timed out in queue"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "Local: Required feature not supported by broker"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__WAIT_CACHE, "Local: Awaiting cache update"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__INTR, "Local: Operation interrupted"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_SERIALIZATION,
+ "Local: Key serialization error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION,
+ "Local: Value serialization error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION,
+ "Local: Key deserialization error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION,
+ "Local: Value deserialization error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__PARTIAL, "Local: Partial response"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__READ_ONLY, "Local: Read-only object"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__NOENT, "Local: No such entry"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__UNDERFLOW, "Local: Read underflow"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__INVALID_TYPE, "Local: Invalid type"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__RETRY, "Local: Retry operation"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_QUEUE, "Local: Purged in queue"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, "Local: Purged in flight"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__FATAL, "Local: Fatal error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__INCONSISTENT, "Local: Inconsistent state"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE,
+ "Local: Gap-less ordering would not be guaranteed "
+ "if proceeding"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED,
+ "Local: Maximum application poll interval "
+ "(max.poll.interval.ms) exceeded"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__UNKNOWN_BROKER, "Local: Unknown broker"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
+ "Local: Functionality not configured"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__FENCED,
+ "Local: This instance has been fenced by a newer instance"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__APPLICATION,
+ "Local: Application generated error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST,
+ "Local: Group partition assignment lost"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__NOOP, "Local: No operation performed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET,
+ "Local: No offset to automatically reset to"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
+ "Local: Partition log truncation detected"),
+
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN, "Unknown broker error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NO_ERROR, "Success"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE,
+ "Broker: Offset out of range"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG, "Broker: Invalid message"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+ "Broker: Unknown topic or partition"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
+ "Broker: Invalid message size"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE,
+ "Broker: Leader not available"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
+ "Broker: Not leader for partition"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT, "Broker: Request timed out"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE,
+ "Broker: Broker not available"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE,
+ "Broker: Replica not available"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
+ "Broker: Message size too large"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH,
+ "Broker: StaleControllerEpochCode"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
+ "Broker: Offset metadata string too large"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION,
+ "Broker: Broker disconnected before response received"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+ "Broker: Coordinator load in progress"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ "Broker: Coordinator not available"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_COORDINATOR, "Broker: Not coordinator"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION, "Broker: Invalid topic"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE,
+ "Broker: Message batch larger than configured server "
+ "segment size"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS,
+ "Broker: Not enough in-sync replicas"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND,
+ "Broker: Message(s) written to insufficient number of "
+ "in-sync replicas"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS,
+ "Broker: Invalid required acks value"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ "Broker: Specified group generation id is not valid"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL,
+ "Broker: Inconsistent group protocol"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_GROUP_ID, "Broker: Invalid group.id"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID, "Broker: Unknown member"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT,
+ "Broker: Invalid session timeout"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ "Broker: Group rebalance in progress"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
+ "Broker: Commit offset data size is not valid"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
+ "Broker: Topic authorization failed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+ "Broker: Group authorization failed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED,
+ "Broker: Cluster authorization failed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP, "Broker: Invalid timestamp"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM,
+ "Broker: Unsupported SASL mechanism"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE,
+ "Broker: Request not valid in current SASL state"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION,
+ "Broker: API version not supported"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS,
+ "Broker: Topic already exists"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PARTITIONS,
+ "Broker: Invalid number of partitions"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR,
+ "Broker: Invalid replication factor"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT,
+ "Broker: Invalid replica assignment"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_CONFIG,
+ "Broker: Configuration is invalid"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NOT_CONTROLLER,
+ "Broker: Not controller for cluster"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_REQUEST, "Broker: Invalid request"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT,
+ "Broker: Message format on broker does not support request"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_POLICY_VIOLATION, "Broker: Policy violation"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
+ "Broker: Broker received an out of order sequence number"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER,
+ "Broker: Broker received a duplicate sequence number"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH,
+ "Broker: Producer attempted an operation with an old epoch"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TXN_STATE,
+ "Broker: Producer attempted a transactional operation in "
+ "an invalid state"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
+ "Broker: Producer attempted to use a producer id which is "
+ "not currently assigned to its transactional id"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
+ "Broker: Transaction timeout is larger than the maximum "
+ "value allowed by the broker's max.transaction.timeout.ms"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ "Broker: Producer attempted to update a transaction while "
+ "another concurrent operation on the same transaction was "
+ "ongoing"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED,
+ "Broker: Indicates that the transaction coordinator sending "
+ "a WriteTxnMarker is no longer the current coordinator for "
+ "a given producer"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
+ "Broker: Transactional Id authorization failed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_SECURITY_DISABLED,
+ "Broker: Security features are disabled"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED,
+ "Broker: Operation not attempted"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+ "Broker: Disk error when trying to access log file on disk"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND,
+ "Broker: The user-specified log directory is not found "
+ "in the broker config"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED,
+ "Broker: SASL Authentication failed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
+ "Broker: Unknown Producer Id"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS,
+ "Broker: Partition reassignment is in progress"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED,
+ "Broker: Delegation Token feature is not enabled"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND,
+ "Broker: Delegation Token is not found on server"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH,
+ "Broker: Specified Principal is not valid Owner/Renewer"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED,
+ "Broker: Delegation Token requests are not allowed on "
+ "this connection"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED,
+ "Broker: Delegation Token authorization failed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED,
+ "Broker: Delegation Token is expired"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE,
+ "Broker: Supplied principalType is not supported"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP,
+ "Broker: The group is not empty"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND,
+ "Broker: The group id does not exist"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND,
+ "Broker: The fetch session ID was not found"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH,
+ "Broker: The fetch session epoch is invalid"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND,
+ "Broker: No matching listener"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED,
+ "Broker: Topic deletion is disabled"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH,
+ "Broker: Leader epoch is older than broker epoch"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH,
+ "Broker: Leader epoch is newer than broker epoch"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE,
+ "Broker: Unsupported compression type"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH,
+ "Broker: Broker epoch has changed"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE,
+ "Broker: Leader high watermark is not caught up"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED,
+ "Broker: Group member needs a valid member ID"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE,
+ "Broker: Preferred leader was not available"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED,
+ "Broker: Consumer group has reached maximum size"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
+ "Broker: Static consumer fenced by other consumer with same "
+ "group.instance.id"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE,
+ "Broker: Eligible partition leaders are not available"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED,
+ "Broker: Leader election not needed for topic partition"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS,
+ "Broker: No partition reassignment is in progress"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC,
+ "Broker: Deleting offsets of a topic while the consumer "
+ "group is subscribed to it"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_RECORD,
+ "Broker: Broker failed to validate record"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ "Broker: There are unstable offsets that need to be cleared"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED,
+ "Broker: Throttling quota has been exceeded"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_PRODUCER_FENCED,
+ "Broker: There is a newer producer with the same "
+ "transactionalId which fences the current one"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND,
+ "Broker: Request illegally referred to resource that "
+ "does not exist"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE,
+ "Broker: Request illegally referred to the same resource "
+ "twice"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL,
+ "Broker: Requested credential would not meet criteria for "
+ "acceptability"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET,
+ "Broker: Indicates that the either the sender or recipient "
+ "of a voter-only request is not one of the expected voters"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION,
+ "Broker: Invalid update version"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED,
+ "Broker: Unable to update finalized features due to "
+ "server error"),
+ _ERR_DESC(RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE,
+ "Broker: Request principal deserialization failed during "
+ "forwarding"),
+
+ _ERR_DESC(RD_KAFKA_RESP_ERR__END, NULL)};
+
+
+void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs,
+ size_t *cntp) {
+ *errdescs = rd_kafka_err_descs;
+ *cntp = RD_ARRAYSIZE(rd_kafka_err_descs);
+}
+
+
+const char *rd_kafka_err2str(rd_kafka_resp_err_t err) {
+ static RD_TLS char ret[32];
+ int idx = err - RD_KAFKA_RESP_ERR__BEGIN;
+
+ if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN ||
+ err >= RD_KAFKA_RESP_ERR_END_ALL ||
+ !rd_kafka_err_descs[idx].desc)) {
+ rd_snprintf(ret, sizeof(ret), "Err-%i?", err);
+ return ret;
+ }
+
+ return rd_kafka_err_descs[idx].desc;
+}
+
+
+const char *rd_kafka_err2name(rd_kafka_resp_err_t err) {
+ static RD_TLS char ret[32];
+ int idx = err - RD_KAFKA_RESP_ERR__BEGIN;
+
+ if (unlikely(err <= RD_KAFKA_RESP_ERR__BEGIN ||
+ err >= RD_KAFKA_RESP_ERR_END_ALL ||
+ !rd_kafka_err_descs[idx].desc)) {
+ rd_snprintf(ret, sizeof(ret), "ERR_%i?", err);
+ return ret;
+ }
+
+ return rd_kafka_err_descs[idx].name;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_last_error(void) {
+ return rd_kafka_last_error_code;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_errno2err(int errnox) {
+ switch (errnox) {
+ case EINVAL:
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ case EBUSY:
+ return RD_KAFKA_RESP_ERR__CONFLICT;
+
+ case ENOENT:
+ return RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+
+ case ESRCH:
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ case ETIMEDOUT:
+ return RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ case EMSGSIZE:
+ return RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
+
+ case ENOBUFS:
+ return RD_KAFKA_RESP_ERR__QUEUE_FULL;
+
+ case ECANCELED:
+ return RD_KAFKA_RESP_ERR__FATAL;
+
+ default:
+ return RD_KAFKA_RESP_ERR__FAIL;
+ }
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
+ rd_kafka_resp_err_t err;
+
+ if (unlikely((err = rd_atomic32_get(&rk->rk_fatal.err)))) {
+ rd_kafka_rdlock(rk);
+ rd_snprintf(errstr, errstr_size, "%s", rk->rk_fatal.errstr);
+ rd_kafka_rdunlock(rk);
+ }
+
+ return err;
+}
+
+
+/**
+ * @brief Set's the fatal error for this instance.
+ *
+ * @param do_lock RD_DO_LOCK: rd_kafka_wrlock() will be acquired and released,
+ * RD_DONT_LOCK: caller must hold rd_kafka_wrlock().
+ *
+ * @returns 1 if the error was set, or 0 if a previous fatal error
+ * has already been set on this instance.
+ *
+ * @locality any
+ * @locks none
+ */
+int rd_kafka_set_fatal_error0(rd_kafka_t *rk,
+ rd_dolock_t do_lock,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ va_list ap;
+ char buf[512];
+
+ if (do_lock)
+ rd_kafka_wrlock(rk);
+ rk->rk_fatal.cnt++;
+ if (rd_atomic32_get(&rk->rk_fatal.err)) {
+ if (do_lock)
+ rd_kafka_wrunlock(rk);
+ rd_kafka_dbg(rk, GENERIC, "FATAL",
+ "Suppressing subsequent fatal error: %s",
+ rd_kafka_err2name(err));
+ return 0;
+ }
+
+ rd_atomic32_set(&rk->rk_fatal.err, err);
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+ rk->rk_fatal.errstr = rd_strdup(buf);
+
+ if (do_lock)
+ rd_kafka_wrunlock(rk);
+
+ /* If there is an error callback or event handler we
+ * also log the fatal error as it happens.
+ * If there is no error callback the error event
+ * will be automatically logged, and this check here
+ * prevents us from duplicate logs. */
+ if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)
+ rd_kafka_log(rk, LOG_EMERG, "FATAL", "Fatal error: %s: %s",
+ rd_kafka_err2str(err), rk->rk_fatal.errstr);
+ else
+ rd_kafka_dbg(rk, ALL, "FATAL", "Fatal error: %s: %s",
+ rd_kafka_err2str(err), rk->rk_fatal.errstr);
+
+ /* Indicate to the application that a fatal error was raised,
+ * the app should use rd_kafka_fatal_error() to extract the
+ * fatal error code itself.
+ * For the high-level consumer we propagate the error as a
+ * consumer error so it is returned from consumer_poll(),
+ * while for all other client types (the producer) we propagate to
+ * the standard error handler (typically error_cb). */
+ if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp)
+ rd_kafka_consumer_err(
+ rk->rk_cgrp->rkcg_q, RD_KAFKA_NODEID_UA,
+ RD_KAFKA_RESP_ERR__FATAL, 0, NULL, NULL,
+ RD_KAFKA_OFFSET_INVALID, "Fatal error: %s: %s",
+ rd_kafka_err2str(err), rk->rk_fatal.errstr);
+ else
+ rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__FATAL,
+ "Fatal error: %s: %s", rd_kafka_err2str(err),
+ rk->rk_fatal.errstr);
+
+
+ /* Tell rdkafka main thread to purge producer queues, but not
+ * in-flight since we'll want proper delivery status for transmitted
+ * requests.
+ * Need NON_BLOCKING to avoid dead-lock if user is
+ * calling purge() at the same time, which could be
+ * waiting for this broker thread to handle its
+ * OP_PURGE request. */
+ if (rk->rk_type == RD_KAFKA_PRODUCER) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE);
+ rko->rko_u.purge.flags =
+ RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_NON_BLOCKING;
+ rd_kafka_q_enq(rk->rk_ops, rko);
+ }
+
+ return 1;
+}
+
+
+/**
+ * @returns a copy of the current fatal error, if any, else NULL.
+ *
+ * @locks_acquired rd_kafka_rdlock(rk)
+ */
+rd_kafka_error_t *rd_kafka_get_fatal_error(rd_kafka_t *rk) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+
+ if (!(err = rd_atomic32_get(&rk->rk_fatal.err)))
+ return NULL; /* No fatal error raised */
+
+ rd_kafka_rdlock(rk);
+ error = rd_kafka_error_new_fatal(err, "%s", rk->rk_fatal.errstr);
+ rd_kafka_rdunlock(rk);
+
+ return error;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *reason) {
+ if (!rd_kafka_set_fatal_error(rk, err, "test_fatal_error: %s", reason))
+ return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
+ else
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Final destructor for rd_kafka_t, must only be called with refcnt 0.
+ *
+ * @locality application thread
+ */
+void rd_kafka_destroy_final(rd_kafka_t *rk) {
+
+ rd_kafka_assert(rk, rd_kafka_terminating(rk));
+
+ /* Synchronize state */
+ rd_kafka_wrlock(rk);
+ rd_kafka_wrunlock(rk);
+
+ /* Terminate SASL provider */
+ if (rk->rk_conf.sasl.provider)
+ rd_kafka_sasl_term(rk);
+
+ rd_kafka_timers_destroy(&rk->rk_timers);
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying op queues");
+
+ /* Destroy cgrp */
+ if (rk->rk_cgrp) {
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying cgrp");
+ /* Reset queue forwarding (rep -> cgrp) */
+ rd_kafka_q_fwd_set(rk->rk_rep, NULL);
+ rd_kafka_cgrp_destroy_final(rk->rk_cgrp);
+ }
+
+ rd_kafka_assignors_term(rk);
+
+ if (rk->rk_type == RD_KAFKA_CONSUMER) {
+ rd_kafka_assignment_destroy(rk);
+ if (rk->rk_consumer.q)
+ rd_kafka_q_destroy(rk->rk_consumer.q);
+ }
+
+ /* Purge op-queues */
+ rd_kafka_q_destroy_owner(rk->rk_rep);
+ rd_kafka_q_destroy_owner(rk->rk_ops);
+
+#if WITH_SSL
+ if (rk->rk_conf.ssl.ctx) {
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Destroying SSL CTX");
+ rd_kafka_ssl_ctx_term(rk);
+ }
+ rd_list_destroy(&rk->rk_conf.ssl.loaded_providers);
+#endif
+
+ /* It is not safe to log after this point. */
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE",
+ "Termination done: freeing resources");
+
+ if (rk->rk_logq) {
+ rd_kafka_q_destroy_owner(rk->rk_logq);
+ rk->rk_logq = NULL;
+ }
+
+ if (rk->rk_type == RD_KAFKA_PRODUCER) {
+ cnd_destroy(&rk->rk_curr_msgs.cnd);
+ mtx_destroy(&rk->rk_curr_msgs.lock);
+ }
+
+ if (rk->rk_fatal.errstr) {
+ rd_free(rk->rk_fatal.errstr);
+ rk->rk_fatal.errstr = NULL;
+ }
+
+ cnd_destroy(&rk->rk_broker_state_change_cnd);
+ mtx_destroy(&rk->rk_broker_state_change_lock);
+
+ mtx_destroy(&rk->rk_suppress.sparse_connect_lock);
+
+ cnd_destroy(&rk->rk_init_cnd);
+ mtx_destroy(&rk->rk_init_lock);
+
+ if (rk->rk_full_metadata)
+ rd_kafka_metadata_destroy(rk->rk_full_metadata);
+ rd_kafkap_str_destroy(rk->rk_client_id);
+ rd_kafkap_str_destroy(rk->rk_group_id);
+ rd_kafkap_str_destroy(rk->rk_eos.transactional_id);
+ rd_kafka_anyconf_destroy(_RK_GLOBAL, &rk->rk_conf);
+ rd_list_destroy(&rk->rk_broker_by_id);
+
+ mtx_destroy(&rk->rk_conf.sasl.lock);
+ rwlock_destroy(&rk->rk_lock);
+
+ rd_free(rk);
+ rd_kafka_global_cnt_decr();
+}
+
+
+static void rd_kafka_destroy_app(rd_kafka_t *rk, int flags) {
+ thrd_t thrd;
+#ifndef _WIN32
+ int term_sig = rk->rk_conf.term_sig;
+#endif
+ int res;
+ char flags_str[256];
+ static const char *rd_kafka_destroy_flags_names[] = {
+ "Terminate", "DestroyCalled", "Immediate", "NoConsumerClose", NULL};
+
+ /* Fatal errors and _F_IMMEDIATE also sets .._NO_CONSUMER_CLOSE */
+ if (flags & RD_KAFKA_DESTROY_F_IMMEDIATE ||
+ rd_kafka_fatal_error_code(rk))
+ flags |= RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE;
+
+ rd_flags2str(flags_str, sizeof(flags_str), rd_kafka_destroy_flags_names,
+ flags);
+ rd_kafka_dbg(rk, ALL, "DESTROY",
+ "Terminating instance "
+ "(destroy flags %s (0x%x))",
+ flags ? flags_str : "none", flags);
+
+ /* If producer still has messages in queue the application
+ * is terminating the producer without first calling flush() or purge()
+ * which is a common new user mistake, so hint the user of proper
+ * shutdown semantics. */
+ if (rk->rk_type == RD_KAFKA_PRODUCER) {
+ unsigned int tot_cnt;
+ size_t tot_size;
+
+ rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);
+
+ if (tot_cnt > 0)
+ rd_kafka_log(rk, LOG_WARNING, "TERMINATE",
+ "Producer terminating with %u message%s "
+ "(%" PRIusz
+ " byte%s) still in "
+ "queue or transit: "
+ "use flush() to wait for "
+ "outstanding message delivery",
+ tot_cnt, tot_cnt > 1 ? "s" : "", tot_size,
+ tot_size > 1 ? "s" : "");
+ }
+
+ /* Make sure destroy is not called from a librdkafka thread
+ * since this will most likely cause a deadlock.
+ * FIXME: include broker threads (for log_cb) */
+ if (thrd_is_current(rk->rk_thread) ||
+ thrd_is_current(rk->rk_background.thread)) {
+ rd_kafka_log(rk, LOG_EMERG, "BGQUEUE",
+ "Application bug: "
+ "rd_kafka_destroy() called from "
+ "librdkafka owned thread");
+ rd_kafka_assert(NULL,
+ !*"Application bug: "
+ "calling rd_kafka_destroy() from "
+ "librdkafka owned thread is prohibited");
+ }
+
+ /* Before signaling for general termination, set the destroy
+ * flags to hint cgrp how to shut down. */
+ rd_atomic32_set(&rk->rk_terminate,
+ flags | RD_KAFKA_DESTROY_F_DESTROY_CALLED);
+
+ /* The legacy/simple consumer lacks an API to close down the consumer*/
+ if (rk->rk_cgrp) {
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE",
+ "Terminating consumer group handler");
+ rd_kafka_consumer_close(rk);
+ }
+
+ /* With the consumer closed, terminate the rest of librdkafka. */
+ rd_atomic32_set(&rk->rk_terminate,
+ flags | RD_KAFKA_DESTROY_F_TERMINATE);
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Interrupting timers");
+ rd_kafka_wrlock(rk);
+ thrd = rk->rk_thread;
+ rd_kafka_timers_interrupt(&rk->rk_timers);
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE",
+ "Sending TERMINATE to internal main thread");
+ /* Send op to trigger queue/io wake-up.
+ * The op itself is (likely) ignored by the receiver. */
+ rd_kafka_q_enq(rk->rk_ops, rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
+
+#ifndef _WIN32
+ /* Interrupt main kafka thread to speed up termination. */
+ if (term_sig) {
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE",
+ "Sending thread kill signal %d", term_sig);
+ pthread_kill(thrd, term_sig);
+ }
+#endif
+
+ if (rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_IMMEDIATE))
+ return; /* FIXME: thread resource leak */
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Joining internal main thread");
+
+ if (thrd_join(thrd, &res) != thrd_success)
+ rd_kafka_log(rk, LOG_ERR, "DESTROY",
+ "Failed to join internal main thread: %s "
+ "(was process forked?)",
+ rd_strerror(errno));
+
+ rd_kafka_destroy_final(rk);
+}
+
+
+/* NOTE: Must only be called by application.
+ * librdkafka itself must use rd_kafka_destroy0(). */
+void rd_kafka_destroy(rd_kafka_t *rk) {
+ rd_kafka_destroy_app(rk, 0);
+}
+
+void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags) {
+ rd_kafka_destroy_app(rk, flags);
+}
+
+
+/**
+ * Main destructor for rd_kafka_t
+ *
+ * Locality: rdkafka main thread or application thread during rd_kafka_new()
+ */
+static void rd_kafka_destroy_internal(rd_kafka_t *rk) {
+ rd_kafka_topic_t *rkt, *rkt_tmp;
+ rd_kafka_broker_t *rkb, *rkb_tmp;
+ rd_list_t wait_thrds;
+ thrd_t *thrd;
+ int i;
+
+ rd_kafka_dbg(rk, ALL, "DESTROY", "Destroy internal");
+
+ /* Trigger any state-change waiters (which should check the
+ * terminate flag whenever they wake up). */
+ rd_kafka_brokers_broadcast_state_change(rk);
+
+ if (rk->rk_background.thread) {
+ int res;
+ /* Send op to trigger queue/io wake-up.
+ * The op itself is (likely) ignored by the receiver. */
+ rd_kafka_q_enq(rk->rk_background.q,
+ rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
+
+ rd_kafka_dbg(rk, ALL, "DESTROY",
+ "Waiting for background queue thread "
+ "to terminate");
+ thrd_join(rk->rk_background.thread, &res);
+ rd_kafka_q_destroy_owner(rk->rk_background.q);
+ }
+
+ /* Call on_destroy() interceptors */
+ rd_kafka_interceptors_on_destroy(rk);
+
+ /* Brokers pick up on rk_terminate automatically. */
+
+ /* List of (broker) threads to join to synchronize termination */
+ rd_list_init(&wait_thrds, rd_atomic32_get(&rk->rk_broker_cnt), NULL);
+
+ rd_kafka_wrlock(rk);
+
+ rd_kafka_dbg(rk, ALL, "DESTROY", "Removing all topics");
+ /* Decommission all topics */
+ TAILQ_FOREACH_SAFE(rkt, &rk->rk_topics, rkt_link, rkt_tmp) {
+ rd_kafka_wrunlock(rk);
+ rd_kafka_topic_partitions_remove(rkt);
+ rd_kafka_wrlock(rk);
+ }
+
+ /* Decommission brokers.
+ * Broker thread holds a refcount and detects when broker refcounts
+ * reaches 1 and then decommissions itself. */
+ TAILQ_FOREACH_SAFE(rkb, &rk->rk_brokers, rkb_link, rkb_tmp) {
+ /* Add broker's thread to wait_thrds list for later joining */
+ thrd = rd_malloc(sizeof(*thrd));
+ *thrd = rkb->rkb_thread;
+ rd_list_add(&wait_thrds, thrd);
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_dbg(rk, BROKER, "DESTROY", "Sending TERMINATE to %s",
+ rd_kafka_broker_name(rkb));
+ /* Send op to trigger queue/io wake-up.
+ * The op itself is (likely) ignored by the broker thread. */
+ rd_kafka_q_enq(rkb->rkb_ops,
+ rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
+
+#ifndef _WIN32
+ /* Interrupt IO threads to speed up termination. */
+ if (rk->rk_conf.term_sig)
+ pthread_kill(rkb->rkb_thread, rk->rk_conf.term_sig);
+#endif
+
+ rd_kafka_broker_destroy(rkb);
+
+ rd_kafka_wrlock(rk);
+ }
+
+ if (rk->rk_clusterid) {
+ rd_free(rk->rk_clusterid);
+ rk->rk_clusterid = NULL;
+ }
+
+ /* Destroy coord requests */
+ rd_kafka_coord_reqs_term(rk);
+
+ /* Destroy the coordinator cache */
+ rd_kafka_coord_cache_destroy(&rk->rk_coord_cache);
+
+ /* Purge metadata cache.
+ * #3279:
+ * We mustn't call cache_destroy() here since there might be outstanding
+ * broker rkos that hold references to the metadata cache lock,
+ * and these brokers are destroyed below. So to avoid a circular
+ * dependency refcnt deadlock we first purge the cache here
+ * and destroy it after the brokers are destroyed. */
+ rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/);
+
+ rd_kafka_wrunlock(rk);
+
+ mtx_lock(&rk->rk_broker_state_change_lock);
+ /* Purge broker state change waiters */
+ rd_list_destroy(&rk->rk_broker_state_change_waiters);
+ mtx_unlock(&rk->rk_broker_state_change_lock);
+
+ if (rk->rk_type == RD_KAFKA_CONSUMER) {
+ if (rk->rk_consumer.q)
+ rd_kafka_q_disable(rk->rk_consumer.q);
+ }
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Purging reply queue");
+
+ /* Purge op-queue */
+ rd_kafka_q_disable(rk->rk_rep);
+ rd_kafka_q_purge(rk->rk_rep);
+
+ /* Loose our special reference to the internal broker. */
+ mtx_lock(&rk->rk_internal_rkb_lock);
+ if ((rkb = rk->rk_internal_rkb)) {
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE",
+ "Decommissioning internal broker");
+
+ /* Send op to trigger queue wake-up. */
+ rd_kafka_q_enq(rkb->rkb_ops,
+ rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
+
+ rk->rk_internal_rkb = NULL;
+ thrd = rd_malloc(sizeof(*thrd));
+ *thrd = rkb->rkb_thread;
+ rd_list_add(&wait_thrds, thrd);
+ }
+ mtx_unlock(&rk->rk_internal_rkb_lock);
+ if (rkb)
+ rd_kafka_broker_destroy(rkb);
+
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE", "Join %d broker thread(s)",
+ rd_list_cnt(&wait_thrds));
+
+ /* Join broker threads */
+ RD_LIST_FOREACH(thrd, &wait_thrds, i) {
+ int res;
+ if (thrd_join(*thrd, &res) != thrd_success)
+ ;
+ rd_free(thrd);
+ }
+
+ rd_list_destroy(&wait_thrds);
+
+ /* Destroy mock cluster */
+ if (rk->rk_mock.cluster)
+ rd_kafka_mock_cluster_destroy(rk->rk_mock.cluster);
+
+ if (rd_atomic32_get(&rk->rk_mock.cluster_cnt) > 0) {
+ rd_kafka_log(rk, LOG_EMERG, "MOCK",
+ "%d mock cluster(s) still active: "
+ "must be explicitly destroyed with "
+ "rd_kafka_mock_cluster_destroy() prior to "
+ "terminating the rd_kafka_t instance",
+ (int)rd_atomic32_get(&rk->rk_mock.cluster_cnt));
+ rd_assert(!*"All mock clusters must be destroyed prior to "
+ "rd_kafka_t destroy");
+ }
+
+ /* Destroy metadata cache */
+ rd_kafka_wrlock(rk);
+ rd_kafka_metadata_cache_destroy(rk);
+ rd_kafka_wrunlock(rk);
+}
+
+/**
+ * @brief Buffer state for stats emitter
+ */
+struct _stats_emit {
+ char *buf; /* Pointer to allocated buffer */
+ size_t size; /* Current allocated size of buf */
+ size_t of; /* Current write-offset in buf */
+};
+
+
+/* Stats buffer printf. Requires a (struct _stats_emit *)st variable in the
+ * current scope. */
+#define _st_printf(...) \
+ do { \
+ ssize_t _r; \
+ ssize_t _rem = st->size - st->of; \
+ _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \
+ if (_r >= _rem) { \
+ st->size *= 2; \
+ _rem = st->size - st->of; \
+ st->buf = rd_realloc(st->buf, st->size); \
+ _r = rd_snprintf(st->buf + st->of, _rem, __VA_ARGS__); \
+ } \
+ st->of += _r; \
+ } while (0)
+
+struct _stats_total {
+ int64_t tx; /**< broker.tx */
+ int64_t tx_bytes; /**< broker.tx_bytes */
+ int64_t rx; /**< broker.rx */
+ int64_t rx_bytes; /**< broker.rx_bytes */
+ int64_t txmsgs; /**< partition.txmsgs */
+ int64_t txmsg_bytes; /**< partition.txbytes */
+ int64_t rxmsgs; /**< partition.rxmsgs */
+ int64_t rxmsg_bytes; /**< partition.rxbytes */
+};
+
+
+
+/**
+ * @brief Rollover and emit an average window.
+ */
+static RD_INLINE void rd_kafka_stats_emit_avg(struct _stats_emit *st,
+ const char *name,
+ rd_avg_t *src_avg) {
+ rd_avg_t avg;
+
+ rd_avg_rollover(&avg, src_avg);
+ _st_printf(
+ "\"%s\": {"
+ " \"min\":%" PRId64
+ ","
+ " \"max\":%" PRId64
+ ","
+ " \"avg\":%" PRId64
+ ","
+ " \"sum\":%" PRId64
+ ","
+ " \"stddev\": %" PRId64
+ ","
+ " \"p50\": %" PRId64
+ ","
+ " \"p75\": %" PRId64
+ ","
+ " \"p90\": %" PRId64
+ ","
+ " \"p95\": %" PRId64
+ ","
+ " \"p99\": %" PRId64
+ ","
+ " \"p99_99\": %" PRId64
+ ","
+ " \"outofrange\": %" PRId64
+ ","
+ " \"hdrsize\": %" PRId32
+ ","
+ " \"cnt\":%i "
+ "}, ",
+ name, avg.ra_v.minv, avg.ra_v.maxv, avg.ra_v.avg, avg.ra_v.sum,
+ (int64_t)avg.ra_hist.stddev, avg.ra_hist.p50, avg.ra_hist.p75,
+ avg.ra_hist.p90, avg.ra_hist.p95, avg.ra_hist.p99,
+ avg.ra_hist.p99_99, avg.ra_hist.oor, avg.ra_hist.hdrsize,
+ avg.ra_v.cnt);
+ rd_avg_destroy(&avg);
+}
+
+/**
+ * Emit stats for toppar
+ */
+static RD_INLINE void rd_kafka_stats_emit_toppar(struct _stats_emit *st,
+ struct _stats_total *total,
+ rd_kafka_toppar_t *rktp,
+ int first) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ int64_t end_offset;
+ int64_t consumer_lag = -1;
+ int64_t consumer_lag_stored = -1;
+ struct offset_stats offs;
+ int32_t broker_id = -1;
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (rktp->rktp_broker) {
+ rd_kafka_broker_lock(rktp->rktp_broker);
+ broker_id = rktp->rktp_broker->rkb_nodeid;
+ rd_kafka_broker_unlock(rktp->rktp_broker);
+ }
+
+ /* Grab a copy of the latest finalized offset stats */
+ offs = rktp->rktp_offsets_fin;
+
+ end_offset = (rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED)
+ ? rktp->rktp_ls_offset
+ : rktp->rktp_hi_offset;
+
+ /* Calculate consumer_lag by using the highest offset
+ * of stored_offset (the last message passed to application + 1, or
+ * if enable.auto.offset.store=false the last message manually stored),
+ * or the committed_offset (the last message committed by this or
+ * another consumer).
+ * Using stored_offset allows consumer_lag to be up to date even if
+ * offsets are not (yet) committed.
+ */
+ if (end_offset != RD_KAFKA_OFFSET_INVALID) {
+ if (rktp->rktp_stored_pos.offset >= 0 &&
+ rktp->rktp_stored_pos.offset <= end_offset)
+ consumer_lag_stored =
+ end_offset - rktp->rktp_stored_pos.offset;
+ if (rktp->rktp_committed_pos.offset >= 0 &&
+ rktp->rktp_committed_pos.offset <= end_offset)
+ consumer_lag =
+ end_offset - rktp->rktp_committed_pos.offset;
+ }
+
+ _st_printf(
+ "%s\"%" PRId32
+ "\": { "
+ "\"partition\":%" PRId32
+ ", "
+ "\"broker\":%" PRId32
+ ", "
+ "\"leader\":%" PRId32
+ ", "
+ "\"desired\":%s, "
+ "\"unknown\":%s, "
+ "\"msgq_cnt\":%i, "
+ "\"msgq_bytes\":%" PRIusz
+ ", "
+ "\"xmit_msgq_cnt\":%i, "
+ "\"xmit_msgq_bytes\":%" PRIusz
+ ", "
+ "\"fetchq_cnt\":%i, "
+ "\"fetchq_size\":%" PRIu64
+ ", "
+ "\"fetch_state\":\"%s\", "
+ "\"query_offset\":%" PRId64
+ ", "
+ "\"next_offset\":%" PRId64
+ ", "
+ "\"app_offset\":%" PRId64
+ ", "
+ "\"stored_offset\":%" PRId64
+ ", "
+ "\"stored_leader_epoch\":%" PRId32
+ ", "
+ "\"commited_offset\":%" PRId64
+ ", " /*FIXME: issue #80 */
+ "\"committed_offset\":%" PRId64
+ ", "
+ "\"committed_leader_epoch\":%" PRId32
+ ", "
+ "\"eof_offset\":%" PRId64
+ ", "
+ "\"lo_offset\":%" PRId64
+ ", "
+ "\"hi_offset\":%" PRId64
+ ", "
+ "\"ls_offset\":%" PRId64
+ ", "
+ "\"consumer_lag\":%" PRId64
+ ", "
+ "\"consumer_lag_stored\":%" PRId64
+ ", "
+ "\"leader_epoch\":%" PRId32
+ ", "
+ "\"txmsgs\":%" PRIu64
+ ", "
+ "\"txbytes\":%" PRIu64
+ ", "
+ "\"rxmsgs\":%" PRIu64
+ ", "
+ "\"rxbytes\":%" PRIu64
+ ", "
+ "\"msgs\": %" PRIu64
+ ", "
+ "\"rx_ver_drops\": %" PRIu64
+ ", "
+ "\"msgs_inflight\": %" PRId32
+ ", "
+ "\"next_ack_seq\": %" PRId32
+ ", "
+ "\"next_err_seq\": %" PRId32
+ ", "
+ "\"acked_msgid\": %" PRIu64 "} ",
+ first ? "" : ", ", rktp->rktp_partition, rktp->rktp_partition,
+ broker_id, rktp->rktp_leader_id,
+ (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) ? "true" : "false",
+ (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) ? "true" : "false",
+ rd_kafka_msgq_len(&rktp->rktp_msgq),
+ rd_kafka_msgq_size(&rktp->rktp_msgq),
+ /* FIXME: xmit_msgq is local to the broker thread. */
+ 0, (size_t)0, rd_kafka_q_len(rktp->rktp_fetchq),
+ rd_kafka_q_size(rktp->rktp_fetchq),
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ rktp->rktp_query_pos.offset, offs.fetch_pos.offset,
+ rktp->rktp_app_pos.offset, rktp->rktp_stored_pos.offset,
+ rktp->rktp_stored_pos.leader_epoch,
+ rktp->rktp_committed_pos.offset, /* FIXME: issue #80 */
+ rktp->rktp_committed_pos.offset,
+ rktp->rktp_committed_pos.leader_epoch, offs.eof_offset,
+ rktp->rktp_lo_offset, rktp->rktp_hi_offset, rktp->rktp_ls_offset,
+ consumer_lag, consumer_lag_stored, rktp->rktp_leader_epoch,
+ rd_atomic64_get(&rktp->rktp_c.tx_msgs),
+ rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes),
+ rd_atomic64_get(&rktp->rktp_c.rx_msgs),
+ rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes),
+ rk->rk_type == RD_KAFKA_PRODUCER
+ ? rd_atomic64_get(&rktp->rktp_c.producer_enq_msgs)
+ : rd_atomic64_get(
+ &rktp->rktp_c.rx_msgs), /* legacy, same as rx_msgs */
+ rd_atomic64_get(&rktp->rktp_c.rx_ver_drops),
+ rd_atomic32_get(&rktp->rktp_msgs_inflight),
+ rktp->rktp_eos.next_ack_seq, rktp->rktp_eos.next_err_seq,
+ rktp->rktp_eos.acked_msgid);
+
+ if (total) {
+ total->txmsgs += rd_atomic64_get(&rktp->rktp_c.tx_msgs);
+ total->txmsg_bytes +=
+ rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes);
+ total->rxmsgs += rd_atomic64_get(&rktp->rktp_c.rx_msgs);
+ total->rxmsg_bytes +=
+ rd_atomic64_get(&rktp->rktp_c.rx_msg_bytes);
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+}
+
+/**
+ * @brief Emit broker request type stats
+ */
+static void rd_kafka_stats_emit_broker_reqs(struct _stats_emit *st,
+ rd_kafka_broker_t *rkb) {
+ /* Filter out request types that will never be sent by the client. */
+ static const rd_bool_t filter[4][RD_KAFKAP__NUM] = {
+ [RD_KAFKA_PRODUCER] = {[RD_KAFKAP_Fetch] = rd_true,
+ [RD_KAFKAP_OffsetCommit] = rd_true,
+ [RD_KAFKAP_OffsetFetch] = rd_true,
+ [RD_KAFKAP_JoinGroup] = rd_true,
+ [RD_KAFKAP_Heartbeat] = rd_true,
+ [RD_KAFKAP_LeaveGroup] = rd_true,
+ [RD_KAFKAP_SyncGroup] = rd_true},
+ [RD_KAFKA_CONSUMER] =
+ {
+ [RD_KAFKAP_Produce] = rd_true,
+ [RD_KAFKAP_InitProducerId] = rd_true,
+ /* Transactional producer */
+ [RD_KAFKAP_AddPartitionsToTxn] = rd_true,
+ [RD_KAFKAP_AddOffsetsToTxn] = rd_true,
+ [RD_KAFKAP_EndTxn] = rd_true,
+ [RD_KAFKAP_TxnOffsetCommit] = rd_true,
+ },
+ [2 /*any client type*/] =
+ {
+ [RD_KAFKAP_UpdateMetadata] = rd_true,
+ [RD_KAFKAP_ControlledShutdown] = rd_true,
+ [RD_KAFKAP_LeaderAndIsr] = rd_true,
+ [RD_KAFKAP_StopReplica] = rd_true,
+ [RD_KAFKAP_OffsetForLeaderEpoch] = rd_true,
+
+ [RD_KAFKAP_WriteTxnMarkers] = rd_true,
+
+ [RD_KAFKAP_AlterReplicaLogDirs] = rd_true,
+ [RD_KAFKAP_DescribeLogDirs] = rd_true,
+
+ [RD_KAFKAP_CreateDelegationToken] = rd_true,
+ [RD_KAFKAP_RenewDelegationToken] = rd_true,
+ [RD_KAFKAP_ExpireDelegationToken] = rd_true,
+ [RD_KAFKAP_DescribeDelegationToken] = rd_true,
+ [RD_KAFKAP_IncrementalAlterConfigs] = rd_true,
+ [RD_KAFKAP_ElectLeaders] = rd_true,
+ [RD_KAFKAP_AlterPartitionReassignments] = rd_true,
+ [RD_KAFKAP_ListPartitionReassignments] = rd_true,
+ [RD_KAFKAP_AlterUserScramCredentials] = rd_true,
+ [RD_KAFKAP_Vote] = rd_true,
+ [RD_KAFKAP_BeginQuorumEpoch] = rd_true,
+ [RD_KAFKAP_EndQuorumEpoch] = rd_true,
+ [RD_KAFKAP_DescribeQuorum] = rd_true,
+ [RD_KAFKAP_AlterIsr] = rd_true,
+ [RD_KAFKAP_UpdateFeatures] = rd_true,
+ [RD_KAFKAP_Envelope] = rd_true,
+ [RD_KAFKAP_FetchSnapshot] = rd_true,
+ [RD_KAFKAP_BrokerHeartbeat] = rd_true,
+ [RD_KAFKAP_UnregisterBroker] = rd_true,
+ [RD_KAFKAP_AllocateProducerIds] = rd_true,
+ },
+ [3 /*hide-unless-non-zero*/] = {
+ /* Hide Admin requests unless they've been used */
+ [RD_KAFKAP_CreateTopics] = rd_true,
+ [RD_KAFKAP_DeleteTopics] = rd_true,
+ [RD_KAFKAP_DeleteRecords] = rd_true,
+ [RD_KAFKAP_CreatePartitions] = rd_true,
+ [RD_KAFKAP_DescribeAcls] = rd_true,
+ [RD_KAFKAP_CreateAcls] = rd_true,
+ [RD_KAFKAP_DeleteAcls] = rd_true,
+ [RD_KAFKAP_DescribeConfigs] = rd_true,
+ [RD_KAFKAP_AlterConfigs] = rd_true,
+ [RD_KAFKAP_DeleteGroups] = rd_true,
+ [RD_KAFKAP_ListGroups] = rd_true,
+ [RD_KAFKAP_DescribeGroups] = rd_true,
+ [RD_KAFKAP_DescribeLogDirs] = rd_true,
+ [RD_KAFKAP_IncrementalAlterConfigs] = rd_true,
+ [RD_KAFKAP_AlterPartitionReassignments] = rd_true,
+ [RD_KAFKAP_ListPartitionReassignments] = rd_true,
+ [RD_KAFKAP_OffsetDelete] = rd_true,
+ [RD_KAFKAP_DescribeClientQuotas] = rd_true,
+ [RD_KAFKAP_AlterClientQuotas] = rd_true,
+ [RD_KAFKAP_DescribeUserScramCredentials] = rd_true,
+ [RD_KAFKAP_AlterUserScramCredentials] = rd_true,
+ }};
+ int i;
+ int cnt = 0;
+
+ _st_printf("\"req\": { ");
+ for (i = 0; i < RD_KAFKAP__NUM; i++) {
+ int64_t v;
+
+ if (filter[rkb->rkb_rk->rk_type][i] || filter[2][i])
+ continue;
+
+ v = rd_atomic64_get(&rkb->rkb_c.reqtype[i]);
+ if (!v && filter[3][i])
+ continue; /* Filter out zero values */
+
+ _st_printf("%s\"%s\": %" PRId64, cnt > 0 ? ", " : "",
+ rd_kafka_ApiKey2str(i), v);
+
+ cnt++;
+ }
+ _st_printf(" }, ");
+}
+
+
+/**
+ * Emit all statistics
+ */
+static void rd_kafka_stats_emit_all(rd_kafka_t *rk) {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_topic_t *rkt;
+ rd_ts_t now;
+ rd_kafka_op_t *rko;
+ unsigned int tot_cnt;
+ size_t tot_size;
+ rd_kafka_resp_err_t err;
+ struct _stats_emit stx = {.size = 1024 * 10};
+ struct _stats_emit *st = &stx;
+ struct _stats_total total = {0};
+
+ st->buf = rd_malloc(st->size);
+
+
+ rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);
+ rd_kafka_rdlock(rk);
+
+ now = rd_clock();
+ _st_printf(
+ "{ "
+ "\"name\": \"%s\", "
+ "\"client_id\": \"%s\", "
+ "\"type\": \"%s\", "
+ "\"ts\":%" PRId64
+ ", "
+ "\"time\":%lli, "
+ "\"age\":%" PRId64
+ ", "
+ "\"replyq\":%i, "
+ "\"msg_cnt\":%u, "
+ "\"msg_size\":%" PRIusz
+ ", "
+ "\"msg_max\":%u, "
+ "\"msg_size_max\":%" PRIusz
+ ", "
+ "\"simple_cnt\":%i, "
+ "\"metadata_cache_cnt\":%i, "
+ "\"brokers\":{ " /*open brokers*/,
+ rk->rk_name, rk->rk_conf.client_id_str,
+ rd_kafka_type2str(rk->rk_type), now, (signed long long)time(NULL),
+ now - rk->rk_ts_created, rd_kafka_q_len(rk->rk_rep), tot_cnt,
+ tot_size, rk->rk_curr_msgs.max_cnt, rk->rk_curr_msgs.max_size,
+ rd_atomic32_get(&rk->rk_simple_cnt),
+ rk->rk_metadata_cache.rkmc_cnt);
+
+
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ rd_kafka_toppar_t *rktp;
+ rd_ts_t txidle = -1, rxidle = -1;
+
+ rd_kafka_broker_lock(rkb);
+
+ if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP) {
+ /* Calculate tx and rx idle time in usecs */
+ txidle = rd_atomic64_get(&rkb->rkb_c.ts_send);
+ rxidle = rd_atomic64_get(&rkb->rkb_c.ts_recv);
+
+ if (txidle)
+ txidle = RD_MAX(now - txidle, 0);
+ else
+ txidle = -1;
+
+ if (rxidle)
+ rxidle = RD_MAX(now - rxidle, 0);
+ else
+ rxidle = -1;
+ }
+
+ _st_printf(
+ "%s\"%s\": { " /*open broker*/
+ "\"name\":\"%s\", "
+ "\"nodeid\":%" PRId32
+ ", "
+ "\"nodename\":\"%s\", "
+ "\"source\":\"%s\", "
+ "\"state\":\"%s\", "
+ "\"stateage\":%" PRId64
+ ", "
+ "\"outbuf_cnt\":%i, "
+ "\"outbuf_msg_cnt\":%i, "
+ "\"waitresp_cnt\":%i, "
+ "\"waitresp_msg_cnt\":%i, "
+ "\"tx\":%" PRIu64
+ ", "
+ "\"txbytes\":%" PRIu64
+ ", "
+ "\"txerrs\":%" PRIu64
+ ", "
+ "\"txretries\":%" PRIu64
+ ", "
+ "\"txidle\":%" PRId64
+ ", "
+ "\"req_timeouts\":%" PRIu64
+ ", "
+ "\"rx\":%" PRIu64
+ ", "
+ "\"rxbytes\":%" PRIu64
+ ", "
+ "\"rxerrs\":%" PRIu64
+ ", "
+ "\"rxcorriderrs\":%" PRIu64
+ ", "
+ "\"rxpartial\":%" PRIu64
+ ", "
+ "\"rxidle\":%" PRId64
+ ", "
+ "\"zbuf_grow\":%" PRIu64
+ ", "
+ "\"buf_grow\":%" PRIu64
+ ", "
+ "\"wakeups\":%" PRIu64
+ ", "
+ "\"connects\":%" PRId32
+ ", "
+ "\"disconnects\":%" PRId32 ", ",
+ rkb == TAILQ_FIRST(&rk->rk_brokers) ? "" : ", ",
+ rkb->rkb_name, rkb->rkb_name, rkb->rkb_nodeid,
+ rkb->rkb_nodename, rd_kafka_confsource2str(rkb->rkb_source),
+ rd_kafka_broker_state_names[rkb->rkb_state],
+ rkb->rkb_ts_state ? now - rkb->rkb_ts_state : 0,
+ rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt),
+ rd_atomic32_get(&rkb->rkb_outbufs.rkbq_msg_cnt),
+ rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt),
+ rd_atomic32_get(&rkb->rkb_waitresps.rkbq_msg_cnt),
+ rd_atomic64_get(&rkb->rkb_c.tx),
+ rd_atomic64_get(&rkb->rkb_c.tx_bytes),
+ rd_atomic64_get(&rkb->rkb_c.tx_err),
+ rd_atomic64_get(&rkb->rkb_c.tx_retries), txidle,
+ rd_atomic64_get(&rkb->rkb_c.req_timeouts),
+ rd_atomic64_get(&rkb->rkb_c.rx),
+ rd_atomic64_get(&rkb->rkb_c.rx_bytes),
+ rd_atomic64_get(&rkb->rkb_c.rx_err),
+ rd_atomic64_get(&rkb->rkb_c.rx_corrid_err),
+ rd_atomic64_get(&rkb->rkb_c.rx_partial), rxidle,
+ rd_atomic64_get(&rkb->rkb_c.zbuf_grow),
+ rd_atomic64_get(&rkb->rkb_c.buf_grow),
+ rd_atomic64_get(&rkb->rkb_c.wakeups),
+ rd_atomic32_get(&rkb->rkb_c.connects),
+ rd_atomic32_get(&rkb->rkb_c.disconnects));
+
+ total.tx += rd_atomic64_get(&rkb->rkb_c.tx);
+ total.tx_bytes += rd_atomic64_get(&rkb->rkb_c.tx_bytes);
+ total.rx += rd_atomic64_get(&rkb->rkb_c.rx);
+ total.rx_bytes += rd_atomic64_get(&rkb->rkb_c.rx_bytes);
+
+ rd_kafka_stats_emit_avg(st, "int_latency",
+ &rkb->rkb_avg_int_latency);
+ rd_kafka_stats_emit_avg(st, "outbuf_latency",
+ &rkb->rkb_avg_outbuf_latency);
+ rd_kafka_stats_emit_avg(st, "rtt", &rkb->rkb_avg_rtt);
+ rd_kafka_stats_emit_avg(st, "throttle", &rkb->rkb_avg_throttle);
+
+ rd_kafka_stats_emit_broker_reqs(st, rkb);
+
+ _st_printf("\"toppars\":{ " /*open toppars*/);
+
+ TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
+ _st_printf(
+ "%s\"%.*s-%" PRId32
+ "\": { "
+ "\"topic\":\"%.*s\", "
+ "\"partition\":%" PRId32 "} ",
+ rktp == TAILQ_FIRST(&rkb->rkb_toppars) ? "" : ", ",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+ }
+
+ rd_kafka_broker_unlock(rkb);
+
+ _st_printf(
+ "} " /*close toppars*/
+ "} " /*close broker*/);
+ }
+
+
+ _st_printf(
+ "}, " /* close "brokers" array */
+ "\"topics\":{ ");
+
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ rd_kafka_toppar_t *rktp;
+ int i, j;
+
+ rd_kafka_topic_rdlock(rkt);
+ _st_printf(
+ "%s\"%.*s\": { "
+ "\"topic\":\"%.*s\", "
+ "\"age\":%" PRId64
+ ", "
+ "\"metadata_age\":%" PRId64 ", ",
+ rkt == TAILQ_FIRST(&rk->rk_topics) ? "" : ", ",
+ RD_KAFKAP_STR_PR(rkt->rkt_topic),
+ RD_KAFKAP_STR_PR(rkt->rkt_topic),
+ (now - rkt->rkt_ts_create) / 1000,
+ rkt->rkt_ts_metadata ? (now - rkt->rkt_ts_metadata) / 1000
+ : 0);
+
+ rd_kafka_stats_emit_avg(st, "batchsize",
+ &rkt->rkt_avg_batchsize);
+ rd_kafka_stats_emit_avg(st, "batchcnt", &rkt->rkt_avg_batchcnt);
+
+ _st_printf("\"partitions\":{ " /*open partitions*/);
+
+ for (i = 0; i < rkt->rkt_partition_cnt; i++)
+ rd_kafka_stats_emit_toppar(st, &total, rkt->rkt_p[i],
+ i == 0);
+
+ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, j)
+ rd_kafka_stats_emit_toppar(st, &total, rktp, i + j == 0);
+
+ i += j;
+
+ if (rkt->rkt_ua)
+ rd_kafka_stats_emit_toppar(st, NULL, rkt->rkt_ua,
+ i++ == 0);
+
+ rd_kafka_topic_rdunlock(rkt);
+
+ _st_printf(
+ "} " /*close partitions*/
+ "} " /*close topic*/);
+ }
+ _st_printf("} " /*close topics*/);
+
+ if (rk->rk_cgrp) {
+ rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
+ _st_printf(
+ ", \"cgrp\": { "
+ "\"state\": \"%s\", "
+ "\"stateage\": %" PRId64
+ ", "
+ "\"join_state\": \"%s\", "
+ "\"rebalance_age\": %" PRId64
+ ", "
+ "\"rebalance_cnt\": %d, "
+ "\"rebalance_reason\": \"%s\", "
+ "\"assignment_size\": %d }",
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rkcg->rkcg_ts_statechange
+ ? (now - rkcg->rkcg_ts_statechange) / 1000
+ : 0,
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ rkcg->rkcg_c.ts_rebalance
+ ? (now - rkcg->rkcg_c.ts_rebalance) / 1000
+ : 0,
+ rkcg->rkcg_c.rebalance_cnt, rkcg->rkcg_c.rebalance_reason,
+ rkcg->rkcg_c.assignment_size);
+ }
+
+ if (rd_kafka_is_idempotent(rk)) {
+ _st_printf(
+ ", \"eos\": { "
+ "\"idemp_state\": \"%s\", "
+ "\"idemp_stateage\": %" PRId64
+ ", "
+ "\"txn_state\": \"%s\", "
+ "\"txn_stateage\": %" PRId64
+ ", "
+ "\"txn_may_enq\": %s, "
+ "\"producer_id\": %" PRId64
+ ", "
+ "\"producer_epoch\": %hd, "
+ "\"epoch_cnt\": %d "
+ "}",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
+ (now - rk->rk_eos.ts_idemp_state) / 1000,
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state),
+ (now - rk->rk_eos.ts_txn_state) / 1000,
+ rd_atomic32_get(&rk->rk_eos.txn_may_enq) ? "true" : "false",
+ rk->rk_eos.pid.id, rk->rk_eos.pid.epoch,
+ rk->rk_eos.epoch_cnt);
+ }
+
+ if ((err = rd_atomic32_get(&rk->rk_fatal.err)))
+ _st_printf(
+ ", \"fatal\": { "
+ "\"error\": \"%s\", "
+ "\"reason\": \"%s\", "
+ "\"cnt\": %d "
+ "}",
+ rd_kafka_err2str(err), rk->rk_fatal.errstr,
+ rk->rk_fatal.cnt);
+
+ rd_kafka_rdunlock(rk);
+
+ /* Total counters */
+ _st_printf(
+ ", "
+ "\"tx\":%" PRId64
+ ", "
+ "\"tx_bytes\":%" PRId64
+ ", "
+ "\"rx\":%" PRId64
+ ", "
+ "\"rx_bytes\":%" PRId64
+ ", "
+ "\"txmsgs\":%" PRId64
+ ", "
+ "\"txmsg_bytes\":%" PRId64
+ ", "
+ "\"rxmsgs\":%" PRId64
+ ", "
+ "\"rxmsg_bytes\":%" PRId64,
+ total.tx, total.tx_bytes, total.rx, total.rx_bytes, total.txmsgs,
+ total.txmsg_bytes, total.rxmsgs, total.rxmsg_bytes);
+
+ _st_printf("}" /*close object*/);
+
+
+ /* Enqueue op for application */
+ rko = rd_kafka_op_new(RD_KAFKA_OP_STATS);
+ rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
+ rko->rko_u.stats.json = st->buf;
+ rko->rko_u.stats.json_len = st->of;
+ rd_kafka_q_enq(rk->rk_rep, rko);
+}
+
+
+/**
+ * @brief 1 second generic timer.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_1s_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_t *rk = rkts->rkts_rk;
+
+ /* Scan topic state, message timeouts, etc. */
+ rd_kafka_topic_scan_all(rk, rd_clock());
+
+ /* Sparse connections:
+ * try to maintain at least one connection to the cluster. */
+ if (rk->rk_conf.sparse_connections &&
+ rd_atomic32_get(&rk->rk_broker_up_cnt) == 0)
+ rd_kafka_connect_any(rk, "no cluster connection");
+
+ rd_kafka_coord_cache_expire(&rk->rk_coord_cache);
+}
+
+static void rd_kafka_stats_emit_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_t *rk = rkts->rkts_rk;
+ rd_kafka_stats_emit_all(rk);
+}
+
+
+/**
+ * @brief Periodic metadata refresh callback
+ *
+ * @locality rdkafka main thread
+ */
+static void rd_kafka_metadata_refresh_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_t *rk = rkts->rkts_rk;
+ rd_kafka_resp_err_t err;
+
+ /* High-level consumer:
+ * We need to query both locally known topics and subscribed topics
+ * so that we can detect locally known topics changing partition
+ * count or disappearing, as well as detect previously non-existent
+ * subscribed topics now being available in the cluster. */
+ if (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_cgrp)
+ err = rd_kafka_metadata_refresh_consumer_topics(
+ rk, NULL, "periodic topic and broker list refresh");
+ else
+ err = rd_kafka_metadata_refresh_known_topics(
+ rk, NULL, rd_true /*force*/,
+ "periodic topic and broker list refresh");
+
+
+ if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC &&
+ rd_interval(&rk->rk_suppress.broker_metadata_refresh,
+ 10 * 1000 * 1000 /*10s*/, 0) > 0) {
+ /* If there are no (locally referenced) topics
+ * to query, refresh the broker list.
+ * This avoids getting idle-disconnected for clients
+ * that have not yet referenced a topic and makes
+ * sure such a client has an up to date broker list. */
+ rd_kafka_metadata_refresh_brokers(
+ rk, NULL, "periodic broker list refresh");
+ }
+}
+
+
+
+/**
+ * @brief Wait for background threads to initialize.
+ *
+ * @returns the number of background threads still not initialized.
+ *
+ * @locality app thread calling rd_kafka_new()
+ * @locks none
+ */
+static int rd_kafka_init_wait(rd_kafka_t *rk, int timeout_ms) {
+ struct timespec tspec;
+ int ret;
+
+ rd_timeout_init_timespec(&tspec, timeout_ms);
+
+ mtx_lock(&rk->rk_init_lock);
+ while (rk->rk_init_wait_cnt > 0 &&
+ cnd_timedwait_abs(&rk->rk_init_cnd, &rk->rk_init_lock, &tspec) ==
+ thrd_success)
+ ;
+ ret = rk->rk_init_wait_cnt;
+ mtx_unlock(&rk->rk_init_lock);
+
+ return ret;
+}
+
+
+/**
+ * Main loop for Kafka handler thread.
+ */
+static int rd_kafka_thread_main(void *arg) {
+ rd_kafka_t *rk = arg;
+ rd_kafka_timer_t tmr_1s = RD_ZERO_INIT;
+ rd_kafka_timer_t tmr_stats_emit = RD_ZERO_INIT;
+ rd_kafka_timer_t tmr_metadata_refresh = RD_ZERO_INIT;
+
+ rd_kafka_set_thread_name("main");
+ rd_kafka_set_thread_sysname("rdk:main");
+
+ rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_MAIN);
+
+ (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
+
+ /* Acquire lock (which was held by thread creator during creation)
+ * to synchronise state. */
+ rd_kafka_wrlock(rk);
+ rd_kafka_wrunlock(rk);
+
+ /* 1 second timer for topic scan and connection checking. */
+ rd_kafka_timer_start(&rk->rk_timers, &tmr_1s, 1000000,
+ rd_kafka_1s_tmr_cb, NULL);
+ if (rk->rk_conf.stats_interval_ms)
+ rd_kafka_timer_start(&rk->rk_timers, &tmr_stats_emit,
+ rk->rk_conf.stats_interval_ms * 1000ll,
+ rd_kafka_stats_emit_tmr_cb, NULL);
+ if (rk->rk_conf.metadata_refresh_interval_ms > 0)
+ rd_kafka_timer_start(&rk->rk_timers, &tmr_metadata_refresh,
+ rk->rk_conf.metadata_refresh_interval_ms *
+ 1000ll,
+ rd_kafka_metadata_refresh_cb, NULL);
+
+ if (rk->rk_cgrp)
+ rd_kafka_q_fwd_set(rk->rk_cgrp->rkcg_ops, rk->rk_ops);
+
+ if (rd_kafka_is_idempotent(rk))
+ rd_kafka_idemp_init(rk);
+
+ mtx_lock(&rk->rk_init_lock);
+ rk->rk_init_wait_cnt--;
+ cnd_broadcast(&rk->rk_init_cnd);
+ mtx_unlock(&rk->rk_init_lock);
+
+ while (likely(!rd_kafka_terminating(rk) || rd_kafka_q_len(rk->rk_ops) ||
+ (rk->rk_cgrp && (rk->rk_cgrp->rkcg_state !=
+ RD_KAFKA_CGRP_STATE_TERM)))) {
+ rd_ts_t sleeptime = rd_kafka_timers_next(
+ &rk->rk_timers, 1000 * 1000 /*1s*/, 1 /*lock*/);
+ rd_kafka_q_serve(rk->rk_ops, (int)(sleeptime / 1000), 0,
+ RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
+ if (rk->rk_cgrp) /* FIXME: move to timer-triggered */
+ rd_kafka_cgrp_serve(rk->rk_cgrp);
+ rd_kafka_timers_run(&rk->rk_timers, RD_POLL_NOWAIT);
+ }
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE",
+ "Internal main thread terminating");
+
+ if (rd_kafka_is_idempotent(rk))
+ rd_kafka_idemp_term(rk);
+
+ rd_kafka_q_disable(rk->rk_ops);
+ rd_kafka_q_purge(rk->rk_ops);
+
+ rd_kafka_timer_stop(&rk->rk_timers, &tmr_1s, 1);
+ if (rk->rk_conf.stats_interval_ms)
+ rd_kafka_timer_stop(&rk->rk_timers, &tmr_stats_emit, 1);
+ rd_kafka_timer_stop(&rk->rk_timers, &tmr_metadata_refresh, 1);
+
+ /* Synchronise state */
+ rd_kafka_wrlock(rk);
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_MAIN);
+
+ rd_kafka_destroy_internal(rk);
+
+ rd_kafka_dbg(rk, GENERIC, "TERMINATE",
+ "Internal main thread termination done");
+
+ rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
+
+ return 0;
+}
+
+
+void rd_kafka_term_sig_handler(int sig) {
+ /* nop */
+}
+
+
+rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
+ rd_kafka_conf_t *app_conf,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_t *rk;
+ static rd_atomic32_t rkid;
+ rd_kafka_conf_t *conf;
+ rd_kafka_resp_err_t ret_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int ret_errno = 0;
+ const char *conf_err;
+#ifndef _WIN32
+ sigset_t newset, oldset;
+#endif
+ char builtin_features[128];
+ size_t bflen;
+
+ rd_kafka_global_init();
+
+ /* rd_kafka_new() takes ownership of the provided \p app_conf
+ * object if rd_kafka_new() succeeds.
+ * Since \p app_conf is optional we allocate a default configuration
+ * object here if \p app_conf is NULL.
+ * The configuration object itself is struct-copied later
+ * leaving the default *conf pointer to be ready for freeing.
+ * In case new() fails and app_conf was specified we will clear out
+ * rk_conf to avoid double-freeing from destroy_internal() and the
+ * user's eventual call to rd_kafka_conf_destroy().
+ * This is all a bit tricky but that's the nature of
+ * legacy interfaces. */
+ if (!app_conf)
+ conf = rd_kafka_conf_new();
+ else
+ conf = app_conf;
+
+ /* Verify and finalize configuration */
+ if ((conf_err = rd_kafka_conf_finalize(type, conf))) {
+ /* Incompatible configuration settings */
+ rd_snprintf(errstr, errstr_size, "%s", conf_err);
+ if (!app_conf)
+ rd_kafka_conf_destroy(conf);
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
+ return NULL;
+ }
+
+
+ rd_kafka_global_cnt_incr();
+
+ /*
+ * Set up the handle.
+ */
+ rk = rd_calloc(1, sizeof(*rk));
+
+ rk->rk_type = type;
+ rk->rk_ts_created = rd_clock();
+
+ /* Struct-copy the config object. */
+ rk->rk_conf = *conf;
+ if (!app_conf)
+ rd_free(conf); /* Free the base config struct only,
+ * not its fields since they were copied to
+ * rk_conf just above. Those fields are
+ * freed from rd_kafka_destroy_internal()
+ * as the rk itself is destroyed. */
+
+ /* Seed PRNG, don't bother about HAVE_RAND_R, since it is pretty cheap.
+ */
+ if (rk->rk_conf.enable_random_seed)
+ call_once(&rd_kafka_global_srand_once, rd_kafka_global_srand);
+
+ /* Call on_new() interceptors */
+ rd_kafka_interceptors_on_new(rk, &rk->rk_conf);
+
+ rwlock_init(&rk->rk_lock);
+ mtx_init(&rk->rk_conf.sasl.lock, mtx_plain);
+ mtx_init(&rk->rk_internal_rkb_lock, mtx_plain);
+
+ cnd_init(&rk->rk_broker_state_change_cnd);
+ mtx_init(&rk->rk_broker_state_change_lock, mtx_plain);
+ rd_list_init(&rk->rk_broker_state_change_waiters, 8,
+ rd_kafka_enq_once_trigger_destroy);
+
+ cnd_init(&rk->rk_init_cnd);
+ mtx_init(&rk->rk_init_lock, mtx_plain);
+
+ rd_interval_init(&rk->rk_suppress.no_idemp_brokers);
+ rd_interval_init(&rk->rk_suppress.broker_metadata_refresh);
+ rd_interval_init(&rk->rk_suppress.sparse_connect_random);
+ mtx_init(&rk->rk_suppress.sparse_connect_lock, mtx_plain);
+
+ rd_atomic64_init(&rk->rk_ts_last_poll, rk->rk_ts_created);
+ rd_atomic32_init(&rk->rk_flushing, 0);
+
+ rk->rk_rep = rd_kafka_q_new(rk);
+ rk->rk_ops = rd_kafka_q_new(rk);
+ rk->rk_ops->rkq_serve = rd_kafka_poll_cb;
+ rk->rk_ops->rkq_opaque = rk;
+
+ if (rk->rk_conf.log_queue) {
+ rk->rk_logq = rd_kafka_q_new(rk);
+ rk->rk_logq->rkq_serve = rd_kafka_poll_cb;
+ rk->rk_logq->rkq_opaque = rk;
+ }
+
+ TAILQ_INIT(&rk->rk_brokers);
+ TAILQ_INIT(&rk->rk_topics);
+ rd_kafka_timers_init(&rk->rk_timers, rk, rk->rk_ops);
+ rd_kafka_metadata_cache_init(rk);
+ rd_kafka_coord_cache_init(&rk->rk_coord_cache,
+ rk->rk_conf.metadata_max_age_ms);
+ rd_kafka_coord_reqs_init(rk);
+
+ if (rk->rk_conf.dr_cb || rk->rk_conf.dr_msg_cb)
+ rk->rk_drmode = RD_KAFKA_DR_MODE_CB;
+ else if (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR)
+ rk->rk_drmode = RD_KAFKA_DR_MODE_EVENT;
+ else
+ rk->rk_drmode = RD_KAFKA_DR_MODE_NONE;
+ if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE)
+ rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_DR;
+
+ if (rk->rk_conf.rebalance_cb)
+ rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_REBALANCE;
+ if (rk->rk_conf.offset_commit_cb)
+ rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_OFFSET_COMMIT;
+ if (rk->rk_conf.error_cb)
+ rk->rk_conf.enabled_events |= RD_KAFKA_EVENT_ERROR;
+#if WITH_SASL_OAUTHBEARER
+ if (rk->rk_conf.sasl.enable_oauthbearer_unsecure_jwt &&
+ !rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
+ rd_kafka_conf_set_oauthbearer_token_refresh_cb(
+ &rk->rk_conf, rd_kafka_oauthbearer_unsecured_token);
+
+ if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb &&
+ rk->rk_conf.sasl.oauthbearer.method !=
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC)
+ rk->rk_conf.enabled_events |=
+ RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH;
+#endif
+
+#if WITH_OAUTHBEARER_OIDC
+ if (rk->rk_conf.sasl.oauthbearer.method ==
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
+ !rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
+ rd_kafka_conf_set_oauthbearer_token_refresh_cb(
+ &rk->rk_conf, rd_kafka_oidc_token_refresh_cb);
+#endif
+
+ rk->rk_controllerid = -1;
+
+ /* Admin client defaults */
+ rk->rk_conf.admin.request_timeout_ms = rk->rk_conf.socket_timeout_ms;
+
+ if (rk->rk_conf.debug)
+ rk->rk_conf.log_level = LOG_DEBUG;
+
+ rd_snprintf(rk->rk_name, sizeof(rk->rk_name), "%s#%s-%i",
+ rk->rk_conf.client_id_str, rd_kafka_type2str(rk->rk_type),
+ rd_atomic32_add(&rkid, 1));
+
+ /* Construct clientid kafka string */
+ rk->rk_client_id = rd_kafkap_str_new(rk->rk_conf.client_id_str, -1);
+
+ /* Convert group.id to kafka string (may be NULL) */
+ rk->rk_group_id = rd_kafkap_str_new(rk->rk_conf.group_id_str, -1);
+
+ /* Config fixups */
+ rk->rk_conf.queued_max_msg_bytes =
+ (int64_t)rk->rk_conf.queued_max_msg_kbytes * 1000ll;
+
+ /* Enable api.version.request=true if fallback.broker.version
+ * indicates a supporting broker. */
+ if (rd_kafka_ApiVersion_is_queryable(
+ rk->rk_conf.broker_version_fallback))
+ rk->rk_conf.api_version_request = 1;
+
+ if (rk->rk_type == RD_KAFKA_PRODUCER) {
+ mtx_init(&rk->rk_curr_msgs.lock, mtx_plain);
+ cnd_init(&rk->rk_curr_msgs.cnd);
+ rk->rk_curr_msgs.max_cnt = rk->rk_conf.queue_buffering_max_msgs;
+ if ((unsigned long long)rk->rk_conf.queue_buffering_max_kbytes *
+ 1024 >
+ (unsigned long long)SIZE_MAX) {
+ rk->rk_curr_msgs.max_size = SIZE_MAX;
+ rd_kafka_log(rk, LOG_WARNING, "QUEUESIZE",
+ "queue.buffering.max.kbytes adjusted "
+ "to system SIZE_MAX limit %" PRIusz
+ " bytes",
+ rk->rk_curr_msgs.max_size);
+ } else {
+ rk->rk_curr_msgs.max_size =
+ (size_t)rk->rk_conf.queue_buffering_max_kbytes *
+ 1024;
+ }
+ }
+
+ if (rd_kafka_assignors_init(rk, errstr, errstr_size) == -1) {
+ ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
+ ret_errno = EINVAL;
+ goto fail;
+ }
+
+ /* Create Mock cluster */
+ rd_atomic32_init(&rk->rk_mock.cluster_cnt, 0);
+ if (rk->rk_conf.mock.broker_cnt > 0) {
+ const char *mock_bootstraps;
+ rk->rk_mock.cluster =
+ rd_kafka_mock_cluster_new(rk, rk->rk_conf.mock.broker_cnt);
+
+ if (!rk->rk_mock.cluster) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to create mock cluster, see logs");
+ ret_err = RD_KAFKA_RESP_ERR__FAIL;
+ ret_errno = EINVAL;
+ goto fail;
+ }
+
+ mock_bootstraps =
+ rd_kafka_mock_cluster_bootstraps(rk->rk_mock.cluster),
+ rd_kafka_log(rk, LOG_NOTICE, "MOCK",
+ "Mock cluster enabled: "
+ "original bootstrap.servers and security.protocol "
+ "ignored and replaced with %s",
+ mock_bootstraps);
+
+ /* Overwrite bootstrap.servers and connection settings */
+ if (rd_kafka_conf_set(&rk->rk_conf, "bootstrap.servers",
+ mock_bootstraps, NULL,
+ 0) != RD_KAFKA_CONF_OK)
+ rd_assert(!"failed to replace mock bootstrap.servers");
+
+ if (rd_kafka_conf_set(&rk->rk_conf, "security.protocol",
+ "plaintext", NULL, 0) != RD_KAFKA_CONF_OK)
+ rd_assert(!"failed to reset mock security.protocol");
+
+ rk->rk_conf.security_protocol = RD_KAFKA_PROTO_PLAINTEXT;
+
+ /* Apply default RTT to brokers */
+ if (rk->rk_conf.mock.broker_rtt)
+ rd_kafka_mock_broker_set_rtt(
+ rk->rk_mock.cluster, -1 /*all brokers*/,
+ rk->rk_conf.mock.broker_rtt);
+ }
+
+ if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL ||
+ rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT) {
+ /* Select SASL provider */
+ if (rd_kafka_sasl_select_provider(rk, errstr, errstr_size) ==
+ -1) {
+ ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
+ ret_errno = EINVAL;
+ goto fail;
+ }
+
+ /* Initialize SASL provider */
+ if (rd_kafka_sasl_init(rk, errstr, errstr_size) == -1) {
+ rk->rk_conf.sasl.provider = NULL;
+ ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
+ ret_errno = EINVAL;
+ goto fail;
+ }
+ }
+
+#if WITH_SSL
+ if (rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SSL ||
+ rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL) {
+ /* Create SSL context */
+ if (rd_kafka_ssl_ctx_init(rk, errstr, errstr_size) == -1) {
+ ret_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
+ ret_errno = EINVAL;
+ goto fail;
+ }
+ }
+#endif
+
+ if (type == RD_KAFKA_CONSUMER) {
+ rd_kafka_assignment_init(rk);
+
+ if (RD_KAFKAP_STR_LEN(rk->rk_group_id) > 0) {
+ /* Create consumer group handle */
+ rk->rk_cgrp = rd_kafka_cgrp_new(rk, rk->rk_group_id,
+ rk->rk_client_id);
+ rk->rk_consumer.q =
+ rd_kafka_q_keep(rk->rk_cgrp->rkcg_q);
+ } else {
+ /* Legacy consumer */
+ rk->rk_consumer.q = rd_kafka_q_keep(rk->rk_rep);
+ }
+
+ } else if (type == RD_KAFKA_PRODUCER) {
+ rk->rk_eos.transactional_id =
+ rd_kafkap_str_new(rk->rk_conf.eos.transactional_id, -1);
+ }
+
+#ifndef _WIN32
+ /* Block all signals in newly created threads.
+ * To avoid race condition we block all signals in the calling
+ * thread, which the new thread will inherit its sigmask from,
+ * and then restore the original sigmask of the calling thread when
+ * we're done creating the thread. */
+ sigemptyset(&oldset);
+ sigfillset(&newset);
+ if (rk->rk_conf.term_sig) {
+ struct sigaction sa_term = {.sa_handler =
+ rd_kafka_term_sig_handler};
+ sigaction(rk->rk_conf.term_sig, &sa_term, NULL);
+ }
+ pthread_sigmask(SIG_SETMASK, &newset, &oldset);
+#endif
+
+ /* Create background thread and queue if background_event_cb()
+ * RD_KAFKA_EVENT_BACKGROUND has been enabled.
+ * Do this before creating the main thread since after
+ * the main thread is created it is no longer trivial to error
+ * out from rd_kafka_new(). */
+ if (rk->rk_conf.background_event_cb ||
+ (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_BACKGROUND)) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_wrlock(rk);
+ if (!rk->rk_background.q)
+ err = rd_kafka_background_thread_create(rk, errstr,
+ errstr_size);
+ rd_kafka_wrunlock(rk);
+ if (err)
+ goto fail;
+ }
+
+ /* Lock handle here to synchronise state, i.e., hold off
+ * the thread until we've finalized the handle. */
+ rd_kafka_wrlock(rk);
+
+ /* Create handler thread */
+ mtx_lock(&rk->rk_init_lock);
+ rk->rk_init_wait_cnt++;
+ if ((thrd_create(&rk->rk_thread, rd_kafka_thread_main, rk)) !=
+ thrd_success) {
+ rk->rk_init_wait_cnt--;
+ ret_err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ ret_errno = errno;
+ if (errstr)
+ rd_snprintf(errstr, errstr_size,
+ "Failed to create thread: %s (%i)",
+ rd_strerror(errno), errno);
+ mtx_unlock(&rk->rk_init_lock);
+ rd_kafka_wrunlock(rk);
+#ifndef _WIN32
+ /* Restore sigmask of caller */
+ pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+#endif
+ goto fail;
+ }
+
+ mtx_unlock(&rk->rk_init_lock);
+ rd_kafka_wrunlock(rk);
+
+ /*
+ * @warning `goto fail` is prohibited past this point
+ */
+
+ mtx_lock(&rk->rk_internal_rkb_lock);
+ rk->rk_internal_rkb =
+ rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT,
+ "", 0, RD_KAFKA_NODEID_UA);
+ mtx_unlock(&rk->rk_internal_rkb_lock);
+
+ /* Add initial list of brokers from configuration */
+ if (rk->rk_conf.brokerlist) {
+ if (rd_kafka_brokers_add0(rk, rk->rk_conf.brokerlist) == 0)
+ rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
+ "No brokers configured");
+ }
+
+#ifndef _WIN32
+ /* Restore sigmask of caller */
+ pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+#endif
+
+ /* Wait for background threads to fully initialize so that
+ * the client instance is fully functional at the time it is
+ * returned from the constructor. */
+ if (rd_kafka_init_wait(rk, 60 * 1000) != 0) {
+ /* This should never happen unless there is a bug
+ * or the OS is not scheduling the background threads.
+ * Either case there is no point in handling this gracefully
+ * in the current state since the thread joins are likely
+ * to hang as well. */
+ mtx_lock(&rk->rk_init_lock);
+ rd_kafka_log(rk, LOG_CRIT, "INIT",
+ "Failed to initialize %s: "
+ "%d background thread(s) did not initialize "
+ "within 60 seconds",
+ rk->rk_name, rk->rk_init_wait_cnt);
+ if (errstr)
+ rd_snprintf(errstr, errstr_size,
+ "Timed out waiting for "
+ "%d background thread(s) to initialize",
+ rk->rk_init_wait_cnt);
+ mtx_unlock(&rk->rk_init_lock);
+
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
+ EDEADLK);
+ return NULL;
+ }
+
+ rk->rk_initialized = 1;
+
+ bflen = sizeof(builtin_features);
+ if (rd_kafka_conf_get(&rk->rk_conf, "builtin.features",
+ builtin_features, &bflen) != RD_KAFKA_CONF_OK)
+ rd_snprintf(builtin_features, sizeof(builtin_features), "?");
+ rd_kafka_dbg(rk, ALL, "INIT",
+ "librdkafka v%s (0x%x) %s initialized "
+ "(builtin.features %s, %s, debug 0x%x)",
+ rd_kafka_version_str(), rd_kafka_version(), rk->rk_name,
+ builtin_features, BUILT_WITH, rk->rk_conf.debug);
+
+ /* Log warnings for deprecated configuration */
+ rd_kafka_conf_warn(rk);
+
+ /* Debug dump configuration */
+ if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) {
+ rd_kafka_anyconf_dump_dbg(rk, _RK_GLOBAL, &rk->rk_conf,
+ "Client configuration");
+ if (rk->rk_conf.topic_conf)
+ rd_kafka_anyconf_dump_dbg(
+ rk, _RK_TOPIC, rk->rk_conf.topic_conf,
+ "Default topic configuration");
+ }
+
+ /* Free user supplied conf's base pointer on success,
+ * but not the actual allocated fields since the struct
+ * will have been copied in its entirety above. */
+ if (app_conf)
+ rd_free(app_conf);
+ rd_kafka_set_last_error(0, 0);
+
+ return rk;
+
+fail:
+ /*
+ * Error out and clean up
+ */
+
+ /*
+ * Tell background thread to terminate and wait for it to return.
+ */
+ rd_atomic32_set(&rk->rk_terminate, RD_KAFKA_DESTROY_F_TERMINATE);
+
+ /* Terminate SASL provider */
+ if (rk->rk_conf.sasl.provider)
+ rd_kafka_sasl_term(rk);
+
+ if (rk->rk_background.thread) {
+ int res;
+ thrd_join(rk->rk_background.thread, &res);
+ rd_kafka_q_destroy_owner(rk->rk_background.q);
+ }
+
+ /* If on_new() interceptors have been called we also need
+ * to allow interceptor clean-up by calling on_destroy() */
+ rd_kafka_interceptors_on_destroy(rk);
+
+ /* If rk_conf is a struct-copy of the application configuration
+ * we need to avoid rk_conf fields from being freed from
+ * rd_kafka_destroy_internal() since they belong to app_conf.
+ * However, there are some internal fields, such as interceptors,
+ * that belong to rk_conf and thus needs to be cleaned up.
+ * Legacy APIs, sigh.. */
+ if (app_conf) {
+ rd_kafka_assignors_term(rk);
+ rd_kafka_interceptors_destroy(&rk->rk_conf);
+ memset(&rk->rk_conf, 0, sizeof(rk->rk_conf));
+ }
+
+ rd_kafka_destroy_internal(rk);
+ rd_kafka_destroy_final(rk);
+
+ rd_kafka_set_last_error(ret_err, ret_errno);
+
+ return NULL;
+}
+
+
+
+/**
+ * Counts usage of the legacy/simple consumer (rd_kafka_consume_start() with
+ * friends) since it does not have an API for stopping the cgrp we will need to
+ * sort that out automatically in the background when all consumption
+ * has stopped.
+ *
+ * Returns 0 if a High level consumer is already instantiated
+ * which means a Simple consumer cannot co-operate with it, else 1.
+ *
+ * A rd_kafka_t handle can never migrate from simple to high-level, or
+ * vice versa, so we dont need a ..consumer_del().
+ */
+int rd_kafka_simple_consumer_add(rd_kafka_t *rk) {
+ if (rd_atomic32_get(&rk->rk_simple_cnt) < 0)
+ return 0;
+
+ return (int)rd_atomic32_add(&rk->rk_simple_cnt, 1);
+}
+
+
+
+/**
+ * rktp fetch is split up in these parts:
+ * * application side:
+ * * broker side (handled by current leader broker thread for rktp):
+ * - the fetch state, initial offset, etc.
+ * - fetching messages, updating fetched offset, etc.
+ * - offset commits
+ *
+ * Communication between the two are:
+ * app side -> rdkafka main side: rktp_ops
+ * broker thread -> app side: rktp_fetchq
+ *
+ * There is no shared state between these threads, instead
+ * state is communicated through the two op queues, and state synchronization
+ * is performed by version barriers.
+ *
+ */
+
+static RD_UNUSED int rd_kafka_consume_start0(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t offset,
+ rd_kafka_q_t *rkq) {
+ rd_kafka_toppar_t *rktp;
+
+ if (partition < 0) {
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
+ ESRCH);
+ return -1;
+ }
+
+ if (!rd_kafka_simple_consumer_add(rkt->rkt_rk)) {
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
+ return -1;
+ }
+
+ rd_kafka_topic_wrlock(rkt);
+ rktp = rd_kafka_toppar_desired_add(rkt, partition);
+ rd_kafka_topic_wrunlock(rkt);
+
+ /* Verify offset */
+ if (offset == RD_KAFKA_OFFSET_BEGINNING ||
+ offset == RD_KAFKA_OFFSET_END ||
+ offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
+ /* logical offsets */
+
+ } else if (offset == RD_KAFKA_OFFSET_STORED) {
+ /* offset manager */
+
+ if (rkt->rkt_conf.offset_store_method ==
+ RD_KAFKA_OFFSET_METHOD_BROKER &&
+ RD_KAFKAP_STR_IS_NULL(rkt->rkt_rk->rk_group_id)) {
+ /* Broker based offsets require a group id. */
+ rd_kafka_toppar_destroy(rktp);
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ EINVAL);
+ return -1;
+ }
+
+ } else if (offset < 0) {
+ rd_kafka_toppar_destroy(rktp);
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
+ return -1;
+ }
+
+ rd_kafka_toppar_op_fetch_start(rktp, RD_KAFKA_FETCH_POS(offset, -1),
+ rkq, RD_KAFKA_NO_REPLYQ);
+
+ rd_kafka_toppar_destroy(rktp);
+
+ rd_kafka_set_last_error(0, 0);
+ return 0;
+}
+
+
+
+int rd_kafka_consume_start(rd_kafka_topic_t *app_rkt,
+ int32_t partition,
+ int64_t offset) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "START",
+ "Start consuming partition %" PRId32, partition);
+ return rd_kafka_consume_start0(rkt, partition, offset, NULL);
+}
+
+int rd_kafka_consume_start_queue(rd_kafka_topic_t *app_rkt,
+ int32_t partition,
+ int64_t offset,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+
+ return rd_kafka_consume_start0(rkt, partition, offset, rkqu->rkqu_q);
+}
+
+
+
+static RD_UNUSED int rd_kafka_consume_stop0(rd_kafka_toppar_t *rktp) {
+ rd_kafka_q_t *tmpq = NULL;
+ rd_kafka_resp_err_t err;
+
+ rd_kafka_topic_wrlock(rktp->rktp_rkt);
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_toppar_desired_del(rktp);
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_topic_wrunlock(rktp->rktp_rkt);
+
+ tmpq = rd_kafka_q_new(rktp->rktp_rkt->rkt_rk);
+
+ rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_REPLYQ(tmpq, 0));
+
+ /* Synchronisation: Wait for stop reply from broker thread */
+ err = rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
+ rd_kafka_q_destroy_owner(tmpq);
+
+ rd_kafka_set_last_error(err, err ? EINVAL : 0);
+
+ return err ? -1 : 0;
+}
+
+
+int rd_kafka_consume_stop(rd_kafka_topic_t *app_rkt, int32_t partition) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_toppar_t *rktp;
+ int r;
+
+ if (partition == RD_KAFKA_PARTITION_UA) {
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
+ return -1;
+ }
+
+ rd_kafka_topic_wrlock(rkt);
+ if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
+ !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
+ rd_kafka_topic_wrunlock(rkt);
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
+ ESRCH);
+ return -1;
+ }
+ rd_kafka_topic_wrunlock(rkt);
+
+ r = rd_kafka_consume_stop0(rktp);
+ /* set_last_error() called by stop0() */
+
+ rd_kafka_toppar_destroy(rktp);
+
+ return r;
+}
+
+
+
+rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *app_rkt,
+ int32_t partition,
+ int64_t offset,
+ int timeout_ms) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_q_t *tmpq = NULL;
+ rd_kafka_resp_err_t err;
+ rd_kafka_replyq_t replyq = RD_KAFKA_NO_REPLYQ;
+
+ /* FIXME: simple consumer check */
+
+ if (partition == RD_KAFKA_PARTITION_UA)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ rd_kafka_topic_rdlock(rkt);
+ if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0)) &&
+ !(rktp = rd_kafka_toppar_desired_get(rkt, partition))) {
+ rd_kafka_topic_rdunlock(rkt);
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ }
+ rd_kafka_topic_rdunlock(rkt);
+
+ if (timeout_ms) {
+ tmpq = rd_kafka_q_new(rkt->rkt_rk);
+ replyq = RD_KAFKA_REPLYQ(tmpq, 0);
+ }
+
+ if ((err = rd_kafka_toppar_op_seek(rktp, RD_KAFKA_FETCH_POS(offset, -1),
+ replyq))) {
+ if (tmpq)
+ rd_kafka_q_destroy_owner(tmpq);
+ rd_kafka_toppar_destroy(rktp);
+ return err;
+ }
+
+ rd_kafka_toppar_destroy(rktp);
+
+ if (tmpq) {
+ err = rd_kafka_q_wait_result(tmpq, timeout_ms);
+ rd_kafka_q_destroy_owner(tmpq);
+ return err;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+rd_kafka_error_t *
+rd_kafka_seek_partitions(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions,
+ int timeout_ms) {
+ rd_kafka_q_t *tmpq = NULL;
+ rd_kafka_topic_partition_t *rktpar;
+ rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
+ int cnt = 0;
+
+ if (rk->rk_type != RD_KAFKA_CONSUMER)
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Must only be used on consumer instance");
+
+ if (!partitions || partitions->cnt == 0)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "partitions must be specified");
+
+ if (timeout_ms)
+ tmpq = rd_kafka_q_new(rk);
+
+ RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) {
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_resp_err_t err;
+
+ rktp = rd_kafka_toppar_get2(
+ rk, rktpar->topic, rktpar->partition,
+ rd_false /*no-ua-on-miss*/, rd_false /*no-create-on-miss*/);
+ if (!rktp) {
+ rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ continue;
+ }
+
+ err = rd_kafka_toppar_op_seek(
+ rktp, rd_kafka_topic_partition_get_fetch_pos(rktpar),
+ RD_KAFKA_REPLYQ(tmpq, 0));
+ if (err) {
+ rktpar->err = err;
+ } else {
+ rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
+ cnt++;
+ }
+
+ rd_kafka_toppar_destroy(rktp); /* refcnt from toppar_get2() */
+ }
+
+ if (!timeout_ms)
+ return NULL;
+
+
+ while (cnt > 0) {
+ rd_kafka_op_t *rko;
+
+ rko =
+ rd_kafka_q_pop(tmpq, rd_timeout_remains_us(abs_timeout), 0);
+ if (!rko) {
+ rd_kafka_q_destroy_owner(tmpq);
+
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Timed out waiting for %d remaining partition "
+ "seek(s) to finish",
+ cnt);
+ }
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) {
+ rd_kafka_q_destroy_owner(tmpq);
+ rd_kafka_op_destroy(rko);
+
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__DESTROY,
+ "Instance is terminating");
+ }
+
+ rd_assert(rko->rko_rktp);
+
+ rktpar = rd_kafka_topic_partition_list_find(
+ partitions, rko->rko_rktp->rktp_rkt->rkt_topic->str,
+ rko->rko_rktp->rktp_partition);
+ rd_assert(rktpar);
+
+ rktpar->err = rko->rko_err;
+
+ rd_kafka_op_destroy(rko);
+
+ cnt--;
+ }
+
+ rd_kafka_q_destroy_owner(tmpq);
+
+ return NULL;
+}
+
+
+
+static ssize_t rd_kafka_consume_batch0(rd_kafka_q_t *rkq,
+ int timeout_ms,
+ rd_kafka_message_t **rkmessages,
+ size_t rkmessages_size) {
+ /* Populate application's rkmessages array. */
+ return rd_kafka_q_serve_rkmessages(rkq, timeout_ms, rkmessages,
+ rkmessages_size);
+}
+
+
+ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *app_rkt,
+ int32_t partition,
+ int timeout_ms,
+ rd_kafka_message_t **rkmessages,
+ size_t rkmessages_size) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_toppar_t *rktp;
+ ssize_t cnt;
+
+ /* Get toppar */
+ rd_kafka_topic_rdlock(rkt);
+ rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/);
+ if (unlikely(!rktp))
+ rktp = rd_kafka_toppar_desired_get(rkt, partition);
+ rd_kafka_topic_rdunlock(rkt);
+
+ if (unlikely(!rktp)) {
+ /* No such toppar known */
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
+ ESRCH);
+ return -1;
+ }
+
+ /* Populate application's rkmessages array. */
+ cnt = rd_kafka_q_serve_rkmessages(rktp->rktp_fetchq, timeout_ms,
+ rkmessages, rkmessages_size);
+
+ rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */
+
+ rd_kafka_set_last_error(0, 0);
+
+ return cnt;
+}
+
+ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu,
+ int timeout_ms,
+ rd_kafka_message_t **rkmessages,
+ size_t rkmessages_size) {
+ /* Populate application's rkmessages array. */
+ return rd_kafka_consume_batch0(rkqu->rkqu_q, timeout_ms, rkmessages,
+ rkmessages_size);
+}
+
+
+struct consume_ctx {
+ void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque);
+ void *opaque;
+};
+
+
+/**
+ * Trampoline for application's consume_cb()
+ */
+static rd_kafka_op_res_t rd_kafka_consume_cb(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque) {
+ struct consume_ctx *ctx = opaque;
+ rd_kafka_message_t *rkmessage;
+
+ if (unlikely(rd_kafka_op_version_outdated(rko, 0)) ||
+ rko->rko_type == RD_KAFKA_OP_BARRIER) {
+ rd_kafka_op_destroy(rko);
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ rkmessage = rd_kafka_message_get(rko);
+
+ rd_kafka_fetch_op_app_prepare(rk, rko);
+
+ ctx->consume_cb(rkmessage, ctx->opaque);
+
+ rd_kafka_op_destroy(rko);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+
+static rd_kafka_op_res_t rd_kafka_consume_callback0(
+ rd_kafka_q_t *rkq,
+ int timeout_ms,
+ int max_cnt,
+ void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque),
+ void *opaque) {
+ struct consume_ctx ctx = {.consume_cb = consume_cb, .opaque = opaque};
+ rd_kafka_op_res_t res;
+
+ if (timeout_ms)
+ rd_kafka_app_poll_blocking(rkq->rkq_rk);
+
+ res = rd_kafka_q_serve(rkq, timeout_ms, max_cnt, RD_KAFKA_Q_CB_RETURN,
+ rd_kafka_consume_cb, &ctx);
+
+ rd_kafka_app_polled(rkq->rkq_rk);
+
+ return res;
+}
+
+
+int rd_kafka_consume_callback(rd_kafka_topic_t *app_rkt,
+ int32_t partition,
+ int timeout_ms,
+ void (*consume_cb)(rd_kafka_message_t *rkmessage,
+ void *opaque),
+ void *opaque) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_toppar_t *rktp;
+ int r;
+
+ /* Get toppar */
+ rd_kafka_topic_rdlock(rkt);
+ rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/);
+ if (unlikely(!rktp))
+ rktp = rd_kafka_toppar_desired_get(rkt, partition);
+ rd_kafka_topic_rdunlock(rkt);
+
+ if (unlikely(!rktp)) {
+ /* No such toppar known */
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
+ ESRCH);
+ return -1;
+ }
+
+ r = rd_kafka_consume_callback0(rktp->rktp_fetchq, timeout_ms,
+ rkt->rkt_conf.consume_callback_max_msgs,
+ consume_cb, opaque);
+
+ rd_kafka_toppar_destroy(rktp);
+
+ rd_kafka_set_last_error(0, 0);
+
+ return r;
+}
+
+
+
+int rd_kafka_consume_callback_queue(
+ rd_kafka_queue_t *rkqu,
+ int timeout_ms,
+ void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque),
+ void *opaque) {
+ return rd_kafka_consume_callback0(rkqu->rkqu_q, timeout_ms, 0,
+ consume_cb, opaque);
+}
+
+
+/**
+ * Serve queue 'rkq' and return one message.
+ * By serving the queue it will also call any registered callbacks
+ * registered for matching events, this includes consumer_cb()
+ * in which case no message will be returned.
+ */
+static rd_kafka_message_t *
+rd_kafka_consume0(rd_kafka_t *rk, rd_kafka_q_t *rkq, int timeout_ms) {
+ rd_kafka_op_t *rko;
+ rd_kafka_message_t *rkmessage = NULL;
+ rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
+
+ if (timeout_ms)
+ rd_kafka_app_poll_blocking(rk);
+
+ rd_kafka_yield_thread = 0;
+ while ((
+ rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0))) {
+ rd_kafka_op_res_t res;
+
+ res =
+ rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL);
+
+ if (res == RD_KAFKA_OP_RES_PASS)
+ break;
+
+ if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
+ rd_kafka_yield_thread)) {
+ /* Callback called rd_kafka_yield(), we must
+ * stop dispatching the queue and return. */
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INTR, EINTR);
+ rd_kafka_app_polled(rk);
+ return NULL;
+ }
+
+ /* Message was handled by callback. */
+ continue;
+ }
+
+ if (!rko) {
+ /* Timeout reached with no op returned. */
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__TIMED_OUT,
+ ETIMEDOUT);
+ rd_kafka_app_polled(rk);
+ return NULL;
+ }
+
+ rd_kafka_assert(rk, rko->rko_type == RD_KAFKA_OP_FETCH ||
+ rko->rko_type == RD_KAFKA_OP_CONSUMER_ERR);
+
+ /* Get rkmessage from rko */
+ rkmessage = rd_kafka_message_get(rko);
+
+ /* Store offset, etc */
+ rd_kafka_fetch_op_app_prepare(rk, rko);
+
+ rd_kafka_set_last_error(0, 0);
+
+ rd_kafka_app_polled(rk);
+
+ return rkmessage;
+}
+
+rd_kafka_message_t *
+rd_kafka_consume(rd_kafka_topic_t *app_rkt, int32_t partition, int timeout_ms) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_message_t *rkmessage;
+
+ rd_kafka_topic_rdlock(rkt);
+ rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no ua on miss*/);
+ if (unlikely(!rktp))
+ rktp = rd_kafka_toppar_desired_get(rkt, partition);
+ rd_kafka_topic_rdunlock(rkt);
+
+ if (unlikely(!rktp)) {
+ /* No such toppar known */
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
+ ESRCH);
+ return NULL;
+ }
+
+ rkmessage =
+ rd_kafka_consume0(rkt->rkt_rk, rktp->rktp_fetchq, timeout_ms);
+
+ rd_kafka_toppar_destroy(rktp); /* refcnt from .._get() */
+
+ return rkmessage;
+}
+
+
+rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu,
+ int timeout_ms) {
+ return rd_kafka_consume0(rkqu->rkqu_rk, rkqu->rkqu_q, timeout_ms);
+}
+
+
+
+rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk) {
+ rd_kafka_cgrp_t *rkcg;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ rd_kafka_q_fwd_set(rk->rk_rep, rkcg->rkcg_q);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms) {
+ rd_kafka_cgrp_t *rkcg;
+
+ if (unlikely(!(rkcg = rd_kafka_cgrp_get(rk)))) {
+ rd_kafka_message_t *rkmessage = rd_kafka_message_new();
+ rkmessage->err = RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+ return rkmessage;
+ }
+
+ return rd_kafka_consume0(rk, rkcg->rkcg_q, timeout_ms);
+}
+
+
+/**
+ * @brief Consumer close.
+ *
+ * @param rkq The consumer group queue will be forwarded to this queue, which
+ * which must be served (rebalance events) by the application/caller
+ * until rd_kafka_consumer_closed() returns true.
+ * If the consumer is not in a joined state, no rebalance events
+ * will be emitted.
+ */
+static rd_kafka_error_t *rd_kafka_consumer_close_q(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq) {
+ rd_kafka_cgrp_t *rkcg;
+ rd_kafka_error_t *error = NULL;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP,
+ "Consume close called on non-group "
+ "consumer");
+
+ if (rd_atomic32_get(&rkcg->rkcg_terminated))
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__DESTROY,
+ "Consumer already closed");
+
+ /* If a fatal error has been raised and this is an
+ * explicit consumer_close() from the application we return
+ * a fatal error. Otherwise let the "silent" no_consumer_close
+ * logic be performed to clean up properly. */
+ if (!rd_kafka_destroy_flags_no_consumer_close(rk) &&
+ (error = rd_kafka_get_fatal_error(rk)))
+ return error;
+
+ rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE",
+ "Closing consumer");
+
+ /* Redirect cgrp queue to the rebalance queue to make sure all posted
+ * ops (e.g., rebalance callbacks) are served by
+ * the application/caller. */
+ rd_kafka_q_fwd_set(rkcg->rkcg_q, rkq);
+
+ /* Tell cgrp subsystem to terminate. A TERMINATE op will be posted
+ * on the rkq when done. */
+ rd_kafka_cgrp_terminate(rkcg, RD_KAFKA_REPLYQ(rkq, 0)); /* async */
+
+ return error;
+}
+
+rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk,
+ rd_kafka_queue_t *rkqu) {
+ if (!rkqu)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Queue must be specified");
+ return rd_kafka_consumer_close_q(rk, rkqu->rkqu_q);
+}
+
+rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ rd_kafka_q_t *rkq;
+
+ /* Create a temporary reply queue to handle the TERMINATE reply op. */
+ rkq = rd_kafka_q_new(rk);
+
+ /* Initiate the close (async) */
+ error = rd_kafka_consumer_close_q(rk, rkq);
+ if (error) {
+ err = rd_kafka_error_is_fatal(error)
+ ? RD_KAFKA_RESP_ERR__FATAL
+ : rd_kafka_error_code(error);
+ rd_kafka_error_destroy(error);
+ rd_kafka_q_destroy_owner(rkq);
+ return err;
+ }
+
+ /* Disable the queue if termination is immediate or the user
+ * does not want the blocking consumer_close() behaviour, this will
+ * cause any ops posted for this queue (such as rebalance) to
+ * be destroyed.
+ */
+ if (rd_kafka_destroy_flags_no_consumer_close(rk)) {
+ rd_kafka_dbg(rk, CONSUMER, "CLOSE",
+ "Disabling and purging temporary queue to quench "
+ "close events");
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_q_disable(rkq);
+ /* Purge ops already enqueued */
+ rd_kafka_q_purge(rkq);
+ } else {
+ rd_kafka_op_t *rko;
+ rd_kafka_dbg(rk, CONSUMER, "CLOSE", "Waiting for close events");
+ while ((rko = rd_kafka_q_pop(rkq, RD_POLL_INFINITE, 0))) {
+ rd_kafka_op_res_t res;
+ if ((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) ==
+ RD_KAFKA_OP_TERMINATE) {
+ err = rko->rko_err;
+ rd_kafka_op_destroy(rko);
+ break;
+ }
+ /* Handle callbacks */
+ res = rd_kafka_poll_cb(rk, rkq, rko,
+ RD_KAFKA_Q_CB_RETURN, NULL);
+ if (res == RD_KAFKA_OP_RES_PASS)
+ rd_kafka_op_destroy(rko);
+ /* Ignore YIELD, we need to finish */
+ }
+ }
+
+ rd_kafka_q_destroy_owner(rkq);
+
+ if (err)
+ rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE",
+ "Consumer closed with error: %s",
+ rd_kafka_err2str(err));
+ else
+ rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLOSE",
+ "Consumer closed");
+
+ return err;
+}
+
+
+int rd_kafka_consumer_closed(rd_kafka_t *rk) {
+ if (unlikely(!rk->rk_cgrp))
+ return 0;
+
+ return rd_atomic32_get(&rk->rk_cgrp->rkcg_terminated);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_committed(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions,
+ int timeout_ms) {
+ rd_kafka_q_t *rkq;
+ rd_kafka_resp_err_t err;
+ rd_kafka_cgrp_t *rkcg;
+ rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
+
+ if (!partitions)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ /* Set default offsets. */
+ rd_kafka_topic_partition_list_reset_offsets(partitions,
+ RD_KAFKA_OFFSET_INVALID);
+
+ rkq = rd_kafka_q_new(rk);
+
+ do {
+ rd_kafka_op_t *rko;
+ int state_version = rd_kafka_brokers_get_state_version(rk);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
+ rd_kafka_op_set_replyq(rko, rkq, NULL);
+
+ /* Issue #827
+ * Copy partition list to avoid use-after-free if we time out
+ * here, the app frees the list, and then cgrp starts
+ * processing the op. */
+ rko->rko_u.offset_fetch.partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+ rko->rko_u.offset_fetch.require_stable_offsets =
+ rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED;
+ rko->rko_u.offset_fetch.do_free = 1;
+
+ if (!rd_kafka_q_enq(rkcg->rkcg_ops, rko)) {
+ err = RD_KAFKA_RESP_ERR__DESTROY;
+ break;
+ }
+
+ rko =
+ rd_kafka_q_pop(rkq, rd_timeout_remains_us(abs_timeout), 0);
+ if (rko) {
+ if (!(err = rko->rko_err))
+ rd_kafka_topic_partition_list_update(
+ partitions,
+ rko->rko_u.offset_fetch.partitions);
+ else if ((err == RD_KAFKA_RESP_ERR__WAIT_COORD ||
+ err == RD_KAFKA_RESP_ERR__TRANSPORT) &&
+ !rd_kafka_brokers_wait_state_change(
+ rk, state_version,
+ rd_timeout_remains(abs_timeout)))
+ err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ rd_kafka_op_destroy(rko);
+ } else
+ err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ } while (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR__WAIT_COORD);
+
+ rd_kafka_q_destroy_owner(rkq);
+
+ return err;
+}
+
+
+
+rd_kafka_resp_err_t
+rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions) {
+ int i;
+
+ for (i = 0; i < partitions->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
+ rd_kafka_toppar_t *rktp;
+
+ if (!(rktp = rd_kafka_toppar_get2(rk, rktpar->topic,
+ rktpar->partition, 0, 1))) {
+ rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ rktpar->offset = RD_KAFKA_OFFSET_INVALID;
+ continue;
+ }
+
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_topic_partition_set_from_fetch_pos(rktpar,
+ rktp->rktp_app_pos);
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_toppar_destroy(rktp);
+
+ rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+struct _query_wmark_offsets_state {
+ rd_kafka_resp_err_t err;
+ const char *topic;
+ int32_t partition;
+ int64_t offsets[2];
+ int offidx; /* next offset to set from response */
+ rd_ts_t ts_end;
+ int state_version; /* Broker state version */
+};
+
+static void rd_kafka_query_wmark_offsets_resp_cb(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ struct _query_wmark_offsets_state *state;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_topic_partition_t *rktpar;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ /* 'state' has gone out of scope when query_watermark..()
+ * timed out and returned to the caller. */
+ return;
+ }
+
+ state = opaque;
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets,
+ NULL);
+ if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
+ rd_kafka_topic_partition_list_destroy(offsets);
+ return; /* Retrying */
+ }
+
+ /* Retry if no broker connection is available yet. */
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb &&
+ rd_kafka_brokers_wait_state_change(
+ rkb->rkb_rk, state->state_version,
+ rd_timeout_remains(state->ts_end))) {
+ /* Retry */
+ state->state_version = rd_kafka_brokers_get_state_version(rk);
+ request->rkbuf_retries = 0;
+ if (rd_kafka_buf_retry(rkb, request)) {
+ rd_kafka_topic_partition_list_destroy(offsets);
+ return; /* Retry in progress */
+ }
+ /* FALLTHRU */
+ }
+
+ /* Partition not seen in response. */
+ if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, state->topic,
+ state->partition)))
+ err = RD_KAFKA_RESP_ERR__BAD_MSG;
+ else if (rktpar->err)
+ err = rktpar->err;
+ else
+ state->offsets[state->offidx] = rktpar->offset;
+
+ state->offidx++;
+
+ if (err || state->offidx == 2) /* Error or Done */
+ state->err = err;
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_query_watermark_offsets(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high,
+ int timeout_ms) {
+ rd_kafka_q_t *rkq;
+ struct _query_wmark_offsets_state state;
+ rd_ts_t ts_end = rd_timeout_init(timeout_ms);
+ rd_kafka_topic_partition_list_t *partitions;
+ rd_kafka_topic_partition_t *rktpar;
+ struct rd_kafka_partition_leader *leader;
+ rd_list_t leaders;
+ rd_kafka_resp_err_t err;
+
+ partitions = rd_kafka_topic_partition_list_new(1);
+ rktpar =
+ rd_kafka_topic_partition_list_add(partitions, topic, partition);
+
+ rd_list_init(&leaders, partitions->cnt,
+ (void *)rd_kafka_partition_leader_destroy);
+
+ err = rd_kafka_topic_partition_list_query_leaders(rk, partitions,
+ &leaders, timeout_ms);
+ if (err) {
+ rd_list_destroy(&leaders);
+ rd_kafka_topic_partition_list_destroy(partitions);
+ return err;
+ }
+
+ leader = rd_list_elem(&leaders, 0);
+
+ rkq = rd_kafka_q_new(rk);
+
+ /* Due to KAFKA-1588 we need to send a request for each wanted offset,
+ * in this case one for the low watermark and one for the high. */
+ state.topic = topic;
+ state.partition = partition;
+ state.offsets[0] = RD_KAFKA_OFFSET_BEGINNING;
+ state.offsets[1] = RD_KAFKA_OFFSET_END;
+ state.offidx = 0;
+ state.err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
+ state.ts_end = ts_end;
+ state.state_version = rd_kafka_brokers_get_state_version(rk);
+
+
+ rktpar->offset = RD_KAFKA_OFFSET_BEGINNING;
+ rd_kafka_ListOffsetsRequest(
+ leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0),
+ rd_kafka_query_wmark_offsets_resp_cb, &state);
+
+ rktpar->offset = RD_KAFKA_OFFSET_END;
+ rd_kafka_ListOffsetsRequest(
+ leader->rkb, partitions, RD_KAFKA_REPLYQ(rkq, 0),
+ rd_kafka_query_wmark_offsets_resp_cb, &state);
+
+ rd_kafka_topic_partition_list_destroy(partitions);
+ rd_list_destroy(&leaders);
+
+ /* Wait for reply (or timeout) */
+ while (state.err == RD_KAFKA_RESP_ERR__IN_PROGRESS &&
+ rd_kafka_q_serve(rkq, 100, 0, RD_KAFKA_Q_CB_CALLBACK,
+ rd_kafka_poll_cb,
+ NULL) != RD_KAFKA_OP_RES_YIELD)
+ ;
+
+ rd_kafka_q_destroy_owner(rkq);
+
+ if (state.err)
+ return state.err;
+ else if (state.offidx != 2)
+ return RD_KAFKA_RESP_ERR__FAIL;
+
+ /* We are not certain about the returned order. */
+ if (state.offsets[0] < state.offsets[1]) {
+ *low = state.offsets[0];
+ *high = state.offsets[1];
+ } else {
+ *low = state.offsets[1];
+ *high = state.offsets[0];
+ }
+
+ /* If partition is empty only one offset (the last) will be returned. */
+ if (*low < 0 && *high >= 0)
+ *low = *high;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high) {
+ rd_kafka_toppar_t *rktp;
+
+ rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, 1);
+ if (!rktp)
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ rd_kafka_toppar_lock(rktp);
+ *low = rktp->rktp_lo_offset;
+ *high = rktp->rktp_hi_offset;
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief get_offsets_for_times() state
+ */
+struct _get_offsets_for_times {
+ rd_kafka_topic_partition_list_t *results;
+ rd_kafka_resp_err_t err;
+ int wait_reply;
+ int state_version;
+ rd_ts_t ts_end;
+};
+
+/**
+ * @brief Handle OffsetRequest responses
+ */
+static void rd_kafka_get_offsets_for_times_resp_cb(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ struct _get_offsets_for_times *state;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ /* 'state' has gone out of scope when offsets_for_times()
+ * timed out and returned to the caller. */
+ return;
+ }
+
+ state = opaque;
+
+ err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request,
+ state->results, NULL);
+ if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
+ return; /* Retrying */
+
+ /* Retry if no broker connection is available yet. */
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT && rkb &&
+ rd_kafka_brokers_wait_state_change(
+ rkb->rkb_rk, state->state_version,
+ rd_timeout_remains(state->ts_end))) {
+ /* Retry */
+ state->state_version = rd_kafka_brokers_get_state_version(rk);
+ request->rkbuf_retries = 0;
+ if (rd_kafka_buf_retry(rkb, request))
+ return; /* Retry in progress */
+ /* FALLTHRU */
+ }
+
+ if (err && !state->err)
+ state->err = err;
+
+ state->wait_reply--;
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_offsets_for_times(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *offsets,
+ int timeout_ms) {
+ rd_kafka_q_t *rkq;
+ struct _get_offsets_for_times state = RD_ZERO_INIT;
+ rd_ts_t ts_end = rd_timeout_init(timeout_ms);
+ rd_list_t leaders;
+ int i;
+ rd_kafka_resp_err_t err;
+ struct rd_kafka_partition_leader *leader;
+ int tmout;
+
+ if (offsets->cnt == 0)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ rd_list_init(&leaders, offsets->cnt,
+ (void *)rd_kafka_partition_leader_destroy);
+
+ err = rd_kafka_topic_partition_list_query_leaders(rk, offsets, &leaders,
+ timeout_ms);
+ if (err) {
+ rd_list_destroy(&leaders);
+ return err;
+ }
+
+
+ rkq = rd_kafka_q_new(rk);
+
+ state.wait_reply = 0;
+ state.results = rd_kafka_topic_partition_list_new(offsets->cnt);
+
+ /* For each leader send a request for its partitions */
+ RD_LIST_FOREACH(leader, &leaders, i) {
+ state.wait_reply++;
+ rd_kafka_ListOffsetsRequest(
+ leader->rkb, leader->partitions, RD_KAFKA_REPLYQ(rkq, 0),
+ rd_kafka_get_offsets_for_times_resp_cb, &state);
+ }
+
+ rd_list_destroy(&leaders);
+
+ /* Wait for reply (or timeout) */
+ while (state.wait_reply > 0 &&
+ !rd_timeout_expired((tmout = rd_timeout_remains(ts_end))))
+ rd_kafka_q_serve(rkq, tmout, 0, RD_KAFKA_Q_CB_CALLBACK,
+ rd_kafka_poll_cb, NULL);
+
+ rd_kafka_q_destroy_owner(rkq);
+
+ if (state.wait_reply > 0 && !state.err)
+ state.err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ /* Then update the queried partitions. */
+ if (!state.err)
+ rd_kafka_topic_partition_list_update(offsets, state.results);
+
+ rd_kafka_topic_partition_list_destroy(state.results);
+
+ return state.err;
+}
+
+
+/**
+ * @brief rd_kafka_poll() (and similar) op callback handler.
+ * Will either call registered callback depending on cb_type and op type
+ * or return op to application, if applicable (e.g., fetch message).
+ *
+ * @returns RD_KAFKA_OP_RES_HANDLED if op was handled, else one of the
+ * other res types (such as OP_RES_PASS).
+ *
+ * @locality any thread that serves op queues
+ */
+rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque) {
+ rd_kafka_msg_t *rkm;
+ rd_kafka_op_res_t res = RD_KAFKA_OP_RES_HANDLED;
+
+ /* Special handling for events based on cb_type */
+ if (cb_type == RD_KAFKA_Q_CB_EVENT && rd_kafka_event_setup(rk, rko)) {
+ /* Return-as-event requested. */
+ return RD_KAFKA_OP_RES_PASS; /* Return as event */
+ }
+
+ switch ((int)rko->rko_type) {
+ case RD_KAFKA_OP_FETCH:
+ if (!rk->rk_conf.consume_cb ||
+ cb_type == RD_KAFKA_Q_CB_RETURN ||
+ cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
+ return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
+ else {
+ struct consume_ctx ctx = {.consume_cb =
+ rk->rk_conf.consume_cb,
+ .opaque = rk->rk_conf.opaque};
+
+ return rd_kafka_consume_cb(rk, rkq, rko, cb_type, &ctx);
+ }
+ break;
+
+ case RD_KAFKA_OP_REBALANCE:
+ if (rk->rk_conf.rebalance_cb)
+ rk->rk_conf.rebalance_cb(
+ rk, rko->rko_err, rko->rko_u.rebalance.partitions,
+ rk->rk_conf.opaque);
+ else {
+ /** If EVENT_REBALANCE is enabled but rebalance_cb
+ * isn't, we need to perform a dummy assign for the
+ * application. This might happen during termination
+ * with consumer_close() */
+ rd_kafka_dbg(rk, CGRP, "UNASSIGN",
+ "Forcing unassign of %d partition(s)",
+ rko->rko_u.rebalance.partitions
+ ? rko->rko_u.rebalance.partitions->cnt
+ : 0);
+ rd_kafka_assign(rk, NULL);
+ }
+ break;
+
+ case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY:
+ if (!rko->rko_u.offset_commit.cb)
+ return RD_KAFKA_OP_RES_PASS; /* Dont handle here */
+ rko->rko_u.offset_commit.cb(rk, rko->rko_err,
+ rko->rko_u.offset_commit.partitions,
+ rko->rko_u.offset_commit.opaque);
+ break;
+
+ case RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY:
+ /* Reply from toppar FETCH_STOP */
+ rd_kafka_assignment_partition_stopped(rk, rko->rko_rktp);
+ break;
+
+ case RD_KAFKA_OP_CONSUMER_ERR:
+ /* rd_kafka_consumer_poll() (_Q_CB_CONSUMER):
+ * Consumer errors are returned to the application
+ * as rkmessages, not error callbacks.
+ *
+ * rd_kafka_poll() (_Q_CB_GLOBAL):
+ * convert to ERR op (fallthru)
+ */
+ if (cb_type == RD_KAFKA_Q_CB_RETURN ||
+ cb_type == RD_KAFKA_Q_CB_FORCE_RETURN) {
+ /* return as message_t to application */
+ return RD_KAFKA_OP_RES_PASS;
+ }
+ /* FALLTHRU */
+
+ case RD_KAFKA_OP_ERR:
+ if (rk->rk_conf.error_cb)
+ rk->rk_conf.error_cb(rk, rko->rko_err,
+ rko->rko_u.err.errstr,
+ rk->rk_conf.opaque);
+ else
+ rd_kafka_log(rk, LOG_ERR, "ERROR", "%s: %s",
+ rk->rk_name, rko->rko_u.err.errstr);
+ break;
+
+ case RD_KAFKA_OP_DR:
+ /* Delivery report:
+ * call application DR callback for each message. */
+ while ((rkm = TAILQ_FIRST(&rko->rko_u.dr.msgq.rkmq_msgs))) {
+ rd_kafka_message_t *rkmessage;
+
+ TAILQ_REMOVE(&rko->rko_u.dr.msgq.rkmq_msgs, rkm,
+ rkm_link);
+
+ rkmessage = rd_kafka_message_get_from_rkm(rko, rkm);
+
+ if (likely(rk->rk_conf.dr_msg_cb != NULL)) {
+ rk->rk_conf.dr_msg_cb(rk, rkmessage,
+ rk->rk_conf.opaque);
+
+ } else if (rk->rk_conf.dr_cb) {
+ rk->rk_conf.dr_cb(
+ rk, rkmessage->payload, rkmessage->len,
+ rkmessage->err, rk->rk_conf.opaque,
+ rkmessage->_private);
+ } else if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) {
+ rd_kafka_log(
+ rk, LOG_WARNING, "DRDROP",
+ "Dropped delivery report for "
+ "message to "
+ "%s [%" PRId32
+ "] (%s) with "
+ "opaque %p: flush() or poll() "
+ "should not be called when "
+ "EVENT_DR is enabled",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition,
+ rd_kafka_err2name(rkmessage->err),
+ rkmessage->_private);
+ } else {
+ rd_assert(!*"BUG: neither a delivery report "
+ "callback or EVENT_DR flag set");
+ }
+
+ rd_kafka_msg_destroy(rk, rkm);
+
+ if (unlikely(rd_kafka_yield_thread)) {
+ /* Callback called yield(),
+ * re-enqueue the op (if there are any
+ * remaining messages). */
+ if (!TAILQ_EMPTY(&rko->rko_u.dr.msgq.rkmq_msgs))
+ rd_kafka_q_reenq(rkq, rko);
+ else
+ rd_kafka_op_destroy(rko);
+ return RD_KAFKA_OP_RES_YIELD;
+ }
+ }
+
+ rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
+
+ break;
+
+ case RD_KAFKA_OP_THROTTLE:
+ if (rk->rk_conf.throttle_cb)
+ rk->rk_conf.throttle_cb(
+ rk, rko->rko_u.throttle.nodename,
+ rko->rko_u.throttle.nodeid,
+ rko->rko_u.throttle.throttle_time,
+ rk->rk_conf.opaque);
+ break;
+
+ case RD_KAFKA_OP_STATS:
+ /* Statistics */
+ if (rk->rk_conf.stats_cb &&
+ rk->rk_conf.stats_cb(rk, rko->rko_u.stats.json,
+ rko->rko_u.stats.json_len,
+ rk->rk_conf.opaque) == 1)
+ rko->rko_u.stats.json =
+ NULL; /* Application wanted json ptr */
+ break;
+
+ case RD_KAFKA_OP_LOG:
+ if (likely(rk->rk_conf.log_cb &&
+ rk->rk_conf.log_level >= rko->rko_u.log.level))
+ rk->rk_conf.log_cb(rk, rko->rko_u.log.level,
+ rko->rko_u.log.fac,
+ rko->rko_u.log.str);
+ break;
+
+ case RD_KAFKA_OP_TERMINATE:
+ /* nop: just a wake-up */
+ res = RD_KAFKA_OP_RES_YIELD;
+ rd_kafka_op_destroy(rko);
+ break;
+
+ case RD_KAFKA_OP_CREATETOPICS:
+ case RD_KAFKA_OP_DELETETOPICS:
+ case RD_KAFKA_OP_CREATEPARTITIONS:
+ case RD_KAFKA_OP_ALTERCONFIGS:
+ case RD_KAFKA_OP_DESCRIBECONFIGS:
+ case RD_KAFKA_OP_DELETERECORDS:
+ case RD_KAFKA_OP_DELETEGROUPS:
+ case RD_KAFKA_OP_ADMIN_FANOUT:
+ case RD_KAFKA_OP_CREATEACLS:
+ case RD_KAFKA_OP_DESCRIBEACLS:
+ case RD_KAFKA_OP_DELETEACLS:
+ /* Calls op_destroy() from worker callback,
+ * when the time comes. */
+ res = rd_kafka_op_call(rk, rkq, rko);
+ break;
+
+ case RD_KAFKA_OP_ADMIN_RESULT:
+ if (cb_type == RD_KAFKA_Q_CB_RETURN ||
+ cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
+ return RD_KAFKA_OP_RES_PASS; /* Don't handle here */
+
+ /* Op is silently destroyed below */
+ break;
+
+ case RD_KAFKA_OP_TXN:
+ /* Must only be handled by rdkafka main thread */
+ rd_assert(thrd_is_current(rk->rk_thread));
+ res = rd_kafka_op_call(rk, rkq, rko);
+ break;
+
+ case RD_KAFKA_OP_BARRIER:
+ break;
+
+ case RD_KAFKA_OP_PURGE:
+ rd_kafka_purge(rk, rko->rko_u.purge.flags);
+ break;
+
+ default:
+ /* If op has a callback set (e.g., OAUTHBEARER_REFRESH),
+ * call it. */
+ if (rko->rko_type & RD_KAFKA_OP_CB) {
+ res = rd_kafka_op_call(rk, rkq, rko);
+ break;
+ }
+
+ RD_BUG("Can't handle op type %s (0x%x)",
+ rd_kafka_op2str(rko->rko_type), rko->rko_type);
+ break;
+ }
+
+ if (res == RD_KAFKA_OP_RES_HANDLED)
+ rd_kafka_op_destroy(rko);
+
+ return res;
+}
+
+int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms) {
+ int r;
+
+ r = rd_kafka_q_serve(rk->rk_rep, timeout_ms, 0, RD_KAFKA_Q_CB_CALLBACK,
+ rd_kafka_poll_cb, NULL);
+
+ return r;
+}
+
+
+rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_q_pop_serve(rkqu->rkqu_q, rd_timeout_us(timeout_ms), 0,
+ RD_KAFKA_Q_CB_EVENT, rd_kafka_poll_cb, NULL);
+
+ if (!rko)
+ return NULL;
+
+ return rko;
+}
+
+int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms) {
+ int r;
+
+ r = rd_kafka_q_serve(rkqu->rkqu_q, timeout_ms, 0,
+ RD_KAFKA_Q_CB_CALLBACK, rd_kafka_poll_cb, NULL);
+
+ return r;
+}
+
+
+
+static void
+rd_kafka_toppar_dump(FILE *fp, const char *indent, rd_kafka_toppar_t *rktp) {
+
+ fprintf(fp,
+ "%s%.*s [%" PRId32
+ "] broker %s, "
+ "leader_id %s\n",
+ indent, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rktp->rktp_broker ? rktp->rktp_broker->rkb_name : "none",
+ rktp->rktp_leader ? rktp->rktp_leader->rkb_name : "none");
+ fprintf(fp,
+ "%s refcnt %i\n"
+ "%s msgq: %i messages\n"
+ "%s xmit_msgq: %i messages\n"
+ "%s total: %" PRIu64 " messages, %" PRIu64 " bytes\n",
+ indent, rd_refcnt_get(&rktp->rktp_refcnt), indent,
+ rktp->rktp_msgq.rkmq_msg_cnt, indent,
+ rktp->rktp_xmit_msgq.rkmq_msg_cnt, indent,
+ rd_atomic64_get(&rktp->rktp_c.tx_msgs),
+ rd_atomic64_get(&rktp->rktp_c.tx_msg_bytes));
+}
+
+static void rd_kafka_broker_dump(FILE *fp, rd_kafka_broker_t *rkb, int locks) {
+ rd_kafka_toppar_t *rktp;
+
+ if (locks)
+ rd_kafka_broker_lock(rkb);
+ fprintf(fp,
+ " rd_kafka_broker_t %p: %s NodeId %" PRId32
+ " in state %s (for %.3fs)\n",
+ rkb, rkb->rkb_name, rkb->rkb_nodeid,
+ rd_kafka_broker_state_names[rkb->rkb_state],
+ rkb->rkb_ts_state
+ ? (float)(rd_clock() - rkb->rkb_ts_state) / 1000000.0f
+ : 0.0f);
+ fprintf(fp, " refcnt %i\n", rd_refcnt_get(&rkb->rkb_refcnt));
+ fprintf(fp, " outbuf_cnt: %i waitresp_cnt: %i\n",
+ rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt),
+ rd_atomic32_get(&rkb->rkb_waitresps.rkbq_cnt));
+ fprintf(fp,
+ " %" PRIu64 " messages sent, %" PRIu64
+ " bytes, "
+ "%" PRIu64 " errors, %" PRIu64
+ " timeouts\n"
+ " %" PRIu64 " messages received, %" PRIu64
+ " bytes, "
+ "%" PRIu64
+ " errors\n"
+ " %" PRIu64 " messageset transmissions were retried\n",
+ rd_atomic64_get(&rkb->rkb_c.tx),
+ rd_atomic64_get(&rkb->rkb_c.tx_bytes),
+ rd_atomic64_get(&rkb->rkb_c.tx_err),
+ rd_atomic64_get(&rkb->rkb_c.req_timeouts),
+ rd_atomic64_get(&rkb->rkb_c.rx),
+ rd_atomic64_get(&rkb->rkb_c.rx_bytes),
+ rd_atomic64_get(&rkb->rkb_c.rx_err),
+ rd_atomic64_get(&rkb->rkb_c.tx_retries));
+
+ fprintf(fp, " %i toppars:\n", rkb->rkb_toppar_cnt);
+ TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink)
+ rd_kafka_toppar_dump(fp, " ", rktp);
+ if (locks) {
+ rd_kafka_broker_unlock(rkb);
+ }
+}
+
+
+static void rd_kafka_dump0(FILE *fp, rd_kafka_t *rk, int locks) {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_toppar_t *rktp;
+ int i;
+ unsigned int tot_cnt;
+ size_t tot_size;
+
+ rd_kafka_curr_msgs_get(rk, &tot_cnt, &tot_size);
+
+ if (locks)
+ rd_kafka_rdlock(rk);
+#if ENABLE_DEVEL
+ fprintf(fp, "rd_kafka_op_cnt: %d\n", rd_atomic32_get(&rd_kafka_op_cnt));
+#endif
+ fprintf(fp, "rd_kafka_t %p: %s\n", rk, rk->rk_name);
+
+ fprintf(fp, " producer.msg_cnt %u (%" PRIusz " bytes)\n", tot_cnt,
+ tot_size);
+ fprintf(fp, " rk_rep reply queue: %i ops\n",
+ rd_kafka_q_len(rk->rk_rep));
+
+ fprintf(fp, " brokers:\n");
+ if (locks)
+ mtx_lock(&rk->rk_internal_rkb_lock);
+ if (rk->rk_internal_rkb)
+ rd_kafka_broker_dump(fp, rk->rk_internal_rkb, locks);
+ if (locks)
+ mtx_unlock(&rk->rk_internal_rkb_lock);
+
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ rd_kafka_broker_dump(fp, rkb, locks);
+ }
+
+ fprintf(fp, " cgrp:\n");
+ if (rk->rk_cgrp) {
+ rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
+ fprintf(fp, " %.*s in state %s, flags 0x%x\n",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rkcg->rkcg_flags);
+ fprintf(fp, " coord_id %" PRId32 ", broker %s\n",
+ rkcg->rkcg_coord_id,
+ rkcg->rkcg_curr_coord
+ ? rd_kafka_broker_name(rkcg->rkcg_curr_coord)
+ : "(none)");
+
+ fprintf(fp, " toppars:\n");
+ RD_LIST_FOREACH(rktp, &rkcg->rkcg_toppars, i) {
+ fprintf(fp, " %.*s [%" PRId32 "] in state %s\n",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state]);
+ }
+ }
+
+ fprintf(fp, " topics:\n");
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ fprintf(fp,
+ " %.*s with %" PRId32
+ " partitions, state %s, "
+ "refcnt %i\n",
+ RD_KAFKAP_STR_PR(rkt->rkt_topic),
+ rkt->rkt_partition_cnt,
+ rd_kafka_topic_state_names[rkt->rkt_state],
+ rd_refcnt_get(&rkt->rkt_refcnt));
+ if (rkt->rkt_ua)
+ rd_kafka_toppar_dump(fp, " ", rkt->rkt_ua);
+ if (rd_list_empty(&rkt->rkt_desp)) {
+ fprintf(fp, " desired partitions:");
+ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
+ fprintf(fp, " %" PRId32, rktp->rktp_partition);
+ fprintf(fp, "\n");
+ }
+ }
+
+ fprintf(fp, "\n");
+ rd_kafka_metadata_cache_dump(fp, rk);
+
+ if (locks)
+ rd_kafka_rdunlock(rk);
+}
+
+void rd_kafka_dump(FILE *fp, rd_kafka_t *rk) {
+ if (rk)
+ rd_kafka_dump0(fp, rk, 1 /*locks*/);
+}
+
+
+
+const char *rd_kafka_name(const rd_kafka_t *rk) {
+ return rk->rk_name;
+}
+
+rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk) {
+ return rk->rk_type;
+}
+
+
+char *rd_kafka_memberid(const rd_kafka_t *rk) {
+ rd_kafka_op_t *rko;
+ rd_kafka_cgrp_t *rkcg;
+ char *memberid;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return NULL;
+
+ rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_NAME);
+ if (!rko)
+ return NULL;
+ memberid = rko->rko_u.name.str;
+ rko->rko_u.name.str = NULL;
+ rd_kafka_op_destroy(rko);
+
+ return memberid;
+}
+
+
+char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms) {
+ rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
+
+ /* ClusterId is returned in Metadata >=V2 responses and
+ * cached on the rk. If no cached value is available
+ * it means no metadata has been received yet, or we're
+ * using a lower protocol version
+ * (e.g., lack of api.version.request=true). */
+
+ while (1) {
+ int remains_ms;
+
+ rd_kafka_rdlock(rk);
+
+ if (rk->rk_clusterid) {
+ /* Cached clusterid available. */
+ char *ret = rd_strdup(rk->rk_clusterid);
+ rd_kafka_rdunlock(rk);
+ return ret;
+ } else if (rk->rk_ts_metadata > 0) {
+ /* Metadata received but no clusterid,
+ * this probably means the broker is too old
+ * or api.version.request=false. */
+ rd_kafka_rdunlock(rk);
+ return NULL;
+ }
+
+ rd_kafka_rdunlock(rk);
+
+ /* Wait for up to timeout_ms for a metadata refresh,
+ * if permitted by application. */
+ remains_ms = rd_timeout_remains(abs_timeout);
+ if (rd_timeout_expired(remains_ms))
+ return NULL;
+
+ rd_kafka_metadata_cache_wait_change(rk, remains_ms);
+ }
+
+ return NULL;
+}
+
+
+int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms) {
+ rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
+
+ /* ControllerId is returned in Metadata >=V1 responses and
+ * cached on the rk. If no cached value is available
+ * it means no metadata has been received yet, or we're
+ * using a lower protocol version
+ * (e.g., lack of api.version.request=true). */
+
+ while (1) {
+ int remains_ms;
+ int version;
+
+ version = rd_kafka_brokers_get_state_version(rk);
+
+ rd_kafka_rdlock(rk);
+
+ if (rk->rk_controllerid != -1) {
+ /* Cached controllerid available. */
+ rd_kafka_rdunlock(rk);
+ return rk->rk_controllerid;
+ } else if (rk->rk_ts_metadata > 0) {
+ /* Metadata received but no clusterid,
+ * this probably means the broker is too old
+ * or api.version.request=false. */
+ rd_kafka_rdunlock(rk);
+ return -1;
+ }
+
+ rd_kafka_rdunlock(rk);
+
+ /* Wait for up to timeout_ms for a metadata refresh,
+ * if permitted by application. */
+ remains_ms = rd_timeout_remains(abs_timeout);
+ if (rd_timeout_expired(remains_ms))
+ return -1;
+
+ rd_kafka_brokers_wait_state_change(rk, version, remains_ms);
+ }
+
+ return -1;
+}
+
+
+void *rd_kafka_opaque(const rd_kafka_t *rk) {
+ return rk->rk_conf.opaque;
+}
+
+
+int rd_kafka_outq_len(rd_kafka_t *rk) {
+ return rd_kafka_curr_msgs_cnt(rk) + rd_kafka_q_len(rk->rk_rep) +
+ (rk->rk_background.q ? rd_kafka_q_len(rk->rk_background.q) : 0);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms) {
+ unsigned int msg_cnt = 0;
+
+ if (rk->rk_type != RD_KAFKA_PRODUCER)
+ return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+
+ rd_kafka_yield_thread = 0;
+
+ /* Set flushing flag on the producer for the duration of the
+ * flush() call. This tells producer_serve() that the linger.ms
+ * time should be considered immediate. */
+ rd_atomic32_add(&rk->rk_flushing, 1);
+
+ /* Wake up all broker threads to trigger the produce_serve() call.
+ * If this flush() call finishes before the broker wakes up
+ * then no flushing will be performed by that broker thread. */
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_UP, "flushing");
+
+ if (rk->rk_drmode == RD_KAFKA_DR_MODE_EVENT) {
+ /* Application wants delivery reports as events rather
+ * than callbacks, we must thus not serve this queue
+ * with rd_kafka_poll() since that would trigger non-existent
+ * delivery report callbacks, which would result
+ * in the delivery reports being dropped.
+ * Instead we rely on the application to serve the event
+ * queue in another thread, so all we do here is wait
+ * for the current message count to reach zero. */
+ rd_kafka_curr_msgs_wait_zero(rk, timeout_ms, &msg_cnt);
+
+ } else {
+ /* Standard poll interface.
+ *
+ * First poll call is non-blocking for the case
+ * where timeout_ms==RD_POLL_NOWAIT to make sure poll is
+ * called at least once. */
+ rd_ts_t ts_end = rd_timeout_init(timeout_ms);
+ int tmout = RD_POLL_NOWAIT;
+ int qlen = 0;
+
+ do {
+ rd_kafka_poll(rk, tmout);
+ qlen = rd_kafka_q_len(rk->rk_rep);
+ msg_cnt = rd_kafka_curr_msgs_cnt(rk);
+ } while (qlen + msg_cnt > 0 && !rd_kafka_yield_thread &&
+ (tmout = rd_timeout_remains_limit(ts_end, 10)) !=
+ RD_POLL_NOWAIT);
+
+ msg_cnt += qlen;
+ }
+
+ rd_atomic32_sub(&rk->rk_flushing, 1);
+
+ return msg_cnt > 0 ? RD_KAFKA_RESP_ERR__TIMED_OUT
+ : RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Purge the partition message queue (according to \p purge_flags) for
+ * all toppars.
+ *
+ * This is a necessity to avoid the race condition when a purge() is scheduled
+ * shortly in-between an rktp has been created but before it has been
+ * joined to a broker handler thread.
+ *
+ * The rktp_xmit_msgq is handled by the broker-thread purge.
+ *
+ * @returns the number of messages purged.
+ *
+ * @locks_required rd_kafka_*lock()
+ * @locks_acquired rd_kafka_topic_rdlock()
+ */
+static int rd_kafka_purge_toppars(rd_kafka_t *rk, int purge_flags) {
+ rd_kafka_topic_t *rkt;
+ int cnt = 0;
+
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ rd_kafka_toppar_t *rktp;
+ int i;
+
+ rd_kafka_topic_rdlock(rkt);
+ for (i = 0; i < rkt->rkt_partition_cnt; i++)
+ cnt += rd_kafka_toppar_purge_queues(
+ rkt->rkt_p[i], purge_flags, rd_false /*!xmit*/);
+
+ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
+ cnt += rd_kafka_toppar_purge_queues(rktp, purge_flags,
+ rd_false /*!xmit*/);
+
+ if (rkt->rkt_ua)
+ cnt += rd_kafka_toppar_purge_queues(
+ rkt->rkt_ua, purge_flags, rd_false /*!xmit*/);
+ rd_kafka_topic_rdunlock(rkt);
+ }
+
+ return cnt;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags) {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_q_t *tmpq = NULL;
+ int waitcnt = 0;
+
+ if (rk->rk_type != RD_KAFKA_PRODUCER)
+ return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+
+ /* Check that future flags are not passed */
+ if ((purge_flags & ~RD_KAFKA_PURGE_F_MASK) != 0)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ /* Nothing to purge */
+ if (!purge_flags)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Set up a reply queue to wait for broker thread signalling
+ * completion, unless non-blocking. */
+ if (!(purge_flags & RD_KAFKA_PURGE_F_NON_BLOCKING))
+ tmpq = rd_kafka_q_new(rk);
+
+ rd_kafka_rdlock(rk);
+
+ /* Purge msgq for all toppars. */
+ rd_kafka_purge_toppars(rk, purge_flags);
+
+ /* Send purge request to all broker threads */
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ rd_kafka_broker_purge_queues(rkb, purge_flags,
+ RD_KAFKA_REPLYQ(tmpq, 0));
+ waitcnt++;
+ }
+
+ rd_kafka_rdunlock(rk);
+
+
+ if (tmpq) {
+ /* Wait for responses */
+ while (waitcnt-- > 0)
+ rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
+
+ rd_kafka_q_destroy_owner(tmpq);
+ }
+
+ /* Purge messages for the UA(-1) partitions (which are not
+ * handled by a broker thread) */
+ if (purge_flags & RD_KAFKA_PURGE_F_QUEUE)
+ rd_kafka_purge_ua_toppar_queues(rk);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @returns a csv string of purge flags in thread-local storage
+ */
+const char *rd_kafka_purge_flags2str(int flags) {
+ static const char *names[] = {"queue", "inflight", "non-blocking",
+ NULL};
+ static RD_TLS char ret[64];
+
+ return rd_flags2str(ret, sizeof(ret), names, flags);
+}
+
+
+int rd_kafka_version(void) {
+ return RD_KAFKA_VERSION;
+}
+
+const char *rd_kafka_version_str(void) {
+ static RD_TLS char ret[128];
+ size_t of = 0, r;
+
+ if (*ret)
+ return ret;
+
+#ifdef LIBRDKAFKA_GIT_VERSION
+ if (*LIBRDKAFKA_GIT_VERSION) {
+ of = rd_snprintf(ret, sizeof(ret), "%s",
+ *LIBRDKAFKA_GIT_VERSION == 'v'
+ ? &LIBRDKAFKA_GIT_VERSION[1]
+ : LIBRDKAFKA_GIT_VERSION);
+ if (of > sizeof(ret))
+ of = sizeof(ret);
+ }
+#endif
+
+#define _my_sprintf(...) \
+ do { \
+ r = rd_snprintf(ret + of, sizeof(ret) - of, __VA_ARGS__); \
+ if (r > sizeof(ret) - of) \
+ r = sizeof(ret) - of; \
+ of += r; \
+ } while (0)
+
+ if (of == 0) {
+ int ver = rd_kafka_version();
+ int prel = (ver & 0xff);
+ _my_sprintf("%i.%i.%i", (ver >> 24) & 0xff, (ver >> 16) & 0xff,
+ (ver >> 8) & 0xff);
+ if (prel != 0xff) {
+ /* pre-builds below 200 are just running numbers,
+ * above 200 are RC numbers. */
+ if (prel <= 200)
+ _my_sprintf("-pre%d", prel);
+ else
+ _my_sprintf("-RC%d", prel - 200);
+ }
+ }
+
+#if ENABLE_DEVEL
+ _my_sprintf("-devel");
+#endif
+
+#if WITHOUT_OPTIMIZATION
+ _my_sprintf("-O0");
+#endif
+
+ return ret;
+}
+
+
+/**
+ * Assert trampoline to print some debugging information on crash.
+ */
+void RD_NORETURN rd_kafka_crash(const char *file,
+ int line,
+ const char *function,
+ rd_kafka_t *rk,
+ const char *reason) {
+ fprintf(stderr, "*** %s:%i:%s: %s ***\n", file, line, function, reason);
+ if (rk)
+ rd_kafka_dump0(stderr, rk, 0 /*no locks*/);
+ abort();
+}
+
+
+
+struct list_groups_state {
+ rd_kafka_q_t *q;
+ rd_kafka_resp_err_t err;
+ int wait_cnt;
+ const char *desired_group;
+ struct rd_kafka_group_list *grplist;
+ int grplist_size;
+};
+
+static const char *rd_kafka_consumer_group_state_names[] = {
+ "Unknown", "PreparingRebalance", "CompletingRebalance", "Stable", "Dead",
+ "Empty"};
+
+const char *
+rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state) {
+ if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT)
+ return NULL;
+ return rd_kafka_consumer_group_state_names[state];
+}
+
+rd_kafka_consumer_group_state_t
+rd_kafka_consumer_group_state_code(const char *name) {
+ size_t i;
+ for (i = 0; i < RD_KAFKA_CONSUMER_GROUP_STATE__CNT; i++) {
+ if (!rd_strcasecmp(rd_kafka_consumer_group_state_names[i],
+ name))
+ return i;
+ }
+ return RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN;
+}
+
+static void rd_kafka_DescribeGroups_resp_cb(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ struct list_groups_state *state;
+ const int log_decode_errors = LOG_ERR;
+ int cnt;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ /* 'state' has gone out of scope due to list_groups()
+ * timing out and returning. */
+ return;
+ }
+
+ state = opaque;
+ state->wait_cnt--;
+
+ if (err)
+ goto err;
+
+ rd_kafka_buf_read_i32(reply, &cnt);
+
+ while (cnt-- > 0) {
+ int16_t ErrorCode;
+ rd_kafkap_str_t Group, GroupState, ProtoType, Proto;
+ int MemberCnt;
+ struct rd_kafka_group_info *gi;
+
+ if (state->grplist->group_cnt == state->grplist_size) {
+ /* Grow group array */
+ state->grplist_size *= 2;
+ state->grplist->groups =
+ rd_realloc(state->grplist->groups,
+ state->grplist_size *
+ sizeof(*state->grplist->groups));
+ }
+
+ gi = &state->grplist->groups[state->grplist->group_cnt++];
+ memset(gi, 0, sizeof(*gi));
+
+ rd_kafka_buf_read_i16(reply, &ErrorCode);
+ rd_kafka_buf_read_str(reply, &Group);
+ rd_kafka_buf_read_str(reply, &GroupState);
+ rd_kafka_buf_read_str(reply, &ProtoType);
+ rd_kafka_buf_read_str(reply, &Proto);
+ rd_kafka_buf_read_i32(reply, &MemberCnt);
+
+ if (MemberCnt > 100000) {
+ err = RD_KAFKA_RESP_ERR__BAD_MSG;
+ goto err;
+ }
+
+ rd_kafka_broker_lock(rkb);
+ gi->broker.id = rkb->rkb_nodeid;
+ gi->broker.host = rd_strdup(rkb->rkb_origname);
+ gi->broker.port = rkb->rkb_port;
+ rd_kafka_broker_unlock(rkb);
+
+ gi->err = ErrorCode;
+ gi->group = RD_KAFKAP_STR_DUP(&Group);
+ gi->state = RD_KAFKAP_STR_DUP(&GroupState);
+ gi->protocol_type = RD_KAFKAP_STR_DUP(&ProtoType);
+ gi->protocol = RD_KAFKAP_STR_DUP(&Proto);
+
+ if (MemberCnt > 0)
+ gi->members =
+ rd_malloc(MemberCnt * sizeof(*gi->members));
+
+ while (MemberCnt-- > 0) {
+ rd_kafkap_str_t MemberId, ClientId, ClientHost;
+ rd_kafkap_bytes_t Meta, Assignment;
+ struct rd_kafka_group_member_info *mi;
+
+ mi = &gi->members[gi->member_cnt++];
+ memset(mi, 0, sizeof(*mi));
+
+ rd_kafka_buf_read_str(reply, &MemberId);
+ rd_kafka_buf_read_str(reply, &ClientId);
+ rd_kafka_buf_read_str(reply, &ClientHost);
+ rd_kafka_buf_read_bytes(reply, &Meta);
+ rd_kafka_buf_read_bytes(reply, &Assignment);
+
+ mi->member_id = RD_KAFKAP_STR_DUP(&MemberId);
+ mi->client_id = RD_KAFKAP_STR_DUP(&ClientId);
+ mi->client_host = RD_KAFKAP_STR_DUP(&ClientHost);
+
+ if (RD_KAFKAP_BYTES_LEN(&Meta) == 0) {
+ mi->member_metadata_size = 0;
+ mi->member_metadata = NULL;
+ } else {
+ mi->member_metadata_size =
+ RD_KAFKAP_BYTES_LEN(&Meta);
+ mi->member_metadata = rd_memdup(
+ Meta.data, mi->member_metadata_size);
+ }
+
+ if (RD_KAFKAP_BYTES_LEN(&Assignment) == 0) {
+ mi->member_assignment_size = 0;
+ mi->member_assignment = NULL;
+ } else {
+ mi->member_assignment_size =
+ RD_KAFKAP_BYTES_LEN(&Assignment);
+ mi->member_assignment =
+ rd_memdup(Assignment.data,
+ mi->member_assignment_size);
+ }
+ }
+ }
+
+err:
+ state->err = err;
+ return;
+
+err_parse:
+ state->err = reply->rkbuf_err;
+}
+
+static void rd_kafka_ListGroups_resp_cb(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ struct list_groups_state *state;
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode;
+ char **grps = NULL;
+ int cnt, grpcnt, i = 0;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ /* 'state' is no longer in scope because
+ * list_groups() timed out and returned to the caller.
+ * We must not touch anything here but simply return. */
+ return;
+ }
+
+ state = opaque;
+
+ state->wait_cnt--;
+
+ if (err)
+ goto err;
+
+ rd_kafka_buf_read_i16(reply, &ErrorCode);
+ if (ErrorCode) {
+ err = ErrorCode;
+ goto err;
+ }
+
+ rd_kafka_buf_read_i32(reply, &cnt);
+
+ if (state->desired_group)
+ grpcnt = 1;
+ else
+ grpcnt = cnt;
+
+ if (cnt == 0 || grpcnt == 0)
+ return;
+
+ grps = rd_malloc(sizeof(*grps) * grpcnt);
+
+ while (cnt-- > 0) {
+ rd_kafkap_str_t grp, proto;
+
+ rd_kafka_buf_read_str(reply, &grp);
+ rd_kafka_buf_read_str(reply, &proto);
+
+ if (state->desired_group &&
+ rd_kafkap_str_cmp_str(&grp, state->desired_group))
+ continue;
+
+ grps[i++] = RD_KAFKAP_STR_DUP(&grp);
+
+ if (i == grpcnt)
+ break;
+ }
+
+ if (i > 0) {
+ rd_kafka_error_t *error;
+
+ state->wait_cnt++;
+ error = rd_kafka_DescribeGroupsRequest(
+ rkb, 0, grps, i, RD_KAFKA_REPLYQ(state->q, 0),
+ rd_kafka_DescribeGroups_resp_cb, state);
+ if (error) {
+ rd_kafka_DescribeGroups_resp_cb(
+ rk, rkb, rd_kafka_error_code(error), reply, request,
+ opaque);
+ rd_kafka_error_destroy(error);
+ }
+
+ while (i-- > 0)
+ rd_free(grps[i]);
+ }
+
+
+ rd_free(grps);
+
+err:
+ state->err = err;
+ return;
+
+err_parse:
+ if (grps)
+ rd_free(grps);
+ state->err = reply->rkbuf_err;
+}
+
+rd_kafka_resp_err_t
+rd_kafka_list_groups(rd_kafka_t *rk,
+ const char *group,
+ const struct rd_kafka_group_list **grplistp,
+ int timeout_ms) {
+ rd_kafka_broker_t *rkb;
+ int rkb_cnt = 0;
+ struct list_groups_state state = RD_ZERO_INIT;
+ rd_ts_t ts_end = rd_timeout_init(timeout_ms);
+
+ /* Wait until metadata has been fetched from cluster so
+ * that we have a full broker list.
+ * This state only happens during initial client setup, after that
+ * there'll always be a cached metadata copy. */
+ while (1) {
+ int state_version = rd_kafka_brokers_get_state_version(rk);
+ rd_bool_t has_metadata;
+
+ rd_kafka_rdlock(rk);
+ has_metadata = rk->rk_ts_metadata != 0;
+ rd_kafka_rdunlock(rk);
+
+ if (has_metadata)
+ break;
+
+ if (!rd_kafka_brokers_wait_state_change(
+ rk, state_version, rd_timeout_remains(ts_end)))
+ return RD_KAFKA_RESP_ERR__TIMED_OUT;
+ }
+
+
+ state.q = rd_kafka_q_new(rk);
+ state.desired_group = group;
+ state.grplist = rd_calloc(1, sizeof(*state.grplist));
+ state.grplist_size = group ? 1 : 32;
+
+ state.grplist->groups =
+ rd_malloc(state.grplist_size * sizeof(*state.grplist->groups));
+
+ /* Query each broker for its list of groups */
+ rd_kafka_rdlock(rk);
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ rd_kafka_error_t *error;
+ rd_kafka_broker_lock(rkb);
+ if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb)) {
+ rd_kafka_broker_unlock(rkb);
+ continue;
+ }
+ rd_kafka_broker_unlock(rkb);
+
+ state.wait_cnt++;
+ rkb_cnt++;
+ error = rd_kafka_ListGroupsRequest(
+ rkb, 0, NULL, 0, RD_KAFKA_REPLYQ(state.q, 0),
+ rd_kafka_ListGroups_resp_cb, &state);
+ if (error) {
+ rd_kafka_ListGroups_resp_cb(rk, rkb,
+ rd_kafka_error_code(error),
+ NULL, NULL, &state);
+ rd_kafka_error_destroy(error);
+ }
+ }
+ rd_kafka_rdunlock(rk);
+
+ if (rkb_cnt == 0) {
+ state.err = RD_KAFKA_RESP_ERR__TRANSPORT;
+
+ } else {
+ int remains;
+
+ while (state.wait_cnt > 0 &&
+ !rd_timeout_expired(
+ (remains = rd_timeout_remains(ts_end)))) {
+ rd_kafka_q_serve(state.q, remains, 0,
+ RD_KAFKA_Q_CB_CALLBACK,
+ rd_kafka_poll_cb, NULL);
+ /* Ignore yields */
+ }
+ }
+
+ rd_kafka_q_destroy_owner(state.q);
+
+ if (state.wait_cnt > 0 && !state.err) {
+ if (state.grplist->group_cnt == 0)
+ state.err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ else {
+ *grplistp = state.grplist;
+ return RD_KAFKA_RESP_ERR__PARTIAL;
+ }
+ }
+
+ if (state.err)
+ rd_kafka_group_list_destroy(state.grplist);
+ else
+ *grplistp = state.grplist;
+
+ return state.err;
+}
+
+
+void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist0) {
+ struct rd_kafka_group_list *grplist =
+ (struct rd_kafka_group_list *)grplist0;
+
+ while (grplist->group_cnt-- > 0) {
+ struct rd_kafka_group_info *gi;
+ gi = &grplist->groups[grplist->group_cnt];
+
+ if (gi->broker.host)
+ rd_free(gi->broker.host);
+ if (gi->group)
+ rd_free(gi->group);
+ if (gi->state)
+ rd_free(gi->state);
+ if (gi->protocol_type)
+ rd_free(gi->protocol_type);
+ if (gi->protocol)
+ rd_free(gi->protocol);
+
+ while (gi->member_cnt-- > 0) {
+ struct rd_kafka_group_member_info *mi;
+ mi = &gi->members[gi->member_cnt];
+
+ if (mi->member_id)
+ rd_free(mi->member_id);
+ if (mi->client_id)
+ rd_free(mi->client_id);
+ if (mi->client_host)
+ rd_free(mi->client_host);
+ if (mi->member_metadata)
+ rd_free(mi->member_metadata);
+ if (mi->member_assignment)
+ rd_free(mi->member_assignment);
+ }
+
+ if (gi->members)
+ rd_free(gi->members);
+ }
+
+ if (grplist->groups)
+ rd_free(grplist->groups);
+
+ rd_free(grplist);
+}
+
+
+
+const char *rd_kafka_get_debug_contexts(void) {
+ return RD_KAFKA_DEBUG_CONTEXTS;
+}
+
+
+int rd_kafka_path_is_dir(const char *path) {
+#ifdef _WIN32
+ struct _stat st;
+ return (_stat(path, &st) == 0 && st.st_mode & S_IFDIR);
+#else
+ struct stat st;
+ return (stat(path, &st) == 0 && S_ISDIR(st.st_mode));
+#endif
+}
+
+
+/**
+ * @returns true if directory is empty or can't be accessed, else false.
+ */
+rd_bool_t rd_kafka_dir_is_empty(const char *path) {
+#if _WIN32
+ /* FIXME: Unsupported */
+ return rd_true;
+#else
+ DIR *dir;
+ struct dirent *d;
+#if defined(__sun)
+ struct stat st;
+ int ret = 0;
+#endif
+
+ dir = opendir(path);
+ if (!dir)
+ return rd_true;
+
+ while ((d = readdir(dir))) {
+
+ if (!strcmp(d->d_name, ".") || !strcmp(d->d_name, ".."))
+ continue;
+
+#if defined(__sun)
+ ret = stat(d->d_name, &st);
+ if (ret != 0) {
+ return rd_true; // Can't be accessed
+ }
+ if (S_ISREG(st.st_mode) || S_ISDIR(st.st_mode) ||
+ S_ISLNK(st.st_mode)) {
+#else
+ if (d->d_type == DT_REG || d->d_type == DT_LNK ||
+ d->d_type == DT_DIR) {
+#endif
+ closedir(dir);
+ return rd_false;
+ }
+ }
+
+ closedir(dir);
+ return rd_true;
+#endif
+}
+
+
+void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size) {
+ return rd_malloc(size);
+}
+
+void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size) {
+ return rd_calloc(num, size);
+}
+
+void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr) {
+ rd_free(ptr);
+}
+
+
+int rd_kafka_errno(void) {
+ return errno;
+}
+
+int rd_kafka_unittest(void) {
+ return rd_unittest();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h
new file mode 100644
index 000000000..e3474e50f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka.h
@@ -0,0 +1,9340 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2022 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @file rdkafka.h
+ * @brief Apache Kafka C/C++ consumer and producer client library.
+ *
+ * rdkafka.h contains the public API for librdkafka.
+ * The API is documented in this file as comments prefixing the function, type,
+ * enum, define, etc.
+ *
+ * @sa For the C++ interface see rdkafkacpp.h
+ *
+ * @tableofcontents
+ */
+
+
+/* @cond NO_DOC */
+#ifndef _RDKAFKA_H_
+#define _RDKAFKA_H_
+
+#include <stdio.h>
+#include <inttypes.h>
+#include <sys/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#if 0
+} /* Restore indent */
+#endif
+#endif
+
+#ifdef _WIN32
+#include <basetsd.h>
+#ifndef WIN32_MEAN_AND_LEAN
+#define WIN32_MEAN_AND_LEAN
+#endif
+#include <winsock2.h> /* for sockaddr, .. */
+#ifndef _SSIZE_T_DEFINED
+#define _SSIZE_T_DEFINED
+typedef SSIZE_T ssize_t;
+#endif
+#define RD_UNUSED
+#define RD_INLINE __inline
+#define RD_DEPRECATED __declspec(deprecated)
+#define RD_FORMAT(...)
+#undef RD_EXPORT
+#ifdef LIBRDKAFKA_STATICLIB
+#define RD_EXPORT
+#else
+#ifdef LIBRDKAFKA_EXPORTS
+#define RD_EXPORT __declspec(dllexport)
+#else
+#define RD_EXPORT __declspec(dllimport)
+#endif
+#ifndef LIBRDKAFKA_TYPECHECKS
+#define LIBRDKAFKA_TYPECHECKS 0
+#endif
+#endif
+
+#else
+#include <sys/socket.h> /* for sockaddr, .. */
+
+#define RD_UNUSED __attribute__((unused))
+#define RD_INLINE inline
+#define RD_EXPORT
+#define RD_DEPRECATED __attribute__((deprecated))
+
+#if defined(__clang__) || defined(__GNUC__) || defined(__GNUG__)
+#define RD_HAS_STATEMENT_EXPRESSIONS
+#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__)))
+#else
+#define RD_FORMAT(...)
+#endif
+
+#ifndef LIBRDKAFKA_TYPECHECKS
+#define LIBRDKAFKA_TYPECHECKS 1
+#endif
+#endif
+
+
+/**
+ * @brief Type-checking macros
+ * Compile-time checking that \p ARG is of type \p TYPE.
+ * @returns \p RET
+ */
+#if LIBRDKAFKA_TYPECHECKS
+#define _LRK_TYPECHECK(RET, TYPE, ARG) \
+ ({ \
+ if (0) { \
+ TYPE __t RD_UNUSED = (ARG); \
+ } \
+ RET; \
+ })
+
+#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) \
+ ({ \
+ if (0) { \
+ TYPE __t RD_UNUSED = (ARG); \
+ TYPE2 __t2 RD_UNUSED = (ARG2); \
+ } \
+ RET; \
+ })
+
+#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) \
+ ({ \
+ if (0) { \
+ TYPE __t RD_UNUSED = (ARG); \
+ TYPE2 __t2 RD_UNUSED = (ARG2); \
+ TYPE3 __t3 RD_UNUSED = (ARG3); \
+ } \
+ RET; \
+ })
+#else
+#define _LRK_TYPECHECK(RET, TYPE, ARG) (RET)
+#define _LRK_TYPECHECK2(RET, TYPE, ARG, TYPE2, ARG2) (RET)
+#define _LRK_TYPECHECK3(RET, TYPE, ARG, TYPE2, ARG2, TYPE3, ARG3) (RET)
+#endif
+
+/* @endcond */
+
+
+/**
+ * @name librdkafka version
+ * @{
+ *
+ *
+ */
+
+/**
+ * @brief librdkafka version
+ *
+ * Interpreted as hex \c MM.mm.rr.xx:
+ * - MM = Major
+ * - mm = minor
+ * - rr = revision
+ * - xx = pre-release id (0xff is the final release)
+ *
+ * E.g.: \c 0x000801ff = 0.8.1
+ *
+ * @remark This value should only be used during compile time,
+ * for runtime checks of version use rd_kafka_version()
+ */
+#define RD_KAFKA_VERSION 0x020100ff
+
+/**
+ * @brief Returns the librdkafka version as integer.
+ *
+ * @returns Version integer.
+ *
+ * @sa See RD_KAFKA_VERSION for how to parse the integer format.
+ * @sa Use rd_kafka_version_str() to retreive the version as a string.
+ */
+RD_EXPORT
+int rd_kafka_version(void);
+
+/**
+ * @brief Returns the librdkafka version as string.
+ *
+ * @returns Version string
+ */
+RD_EXPORT
+const char *rd_kafka_version_str(void);
+
+/**@}*/
+
+
+/**
+ * @name Constants, errors, types
+ * @{
+ *
+ *
+ */
+
+
+/**
+ * @enum rd_kafka_type_t
+ *
+ * @brief rd_kafka_t handle type.
+ *
+ * @sa rd_kafka_new()
+ */
+typedef enum rd_kafka_type_t {
+ RD_KAFKA_PRODUCER, /**< Producer client */
+ RD_KAFKA_CONSUMER /**< Consumer client */
+} rd_kafka_type_t;
+
+
+/*!
+ * Timestamp types
+ *
+ * @sa rd_kafka_message_timestamp()
+ */
+typedef enum rd_kafka_timestamp_type_t {
+ RD_KAFKA_TIMESTAMP_NOT_AVAILABLE, /**< Timestamp not available */
+ RD_KAFKA_TIMESTAMP_CREATE_TIME, /**< Message creation time */
+ RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME /**< Log append time */
+} rd_kafka_timestamp_type_t;
+
+
+
+/**
+ * @brief Retrieve supported debug contexts for use with the \c \"debug\"
+ * configuration property. (runtime)
+ *
+ * @returns Comma-separated list of available debugging contexts.
+ */
+RD_EXPORT
+const char *rd_kafka_get_debug_contexts(void);
+
+/**
+ * @brief Supported debug contexts. (compile time)
+ *
+ * @deprecated This compile time value may be outdated at runtime due to
+ * linking another version of the library.
+ * Use rd_kafka_get_debug_contexts() instead.
+ */
+#define RD_KAFKA_DEBUG_CONTEXTS \
+ "all,generic,broker,topic,metadata,feature,queue,msg,protocol,cgrp," \
+ "security,fetch,interceptor,plugin,consumer,admin,eos,mock,assignor," \
+ "conf"
+
+
+/* @cond NO_DOC */
+/* Private types to provide ABI compatibility */
+typedef struct rd_kafka_s rd_kafka_t;
+typedef struct rd_kafka_topic_s rd_kafka_topic_t;
+typedef struct rd_kafka_conf_s rd_kafka_conf_t;
+typedef struct rd_kafka_topic_conf_s rd_kafka_topic_conf_t;
+typedef struct rd_kafka_queue_s rd_kafka_queue_t;
+typedef struct rd_kafka_op_s rd_kafka_event_t;
+typedef struct rd_kafka_topic_result_s rd_kafka_topic_result_t;
+typedef struct rd_kafka_consumer_group_metadata_s
+ rd_kafka_consumer_group_metadata_t;
+typedef struct rd_kafka_error_s rd_kafka_error_t;
+typedef struct rd_kafka_headers_s rd_kafka_headers_t;
+typedef struct rd_kafka_group_result_s rd_kafka_group_result_t;
+typedef struct rd_kafka_acl_result_s rd_kafka_acl_result_t;
+/* @endcond */
+
+
+/**
+ * @enum rd_kafka_resp_err_t
+ * @brief Error codes.
+ *
+ * The negative error codes delimited by two underscores
+ * (\c RD_KAFKA_RESP_ERR__..) denotes errors internal to librdkafka and are
+ * displayed as \c \"Local: \<error string..\>\", while the error codes
+ * delimited by a single underscore (\c RD_KAFKA_RESP_ERR_..) denote broker
+ * errors and are displayed as \c \"Broker: \<error string..\>\".
+ *
+ * @sa Use rd_kafka_err2str() to translate an error code a human readable string
+ */
+typedef enum {
+ /* Internal errors to rdkafka: */
+ /** Begin internal error codes */
+ RD_KAFKA_RESP_ERR__BEGIN = -200,
+ /** Received message is incorrect */
+ RD_KAFKA_RESP_ERR__BAD_MSG = -199,
+ /** Bad/unknown compression */
+ RD_KAFKA_RESP_ERR__BAD_COMPRESSION = -198,
+ /** Broker is going away */
+ RD_KAFKA_RESP_ERR__DESTROY = -197,
+ /** Generic failure */
+ RD_KAFKA_RESP_ERR__FAIL = -196,
+ /** Broker transport failure */
+ RD_KAFKA_RESP_ERR__TRANSPORT = -195,
+ /** Critical system resource */
+ RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE = -194,
+ /** Failed to resolve broker */
+ RD_KAFKA_RESP_ERR__RESOLVE = -193,
+ /** Produced message timed out*/
+ RD_KAFKA_RESP_ERR__MSG_TIMED_OUT = -192,
+ /** Reached the end of the topic+partition queue on
+ * the broker. Not really an error.
+ * This event is disabled by default,
+ * see the `enable.partition.eof` configuration property. */
+ RD_KAFKA_RESP_ERR__PARTITION_EOF = -191,
+ /** Permanent: Partition does not exist in cluster. */
+ RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION = -190,
+ /** File or filesystem error */
+ RD_KAFKA_RESP_ERR__FS = -189,
+ /** Permanent: Topic does not exist in cluster. */
+ RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC = -188,
+ /** All broker connections are down. */
+ RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN = -187,
+ /** Invalid argument, or invalid configuration */
+ RD_KAFKA_RESP_ERR__INVALID_ARG = -186,
+ /** Operation timed out */
+ RD_KAFKA_RESP_ERR__TIMED_OUT = -185,
+ /** Queue is full */
+ RD_KAFKA_RESP_ERR__QUEUE_FULL = -184,
+ /** ISR count < required.acks */
+ RD_KAFKA_RESP_ERR__ISR_INSUFF = -183,
+ /** Broker node update */
+ RD_KAFKA_RESP_ERR__NODE_UPDATE = -182,
+ /** SSL error */
+ RD_KAFKA_RESP_ERR__SSL = -181,
+ /** Waiting for coordinator to become available. */
+ RD_KAFKA_RESP_ERR__WAIT_COORD = -180,
+ /** Unknown client group */
+ RD_KAFKA_RESP_ERR__UNKNOWN_GROUP = -179,
+ /** Operation in progress */
+ RD_KAFKA_RESP_ERR__IN_PROGRESS = -178,
+ /** Previous operation in progress, wait for it to finish. */
+ RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS = -177,
+ /** This operation would interfere with an existing subscription */
+ RD_KAFKA_RESP_ERR__EXISTING_SUBSCRIPTION = -176,
+ /** Assigned partitions (rebalance_cb) */
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS = -175,
+ /** Revoked partitions (rebalance_cb) */
+ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS = -174,
+ /** Conflicting use */
+ RD_KAFKA_RESP_ERR__CONFLICT = -173,
+ /** Wrong state */
+ RD_KAFKA_RESP_ERR__STATE = -172,
+ /** Unknown protocol */
+ RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL = -171,
+ /** Not implemented */
+ RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED = -170,
+ /** Authentication failure*/
+ RD_KAFKA_RESP_ERR__AUTHENTICATION = -169,
+ /** No stored offset */
+ RD_KAFKA_RESP_ERR__NO_OFFSET = -168,
+ /** Outdated */
+ RD_KAFKA_RESP_ERR__OUTDATED = -167,
+ /** Timed out in queue */
+ RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE = -166,
+ /** Feature not supported by broker */
+ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE = -165,
+ /** Awaiting cache update */
+ RD_KAFKA_RESP_ERR__WAIT_CACHE = -164,
+ /** Operation interrupted (e.g., due to yield)) */
+ RD_KAFKA_RESP_ERR__INTR = -163,
+ /** Key serialization error */
+ RD_KAFKA_RESP_ERR__KEY_SERIALIZATION = -162,
+ /** Value serialization error */
+ RD_KAFKA_RESP_ERR__VALUE_SERIALIZATION = -161,
+ /** Key deserialization error */
+ RD_KAFKA_RESP_ERR__KEY_DESERIALIZATION = -160,
+ /** Value deserialization error */
+ RD_KAFKA_RESP_ERR__VALUE_DESERIALIZATION = -159,
+ /** Partial response */
+ RD_KAFKA_RESP_ERR__PARTIAL = -158,
+ /** Modification attempted on read-only object */
+ RD_KAFKA_RESP_ERR__READ_ONLY = -157,
+ /** No such entry / item not found */
+ RD_KAFKA_RESP_ERR__NOENT = -156,
+ /** Read underflow */
+ RD_KAFKA_RESP_ERR__UNDERFLOW = -155,
+ /** Invalid type */
+ RD_KAFKA_RESP_ERR__INVALID_TYPE = -154,
+ /** Retry operation */
+ RD_KAFKA_RESP_ERR__RETRY = -153,
+ /** Purged in queue */
+ RD_KAFKA_RESP_ERR__PURGE_QUEUE = -152,
+ /** Purged in flight */
+ RD_KAFKA_RESP_ERR__PURGE_INFLIGHT = -151,
+ /** Fatal error: see rd_kafka_fatal_error() */
+ RD_KAFKA_RESP_ERR__FATAL = -150,
+ /** Inconsistent state */
+ RD_KAFKA_RESP_ERR__INCONSISTENT = -149,
+ /** Gap-less ordering would not be guaranteed if proceeding */
+ RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE = -148,
+ /** Maximum poll interval exceeded */
+ RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED = -147,
+ /** Unknown broker */
+ RD_KAFKA_RESP_ERR__UNKNOWN_BROKER = -146,
+ /** Functionality not configured */
+ RD_KAFKA_RESP_ERR__NOT_CONFIGURED = -145,
+ /** Instance has been fenced */
+ RD_KAFKA_RESP_ERR__FENCED = -144,
+ /** Application generated error */
+ RD_KAFKA_RESP_ERR__APPLICATION = -143,
+ /** Assignment lost */
+ RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST = -142,
+ /** No operation performed */
+ RD_KAFKA_RESP_ERR__NOOP = -141,
+ /** No offset to automatically reset to */
+ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET = -140,
+ /** Partition log truncation detected */
+ RD_KAFKA_RESP_ERR__LOG_TRUNCATION = -139,
+
+ /** End internal error codes */
+ RD_KAFKA_RESP_ERR__END = -100,
+
+ /* Kafka broker errors: */
+ /** Unknown broker error */
+ RD_KAFKA_RESP_ERR_UNKNOWN = -1,
+ /** Success */
+ RD_KAFKA_RESP_ERR_NO_ERROR = 0,
+ /** Offset out of range */
+ RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE = 1,
+ /** Invalid message */
+ RD_KAFKA_RESP_ERR_INVALID_MSG = 2,
+ /** Unknown topic or partition */
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART = 3,
+ /** Invalid message size */
+ RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE = 4,
+ /** Leader not available */
+ RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE = 5,
+/** Not leader for partition */
+#define RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER \
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION = 6,
+ /** Request timed out */
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT = 7,
+ /** Broker not available */
+ RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE = 8,
+ /** Replica not available */
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE = 9,
+ /** Message size too large */
+ RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE = 10,
+ /** StaleControllerEpochCode */
+ RD_KAFKA_RESP_ERR_STALE_CTRL_EPOCH = 11,
+ /** Offset metadata string too large */
+ RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE = 12,
+ /** Broker disconnected before response received */
+ RD_KAFKA_RESP_ERR_NETWORK_EXCEPTION = 13,
+ /** Coordinator load in progress */
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS = 14,
+/** Group coordinator load in progress */
+#define RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS \
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS
+ /** Coordinator not available */
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE = 15,
+/** Group coordinator not available */
+#define RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE \
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE
+ /** Not coordinator */
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR = 16,
+/** Not coordinator for group */
+#define RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP \
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR
+ /** Invalid topic */
+ RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION = 17,
+ /** Message batch larger than configured server segment size */
+ RD_KAFKA_RESP_ERR_RECORD_LIST_TOO_LARGE = 18,
+ /** Not enough in-sync replicas */
+ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS = 19,
+ /** Message(s) written to insufficient number of in-sync replicas */
+ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND = 20,
+ /** Invalid required acks value */
+ RD_KAFKA_RESP_ERR_INVALID_REQUIRED_ACKS = 21,
+ /** Specified group generation id is not valid */
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION = 22,
+ /** Inconsistent group protocol */
+ RD_KAFKA_RESP_ERR_INCONSISTENT_GROUP_PROTOCOL = 23,
+ /** Invalid group.id */
+ RD_KAFKA_RESP_ERR_INVALID_GROUP_ID = 24,
+ /** Unknown member */
+ RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID = 25,
+ /** Invalid session timeout */
+ RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT = 26,
+ /** Group rebalance in progress */
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS = 27,
+ /** Commit offset data size is not valid */
+ RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE = 28,
+ /** Topic authorization failed */
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED = 29,
+ /** Group authorization failed */
+ RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED = 30,
+ /** Cluster authorization failed */
+ RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED = 31,
+ /** Invalid timestamp */
+ RD_KAFKA_RESP_ERR_INVALID_TIMESTAMP = 32,
+ /** Unsupported SASL mechanism */
+ RD_KAFKA_RESP_ERR_UNSUPPORTED_SASL_MECHANISM = 33,
+ /** Illegal SASL state */
+ RD_KAFKA_RESP_ERR_ILLEGAL_SASL_STATE = 34,
+ /** Unuspported version */
+ RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION = 35,
+ /** Topic already exists */
+ RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS = 36,
+ /** Invalid number of partitions */
+ RD_KAFKA_RESP_ERR_INVALID_PARTITIONS = 37,
+ /** Invalid replication factor */
+ RD_KAFKA_RESP_ERR_INVALID_REPLICATION_FACTOR = 38,
+ /** Invalid replica assignment */
+ RD_KAFKA_RESP_ERR_INVALID_REPLICA_ASSIGNMENT = 39,
+ /** Invalid config */
+ RD_KAFKA_RESP_ERR_INVALID_CONFIG = 40,
+ /** Not controller for cluster */
+ RD_KAFKA_RESP_ERR_NOT_CONTROLLER = 41,
+ /** Invalid request */
+ RD_KAFKA_RESP_ERR_INVALID_REQUEST = 42,
+ /** Message format on broker does not support request */
+ RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT = 43,
+ /** Policy violation */
+ RD_KAFKA_RESP_ERR_POLICY_VIOLATION = 44,
+ /** Broker received an out of order sequence number */
+ RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER = 45,
+ /** Broker received a duplicate sequence number */
+ RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER = 46,
+ /** Producer attempted an operation with an old epoch */
+ RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH = 47,
+ /** Producer attempted a transactional operation in an invalid state */
+ RD_KAFKA_RESP_ERR_INVALID_TXN_STATE = 48,
+ /** Producer attempted to use a producer id which is not
+ * currently assigned to its transactional id */
+ RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING = 49,
+ /** Transaction timeout is larger than the maximum
+ * value allowed by the broker's max.transaction.timeout.ms */
+ RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT = 50,
+ /** Producer attempted to update a transaction while another
+ * concurrent operation on the same transaction was ongoing */
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS = 51,
+ /** Indicates that the transaction coordinator sending a
+ * WriteTxnMarker is no longer the current coordinator for a
+ * given producer */
+ RD_KAFKA_RESP_ERR_TRANSACTION_COORDINATOR_FENCED = 52,
+ /** Transactional Id authorization failed */
+ RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED = 53,
+ /** Security features are disabled */
+ RD_KAFKA_RESP_ERR_SECURITY_DISABLED = 54,
+ /** Operation not attempted */
+ RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED = 55,
+ /** Disk error when trying to access log file on the disk */
+ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR = 56,
+ /** The user-specified log directory is not found in the broker config
+ */
+ RD_KAFKA_RESP_ERR_LOG_DIR_NOT_FOUND = 57,
+ /** SASL Authentication failed */
+ RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED = 58,
+ /** Unknown Producer Id */
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID = 59,
+ /** Partition reassignment is in progress */
+ RD_KAFKA_RESP_ERR_REASSIGNMENT_IN_PROGRESS = 60,
+ /** Delegation Token feature is not enabled */
+ RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTH_DISABLED = 61,
+ /** Delegation Token is not found on server */
+ RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_NOT_FOUND = 62,
+ /** Specified Principal is not valid Owner/Renewer */
+ RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_OWNER_MISMATCH = 63,
+ /** Delegation Token requests are not allowed on this connection */
+ RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_REQUEST_NOT_ALLOWED = 64,
+ /** Delegation Token authorization failed */
+ RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_AUTHORIZATION_FAILED = 65,
+ /** Delegation Token is expired */
+ RD_KAFKA_RESP_ERR_DELEGATION_TOKEN_EXPIRED = 66,
+ /** Supplied principalType is not supported */
+ RD_KAFKA_RESP_ERR_INVALID_PRINCIPAL_TYPE = 67,
+ /** The group is not empty */
+ RD_KAFKA_RESP_ERR_NON_EMPTY_GROUP = 68,
+ /** The group id does not exist */
+ RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND = 69,
+ /** The fetch session ID was not found */
+ RD_KAFKA_RESP_ERR_FETCH_SESSION_ID_NOT_FOUND = 70,
+ /** The fetch session epoch is invalid */
+ RD_KAFKA_RESP_ERR_INVALID_FETCH_SESSION_EPOCH = 71,
+ /** No matching listener */
+ RD_KAFKA_RESP_ERR_LISTENER_NOT_FOUND = 72,
+ /** Topic deletion is disabled */
+ RD_KAFKA_RESP_ERR_TOPIC_DELETION_DISABLED = 73,
+ /** Leader epoch is older than broker epoch */
+ RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH = 74,
+ /** Leader epoch is newer than broker epoch */
+ RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH = 75,
+ /** Unsupported compression type */
+ RD_KAFKA_RESP_ERR_UNSUPPORTED_COMPRESSION_TYPE = 76,
+ /** Broker epoch has changed */
+ RD_KAFKA_RESP_ERR_STALE_BROKER_EPOCH = 77,
+ /** Leader high watermark is not caught up */
+ RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE = 78,
+ /** Group member needs a valid member ID */
+ RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED = 79,
+ /** Preferred leader was not available */
+ RD_KAFKA_RESP_ERR_PREFERRED_LEADER_NOT_AVAILABLE = 80,
+ /** Consumer group has reached maximum size */
+ RD_KAFKA_RESP_ERR_GROUP_MAX_SIZE_REACHED = 81,
+ /** Static consumer fenced by other consumer with same
+ * group.instance.id. */
+ RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID = 82,
+ /** Eligible partition leaders are not available */
+ RD_KAFKA_RESP_ERR_ELIGIBLE_LEADERS_NOT_AVAILABLE = 83,
+ /** Leader election not needed for topic partition */
+ RD_KAFKA_RESP_ERR_ELECTION_NOT_NEEDED = 84,
+ /** No partition reassignment is in progress */
+ RD_KAFKA_RESP_ERR_NO_REASSIGNMENT_IN_PROGRESS = 85,
+ /** Deleting offsets of a topic while the consumer group is
+ * subscribed to it */
+ RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC = 86,
+ /** Broker failed to validate record */
+ RD_KAFKA_RESP_ERR_INVALID_RECORD = 87,
+ /** There are unstable offsets that need to be cleared */
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT = 88,
+ /** Throttling quota has been exceeded */
+ RD_KAFKA_RESP_ERR_THROTTLING_QUOTA_EXCEEDED = 89,
+ /** There is a newer producer with the same transactionalId
+ * which fences the current one */
+ RD_KAFKA_RESP_ERR_PRODUCER_FENCED = 90,
+ /** Request illegally referred to resource that does not exist */
+ RD_KAFKA_RESP_ERR_RESOURCE_NOT_FOUND = 91,
+ /** Request illegally referred to the same resource twice */
+ RD_KAFKA_RESP_ERR_DUPLICATE_RESOURCE = 92,
+ /** Requested credential would not meet criteria for acceptability */
+ RD_KAFKA_RESP_ERR_UNACCEPTABLE_CREDENTIAL = 93,
+ /** Indicates that the either the sender or recipient of a
+ * voter-only request is not one of the expected voters */
+ RD_KAFKA_RESP_ERR_INCONSISTENT_VOTER_SET = 94,
+ /** Invalid update version */
+ RD_KAFKA_RESP_ERR_INVALID_UPDATE_VERSION = 95,
+ /** Unable to update finalized features due to server error */
+ RD_KAFKA_RESP_ERR_FEATURE_UPDATE_FAILED = 96,
+ /** Request principal deserialization failed during forwarding */
+ RD_KAFKA_RESP_ERR_PRINCIPAL_DESERIALIZATION_FAILURE = 97,
+
+ RD_KAFKA_RESP_ERR_END_ALL,
+} rd_kafka_resp_err_t;
+
+
+/**
+ * @brief Error code value, name and description.
+ * Typically for use with language bindings to automatically expose
+ * the full set of librdkafka error codes.
+ */
+struct rd_kafka_err_desc {
+ rd_kafka_resp_err_t code; /**< Error code */
+ const char *name; /**< Error name, same as code enum sans prefix */
+ const char *desc; /**< Human readable error description. */
+};
+
+
+/**
+ * @brief Returns the full list of error codes.
+ */
+RD_EXPORT
+void rd_kafka_get_err_descs(const struct rd_kafka_err_desc **errdescs,
+ size_t *cntp);
+
+
+
+/**
+ * @brief Returns a human readable representation of a kafka error.
+ *
+ * @param err Error code to translate
+ */
+RD_EXPORT
+const char *rd_kafka_err2str(rd_kafka_resp_err_t err);
+
+
+
+/**
+ * @brief Returns the error code name (enum name).
+ *
+ * @param err Error code to translate
+ */
+RD_EXPORT
+const char *rd_kafka_err2name(rd_kafka_resp_err_t err);
+
+
+/**
+ * @brief Returns the last error code generated by a legacy API call
+ * in the current thread.
+ *
+ * The legacy APIs are the ones using errno to propagate error value, namely:
+ * - rd_kafka_topic_new()
+ * - rd_kafka_consume_start()
+ * - rd_kafka_consume_stop()
+ * - rd_kafka_consume()
+ * - rd_kafka_consume_batch()
+ * - rd_kafka_consume_callback()
+ * - rd_kafka_consume_queue()
+ * - rd_kafka_produce()
+ *
+ * The main use for this function is to avoid converting system \p errno
+ * values to rd_kafka_resp_err_t codes for legacy APIs.
+ *
+ * @remark The last error is stored per-thread, if multiple rd_kafka_t handles
+ * are used in the same application thread the developer needs to
+ * make sure rd_kafka_last_error() is called immediately after
+ * a failed API call.
+ *
+ * @remark errno propagation from librdkafka is not safe on Windows
+ * and should not be used, use rd_kafka_last_error() instead.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_last_error(void);
+
+
+/**
+ * @brief Converts the system errno value \p errnox to a rd_kafka_resp_err_t
+ * error code upon failure from the following functions:
+ * - rd_kafka_topic_new()
+ * - rd_kafka_consume_start()
+ * - rd_kafka_consume_stop()
+ * - rd_kafka_consume()
+ * - rd_kafka_consume_batch()
+ * - rd_kafka_consume_callback()
+ * - rd_kafka_consume_queue()
+ * - rd_kafka_produce()
+ *
+ * @param errnox System errno value to convert
+ *
+ * @returns Appropriate error code for \p errnox
+ *
+ * @remark A better alternative is to call rd_kafka_last_error() immediately
+ * after any of the above functions return -1 or NULL.
+ *
+ * @deprecated Use rd_kafka_last_error() to retrieve the last error code
+ * set by the legacy librdkafka APIs.
+ *
+ * @sa rd_kafka_last_error()
+ */
+RD_EXPORT RD_DEPRECATED rd_kafka_resp_err_t rd_kafka_errno2err(int errnox);
+
+
+/**
+ * @brief Returns the thread-local system errno
+ *
+ * On most platforms this is the same as \p errno but in case of different
+ * runtimes between library and application (e.g., Windows static DLLs)
+ * this provides a means for exposing the errno librdkafka uses.
+ *
+ * @remark The value is local to the current calling thread.
+ *
+ * @deprecated Use rd_kafka_last_error() to retrieve the last error code
+ * set by the legacy librdkafka APIs.
+ */
+RD_EXPORT RD_DEPRECATED int rd_kafka_errno(void);
+
+
+
+/**
+ * @brief Returns the first fatal error set on this client instance,
+ * or RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has occurred.
+ *
+ * This function is to be used with the Idempotent Producer and \c error_cb
+ * to detect fatal errors.
+ *
+ * Generally all errors raised by \c error_cb are to be considered
+ * informational and temporary, the client will try to recover from all
+ * errors in a graceful fashion (by retrying, etc).
+ *
+ * However, some errors should logically be considered fatal to retain
+ * consistency; in particular a set of errors that may occur when using the
+ * Idempotent Producer and the in-order or exactly-once producer guarantees
+ * can't be satisfied.
+ *
+ * @param rk Client instance.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written to if there is a fatal error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if no fatal error has been raised, else
+ * any other error code.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t
+rd_kafka_fatal_error(rd_kafka_t *rk, char *errstr, size_t errstr_size);
+
+
+/**
+ * @brief Trigger a fatal error for testing purposes.
+ *
+ * Since there is no practical way to trigger real fatal errors in the
+ * idempotent producer, this method allows an application to trigger
+ * fabricated fatal errors in tests to check its error handling code.
+ *
+ * @param rk Client instance.
+ * @param err The underlying error code.
+ * @param reason A human readable error reason.
+ * Will be prefixed with "test_fatal_error: " to differentiate
+ * from real fatal errors.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if a fatal error was triggered, or
+ * RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS if a previous fatal error
+ * has already been triggered.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_test_fatal_error(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *reason);
+
+
+/**
+ * @returns the error code for \p error or RD_KAFKA_RESP_ERR_NO_ERROR if
+ * \p error is NULL.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error);
+
+/**
+ * @returns the error code name for \p error, e.g, "ERR_UNKNOWN_MEMBER_ID",
+ * or an empty string if \p error is NULL.
+ *
+ * @remark The lifetime of the returned pointer is the same as the error object.
+ *
+ * @sa rd_kafka_err2name()
+ */
+RD_EXPORT
+const char *rd_kafka_error_name(const rd_kafka_error_t *error);
+
+/**
+ * @returns a human readable error string for \p error,
+ * or an empty string if \p error is NULL.
+ *
+ * @remark The lifetime of the returned pointer is the same as the error object.
+ */
+RD_EXPORT
+const char *rd_kafka_error_string(const rd_kafka_error_t *error);
+
+
+/**
+ * @returns 1 if the error is a fatal error, indicating that the client
+ * instance is no longer usable, else 0 (also if \p error is NULL).
+ */
+RD_EXPORT
+int rd_kafka_error_is_fatal(const rd_kafka_error_t *error);
+
+
+/**
+ * @returns 1 if the operation may be retried,
+ * else 0 (also if \p error is NULL).
+ */
+RD_EXPORT
+int rd_kafka_error_is_retriable(const rd_kafka_error_t *error);
+
+
+/**
+ * @returns 1 if the error is an abortable transaction error in which case
+ * the application must call rd_kafka_abort_transaction() and
+ * start a new transaction with rd_kafka_begin_transaction() if it
+ * wishes to proceed with transactions.
+ * Else returns 0 (also if \p error is NULL).
+ *
+ * @remark The return value of this method is only valid for errors returned
+ * by the transactional API.
+ */
+RD_EXPORT
+int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error);
+
+/**
+ * @brief Free and destroy an error object.
+ *
+ * @remark As a conveniance it is permitted to pass a NULL \p error.
+ */
+RD_EXPORT
+void rd_kafka_error_destroy(rd_kafka_error_t *error);
+
+
+/**
+ * @brief Create a new error object with error \p code and optional
+ * human readable error string in \p fmt.
+ *
+ * This method is mainly to be used for mocking errors in application test code.
+ *
+ * The returned object must be destroyed with rd_kafka_error_destroy().
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_error_new(rd_kafka_resp_err_t code,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 2, 3);
+
+
+/**
+ * @brief Topic+Partition place holder
+ *
+ * Generic place holder for a Topic+Partition and its related information
+ * used for multiple purposes:
+ * - consumer offset (see rd_kafka_commit(), et.al.)
+ * - group rebalancing callback (rd_kafka_conf_set_rebalance_cb())
+ * - offset commit result callback (rd_kafka_conf_set_offset_commit_cb())
+ */
+
+/**
+ * @brief Generic place holder for a specific Topic+Partition.
+ *
+ * @sa rd_kafka_topic_partition_list_new()
+ */
+typedef struct rd_kafka_topic_partition_s {
+ char *topic; /**< Topic name */
+ int32_t partition; /**< Partition */
+ int64_t offset; /**< Offset */
+ void *metadata; /**< Metadata */
+ size_t metadata_size; /**< Metadata size */
+ void *opaque; /**< Opaque value for application use */
+ rd_kafka_resp_err_t err; /**< Error code, depending on use. */
+ void *_private; /**< INTERNAL USE ONLY,
+ * INITIALIZE TO ZERO, DO NOT TOUCH,
+ * DO NOT COPY, DO NOT SHARE WITH OTHER
+ * rd_kafka_t INSTANCES. */
+} rd_kafka_topic_partition_t;
+
+
+/**
+ * @brief Destroy a rd_kafka_topic_partition_t.
+ * @remark This must not be called for elements in a topic partition list.
+ */
+RD_EXPORT
+void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar);
+
+
+/**
+ * @brief Sets the offset leader epoch (use -1 to clear).
+ *
+ * @param rktpar Partition object.
+ * @param leader_epoch Offset leader epoch, use -1 to reset.
+ *
+ * @remark See KIP-320 for more information.
+ */
+RD_EXPORT
+void rd_kafka_topic_partition_set_leader_epoch(
+ rd_kafka_topic_partition_t *rktpar,
+ int32_t leader_epoch);
+
+/**
+ * @returns the offset leader epoch, if relevant and known,
+ * else -1.
+ *
+ * @param rktpar Partition object.
+ *
+ * @remark See KIP-320 for more information.
+ */
+RD_EXPORT
+int32_t rd_kafka_topic_partition_get_leader_epoch(
+ const rd_kafka_topic_partition_t *rktpar);
+
+/**
+ * @brief A growable list of Topic+Partitions.
+ *
+ */
+typedef struct rd_kafka_topic_partition_list_s {
+ int cnt; /**< Current number of elements */
+ int size; /**< Current allocated size */
+ rd_kafka_topic_partition_t *elems; /**< Element array[] */
+} rd_kafka_topic_partition_list_t;
+
+
+/**
+ * @brief Create a new list/vector Topic+Partition container.
+ *
+ * @param size Initial allocated size used when the expected number of
+ * elements is known or can be estimated.
+ * Avoids reallocation and possibly relocation of the
+ * elems array.
+ *
+ * @returns A newly allocated Topic+Partition list.
+ *
+ * @remark Use rd_kafka_topic_partition_list_destroy() to free all resources
+ * in use by a list and the list itself.
+ * @sa rd_kafka_topic_partition_list_add()
+ */
+RD_EXPORT
+rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size);
+
+
+/**
+ * @brief Free all resources used by the list and the list itself.
+ */
+RD_EXPORT
+void rd_kafka_topic_partition_list_destroy(
+ rd_kafka_topic_partition_list_t *rkparlist);
+
+/**
+ * @brief Add topic+partition to list
+ *
+ * @param rktparlist List to extend
+ * @param topic Topic name (copied)
+ * @param partition Partition id
+ *
+ * @returns The object which can be used to fill in additionals fields.
+ */
+RD_EXPORT
+rd_kafka_topic_partition_t *
+rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition);
+
+
+/**
+ * @brief Add range of partitions from \p start to \p stop inclusive.
+ *
+ * @param rktparlist List to extend
+ * @param topic Topic name (copied)
+ * @param start Start partition of range
+ * @param stop Last partition of range (inclusive)
+ */
+RD_EXPORT
+void rd_kafka_topic_partition_list_add_range(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t start,
+ int32_t stop);
+
+
+
+/**
+ * @brief Delete partition from list.
+ *
+ * @param rktparlist List to modify
+ * @param topic Topic name to match
+ * @param partition Partition to match
+ *
+ * @returns 1 if partition was found (and removed), else 0.
+ *
+ * @remark Any held indices to elems[] are unusable after this call returns 1.
+ */
+RD_EXPORT
+int rd_kafka_topic_partition_list_del(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition);
+
+
+/**
+ * @brief Delete partition from list by elems[] index.
+ *
+ * @returns 1 if partition was found (and removed), else 0.
+ *
+ * @sa rd_kafka_topic_partition_list_del()
+ */
+RD_EXPORT
+int rd_kafka_topic_partition_list_del_by_idx(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int idx);
+
+
+/**
+ * @brief Make a copy of an existing list.
+ *
+ * @param src The existing list to copy.
+ *
+ * @returns A new list fully populated to be identical to \p src
+ */
+RD_EXPORT
+rd_kafka_topic_partition_list_t *
+rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src);
+
+
+
+/**
+ * @brief Set offset to \p offset for \p topic and \p partition
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
+ * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if \p partition was not found
+ * in the list.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition,
+ int64_t offset);
+
+
+
+/**
+ * @brief Find element by \p topic and \p partition.
+ *
+ * @returns a pointer to the first matching element, or NULL if not found.
+ */
+RD_EXPORT
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition);
+
+
+/**
+ * @brief Sort list using comparator \p cmp.
+ *
+ * If \p cmp is NULL the default comparator will be used that
+ * sorts by ascending topic name and partition.
+ *
+ * \p cmp_opaque is provided as the \p cmp_opaque argument to \p cmp.
+ *
+ */
+RD_EXPORT void rd_kafka_topic_partition_list_sort(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int (*cmp)(const void *a, const void *b, void *cmp_opaque),
+ void *cmp_opaque);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Var-arg tag types
+ * @{
+ *
+ */
+
+/**
+ * @enum rd_kafka_vtype_t
+ *
+ * @brief Var-arg tag types
+ *
+ * @sa rd_kafka_producev()
+ */
+typedef enum rd_kafka_vtype_t {
+ RD_KAFKA_VTYPE_END, /**< va-arg sentinel */
+ RD_KAFKA_VTYPE_TOPIC, /**< (const char *) Topic name */
+ RD_KAFKA_VTYPE_RKT, /**< (rd_kafka_topic_t *) Topic handle */
+ RD_KAFKA_VTYPE_PARTITION, /**< (int32_t) Partition */
+ RD_KAFKA_VTYPE_VALUE, /**< (void *, size_t) Message value (payload)*/
+ RD_KAFKA_VTYPE_KEY, /**< (void *, size_t) Message key */
+ RD_KAFKA_VTYPE_OPAQUE, /**< (void *) Per-message application opaque
+ * value. This is the same as
+ * the _private field in
+ * rd_kafka_message_t, also known
+ * as the msg_opaque. */
+ RD_KAFKA_VTYPE_MSGFLAGS, /**< (int) RD_KAFKA_MSG_F_.. flags */
+ RD_KAFKA_VTYPE_TIMESTAMP, /**< (int64_t) Milliseconds since epoch UTC */
+ RD_KAFKA_VTYPE_HEADER, /**< (const char *, const void *, ssize_t)
+ * Message Header */
+ RD_KAFKA_VTYPE_HEADERS, /**< (rd_kafka_headers_t *) Headers list */
+} rd_kafka_vtype_t;
+
+
+/**
+ * @brief VTYPE + argument container for use with rd_kafka_produce_va()
+ *
+ * See RD_KAFKA_V_..() macros below for which union field corresponds
+ * to which RD_KAFKA_VTYPE_...
+ */
+typedef struct rd_kafka_vu_s {
+ rd_kafka_vtype_t vtype; /**< RD_KAFKA_VTYPE_.. */
+ /** Value union, see RD_KAFKA_V_.. macros for which field to use. */
+ union {
+ const char *cstr;
+ rd_kafka_topic_t *rkt;
+ int i;
+ int32_t i32;
+ int64_t i64;
+ struct {
+ void *ptr;
+ size_t size;
+ } mem;
+ struct {
+ const char *name;
+ const void *val;
+ ssize_t size;
+ } header;
+ rd_kafka_headers_t *headers;
+ void *ptr;
+ char _pad[64]; /**< Padding size for future-proofness */
+ } u;
+} rd_kafka_vu_t;
+
+/**
+ * @brief Convenience macros for rd_kafka_vtype_t that takes the
+ * correct arguments for each vtype.
+ */
+
+/*!
+ * va-arg end sentinel used to terminate the variable argument list
+ */
+#define RD_KAFKA_V_END RD_KAFKA_VTYPE_END
+
+/*!
+ * Topic name (const char *)
+ *
+ * rd_kafka_vu_t field: u.cstr
+ */
+#define RD_KAFKA_V_TOPIC(topic) \
+ _LRK_TYPECHECK(RD_KAFKA_VTYPE_TOPIC, const char *, topic), \
+ (const char *)topic
+/*!
+ * Topic object (rd_kafka_topic_t *)
+ *
+ * rd_kafka_vu_t field: u.rkt
+ */
+#define RD_KAFKA_V_RKT(rkt) \
+ _LRK_TYPECHECK(RD_KAFKA_VTYPE_RKT, rd_kafka_topic_t *, rkt), \
+ (rd_kafka_topic_t *)rkt
+/*!
+ * Partition (int32_t)
+ *
+ * rd_kafka_vu_t field: u.i32
+ */
+#define RD_KAFKA_V_PARTITION(partition) \
+ _LRK_TYPECHECK(RD_KAFKA_VTYPE_PARTITION, int32_t, partition), \
+ (int32_t)partition
+/*!
+ * Message value/payload pointer and length (void *, size_t)
+ *
+ * rd_kafka_vu_t fields: u.mem.ptr, u.mem.size
+ */
+#define RD_KAFKA_V_VALUE(VALUE, LEN) \
+ _LRK_TYPECHECK2(RD_KAFKA_VTYPE_VALUE, void *, VALUE, size_t, LEN), \
+ (void *)VALUE, (size_t)LEN
+/*!
+ * Message key pointer and length (const void *, size_t)
+ *
+ * rd_kafka_vu_t field: u.mem.ptr, rd_kafka_vu.t.u.mem.size
+ */
+#define RD_KAFKA_V_KEY(KEY, LEN) \
+ _LRK_TYPECHECK2(RD_KAFKA_VTYPE_KEY, const void *, KEY, size_t, LEN), \
+ (void *)KEY, (size_t)LEN
+/*!
+ * Message opaque pointer (void *)
+ * Same as \c msg_opaque, \c produce(.., msg_opaque),
+ * and \c rkmessage->_private .
+ *
+ * rd_kafka_vu_t field: u.ptr
+ */
+#define RD_KAFKA_V_OPAQUE(msg_opaque) \
+ _LRK_TYPECHECK(RD_KAFKA_VTYPE_OPAQUE, void *, msg_opaque), \
+ (void *)msg_opaque
+/*!
+ * Message flags (int)
+ * @sa RD_KAFKA_MSG_F_COPY, et.al.
+ *
+ * rd_kafka_vu_t field: u.i
+ */
+#define RD_KAFKA_V_MSGFLAGS(msgflags) \
+ _LRK_TYPECHECK(RD_KAFKA_VTYPE_MSGFLAGS, int, msgflags), (int)msgflags
+/*!
+ * Timestamp in milliseconds since epoch UTC (int64_t).
+ * A value of 0 will use the current wall-clock time.
+ *
+ * rd_kafka_vu_t field: u.i64
+ */
+#define RD_KAFKA_V_TIMESTAMP(timestamp) \
+ _LRK_TYPECHECK(RD_KAFKA_VTYPE_TIMESTAMP, int64_t, timestamp), \
+ (int64_t)timestamp
+/*!
+ * Add Message Header (const char *NAME, const void *VALUE, ssize_t LEN).
+ * @sa rd_kafka_header_add()
+ * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed
+ * in the same call to producev().
+ *
+ * rd_kafka_vu_t fields: u.header.name, u.header.val, u.header.size
+ */
+#define RD_KAFKA_V_HEADER(NAME, VALUE, LEN) \
+ _LRK_TYPECHECK3(RD_KAFKA_VTYPE_HEADER, const char *, NAME, \
+ const void *, VALUE, ssize_t, LEN), \
+ (const char *)NAME, (const void *)VALUE, (ssize_t)LEN
+
+/*!
+ * Message Headers list (rd_kafka_headers_t *).
+ * The message object will assume ownership of the headers (unless producev()
+ * fails).
+ * Any existing headers will be replaced.
+ * @sa rd_kafka_message_set_headers()
+ * @remark RD_KAFKA_V_HEADER() and RD_KAFKA_V_HEADERS() MUST NOT be mixed
+ * in the same call to producev().
+ *
+ * rd_kafka_vu_t fields: u.headers
+ */
+#define RD_KAFKA_V_HEADERS(HDRS) \
+ _LRK_TYPECHECK(RD_KAFKA_VTYPE_HEADERS, rd_kafka_headers_t *, HDRS), \
+ (rd_kafka_headers_t *)HDRS
+
+
+/**@}*/
+
+
+/**
+ * @name Message headers
+ * @{
+ *
+ * @brief Message headers consist of a list of (string key, binary value) pairs.
+ * Duplicate keys are supported and the order in which keys were
+ * added are retained.
+ *
+ * Header values are considered binary and may have three types of
+ * value:
+ * - proper value with size > 0 and a valid pointer
+ * - empty value with size = 0 and any non-NULL pointer
+ * - null value with size = 0 and a NULL pointer
+ *
+ * Headers require Apache Kafka broker version v0.11.0.0 or later.
+ *
+ * Header operations are O(n).
+ */
+
+
+/**
+ * @brief Create a new headers list.
+ *
+ * @param initial_count Preallocate space for this number of headers.
+ * Any number of headers may be added, updated and
+ * removed regardless of the initial count.
+ */
+RD_EXPORT rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count);
+
+/**
+ * @brief Destroy the headers list. The object and any returned value pointers
+ * are not usable after this call.
+ */
+RD_EXPORT void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs);
+
+/**
+ * @brief Make a copy of headers list \p src.
+ */
+RD_EXPORT rd_kafka_headers_t *
+rd_kafka_headers_copy(const rd_kafka_headers_t *src);
+
+/**
+ * @brief Add header with name \p name and value \p val (copied) of size
+ * \p size (not including null-terminator).
+ *
+ * @param hdrs Headers list.
+ * @param name Header name.
+ * @param name_size Header name size (not including the null-terminator).
+ * If -1 the \p name length is automatically acquired using
+ * strlen().
+ * @param value Pointer to header value, or NULL (set size to 0 or -1).
+ * @param value_size Size of header value. If -1 the \p value is assumed to be a
+ * null-terminated string and the length is automatically
+ * acquired using strlen().
+ *
+ * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only,
+ * else RD_KAFKA_RESP_ERR_NO_ERROR.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs,
+ const char *name,
+ ssize_t name_size,
+ const void *value,
+ ssize_t value_size);
+
+/**
+ * @brief Remove all headers for the given key (if any).
+ *
+ * @returns RD_KAFKA_RESP_ERR__READ_ONLY if the headers are read-only,
+ * RD_KAFKA_RESP_ERR__NOENT if no matching headers were found,
+ * else RD_KAFKA_RESP_ERR_NO_ERROR if headers were removed.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs,
+ const char *name);
+
+
+/**
+ * @brief Find last header in list \p hdrs matching \p name.
+ *
+ * @param hdrs Headers list.
+ * @param name Header to find (last match).
+ * @param valuep (out) Set to a (null-terminated) const pointer to the value
+ * (may be NULL).
+ * @param sizep (out) Set to the value's size (not including null-terminator).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if an entry was found, else
+ * RD_KAFKA_RESP_ERR__NOENT.
+ *
+ * @remark The returned pointer in \p valuep includes a trailing null-terminator
+ * that is not accounted for in \p sizep.
+ * @remark The returned pointer is only valid as long as the headers list and
+ * the header item is valid.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs,
+ const char *name,
+ const void **valuep,
+ size_t *sizep);
+
+/**
+ * @brief Iterator for headers matching \p name.
+ *
+ * Same semantics as rd_kafka_header_get_last()
+ *
+ * @param hdrs Headers to iterate.
+ * @param idx Iterator index, start at 0 and increment by one for each call
+ * as long as RD_KAFKA_RESP_ERR_NO_ERROR is returned.
+ * @param name Header name to match.
+ * @param valuep (out) Set to a (null-terminated) const pointer to the value
+ * (may be NULL).
+ * @param sizep (out) Set to the value's size (not including null-terminator).
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_header_get(const rd_kafka_headers_t *hdrs,
+ size_t idx,
+ const char *name,
+ const void **valuep,
+ size_t *sizep);
+
+
+/**
+ * @brief Iterator for all headers.
+ *
+ * Same semantics as rd_kafka_header_get()
+ *
+ * @sa rd_kafka_header_get()
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs,
+ size_t idx,
+ const char **namep,
+ const void **valuep,
+ size_t *sizep);
+
+
+
+/**@}*/
+
+
+
+/**
+ * @name Kafka messages
+ * @{
+ *
+ */
+
+
+
+// FIXME: This doesn't show up in docs for some reason
+// "Compound rd_kafka_message_t is not documented."
+
+/**
+ * @brief A Kafka message as returned by the \c rd_kafka_consume*() family
+ * of functions as well as provided to the Producer \c dr_msg_cb().
+ *
+ * For the consumer this object has two purposes:
+ * - provide the application with a consumed message. (\c err == 0)
+ * - report per-topic+partition consumer errors (\c err != 0)
+ *
+ * The application must check \c err to decide what action to take.
+ *
+ * When the application is finished with a message it must call
+ * rd_kafka_message_destroy() unless otherwise noted.
+ */
+typedef struct rd_kafka_message_s {
+ rd_kafka_resp_err_t err; /**< Non-zero for error signaling. */
+ rd_kafka_topic_t *rkt; /**< Topic */
+ int32_t partition; /**< Partition */
+ void *payload; /**< Producer: original message payload.
+ * Consumer: Depends on the value of \c err :
+ * - \c err==0: Message payload.
+ * - \c err!=0: Error string */
+ size_t len; /**< Depends on the value of \c err :
+ * - \c err==0: Message payload length
+ * - \c err!=0: Error string length */
+ void *key; /**< Depends on the value of \c err :
+ * - \c err==0: Optional message key */
+ size_t key_len; /**< Depends on the value of \c err :
+ * - \c err==0: Optional message key length*/
+ int64_t offset; /**< Consumer:
+ * - Message offset (or offset for error
+ * if \c err!=0 if applicable).
+ * Producer, dr_msg_cb:
+ * Message offset assigned by broker.
+ * May be RD_KAFKA_OFFSET_INVALID
+ * for retried messages when
+ * idempotence is enabled. */
+ void *_private; /**< Consumer:
+ * - rdkafka private pointer:
+ * DO NOT MODIFY, DO NOT COPY.
+ * Producer:
+ * - dr_msg_cb:
+ * msg_opaque from produce() call or
+ * RD_KAFKA_V_OPAQUE from producev(). */
+} rd_kafka_message_t;
+
+
+/**
+ * @brief Frees resources for \p rkmessage and hands ownership back to rdkafka.
+ */
+RD_EXPORT
+void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage);
+
+
+
+/**
+ * @brief Returns the error string for an errored rd_kafka_message_t or NULL if
+ * there was no error.
+ *
+ * @remark This function MUST NOT be used with the producer.
+ */
+RD_EXPORT
+const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage);
+
+
+/**
+ * @brief Returns the message timestamp for a consumed message.
+ *
+ * The timestamp is the number of milliseconds since the epoch (UTC).
+ *
+ * \p tstype (if not NULL) is updated to indicate the type of timestamp.
+ *
+ * @returns message timestamp, or -1 if not available.
+ *
+ * @remark Message timestamps require broker version 0.10.0 or later.
+ */
+RD_EXPORT
+int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage,
+ rd_kafka_timestamp_type_t *tstype);
+
+
+
+/**
+ * @brief Returns the latency for a produced message measured from
+ * the produce() call.
+ *
+ * @returns the latency in microseconds, or -1 if not available.
+ */
+RD_EXPORT
+int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage);
+
+
+/**
+ * @brief Returns the broker id of the broker the message was produced to
+ * or fetched from.
+ *
+ * @returns a broker id if known, else -1.
+ */
+RD_EXPORT
+int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage);
+
+
+/**
+ * @brief Get the message header list.
+ *
+ * The returned pointer in \p *hdrsp is associated with the \p rkmessage and
+ * must not be used after destruction of the message object or the header
+ * list is replaced with rd_kafka_message_set_headers().
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if headers were returned,
+ * RD_KAFKA_RESP_ERR__NOENT if the message has no headers,
+ * or another error code if the headers could not be parsed.
+ *
+ * @remark Headers require broker version 0.11.0.0 or later.
+ *
+ * @remark As an optimization the raw protocol headers are parsed on
+ * the first call to this function.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_message_headers(const rd_kafka_message_t *rkmessage,
+ rd_kafka_headers_t **hdrsp);
+
+/**
+ * @brief Get the message header list and detach the list from the message
+ * making the application the owner of the headers.
+ * The application must eventually destroy the headers using
+ * rd_kafka_headers_destroy().
+ * The message's headers will be set to NULL.
+ *
+ * Otherwise same semantics as rd_kafka_message_headers()
+ *
+ * @sa rd_kafka_message_headers
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage,
+ rd_kafka_headers_t **hdrsp);
+
+
+/**
+ * @brief Replace the message's current headers with a new list.
+ *
+ * @param rkmessage The message to set headers.
+ * @param hdrs New header list. The message object assumes ownership of
+ * the list, the list will be destroyed automatically with
+ * the message object.
+ * The new headers list may be updated until the message object
+ * is passed or returned to librdkafka.
+ *
+ * @remark The existing headers object, if any, will be destroyed.
+ */
+RD_EXPORT
+void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage,
+ rd_kafka_headers_t *hdrs);
+
+
+/**
+ * @brief Returns the number of header key/value pairs
+ *
+ * @param hdrs Headers to count
+ */
+RD_EXPORT size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs);
+
+
+/**
+ * @enum rd_kafka_msg_status_t
+ * @brief Message persistence status can be used by the application to
+ * find out if a produced message was persisted in the topic log.
+ */
+typedef enum {
+ /** Message was never transmitted to the broker, or failed with
+ * an error indicating it was not written to the log.
+ * Application retry risks ordering, but not duplication. */
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED = 0,
+
+ /** Message was transmitted to broker, but no acknowledgement was
+ * received.
+ * Application retry risks ordering and duplication. */
+ RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED = 1,
+
+ /** Message was written to the log and acknowledged by the broker.
+ * No reason for application to retry.
+ * Note: this value should only be trusted with \c acks=all. */
+ RD_KAFKA_MSG_STATUS_PERSISTED = 2
+} rd_kafka_msg_status_t;
+
+
+/**
+ * @brief Returns the message's persistence status in the topic log.
+ *
+ * @remark The message status is not available in on_acknowledgement
+ * interceptors.
+ */
+RD_EXPORT rd_kafka_msg_status_t
+rd_kafka_message_status(const rd_kafka_message_t *rkmessage);
+
+
+/**
+ * @returns the message's partition leader epoch at the time the message was
+ * fetched and if known, else -1.
+ *
+ * @remark This API must only be used on consumed messages without error.
+ * @remark Requires broker version >= 2.10 (KIP-320).
+ */
+RD_EXPORT int32_t
+rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage);
+
+
+/**@}*/
+
+
+/**
+ * @name Configuration interface
+ * @{
+ *
+ * @brief Main/global configuration property interface
+ *
+ */
+
+/**
+ * @enum rd_kafka_conf_res_t
+ * @brief Configuration result type
+ */
+typedef enum {
+ RD_KAFKA_CONF_UNKNOWN = -2, /**< Unknown configuration name. */
+ RD_KAFKA_CONF_INVALID = -1, /**< Invalid configuration value or
+ * property or value not supported in
+ * this build. */
+ RD_KAFKA_CONF_OK = 0 /**< Configuration okay */
+} rd_kafka_conf_res_t;
+
+
+/**
+ * @brief Create configuration object.
+ *
+ * When providing your own configuration to the \c rd_kafka_*_new_*() calls
+ * the rd_kafka_conf_t objects needs to be created with this function
+ * which will set up the defaults.
+ * I.e.:
+ * @code
+ * rd_kafka_conf_t *myconf;
+ * rd_kafka_conf_res_t res;
+ *
+ * myconf = rd_kafka_conf_new();
+ * res = rd_kafka_conf_set(myconf, "socket.timeout.ms", "600",
+ * errstr, sizeof(errstr));
+ * if (res != RD_KAFKA_CONF_OK)
+ * die("%s\n", errstr);
+ *
+ * rk = rd_kafka_new(..., myconf);
+ * @endcode
+ *
+ * Please see CONFIGURATION.md for the default settings or use
+ * rd_kafka_conf_properties_show() to provide the information at runtime.
+ *
+ * The properties are identical to the Apache Kafka configuration properties
+ * whenever possible.
+ *
+ * @remark A successful call to rd_kafka_new() will assume ownership of
+ * the conf object and rd_kafka_conf_destroy() must not be called.
+ *
+ * @returns A new rd_kafka_conf_t object with defaults set.
+ *
+ * @sa rd_kafka_new(), rd_kafka_conf_set(), rd_kafka_conf_destroy()
+ */
+RD_EXPORT
+rd_kafka_conf_t *rd_kafka_conf_new(void);
+
+
+/**
+ * @brief Destroys a conf object.
+ */
+RD_EXPORT
+void rd_kafka_conf_destroy(rd_kafka_conf_t *conf);
+
+
+/**
+ * @brief Creates a copy/duplicate of configuration object \p conf
+ *
+ * @remark Interceptors are NOT copied to the new configuration object.
+ * @sa rd_kafka_interceptor_f_on_conf_dup
+ */
+RD_EXPORT
+rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf);
+
+
+/**
+ * @brief Same as rd_kafka_conf_dup() but with an array of property name
+ * prefixes to filter out (ignore) when copying.
+ */
+RD_EXPORT
+rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf,
+ size_t filter_cnt,
+ const char **filter);
+
+
+
+/**
+ * @returns the configuration object used by an rd_kafka_t instance.
+ * For use with rd_kafka_conf_get(), et.al., to extract configuration
+ * properties from a running client.
+ *
+ * @remark the returned object is read-only and its lifetime is the same
+ * as the rd_kafka_t object.
+ */
+RD_EXPORT
+const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk);
+
+
+/**
+ * @brief Sets a configuration property.
+ *
+ * \p conf must have been previously created with rd_kafka_conf_new().
+ *
+ * Fallthrough:
+ * Topic-level configuration properties may be set using this interface
+ * in which case they are applied on the \c default_topic_conf.
+ * If no \c default_topic_conf has been set one will be created.
+ * Any subsequent rd_kafka_conf_set_default_topic_conf() calls will
+ * replace the current default topic configuration.
+ *
+ * @returns \c rd_kafka_conf_res_t to indicate success or failure.
+ * In case of failure \p errstr is updated to contain a human readable
+ * error string.
+ *
+ * @remark Setting properties or values that were disabled at build time due to
+ * missing dependencies will return RD_KAFKA_CONF_INVALID.
+ */
+RD_EXPORT
+rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf,
+ const char *name,
+ const char *value,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Enable event sourcing.
+ * \p events is a bitmask of \c RD_KAFKA_EVENT_* of events to enable
+ * for consumption by `rd_kafka_queue_poll()`.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events);
+
+
+/**
+ * @brief Generic event callback to be used with the event API to trigger
+ * callbacks for \c rd_kafka_event_t objects from a background
+ * thread serving the background queue.
+ *
+ * How to use:
+ * 1. First set the event callback on the configuration object with this
+ * function, followed by creating an rd_kafka_t instance
+ * with rd_kafka_new().
+ * 2. Get the instance's background queue with rd_kafka_queue_get_background()
+ * and pass it as the reply/response queue to an API that takes an
+ * event queue, such as rd_kafka_CreateTopics().
+ * 3. As the response event is ready and enqueued on the background queue the
+ * event callback will be triggered from the background thread.
+ * 4. Prior to destroying the client instance, loose your reference to the
+ * background queue by calling rd_kafka_queue_destroy().
+ *
+ * The application must destroy the \c rkev passed to \p event cb using
+ * rd_kafka_event_destroy().
+ *
+ * The \p event_cb \c opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * @remark This callback is a specialized alternative to the poll-based
+ * event API described in the Event interface section.
+ *
+ * @remark The \p event_cb will be called spontaneously from a background
+ * thread completely managed by librdkafka.
+ * Take care to perform proper locking of application objects.
+ *
+ * @warning The application MUST NOT call rd_kafka_destroy() from the
+ * event callback.
+ *
+ * @sa rd_kafka_queue_get_background
+ */
+RD_EXPORT void rd_kafka_conf_set_background_event_cb(
+ rd_kafka_conf_t *conf,
+ void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque));
+
+
+/**
+ * @deprecated See rd_kafka_conf_set_dr_msg_cb()
+ */
+RD_EXPORT
+void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf,
+ void (*dr_cb)(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque));
+
+/**
+ * @brief \b Producer: Set delivery report callback in provided \p conf object.
+ *
+ * The delivery report callback will be called once for each message
+ * accepted by rd_kafka_produce() (et.al) with \p err set to indicate
+ * the result of the produce request.
+ *
+ * The callback is called when a message is succesfully produced or
+ * if librdkafka encountered a permanent failure.
+ * Delivery errors occur when the retry count is exceeded, when the
+ * message.timeout.ms timeout is exceeded or there is a permanent error
+ * like RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART.
+ *
+ * An application must call rd_kafka_poll() at regular intervals to
+ * serve queued delivery report callbacks.
+ *
+ * The broker-assigned offset can be retrieved with \c rkmessage->offset
+ * and the timestamp can be retrieved using rd_kafka_message_timestamp().
+ *
+ * The \p dr_msg_cb \c opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ * The per-message msg_opaque value is available in
+ * \c rd_kafka_message_t._private.
+ *
+ * @remark The Idempotent Producer may return invalid timestamp
+ * (RD_KAFKA_TIMESTAMP_NOT_AVAILABLE), and
+ * and offset (RD_KAFKA_OFFSET_INVALID) for retried messages
+ * that were previously successfully delivered but not properly
+ * acknowledged.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_dr_msg_cb(
+ rd_kafka_conf_t *conf,
+ void (*dr_msg_cb)(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque));
+
+
+/**
+ * @brief \b Consumer: Set consume callback for use with
+ * rd_kafka_consumer_poll()
+ *
+ * The \p consume_cb \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ */
+RD_EXPORT
+void rd_kafka_conf_set_consume_cb(
+ rd_kafka_conf_t *conf,
+ void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque));
+
+/**
+ * @brief \b Consumer: Set rebalance callback for use with
+ * coordinated consumer group balancing.
+ *
+ * The \p err field is set to either RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+ * or RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS and 'partitions'
+ * contains the full partition set that was either assigned or revoked.
+ *
+ * Registering a \p rebalance_cb turns off librdkafka's automatic
+ * partition assignment/revocation and instead delegates that responsibility
+ * to the application's \p rebalance_cb.
+ *
+ * The rebalance callback is responsible for updating librdkafka's
+ * assignment set based on the two events: RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+ * and RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS but should also be able to handle
+ * arbitrary rebalancing failures where \p err is neither of those.
+ * @remark In this latter case (arbitrary error), the application must
+ * call rd_kafka_assign(rk, NULL) to synchronize state.
+ *
+ * For eager/non-cooperative `partition.assignment.strategy` assignors,
+ * such as `range` and `roundrobin`, the application must use
+ * rd_kafka_assign() to set or clear the entire assignment.
+ * For the cooperative assignors, such as `cooperative-sticky`, the application
+ * must use rd_kafka_incremental_assign() for
+ * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS and rd_kafka_incremental_unassign()
+ * for RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS.
+ *
+ * Without a rebalance callback this is done automatically by librdkafka
+ * but registering a rebalance callback gives the application flexibility
+ * in performing other operations along with the assigning/revocation,
+ * such as fetching offsets from an alternate location (on assign)
+ * or manually committing offsets (on revoke).
+ *
+ * rebalance_cb is always triggered exactly once when a rebalance completes
+ * with a new assignment, even if that assignment is empty. If an
+ * eager/non-cooperative assignor is configured, there will eventually be
+ * exactly one corresponding call to rebalance_cb to revoke these partitions
+ * (even if empty), whether this is due to a group rebalance or lost
+ * partitions. In the cooperative case, rebalance_cb will never be called if
+ * the set of partitions being revoked is empty (whether or not lost).
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * @remark The \p partitions list is destroyed by librdkafka on return
+ * return from the rebalance_cb and must not be freed or
+ * saved by the application.
+ *
+ * @remark Be careful when modifying the \p partitions list.
+ * Changing this list should only be done to change the initial
+ * offsets for each partition.
+ * But a function like `rd_kafka_position()` might have unexpected
+ * effects for instance when a consumer gets assigned a partition
+ * it used to consume at an earlier rebalance. In this case, the
+ * list of partitions will be updated with the old offset for that
+ * partition. In this case, it is generally better to pass a copy
+ * of the list (see `rd_kafka_topic_partition_list_copy()`).
+ * The result of `rd_kafka_position()` is typically outdated in
+ * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS.
+ *
+ * @sa rd_kafka_assign()
+ * @sa rd_kafka_incremental_assign()
+ * @sa rd_kafka_incremental_unassign()
+ * @sa rd_kafka_assignment_lost()
+ * @sa rd_kafka_rebalance_protocol()
+ *
+ * The following example shows the application's responsibilities:
+ * @code
+ * static void rebalance_cb (rd_kafka_t *rk, rd_kafka_resp_err_t err,
+ * rd_kafka_topic_partition_list_t *partitions,
+ * void *opaque) {
+ *
+ * switch (err)
+ * {
+ * case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ * // application may load offets from arbitrary external
+ * // storage here and update \p partitions
+ * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ * rd_kafka_incremental_assign(rk, partitions);
+ * else // EAGER
+ * rd_kafka_assign(rk, partitions);
+ * break;
+ *
+ * case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ * if (manual_commits) // Optional explicit manual commit
+ * rd_kafka_commit(rk, partitions, 0); // sync commit
+ *
+ * if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE"))
+ * rd_kafka_incremental_unassign(rk, partitions);
+ * else // EAGER
+ * rd_kafka_assign(rk, NULL);
+ * break;
+ *
+ * default:
+ * handle_unlikely_error(err);
+ * rd_kafka_assign(rk, NULL); // sync state
+ * break;
+ * }
+ * }
+ * @endcode
+ *
+ * @remark The above example lacks error handling for assign calls, see
+ * the examples/ directory.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_rebalance_cb(
+ rd_kafka_conf_t *conf,
+ void (*rebalance_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque));
+
+
+
+/**
+ * @brief \b Consumer: Set offset commit callback for use with consumer groups.
+ *
+ * The results of automatic or manual offset commits will be scheduled
+ * for this callback and is served by rd_kafka_consumer_poll().
+ *
+ * If no partitions had valid offsets to commit this callback will be called
+ * with \p err == RD_KAFKA_RESP_ERR__NO_OFFSET which is not to be considered
+ * an error.
+ *
+ * The \p offsets list contains per-partition information:
+ * - \c offset: committed offset (attempted)
+ * - \c err: commit error
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ */
+RD_EXPORT
+void rd_kafka_conf_set_offset_commit_cb(
+ rd_kafka_conf_t *conf,
+ void (*offset_commit_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque));
+
+
+/**
+ * @brief Set error callback in provided conf object.
+ *
+ * The error callback is used by librdkafka to signal warnings and errors
+ * back to the application.
+ *
+ * These errors should generally be considered informational and non-permanent,
+ * the client will try to recover automatically from all type of errors.
+ * Given that the client and cluster configuration is correct the
+ * application should treat these as temporary errors.
+ *
+ * \p error_cb will be triggered with \c err set to RD_KAFKA_RESP_ERR__FATAL
+ * if a fatal error has been raised; in this case use rd_kafka_fatal_error() to
+ * retrieve the fatal error code and error string, and then begin terminating
+ * the client instance.
+ *
+ * If no \p error_cb is registered, or RD_KAFKA_EVENT_ERROR has not been set
+ * with rd_kafka_conf_set_events, then the errors will be logged instead.
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ */
+RD_EXPORT
+void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf,
+ void (*error_cb)(rd_kafka_t *rk,
+ int err,
+ const char *reason,
+ void *opaque));
+
+/**
+ * @brief Set throttle callback.
+ *
+ * The throttle callback is used to forward broker throttle times to the
+ * application for Produce and Fetch (consume) requests.
+ *
+ * Callbacks are triggered whenever a non-zero throttle time is returned by
+ * the broker, or when the throttle time drops back to zero.
+ *
+ * An application must call rd_kafka_poll() or rd_kafka_consumer_poll() at
+ * regular intervals to serve queued callbacks.
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * @remark Requires broker version 0.9.0 or later.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf,
+ void (*throttle_cb)(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int throttle_time_ms,
+ void *opaque));
+
+
+/**
+ * @brief Set logger callback.
+ *
+ * The default is to print to stderr, but a syslog logger is also available,
+ * see rd_kafka_log_print and rd_kafka_log_syslog for the builtin alternatives.
+ * Alternatively the application may provide its own logger callback.
+ * Or pass \p func as NULL to disable logging.
+ *
+ * This is the configuration alternative to the deprecated rd_kafka_set_logger()
+ *
+ * @remark The log_cb will be called spontaneously from librdkafka's internal
+ * threads unless logs have been forwarded to a poll queue through
+ * \c rd_kafka_set_log_queue().
+ * An application MUST NOT call any librdkafka APIs or do any prolonged
+ * work in a non-forwarded \c log_cb.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf,
+ void (*log_cb)(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf));
+
+
+/**
+ * @brief Set statistics callback in provided conf object.
+ *
+ * The statistics callback is triggered from rd_kafka_poll() every
+ * \c statistics.interval.ms (needs to be configured separately).
+ * Function arguments:
+ * - \p rk - Kafka handle
+ * - \p json - String containing the statistics data in JSON format
+ * - \p json_len - Length of \p json string.
+ * - \p opaque - Application-provided opaque as set by
+ * rd_kafka_conf_set_opaque().
+ *
+ * For more information on the format of \p json, see
+ * https://github.com/edenhill/librdkafka/wiki/Statistics
+ *
+ * If the application wishes to hold on to the \p json pointer and free
+ * it at a later time it must return 1 from the \p stats_cb.
+ * If the application returns 0 from the \p stats_cb then librdkafka
+ * will immediately free the \p json pointer.
+ *
+ * See STATISTICS.md for a full definition of the JSON object.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_stats_cb(
+ rd_kafka_conf_t *conf,
+ int (*stats_cb)(rd_kafka_t *rk, char *json, size_t json_len, void *opaque));
+
+/**
+ * @brief Set SASL/OAUTHBEARER token refresh callback in provided conf object.
+ *
+ * @param conf the configuration to mutate.
+ * @param oauthbearer_token_refresh_cb the callback to set; callback function
+ * arguments:<br>
+ * \p rk - Kafka handle<br>
+ * \p oauthbearer_config - Value of configuration property
+ * sasl.oauthbearer.config.
+ * \p opaque - Application-provided opaque set via
+ * rd_kafka_conf_set_opaque()
+ *
+ * The SASL/OAUTHBEARER token refresh callback is triggered via rd_kafka_poll()
+ * whenever OAUTHBEARER is the SASL mechanism and a token needs to be retrieved,
+ * typically based on the configuration defined in \c sasl.oauthbearer.config.
+ *
+ * The callback should invoke rd_kafka_oauthbearer_set_token()
+ * or rd_kafka_oauthbearer_set_token_failure() to indicate success
+ * or failure, respectively.
+ *
+ * The refresh operation is eventable and may be received via
+ * rd_kafka_queue_poll() with an event type of
+ * \c RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH.
+ *
+ * Note that before any SASL/OAUTHBEARER broker connection can succeed the
+ * application must call rd_kafka_oauthbearer_set_token() once -- either
+ * directly or, more typically, by invoking either rd_kafka_poll(),
+ * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), etc, in order to cause
+ * retrieval of an initial token to occur.
+ *
+ * Alternatively, the application can enable the SASL queue by calling
+ * rd_kafka_conf_enable_sasl_queue() on the configuration object prior to
+ * creating the client instance, get the SASL queue with
+ * rd_kafka_queue_get_sasl(), and either serve the queue manually by calling
+ * rd_kafka_queue_poll(), or redirecting the queue to the background thread to
+ * have the queue served automatically. For the latter case the SASL queue
+ * must be forwarded to the background queue with rd_kafka_queue_forward().
+ * A convenience function is available to automatically forward the SASL queue
+ * to librdkafka's background thread, see
+ * rd_kafka_sasl_background_callbacks_enable().
+ *
+ * An unsecured JWT refresh handler is provided by librdkafka for development
+ * and testing purposes, it is enabled by setting
+ * the \c enable.sasl.oauthbearer.unsecure.jwt property to true and is
+ * mutually exclusive to using a refresh callback.
+ *
+ * @sa rd_kafka_sasl_background_callbacks_enable()
+ * @sa rd_kafka_queue_get_sasl()
+ */
+RD_EXPORT
+void rd_kafka_conf_set_oauthbearer_token_refresh_cb(
+ rd_kafka_conf_t *conf,
+ void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque));
+
+/**
+ * @brief Enable/disable creation of a queue specific to SASL events
+ * and callbacks.
+ *
+ * For SASL mechanisms that trigger callbacks (currently OAUTHBEARER) this
+ * configuration API allows an application to get a dedicated
+ * queue for the SASL events/callbacks. After enabling the queue with this API
+ * the application can retrieve the queue by calling
+ * rd_kafka_queue_get_sasl() on the client instance.
+ * This queue may then be served directly by the application
+ * (with rd_kafka_queue_poll(), et.al) or forwarded to another queue, such as
+ * the background queue.
+ *
+ * A convenience function is available to automatically forward the SASL queue
+ * to librdkafka's background thread, see
+ * rd_kafka_sasl_background_callbacks_enable().
+ *
+ * By default (\p enable = 0) the main queue (as served by rd_kafka_poll(),
+ * et.al.) is used for SASL callbacks.
+ *
+ * @remark The SASL queue is currently only used by the SASL OAUTHBEARER
+ * mechanism's token_refresh_cb().
+ *
+ * @sa rd_kafka_queue_get_sasl()
+ * @sa rd_kafka_sasl_background_callbacks_enable()
+ */
+
+RD_EXPORT
+void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable);
+
+
+/**
+ * @brief Set socket callback.
+ *
+ * The socket callback is responsible for opening a socket
+ * according to the supplied \p domain, \p type and \p protocol.
+ * The socket shall be created with \c CLOEXEC set in a racefree fashion, if
+ * possible.
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * Default:
+ * - on linux: racefree CLOEXEC
+ * - others : non-racefree CLOEXEC
+ *
+ * @remark The callback will be called from an internal librdkafka thread.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_socket_cb(
+ rd_kafka_conf_t *conf,
+ int (*socket_cb)(int domain, int type, int protocol, void *opaque));
+
+
+
+/**
+ * @brief Set connect callback.
+ *
+ * The connect callback is responsible for connecting socket \p sockfd
+ * to peer address \p addr.
+ * The \p id field contains the broker identifier.
+ *
+ * \p connect_cb shall return 0 on success (socket connected) or an error
+ * number (errno) on error.
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * @remark The callback will be called from an internal librdkafka thread.
+ */
+RD_EXPORT void
+rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf,
+ int (*connect_cb)(int sockfd,
+ const struct sockaddr *addr,
+ int addrlen,
+ const char *id,
+ void *opaque));
+
+/**
+ * @brief Set close socket callback.
+ *
+ * Close a socket (optionally opened with socket_cb()).
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * @remark The callback will be called from an internal librdkafka thread.
+ */
+RD_EXPORT void rd_kafka_conf_set_closesocket_cb(
+ rd_kafka_conf_t *conf,
+ int (*closesocket_cb)(int sockfd, void *opaque));
+
+
+
+#ifndef _WIN32
+/**
+ * @brief Set open callback.
+ *
+ * The open callback is responsible for opening the file specified by
+ * pathname, flags and mode.
+ * The file shall be opened with \c CLOEXEC set in a racefree fashion, if
+ * possible.
+ *
+ * Default:
+ * - on linux: racefree CLOEXEC
+ * - others : non-racefree CLOEXEC
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * @remark The callback will be called from an internal librdkafka thread.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_open_cb(
+ rd_kafka_conf_t *conf,
+ int (*open_cb)(const char *pathname, int flags, mode_t mode, void *opaque));
+#endif
+
+/** Forward declaration to avoid netdb.h or winsock includes */
+struct addrinfo;
+
+/**
+ * @brief Set address resolution callback.
+ *
+ * The callback is responsible for resolving the hostname \p node and the
+ * service \p service into a list of socket addresses as \c getaddrinfo(3)
+ * would. The \p hints and \p res parameters function as they do for
+ * \c getaddrinfo(3). The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * If the callback is invoked with a NULL \p node, \p service, and \p hints, the
+ * callback should instead free the addrinfo struct specified in \p res. In this
+ * case the callback must succeed; the return value will not be checked by the
+ * caller.
+ *
+ * The callback's return value is interpreted as the return value of \p
+ * \c getaddrinfo(3).
+ *
+ * @remark The callback will be called from an internal librdkafka thread.
+ */
+RD_EXPORT void
+rd_kafka_conf_set_resolve_cb(rd_kafka_conf_t *conf,
+ int (*resolve_cb)(const char *node,
+ const char *service,
+ const struct addrinfo *hints,
+ struct addrinfo **res,
+ void *opaque));
+
+/**
+ * @brief Sets the verification callback of the broker certificate
+ *
+ * The verification callback is triggered from internal librdkafka threads
+ * upon connecting to a broker. On each connection attempt the callback
+ * will be called for each certificate in the broker's certificate chain,
+ * starting at the root certification, as long as the application callback
+ * returns 1 (valid certificate).
+ * \c broker_name and \c broker_id correspond to the broker the connection
+ * is being made to.
+ * The \c x509_error argument indicates if OpenSSL's verification of
+ * the certificate succeed (0) or failed (an OpenSSL error code).
+ * The application may set the SSL context error code by returning 0
+ * from the verify callback and providing a non-zero SSL context error code
+ * in \c x509_error.
+ * If the verify callback sets \c x509_error to 0, returns 1, and the
+ * original \c x509_error was non-zero, the error on the SSL context will
+ * be cleared.
+ * \c x509_error is always a valid pointer to an int.
+ *
+ * \c depth is the depth of the current certificate in the chain, starting
+ * at the root certificate.
+ *
+ * The certificate itself is passed in binary DER format in \c buf of
+ * size \c size.
+ *
+ * The callback must return 1 if verification succeeds, or
+ * 0 if verification fails and then write a human-readable error message
+ * to \c errstr (limited to \c errstr_size bytes, including nul-term).
+ *
+ * The callback's \p opaque argument is the opaque set with
+ * rd_kafka_conf_set_opaque().
+ *
+ * @returns RD_KAFKA_CONF_OK if SSL is supported in this build, else
+ * RD_KAFKA_CONF_INVALID.
+ *
+ * @warning This callback will be called from internal librdkafka threads.
+ *
+ * @remark See <openssl/x509_vfy.h> in the OpenSSL source distribution
+ * for a list of \p x509_error codes.
+ */
+RD_EXPORT
+rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(
+ rd_kafka_conf_t *conf,
+ int (*ssl_cert_verify_cb)(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque));
+
+
+/**
+ * @enum rd_kafka_cert_type_t
+ *
+ * @brief SSL certificate type
+ *
+ * @sa rd_kafka_conf_set_ssl_cert
+ */
+typedef enum rd_kafka_cert_type_t {
+ RD_KAFKA_CERT_PUBLIC_KEY, /**< Client's public key */
+ RD_KAFKA_CERT_PRIVATE_KEY, /**< Client's private key */
+ RD_KAFKA_CERT_CA, /**< CA certificate */
+ RD_KAFKA_CERT__CNT,
+} rd_kafka_cert_type_t;
+
+/**
+ * @enum rd_kafka_cert_enc_t
+ *
+ * @brief SSL certificate encoding
+ *
+ * @sa rd_kafka_conf_set_ssl_cert
+ */
+typedef enum rd_kafka_cert_enc_t {
+ RD_KAFKA_CERT_ENC_PKCS12, /**< PKCS#12 */
+ RD_KAFKA_CERT_ENC_DER, /**< DER / binary X.509 ASN1 */
+ RD_KAFKA_CERT_ENC_PEM, /**< PEM */
+ RD_KAFKA_CERT_ENC__CNT,
+} rd_kafka_cert_enc_t;
+
+
+/**
+ * @brief Set certificate/key \p cert_type from the \p cert_enc encoded
+ * memory at \p buffer of \p size bytes.
+ *
+ * @param conf Configuration object.
+ * @param cert_type Certificate or key type to configure.
+ * @param cert_enc Buffer \p encoding type.
+ * @param buffer Memory pointer to encoded certificate or key.
+ * The memory is not referenced after this function returns.
+ * @param size Size of memory at \p buffer.
+ * @param errstr Memory were a human-readable error string will be written
+ * on failure.
+ * @param errstr_size Size of \p errstr, including space for nul-terminator.
+ *
+ * @returns RD_KAFKA_CONF_OK on success or RD_KAFKA_CONF_INVALID if the
+ * memory in \p buffer is of incorrect encoding, or if librdkafka
+ * was not built with SSL support.
+ *
+ * @remark Calling this method multiple times with the same \p cert_type
+ * will replace the previous value.
+ *
+ * @remark Calling this method with \p buffer set to NULL will clear the
+ * configuration for \p cert_type.
+ *
+ * @remark The private key may require a password, which must be specified
+ * with the `ssl.key.password` configuration property prior to
+ * calling this function.
+ *
+ * @remark Private and public keys in PEM format may also be set with the
+ * `ssl.key.pem` and `ssl.certificate.pem` configuration properties.
+ *
+ * @remark CA certificate in PEM format may also be set with the
+ * `ssl.ca.pem` configuration property.
+ *
+ * @remark When librdkafka is linked to OpenSSL 3.0 and the certificate is
+ * encoded using an obsolete cipher, it might be necessary to set up
+ * an OpenSSL configuration file to load the "legacy" provider and
+ * set the OPENSSL_CONF environment variable.
+ * See
+ * https://github.com/openssl/openssl/blob/master/README-PROVIDERS.md for more
+ * information.
+ */
+RD_EXPORT rd_kafka_conf_res_t
+rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf,
+ rd_kafka_cert_type_t cert_type,
+ rd_kafka_cert_enc_t cert_enc,
+ const void *buffer,
+ size_t size,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Set callback_data for OpenSSL engine.
+ *
+ * @param conf Configuration object.
+ * @param callback_data passed to engine callbacks,
+ * e.g. \c ENGINE_load_ssl_client_cert.
+ *
+ * @remark The \c ssl.engine.location configuration must be set for this
+ * to have affect.
+ *
+ * @remark The memory pointed to by \p value must remain valid for the
+ * lifetime of the configuration object and any Kafka clients that
+ * use it.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf,
+ void *callback_data);
+
+
+/**
+ * @brief Sets the application's opaque pointer that will be passed to callbacks
+ *
+ * @sa rd_kafka_opaque()
+ */
+RD_EXPORT
+void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque);
+
+/**
+ * @brief Retrieves the opaque pointer previously set
+ * with rd_kafka_conf_set_opaque()
+ */
+RD_EXPORT
+void *rd_kafka_opaque(const rd_kafka_t *rk);
+
+
+
+/**
+ * @brief Sets the default topic configuration to use for automatically
+ * subscribed topics (e.g., through pattern-matched topics).
+ * The topic config object is not usable after this call.
+ *
+ * @warning Any topic configuration settings that have been set on the
+ * global rd_kafka_conf_t object will be overwritten by this call
+ * since the implicitly created default topic config object is
+ * replaced by the user-supplied one.
+ *
+ * @deprecated Set default topic level configuration on the
+ * global rd_kafka_conf_t object instead.
+ */
+RD_EXPORT
+void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *tconf);
+
+/**
+ * @brief Gets the default topic configuration as previously set with
+ * rd_kafka_conf_set_default_topic_conf() or that was implicitly created
+ * by configuring a topic-level property on the global \p conf object.
+ *
+ * @returns the \p conf's default topic configuration (if any), or NULL.
+ *
+ * @warning The returned topic configuration object is owned by the \p conf
+ * object. It may be modified but not destroyed and its lifetime is
+ * the same as the \p conf object or the next call to
+ * rd_kafka_conf_set_default_topic_conf().
+ */
+RD_EXPORT rd_kafka_topic_conf_t *
+rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf);
+
+
+/**
+ * @brief Retrieve configuration value for property \p name.
+ *
+ * If \p dest is non-NULL the value will be written to \p dest with at
+ * most \p dest_size.
+ *
+ * \p *dest_size is updated to the full length of the value, thus if
+ * \p *dest_size initially is smaller than the full length the application
+ * may reallocate \p dest to fit the returned \p *dest_size and try again.
+ *
+ * If \p dest is NULL only the full length of the value is returned.
+ *
+ * Fallthrough:
+ * Topic-level configuration properties from the \c default_topic_conf
+ * may be retrieved using this interface.
+ *
+ * @returns \p RD_KAFKA_CONF_OK if the property name matched, else
+ * \p RD_KAFKA_CONF_UNKNOWN.
+ */
+RD_EXPORT
+rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf,
+ const char *name,
+ char *dest,
+ size_t *dest_size);
+
+
+/**
+ * @brief Retrieve topic configuration value for property \p name.
+ *
+ * @sa rd_kafka_conf_get()
+ */
+RD_EXPORT
+rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf,
+ const char *name,
+ char *dest,
+ size_t *dest_size);
+
+
+/**
+ * @brief Dump the configuration properties and values of \p conf to an array
+ * with \"key\", \"value\" pairs.
+ *
+ * The number of entries in the array is returned in \p *cntp.
+ *
+ * The dump must be freed with `rd_kafka_conf_dump_free()`.
+ */
+RD_EXPORT
+const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp);
+
+
+/**
+ * @brief Dump the topic configuration properties and values of \p conf
+ * to an array with \"key\", \"value\" pairs.
+ *
+ * The number of entries in the array is returned in \p *cntp.
+ *
+ * The dump must be freed with `rd_kafka_conf_dump_free()`.
+ */
+RD_EXPORT
+const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf,
+ size_t *cntp);
+
+/**
+ * @brief Frees a configuration dump returned from `rd_kafka_conf_dump()` or
+ * `rd_kafka_topic_conf_dump().
+ */
+RD_EXPORT
+void rd_kafka_conf_dump_free(const char **arr, size_t cnt);
+
+/**
+ * @brief Prints a table to \p fp of all supported configuration properties,
+ * their default values as well as a description.
+ *
+ * @remark All properties and properties and values are shown, even those
+ * that have been disabled at build time due to missing dependencies.
+ */
+RD_EXPORT
+void rd_kafka_conf_properties_show(FILE *fp);
+
+/**@}*/
+
+
+/**
+ * @name Topic configuration
+ * @brief Topic configuration property interface
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Create topic configuration object
+ *
+ * @sa Same semantics as for rd_kafka_conf_new().
+ */
+RD_EXPORT
+rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void);
+
+
+/**
+ * @brief Creates a copy/duplicate of topic configuration object \p conf.
+ */
+RD_EXPORT
+rd_kafka_topic_conf_t *
+rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf);
+
+/**
+ * @brief Creates a copy/duplicate of \p rk 's default topic configuration
+ * object.
+ */
+RD_EXPORT
+rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk);
+
+
+/**
+ * @brief Destroys a topic conf object.
+ */
+RD_EXPORT
+void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf);
+
+
+/**
+ * @brief Sets a single rd_kafka_topic_conf_t value by property name.
+ *
+ * \p topic_conf should have been previously set up
+ * with `rd_kafka_topic_conf_new()`.
+ *
+ * @returns rd_kafka_conf_res_t to indicate success or failure.
+ */
+RD_EXPORT
+rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf,
+ const char *name,
+ const char *value,
+ char *errstr,
+ size_t errstr_size);
+
+/**
+ * @brief Sets the application's opaque pointer that will be passed to all topic
+ * callbacks as the \c rkt_opaque argument.
+ *
+ * @sa rd_kafka_topic_opaque()
+ */
+RD_EXPORT
+void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *conf,
+ void *rkt_opaque);
+
+
+/**
+ * @brief \b Producer: Set partitioner callback in provided topic conf object.
+ *
+ * The partitioner may be called in any thread at any time,
+ * it may be called multiple times for the same message/key.
+ *
+ * The callback's \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The callback's \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * Partitioner function constraints:
+ * - MUST NOT call any rd_kafka_*() functions except:
+ * rd_kafka_topic_partition_available()
+ * - MUST NOT block or execute for prolonged periods of time.
+ * - MUST return a value between 0 and partition_cnt-1, or the
+ * special \c RD_KAFKA_PARTITION_UA value if partitioning
+ * could not be performed.
+ */
+RD_EXPORT
+void rd_kafka_topic_conf_set_partitioner_cb(
+ rd_kafka_topic_conf_t *topic_conf,
+ int32_t (*partitioner)(const rd_kafka_topic_t *rkt,
+ const void *keydata,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque));
+
+
+/**
+ * @brief \b Producer: Set message queueing order comparator callback.
+ *
+ * The callback may be called in any thread at any time,
+ * it may be called multiple times for the same message.
+ *
+ * Ordering comparator function constraints:
+ * - MUST be stable sort (same input gives same output).
+ * - MUST NOT call any rd_kafka_*() functions.
+ * - MUST NOT block or execute for prolonged periods of time.
+ *
+ * The comparator shall compare the two messages and return:
+ * - < 0 if message \p a should be inserted before message \p b.
+ * - >=0 if message \p a should be inserted after message \p b.
+ *
+ * @remark Insert sorting will be used to enqueue the message in the
+ * correct queue position, this comes at a cost of O(n).
+ *
+ * @remark If `queuing.strategy=fifo` new messages are enqueued to the
+ * tail of the queue regardless of msg_order_cmp, but retried messages
+ * are still affected by msg_order_cmp.
+ *
+ * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL,
+ * DO NOT USE IN PRODUCTION.
+ */
+RD_EXPORT void rd_kafka_topic_conf_set_msg_order_cmp(
+ rd_kafka_topic_conf_t *topic_conf,
+ int (*msg_order_cmp)(const rd_kafka_message_t *a,
+ const rd_kafka_message_t *b));
+
+
+/**
+ * @brief Check if partition is available (has a leader broker).
+ *
+ * @returns 1 if the partition is available, else 0.
+ *
+ * @warning This function must only be called from inside a partitioner function
+ */
+RD_EXPORT
+int rd_kafka_topic_partition_available(const rd_kafka_topic_t *rkt,
+ int32_t partition);
+
+
+/*******************************************************************
+ * *
+ * Partitioners provided by rdkafka *
+ * *
+ *******************************************************************/
+
+/**
+ * @brief Random partitioner.
+ *
+ * Will try not to return unavailable partitions.
+ *
+ * The \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * @returns a random partition between 0 and \p partition_cnt - 1.
+ *
+ */
+RD_EXPORT
+int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+
+/**
+ * @brief Consistent partitioner.
+ *
+ * Uses consistent hashing to map identical keys onto identical partitions.
+ *
+ * The \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on
+ * the CRC value of the key
+ */
+RD_EXPORT
+int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+
+/**
+ * @brief Consistent-Random partitioner.
+ *
+ * This is the default partitioner.
+ * Uses consistent hashing to map identical keys onto identical partitions, and
+ * messages without keys will be assigned via the random partitioner.
+ *
+ * The \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * @returns a \"random\" partition between 0 and \p partition_cnt - 1 based on
+ * the CRC value of the key (if provided)
+ */
+RD_EXPORT
+int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+
+
+/**
+ * @brief Murmur2 partitioner (Java compatible).
+ *
+ * Uses consistent hashing to map identical keys onto identical partitions
+ * using Java-compatible Murmur2 hashing.
+ *
+ * The \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * @returns a partition between 0 and \p partition_cnt - 1.
+ */
+RD_EXPORT
+int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+
+/**
+ * @brief Consistent-Random Murmur2 partitioner (Java compatible).
+ *
+ * Uses consistent hashing to map identical keys onto identical partitions
+ * using Java-compatible Murmur2 hashing.
+ * Messages without keys will be assigned via the random partitioner.
+ *
+ * The \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * @returns a partition between 0 and \p partition_cnt - 1.
+ */
+RD_EXPORT
+int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+
+
+/**
+ * @brief FNV-1a partitioner.
+ *
+ * Uses consistent hashing to map identical keys onto identical partitions
+ * using FNV-1a hashing.
+ *
+ * The \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * @returns a partition between 0 and \p partition_cnt - 1.
+ */
+RD_EXPORT
+int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+
+
+/**
+ * @brief Consistent-Random FNV-1a partitioner.
+ *
+ * Uses consistent hashing to map identical keys onto identical partitions
+ * using FNV-1a hashing.
+ * Messages without keys will be assigned via the random partitioner.
+ *
+ * The \p rkt_opaque argument is the opaque set by
+ * rd_kafka_topic_conf_set_opaque().
+ * The \p msg_opaque argument is the per-message opaque
+ * passed to produce().
+ *
+ * @returns a partition between 0 and \p partition_cnt - 1.
+ */
+RD_EXPORT
+int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Main Kafka and Topic object handles
+ * @{
+ *
+ *
+ */
+
+
+
+/**
+ * @brief Creates a new Kafka handle and starts its operation according to the
+ * specified \p type (\p RD_KAFKA_CONSUMER or \p RD_KAFKA_PRODUCER).
+ *
+ * \p conf is an optional struct created with `rd_kafka_conf_new()` that will
+ * be used instead of the default configuration.
+ * The \p conf object is freed by this function on success and must not be used
+ * or destroyed by the application subsequently.
+ * See `rd_kafka_conf_set()` et.al for more information.
+ *
+ * \p errstr must be a pointer to memory of at least size \p errstr_size where
+ * `rd_kafka_new()` may write a human readable error message in case the
+ * creation of a new handle fails. In which case the function returns NULL.
+ *
+ * @remark \b RD_KAFKA_CONSUMER: When a new \p RD_KAFKA_CONSUMER
+ * rd_kafka_t handle is created it may either operate in the
+ * legacy simple consumer mode using the rd_kafka_consume_start()
+ * interface, or the High-level KafkaConsumer API.
+ * @remark An application must only use one of these groups of APIs on a given
+ * rd_kafka_t RD_KAFKA_CONSUMER handle.
+
+ *
+ * @returns The Kafka handle on success or NULL on error (see \p errstr)
+ *
+ * @sa To destroy the Kafka handle, use rd_kafka_destroy().
+ */
+RD_EXPORT
+rd_kafka_t *rd_kafka_new(rd_kafka_type_t type,
+ rd_kafka_conf_t *conf,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Destroy Kafka handle.
+ *
+ * @remark This is a blocking operation.
+ * @remark rd_kafka_consumer_close() will be called from this function
+ * if the instance type is RD_KAFKA_CONSUMER, a \c group.id was
+ * configured, and the rd_kafka_consumer_close() was not
+ * explicitly called by the application. This in turn may
+ * trigger consumer callbacks, such as rebalance_cb.
+ * Use rd_kafka_destroy_flags() with
+ * RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE to avoid this behaviour.
+ *
+ * @sa rd_kafka_destroy_flags()
+ */
+RD_EXPORT
+void rd_kafka_destroy(rd_kafka_t *rk);
+
+
+/**
+ * @brief Destroy Kafka handle according to specified destroy flags
+ *
+ */
+RD_EXPORT
+void rd_kafka_destroy_flags(rd_kafka_t *rk, int flags);
+
+/**
+ * @brief Flags for rd_kafka_destroy_flags()
+ */
+
+/*!
+ * Don't call consumer_close() to leave group and commit final offsets.
+ *
+ * This also disables consumer callbacks to be called from rd_kafka_destroy*(),
+ * such as rebalance_cb.
+ *
+ * The consumer group handler is still closed internally, but from an
+ * application perspective none of the functionality from consumer_close()
+ * is performed.
+ */
+#define RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE 0x8
+
+
+
+/**
+ * @brief Returns Kafka handle name.
+ */
+RD_EXPORT
+const char *rd_kafka_name(const rd_kafka_t *rk);
+
+
+/**
+ * @brief Returns Kafka handle type.
+ */
+RD_EXPORT
+rd_kafka_type_t rd_kafka_type(const rd_kafka_t *rk);
+
+
+/**
+ * @brief Returns this client's broker-assigned group member id.
+ *
+ * @remark This currently requires the high-level KafkaConsumer
+ *
+ * @returns An allocated string containing the current broker-assigned group
+ * member id, or NULL if not available.
+ * The application must free the string with \p free() or
+ * rd_kafka_mem_free()
+ */
+RD_EXPORT
+char *rd_kafka_memberid(const rd_kafka_t *rk);
+
+
+
+/**
+ * @brief Returns the ClusterId as reported in broker metadata.
+ *
+ * @param rk Client instance.
+ * @param timeout_ms If there is no cached value from metadata retrieval
+ * then this specifies the maximum amount of time
+ * (in milliseconds) the call will block waiting
+ * for metadata to be retrieved.
+ * Use 0 for non-blocking calls.
+
+ * @remark Requires broker version >=0.10.0 and api.version.request=true.
+ *
+ * @remark The application must free the returned pointer
+ * using rd_kafka_mem_free().
+ *
+ * @returns a newly allocated string containing the ClusterId, or NULL
+ * if no ClusterId could be retrieved in the allotted timespan.
+ */
+RD_EXPORT
+char *rd_kafka_clusterid(rd_kafka_t *rk, int timeout_ms);
+
+
+/**
+ * @brief Returns the current ControllerId as reported in broker metadata.
+ *
+ * @param rk Client instance.
+ * @param timeout_ms If there is no cached value from metadata retrieval
+ * then this specifies the maximum amount of time
+ * (in milliseconds) the call will block waiting
+ * for metadata to be retrieved.
+ * Use 0 for non-blocking calls.
+
+ * @remark Requires broker version >=0.10.0 and api.version.request=true.
+ *
+ * @returns the controller broker id (>= 0), or -1 if no ControllerId could be
+ * retrieved in the allotted timespan.
+ */
+RD_EXPORT
+int32_t rd_kafka_controllerid(rd_kafka_t *rk, int timeout_ms);
+
+
+/**
+ * @brief Creates a new topic handle for topic named \p topic.
+ *
+ * \p conf is an optional configuration for the topic created with
+ * `rd_kafka_topic_conf_new()` that will be used instead of the default
+ * topic configuration.
+ * The \p conf object is freed by this function and must not be used or
+ * destroyed by the application subsequently.
+ * See `rd_kafka_topic_conf_set()` et.al for more information.
+ *
+ * Topic handles are refcounted internally and calling rd_kafka_topic_new()
+ * again with the same topic name will return the previous topic handle
+ * without updating the original handle's configuration.
+ * Applications must eventually call rd_kafka_topic_destroy() for each
+ * succesfull call to rd_kafka_topic_new() to clear up resources.
+ *
+ * @returns the new topic handle or NULL on error (use rd_kafka_errno2err()
+ * to convert system \p errno to an rd_kafka_resp_err_t error code.
+ *
+ * @sa rd_kafka_topic_destroy()
+ */
+RD_EXPORT
+rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk,
+ const char *topic,
+ rd_kafka_topic_conf_t *conf);
+
+
+
+/**
+ * @brief Loose application's topic handle refcount as previously created
+ * with `rd_kafka_topic_new()`.
+ *
+ * @remark Since topic objects are refcounted (both internally and for the app)
+ * the topic object might not actually be destroyed by this call,
+ * but the application must consider the object destroyed.
+ */
+RD_EXPORT
+void rd_kafka_topic_destroy(rd_kafka_topic_t *rkt);
+
+
+/**
+ * @brief Returns the topic name.
+ */
+RD_EXPORT
+const char *rd_kafka_topic_name(const rd_kafka_topic_t *rkt);
+
+
+/**
+ * @brief Get the \p rkt_opaque pointer that was set in the topic configuration
+ * with rd_kafka_topic_conf_set_opaque().
+ */
+RD_EXPORT
+void *rd_kafka_topic_opaque(const rd_kafka_topic_t *rkt);
+
+
+/**
+ * @brief Unassigned partition.
+ *
+ * The unassigned partition is used by the producer API for messages
+ * that should be partitioned using the configured or default partitioner.
+ */
+#define RD_KAFKA_PARTITION_UA ((int32_t)-1)
+
+
+/**
+ * @brief Polls the provided kafka handle for events.
+ *
+ * Events will cause application-provided callbacks to be called.
+ *
+ * The \p timeout_ms argument specifies the maximum amount of time
+ * (in milliseconds) that the call will block waiting for events.
+ * For non-blocking calls, provide 0 as \p timeout_ms.
+ * To wait indefinitely for an event, provide -1.
+ *
+ * @remark An application should make sure to call poll() at regular
+ * intervals to serve any queued callbacks waiting to be called.
+ * @remark If your producer doesn't have any callback set (in particular
+ * via rd_kafka_conf_set_dr_msg_cb or rd_kafka_conf_set_error_cb)
+ * you might choose not to call poll(), though this is not
+ * recommended.
+ *
+ * Events:
+ * - delivery report callbacks (if dr_cb/dr_msg_cb is configured) [producer]
+ * - error callbacks (rd_kafka_conf_set_error_cb()) [all]
+ * - stats callbacks (rd_kafka_conf_set_stats_cb()) [all]
+ * - throttle callbacks (rd_kafka_conf_set_throttle_cb()) [all]
+ * - OAUTHBEARER token refresh callbacks
+ * (rd_kafka_conf_set_oauthbearer_token_refresh_cb()) [all]
+ *
+ * @returns the number of events served.
+ */
+RD_EXPORT
+int rd_kafka_poll(rd_kafka_t *rk, int timeout_ms);
+
+
+/**
+ * @brief Cancels the current callback dispatcher (rd_kafka_poll(),
+ * rd_kafka_consume_callback(), etc).
+ *
+ * A callback may use this to force an immediate return to the calling
+ * code (caller of e.g. rd_kafka_poll()) without processing any further
+ * events.
+ *
+ * @remark This function MUST ONLY be called from within a librdkafka callback.
+ */
+RD_EXPORT
+void rd_kafka_yield(rd_kafka_t *rk);
+
+
+
+/**
+ * @brief Pause producing or consumption for the provided list of partitions.
+ *
+ * Success or error is returned per-partition \p err in the \p partitions list.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_pause_partitions(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions);
+
+
+
+/**
+ * @brief Resume producing consumption for the provided list of partitions.
+ *
+ * Success or error is returned per-partition \p err in the \p partitions list.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_resume_partitions(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions);
+
+
+
+/**
+ * @brief Query broker for low (oldest/beginning) and high (newest/end) offsets
+ * for partition.
+ *
+ * Offsets are returned in \p *low and \p *high respectively.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_query_watermark_offsets(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high,
+ int timeout_ms);
+
+
+/**
+ * @brief Get last known low (oldest/beginning) and high (newest/end) offsets
+ * for partition.
+ *
+ * The low offset is updated periodically (if statistics.interval.ms is set)
+ * while the high offset is updated on each fetched message set from the broker.
+ *
+ * If there is no cached offset (either low or high, or both) then
+ * RD_KAFKA_OFFSET_INVALID will be returned for the respective offset.
+ *
+ * Offsets are returned in \p *low and \p *high respectively.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on failure.
+ *
+ * @remark Shall only be used with an active consumer instance.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_get_watermark_offsets(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int64_t *low,
+ int64_t *high);
+
+
+
+/**
+ * @brief Look up the offsets for the given partitions by timestamp.
+ *
+ * The returned offset for each partition is the earliest offset whose
+ * timestamp is greater than or equal to the given timestamp in the
+ * corresponding partition.
+ *
+ * The timestamps to query are represented as \c offset in \p offsets
+ * on input, and \c offset will contain the offset on output.
+ *
+ * The function will block for at most \p timeout_ms milliseconds.
+ *
+ * @remark Duplicate Topic+Partitions are not supported.
+ * @remark Per-partition errors may be returned in \c
+ * rd_kafka_topic_partition_t.err
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if offsets were be queried (do note
+ * that per-partition errors might be set),
+ * RD_KAFKA_RESP_ERR__TIMED_OUT if not all offsets could be fetched
+ * within \p timeout_ms,
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p offsets list is empty,
+ * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION if all partitions are unknown,
+ * RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE if unable to query leaders
+ * for the given partitions.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_offsets_for_times(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *offsets,
+ int timeout_ms);
+
+
+
+/**
+ * @brief Allocate and zero memory using the same allocator librdkafka uses.
+ *
+ * This is typically an abstraction for the calloc(3) call and makes sure
+ * the application can use the same memory allocator as librdkafka for
+ * allocating pointers that are used by librdkafka.
+ *
+ * \p rk can be set to return memory allocated by a specific \c rk instance
+ * otherwise pass NULL for \p rk.
+ *
+ * @remark Memory allocated by rd_kafka_mem_calloc() must be freed using
+ * rd_kafka_mem_free()
+ */
+RD_EXPORT
+void *rd_kafka_mem_calloc(rd_kafka_t *rk, size_t num, size_t size);
+
+
+
+/**
+ * @brief Allocate memory using the same allocator librdkafka uses.
+ *
+ * This is typically an abstraction for the malloc(3) call and makes sure
+ * the application can use the same memory allocator as librdkafka for
+ * allocating pointers that are used by librdkafka.
+ *
+ * \p rk can be set to return memory allocated by a specific \c rk instance
+ * otherwise pass NULL for \p rk.
+ *
+ * @remark Memory allocated by rd_kafka_mem_malloc() must be freed using
+ * rd_kafka_mem_free()
+ */
+RD_EXPORT
+void *rd_kafka_mem_malloc(rd_kafka_t *rk, size_t size);
+
+
+
+/**
+ * @brief Free pointer returned by librdkafka
+ *
+ * This is typically an abstraction for the free(3) call and makes sure
+ * the application can use the same memory allocator as librdkafka for
+ * freeing pointers returned by librdkafka.
+ *
+ * In standard setups it is usually not necessary to use this interface
+ * rather than the free(3) functione.
+ *
+ * \p rk must be set for memory returned by APIs that take an \c rk argument,
+ * for other APIs pass NULL for \p rk.
+ *
+ * @remark rd_kafka_mem_free() must only be used for pointers returned by APIs
+ * that explicitly mention using this function for freeing.
+ */
+RD_EXPORT
+void rd_kafka_mem_free(rd_kafka_t *rk, void *ptr);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Queue API
+ * @{
+ *
+ * Message queues allows the application to re-route consumed messages
+ * from multiple topic+partitions into one single queue point.
+ * This queue point containing messages from a number of topic+partitions
+ * may then be served by a single rd_kafka_consume*_queue() call,
+ * rather than one call per topic+partition combination.
+ */
+
+
+/**
+ * @brief Create a new message queue.
+ *
+ * See rd_kafka_consume_start_queue(), rd_kafka_consume_queue(), et.al.
+ */
+RD_EXPORT
+rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk);
+
+/**
+ * Destroy a queue, purging all of its enqueued messages.
+ */
+RD_EXPORT
+void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu);
+
+
+/**
+ * @returns a reference to the main librdkafka event queue.
+ * This is the queue served by rd_kafka_poll().
+ *
+ * Use rd_kafka_queue_destroy() to loose the reference.
+ */
+RD_EXPORT
+rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk);
+
+
+
+/**
+ * @returns a reference to the SASL callback queue, if a SASL mechanism
+ * with callbacks is configured (currently only OAUTHBEARER), else
+ * returns NULL.
+ *
+ * Use rd_kafka_queue_destroy() to loose the reference.
+ *
+ * @sa rd_kafka_sasl_background_callbacks_enable()
+ */
+RD_EXPORT
+rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk);
+
+
+/**
+ * @brief Enable SASL OAUTHBEARER refresh callbacks on the librdkafka
+ * background thread.
+ *
+ * This serves as an alternative for applications that do not call
+ * rd_kafka_poll() (et.al.) at regular intervals (or not at all), as a means
+ * of automatically trigger the refresh callbacks, which are needed to
+ * initiate connections to the brokers in the case a custom OAUTHBEARER
+ * refresh callback is configured.
+ *
+ * @returns NULL on success or an error object on error.
+ *
+ * @sa rd_kafka_queue_get_sasl()
+ * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb()
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk);
+
+
+/**
+ * @brief Sets SASL credentials used for SASL PLAIN and SCRAM mechanisms by
+ * this Kafka client.
+ *
+ * This function sets or resets the SASL username and password credentials
+ * used by this Kafka client. The new credentials will be used the next time
+ * this client needs to authenticate to a broker. This function
+ * will not disconnect existing connections that might have been made using
+ * the old credentials.
+ *
+ * @remark This function only applies to the SASL PLAIN and SCRAM mechanisms.
+ *
+ * @returns NULL on success or an error object on error.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk,
+ const char *username,
+ const char *password);
+
+/**
+ * @returns a reference to the librdkafka consumer queue.
+ * This is the queue served by rd_kafka_consumer_poll().
+ *
+ * Use rd_kafka_queue_destroy() to loose the reference.
+ *
+ * @remark rd_kafka_queue_destroy() MUST be called on this queue
+ * prior to calling rd_kafka_consumer_close().
+ */
+RD_EXPORT
+rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk);
+
+/**
+ * @returns a reference to the partition's queue, or NULL if
+ * partition is invalid.
+ *
+ * Use rd_kafka_queue_destroy() to loose the reference.
+ *
+ * @remark rd_kafka_queue_destroy() MUST be called on this queue
+ *
+ * @remark This function only works on consumers.
+ */
+RD_EXPORT
+rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition);
+
+/**
+ * @returns a reference to the background thread queue, or NULL if the
+ * background queue is not enabled.
+ *
+ * The background thread queue provides the application with an automatically
+ * polled queue that triggers the event callback in a background thread,
+ * this background thread is completely managed by librdkafka.
+ *
+ * The background thread queue is automatically created if a generic event
+ * handler callback is configured with rd_kafka_conf_set_background_event_cb()
+ * or if rd_kafka_queue_get_background() is called.
+ *
+ * The background queue is polled and served by librdkafka and MUST NOT be
+ * polled, forwarded, or otherwise managed by the application, it may only
+ * be used as the destination queue passed to queue-enabled APIs, such as
+ * the Admin API.
+ *
+ * Use rd_kafka_queue_destroy() to loose the reference.
+ *
+ * @warning The background queue MUST NOT be read from (polled, consumed, etc),
+ * or forwarded from.
+ */
+RD_EXPORT
+rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk);
+
+
+/**
+ * @brief Forward/re-route queue \p src to \p dst.
+ * If \p dst is \c NULL the forwarding is removed.
+ *
+ * The internal refcounts for both queues are increased.
+ *
+ * @remark Regardless of whether \p dst is NULL or not, after calling this
+ * function, \p src will not forward it's fetch queue to the consumer
+ * queue.
+ */
+RD_EXPORT
+void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst);
+
+/**
+ * @brief Forward librdkafka logs (and debug) to the specified queue
+ * for serving with one of the ..poll() calls.
+ *
+ * This allows an application to serve log callbacks (\c log_cb)
+ * in its thread of choice.
+ *
+ * @param rk Client instance.
+ * @param rkqu Queue to forward logs to. If the value is NULL the logs
+ * are forwarded to the main queue.
+ *
+ * @remark The configuration property \c log.queue MUST also be set to true.
+ *
+ * @remark librdkafka maintains its own reference to the provided queue.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error,
+ * eg RD_KAFKA_RESP_ERR__NOT_CONFIGURED when log.queue is not set to true.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk,
+ rd_kafka_queue_t *rkqu);
+
+
+/**
+ * @returns the current number of elements in queue.
+ */
+RD_EXPORT
+size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu);
+
+
+/**
+ * @brief Enable IO event triggering for queue.
+ *
+ * To ease integration with IO based polling loops this API
+ * allows an application to create a separate file-descriptor
+ * that librdkafka will write \p payload (of size \p size) to
+ * whenever a new element is enqueued on a previously empty queue.
+ *
+ * To remove event triggering call with \p fd = -1.
+ *
+ * librdkafka will maintain a copy of the \p payload.
+ *
+ * @remark IO and callback event triggering are mutually exclusive.
+ * @remark When using forwarded queues the IO event must only be enabled
+ * on the final forwarded-to (destination) queue.
+ * @remark The file-descriptor/socket must be set to non-blocking.
+ */
+RD_EXPORT
+void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu,
+ int fd,
+ const void *payload,
+ size_t size);
+
+/**
+ * @brief Enable callback event triggering for queue.
+ *
+ * The callback will be called from an internal librdkafka thread
+ * when a new element is enqueued on a previously empty queue.
+ *
+ * To remove event triggering call with \p event_cb = NULL.
+ *
+ * The \p qev_opaque is passed to the callback's \p qev_opaque argument.
+ *
+ * @remark IO and callback event triggering are mutually exclusive.
+ * @remark Since the callback may be triggered from internal librdkafka
+ * threads, the application must not perform any pro-longed work in
+ * the callback, or call any librdkafka APIs (for the same rd_kafka_t
+ * handle).
+ */
+RD_EXPORT
+void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu,
+ void (*event_cb)(rd_kafka_t *rk,
+ void *qev_opaque),
+ void *qev_opaque);
+
+
+/**
+ * @brief Cancels the current rd_kafka_queue_poll() on \p rkqu.
+ *
+ * An application may use this from another thread to force
+ * an immediate return to the calling code (caller of rd_kafka_queue_poll()).
+ * Must not be used from signal handlers since that may cause deadlocks.
+ */
+RD_EXPORT
+void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu);
+
+
+/**@}*/
+
+/**
+ *
+ * @name Simple Consumer API (legacy)
+ * @{
+ *
+ */
+
+
+#define RD_KAFKA_OFFSET_BEGINNING \
+ -2 /**< Start consuming from beginning of \
+ * kafka partition queue: oldest msg */
+#define RD_KAFKA_OFFSET_END \
+ -1 /**< Start consuming from end of kafka \
+ * partition queue: next msg */
+#define RD_KAFKA_OFFSET_STORED \
+ -1000 /**< Start consuming from offset retrieved \
+ * from offset store */
+#define RD_KAFKA_OFFSET_INVALID -1001 /**< Invalid offset */
+
+
+/** @cond NO_DOC */
+#define RD_KAFKA_OFFSET_TAIL_BASE -2000 /* internal: do not use */
+/** @endcond */
+
+/**
+ * @brief Start consuming \p CNT messages from topic's current end offset.
+ *
+ * That is, if current end offset is 12345 and \p CNT is 200, it will start
+ * consuming from offset \c 12345-200 = \c 12145. */
+#define RD_KAFKA_OFFSET_TAIL(CNT) (RD_KAFKA_OFFSET_TAIL_BASE - (CNT))
+
+/**
+ * @brief Start consuming messages for topic \p rkt and \p partition
+ * at offset \p offset which may either be an absolute \c (0..N)
+ * or one of the logical offsets:
+ * - RD_KAFKA_OFFSET_BEGINNING
+ * - RD_KAFKA_OFFSET_END
+ * - RD_KAFKA_OFFSET_STORED
+ * - RD_KAFKA_OFFSET_TAIL
+ *
+ * rdkafka will attempt to keep \c queued.min.messages (config property)
+ * messages in the local queue by repeatedly fetching batches of messages
+ * from the broker until the threshold is reached.
+ *
+ * The application shall use one of the `rd_kafka_consume*()` functions
+ * to consume messages from the local queue, each kafka message being
+ * represented as a `rd_kafka_message_t *` object.
+ *
+ * `rd_kafka_consume_start()` must not be called multiple times for the same
+ * topic and partition without stopping consumption first with
+ * `rd_kafka_consume_stop()`.
+ *
+ * @returns 0 on success or -1 on error in which case errno is set accordingly:
+ * - EBUSY - Conflicts with an existing or previous subscription
+ * (RD_KAFKA_RESP_ERR__CONFLICT)
+ * - EINVAL - Invalid offset, or incomplete configuration (lacking group.id)
+ * (RD_KAFKA_RESP_ERR__INVALID_ARG)
+ * - ESRCH - requested \p partition is invalid.
+ * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ * - ENOENT - topic is unknown in the Kafka cluster.
+ * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ *
+ * Use `rd_kafka_errno2err()` to convert sytem \c errno to `rd_kafka_resp_err_t`
+ */
+RD_EXPORT
+int rd_kafka_consume_start(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t offset);
+
+/**
+ * @brief Same as rd_kafka_consume_start() but re-routes incoming messages to
+ * the provided queue \p rkqu (which must have been previously allocated
+ * with `rd_kafka_queue_new()`.
+ *
+ * The application must use one of the `rd_kafka_consume_*_queue()` functions
+ * to receive fetched messages.
+ *
+ * `rd_kafka_consume_start_queue()` must not be called multiple times for the
+ * same topic and partition without stopping consumption first with
+ * `rd_kafka_consume_stop()`.
+ * `rd_kafka_consume_start()` and `rd_kafka_consume_start_queue()` must not
+ * be combined for the same topic and partition.
+ */
+RD_EXPORT
+int rd_kafka_consume_start_queue(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t offset,
+ rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief Stop consuming messages for topic \p rkt and \p partition, purging
+ * all messages currently in the local queue.
+ *
+ * NOTE: To enforce synchronisation this call will block until the internal
+ * fetcher has terminated and offsets are committed to configured
+ * storage method.
+ *
+ * The application needs to be stop all consumers before calling
+ * `rd_kafka_destroy()` on the main object handle.
+ *
+ * @returns 0 on success or -1 on error (see `errno`).
+ */
+RD_EXPORT
+int rd_kafka_consume_stop(rd_kafka_topic_t *rkt, int32_t partition);
+
+
+
+/**
+ * @brief Seek consumer for topic+partition to \p offset which is either an
+ * absolute or logical offset.
+ *
+ * If \p timeout_ms is specified (not 0) the seek call will wait this long
+ * for the consumer to update its fetcher state for the given partition with
+ * the new offset. This guarantees that no previously fetched messages for the
+ * old offset (or fetch position) will be passed to the application.
+ *
+ * If the timeout is reached the internal state will be unknown to the caller
+ * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
+ *
+ * If \p timeout_ms is 0 it will initiate the seek but return
+ * immediately without any error reporting (e.g., async).
+ *
+ * This call will purge all pre-fetched messages for the given partition, which
+ * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
+ * may thus lead to increased network usage as messages are re-fetched from
+ * the broker.
+ *
+ * @remark Seek must only be performed for already assigned/consumed partitions,
+ * use rd_kafka_assign() (et.al) to set the initial starting offset
+ * for a new assignmenmt.
+ *
+ * @returns `RD_KAFKA_RESP_ERR__NO_ERROR` on success else an error code.
+ *
+ * @deprecated Use rd_kafka_seek_partitions().
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_seek(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t offset,
+ int timeout_ms);
+
+
+
+/**
+ * @brief Seek consumer for partitions in \p partitions to the per-partition
+ * offset in the \c .offset field of \p partitions.
+ *
+ * The offset may be either absolute (>= 0) or a logical offset.
+ *
+ * If \p timeout_ms is specified (not 0) the seek call will wait this long
+ * for the consumer to update its fetcher state for the given partition with
+ * the new offset. This guarantees that no previously fetched messages for the
+ * old offset (or fetch position) will be passed to the application.
+ *
+ * If the timeout is reached the internal state will be unknown to the caller
+ * and this function returns `RD_KAFKA_RESP_ERR__TIMED_OUT`.
+ *
+ * If \p timeout_ms is 0 it will initiate the seek but return
+ * immediately without any error reporting (e.g., async).
+ *
+ * This call will purge all pre-fetched messages for the given partition, which
+ * may be up to \c queued.max.message.kbytes in size. Repeated use of seek
+ * may thus lead to increased network usage as messages are re-fetched from
+ * the broker.
+ *
+ * Individual partition errors are reported in the per-partition \c .err field
+ * of \p partitions.
+ *
+ * @remark Seek must only be performed for already assigned/consumed partitions,
+ * use rd_kafka_assign() (et.al) to set the initial starting offset
+ * for a new assignmenmt.
+ *
+ * @returns NULL on success or an error object on failure.
+ */
+RD_EXPORT rd_kafka_error_t *
+rd_kafka_seek_partitions(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions,
+ int timeout_ms);
+
+
+/**
+ * @brief Consume a single message from topic \p rkt and \p partition
+ *
+ * \p timeout_ms is maximum amount of time to wait for a message to be received.
+ * Consumer must have been previously started with `rd_kafka_consume_start()`.
+ *
+ * @returns a message object on success or \c NULL on error.
+ * The message object must be destroyed with `rd_kafka_message_destroy()`
+ * when the application is done with it.
+ *
+ * Errors (when returning NULL):
+ * - ETIMEDOUT - \p timeout_ms was reached with no new messages fetched.
+ * - ENOENT - \p rkt + \p partition is unknown.
+ * (no prior `rd_kafka_consume_start()` call)
+ *
+ * NOTE: The returned message's \c ..->err must be checked for errors.
+ * NOTE: \c ..->err \c == \c RD_KAFKA_RESP_ERR__PARTITION_EOF signals that the
+ * end of the partition has been reached, which should typically not be
+ * considered an error. The application should handle this case
+ * (e.g., ignore).
+ *
+ * @remark on_consume() interceptors may be called from this function prior to
+ * passing message to application.
+ */
+RD_EXPORT
+rd_kafka_message_t *
+rd_kafka_consume(rd_kafka_topic_t *rkt, int32_t partition, int timeout_ms);
+
+
+
+/**
+ * @brief Consume up to \p rkmessages_size from topic \p rkt and \p partition
+ * putting a pointer to each message in the application provided
+ * array \p rkmessages (of size \p rkmessages_size entries).
+ *
+ * `rd_kafka_consume_batch()` provides higher throughput performance
+ * than `rd_kafka_consume()`.
+ *
+ * \p timeout_ms is the maximum amount of time to wait for all of
+ * \p rkmessages_size messages to be put into \p rkmessages.
+ * If no messages were available within the timeout period this function
+ * returns 0 and \p rkmessages remains untouched.
+ * This differs somewhat from `rd_kafka_consume()`.
+ *
+ * The message objects must be destroyed with `rd_kafka_message_destroy()`
+ * when the application is done with it.
+ *
+ * @returns the number of rkmessages added in \p rkmessages,
+ * or -1 on error (same error codes as for `rd_kafka_consume()`.
+ *
+ * @sa rd_kafka_consume()
+ *
+ * @remark on_consume() interceptors may be called from this function prior to
+ * passing message to application.
+ */
+RD_EXPORT
+ssize_t rd_kafka_consume_batch(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int timeout_ms,
+ rd_kafka_message_t **rkmessages,
+ size_t rkmessages_size);
+
+
+
+/**
+ * @brief Consumes messages from topic \p rkt and \p partition, calling
+ * the provided callback for each consumed messsage.
+ *
+ * `rd_kafka_consume_callback()` provides higher throughput performance
+ * than both `rd_kafka_consume()` and `rd_kafka_consume_batch()`.
+ *
+ * \p timeout_ms is the maximum amount of time to wait for one or more messages
+ * to arrive.
+ *
+ * The provided \p consume_cb function is called for each message,
+ * the application \b MUST \b NOT call `rd_kafka_message_destroy()` on the
+ * provided \p rkmessage.
+ *
+ * The \p commit_opaque argument is passed to the \p consume_cb
+ * as \p commit_opaque.
+ *
+ * @returns the number of messages processed or -1 on error.
+ *
+ * @sa rd_kafka_consume()
+ *
+ * @remark on_consume() interceptors may be called from this function prior to
+ * passing message to application.
+ *
+ * @remark This function will return early if a transaction control message is
+ * received, these messages are not exposed to the application but
+ * still enqueued on the consumer queue to make sure their
+ * offsets are stored.
+ *
+ * @deprecated This API is deprecated and subject for future removal.
+ * There is no new callback-based consume interface, use the
+ * poll/queue based alternatives.
+ */
+RD_EXPORT
+int rd_kafka_consume_callback(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int timeout_ms,
+ void (*consume_cb)(rd_kafka_message_t *rkmessage,
+ void *commit_opaque),
+ void *commit_opaque);
+
+
+/**@}*/
+
+/**
+ * @name Simple Consumer API (legacy): Queue consumers
+ * @{
+ *
+ * The following `..._queue()` functions are analogue to the functions above
+ * but reads messages from the provided queue \p rkqu instead.
+ * \p rkqu must have been previously created with `rd_kafka_queue_new()`
+ * and the topic consumer must have been started with
+ * `rd_kafka_consume_start_queue()` utilising the the same queue.
+ */
+
+/**
+ * @brief Consume from queue
+ *
+ * @sa rd_kafka_consume()
+ */
+RD_EXPORT
+rd_kafka_message_t *rd_kafka_consume_queue(rd_kafka_queue_t *rkqu,
+ int timeout_ms);
+
+/**
+ * @brief Consume batch of messages from queue
+ *
+ * @sa rd_kafka_consume_batch()
+ */
+RD_EXPORT
+ssize_t rd_kafka_consume_batch_queue(rd_kafka_queue_t *rkqu,
+ int timeout_ms,
+ rd_kafka_message_t **rkmessages,
+ size_t rkmessages_size);
+
+/**
+ * @brief Consume multiple messages from queue with callback
+ *
+ * @sa rd_kafka_consume_callback()
+ *
+ * @deprecated This API is deprecated and subject for future removal.
+ * There is no new callback-based consume interface, use the
+ * poll/queue based alternatives.
+ */
+RD_EXPORT
+int rd_kafka_consume_callback_queue(
+ rd_kafka_queue_t *rkqu,
+ int timeout_ms,
+ void (*consume_cb)(rd_kafka_message_t *rkmessage, void *commit_opaque),
+ void *commit_opaque);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Simple Consumer API (legacy): Topic+partition offset store.
+ * @{
+ *
+ * If \c auto.commit.enable is true the offset is stored automatically prior to
+ * returning of the message(s) in each of the rd_kafka_consume*() functions
+ * above.
+ */
+
+
+/**
+ * @brief Store offset \p offset + 1 for topic \p rkt partition \p partition.
+ *
+ * The \c offset + 1 will be committed (written) to broker (or file) according
+ * to \c `auto.commit.interval.ms` or manual offset-less commit()
+ *
+ * @deprecated This API lacks support for partition leader epochs, which makes
+ * it at risk for unclean leader election log truncation issues.
+ * Use rd_kafka_offsets_store() and rd_kafka_offset_store_message()
+ * instead.
+ *
+ * @warning This method may only be called for partitions that are currently
+ * assigned.
+ * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
+ * Since v1.9.0.
+ *
+ * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
+ * this may later interfere with resuming a paused partition, instead
+ * store offsets prior to calling seek.
+ *
+ * @remark \c `enable.auto.offset.store` must be set to "false" when using
+ * this API.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t
+rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
+
+
+/**
+ * @brief Store offsets for next auto-commit for one or more partitions.
+ *
+ * The offset will be committed (written) to the offset store according
+ * to \c `auto.commit.interval.ms` or manual offset-less commit().
+ *
+ * Per-partition success/error status propagated through each partition's
+ * \c .err for all return values (even NO_ERROR) except INVALID_ARG.
+ *
+ * @warning This method may only be called for partitions that are currently
+ * assigned.
+ * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
+ * Since v1.9.0.
+ *
+ * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
+ * this may later interfere with resuming a paused partition, instead
+ * store offsets prior to calling seek.
+ *
+ * @remark The \c .offset field is stored as is, it will NOT be + 1.
+ *
+ * @remark \c `enable.auto.offset.store` must be set to "false" when using
+ * this API.
+ *
+ * @remark The leader epoch, if set, will be used to fence outdated partition
+ * leaders. See rd_kafka_topic_partition_set_leader_epoch().
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on (partial) success, or
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if \c enable.auto.offset.store
+ * is true, or
+ * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or RD_KAFKA_RESP_ERR__STATE
+ * if none of the offsets could be stored.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_offsets_store(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *offsets);
+
+
+/**
+ * @brief Store offset +1 for the consumed message.
+ *
+ * The message offset + 1 will be committed to broker according
+ * to \c `auto.commit.interval.ms` or manual offset-less commit()
+ *
+ * @warning This method may only be called for partitions that are currently
+ * assigned.
+ * Non-assigned partitions will fail with RD_KAFKA_RESP_ERR__STATE.
+ * Since v1.9.0.
+ *
+ * @warning Avoid storing offsets after calling rd_kafka_seek() (et.al) as
+ * this may later interfere with resuming a paused partition, instead
+ * store offsets prior to calling seek.
+ *
+ * @remark \c `enable.auto.offset.store` must be set to "false" when using
+ * this API.
+ *
+ * @returns NULL on success or an error object on failure.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage);
+
+/**@}*/
+
+
+
+/**
+ * @name KafkaConsumer (C)
+ * @brief High-level KafkaConsumer C API
+ * @{
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Subscribe to topic set using balanced consumer groups.
+ *
+ * Wildcard (regex) topics are supported:
+ * any topic name in the \p topics list that is prefixed with \c \"^\" will
+ * be regex-matched to the full list of topics in the cluster and matching
+ * topics will be added to the subscription list.
+ *
+ * The full topic list is retrieved every \c topic.metadata.refresh.interval.ms
+ * to pick up new or delete topics that match the subscription.
+ * If there is any change to the matched topics the consumer will
+ * immediately rejoin the group with the updated set of subscribed topics.
+ *
+ * Regex and full topic names can be mixed in \p topics.
+ *
+ * @remark Only the \c .topic field is used in the supplied \p topics list,
+ * all other fields are ignored.
+ *
+ * @remark subscribe() is an asynchronous method which returns immediately:
+ * background threads will (re)join the group, wait for group rebalance,
+ * issue any registered rebalance_cb, assign() the assigned partitions,
+ * and then start fetching messages. This cycle may take up to
+ * \c session.timeout.ms * 2 or more to complete.
+ *
+ * @remark After this call returns a consumer error will be returned by
+ * rd_kafka_consumer_poll (et.al) for each unavailable topic in the
+ * \p topics. The error will be RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART
+ * for non-existent topics, and
+ * RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED for unauthorized topics.
+ * The consumer error will be raised through rd_kafka_consumer_poll()
+ * (et.al.) with the \c rd_kafka_message_t.err field set to one of the
+ * error codes mentioned above.
+ * The subscribe function itself is asynchronous and will not return
+ * an error on unavailable topics.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if list is empty, contains invalid
+ * topics or regexes or duplicate entries,
+ * RD_KAFKA_RESP_ERR__FATAL if the consumer has raised a fatal error.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_subscribe(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *topics);
+
+
+/**
+ * @brief Unsubscribe from the current subscription set.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk);
+
+
+/**
+ * @brief Returns the current topic subscription
+ *
+ * @returns An error code on failure, otherwise \p topic is updated
+ * to point to a newly allocated topic list (possibly empty).
+ *
+ * @remark The application is responsible for calling
+ * rd_kafka_topic_partition_list_destroy on the returned list.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_subscription(rd_kafka_t *rk, rd_kafka_topic_partition_list_t **topics);
+
+
+
+/**
+ * @brief Poll the consumer for messages or events.
+ *
+ * Will block for at most \p timeout_ms milliseconds.
+ *
+ * @remark An application should make sure to call consumer_poll() at regular
+ * intervals, even if no messages are expected, to serve any
+ * queued callbacks waiting to be called. This is especially
+ * important when a rebalance_cb has been registered as it needs
+ * to be called and handled properly to synchronize internal
+ * consumer state.
+ *
+ * @returns A message object which is a proper message if \p ->err is
+ * RD_KAFKA_RESP_ERR_NO_ERROR, or an event or error for any other
+ * value.
+ *
+ * @remark on_consume() interceptors may be called from this function prior to
+ * passing message to application.
+ *
+ * @remark When subscribing to topics the application must call poll at
+ * least every \c max.poll.interval.ms to remain a member of the
+ * consumer group.
+ *
+ * Noteworthy errors returned in \c ->err:
+ * - RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED - application failed to call
+ * poll within `max.poll.interval.ms`.
+ *
+ * @sa rd_kafka_message_t
+ */
+RD_EXPORT
+rd_kafka_message_t *rd_kafka_consumer_poll(rd_kafka_t *rk, int timeout_ms);
+
+/**
+ * @brief Close the consumer.
+ *
+ * This call will block until the consumer has revoked its assignment,
+ * calling the \c rebalance_cb if it is configured, committed offsets
+ * to broker, and left the consumer group (if applicable).
+ * The maximum blocking time is roughly limited to session.timeout.ms.
+ *
+ * @returns An error code indicating if the consumer close was succesful
+ * or not.
+ * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
+ * a fatal error.
+ *
+ * @remark The application still needs to call rd_kafka_destroy() after
+ * this call finishes to clean up the underlying handle resources.
+ *
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_consumer_close(rd_kafka_t *rk);
+
+
+/**
+ * @brief Asynchronously close the consumer.
+ *
+ * Performs the same actions as rd_kafka_consumer_close() but in a
+ * background thread.
+ *
+ * Rebalance events/callbacks (etc) will be forwarded to the
+ * application-provided \p rkqu. The application must poll/serve this queue
+ * until rd_kafka_consumer_closed() returns true.
+ *
+ * @remark Depending on consumer group join state there may or may not be
+ * rebalance events emitted on \p rkqu.
+ *
+ * @returns an error object if the consumer close failed, else NULL.
+ *
+ * @sa rd_kafka_consumer_closed()
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_consumer_close_queue(rd_kafka_t *rk,
+ rd_kafka_queue_t *rkqu);
+
+
+/**
+ * @returns 1 if the consumer is closed, else 0.
+ *
+ * Should be used in conjunction with rd_kafka_consumer_close_queue() to know
+ * when the consumer has been closed.
+ *
+ * @sa rd_kafka_consumer_close_queue()
+ */
+RD_EXPORT
+int rd_kafka_consumer_closed(rd_kafka_t *rk);
+
+
+/**
+ * @brief Incrementally add \p partitions to the current assignment.
+ *
+ * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
+ * this method should be used in a rebalance callback to adjust the current
+ * assignment appropriately in the case where the rebalance type is
+ * RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS. The application must pass the
+ * partition list passed to the callback (or a copy of it), even if the
+ * list is empty. \p partitions must not be NULL. This method may also be
+ * used outside the context of a rebalance callback.
+ *
+ * @returns NULL on success, or an error object if the operation was
+ * unsuccessful.
+ *
+ * @remark The returned error object (if not NULL) must be destroyed with
+ * rd_kafka_error_destroy().
+ */
+RD_EXPORT rd_kafka_error_t *
+rd_kafka_incremental_assign(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *partitions);
+
+
+/**
+ * @brief Incrementally remove \p partitions from the current assignment.
+ *
+ * If a COOPERATIVE assignor (i.e. incremental rebalancing) is being used,
+ * this method should be used in a rebalance callback to adjust the current
+ * assignment appropriately in the case where the rebalance type is
+ * RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS. The application must pass the
+ * partition list passed to the callback (or a copy of it), even if the
+ * list is empty. \p partitions must not be NULL. This method may also be
+ * used outside the context of a rebalance callback.
+ *
+ * @returns NULL on success, or an error object if the operation was
+ * unsuccessful.
+ *
+ * @remark The returned error object (if not NULL) must be destroyed with
+ * rd_kafka_error_destroy().
+ */
+RD_EXPORT rd_kafka_error_t *rd_kafka_incremental_unassign(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *partitions);
+
+
+/**
+ * @brief The rebalance protocol currently in use. This will be
+ * "NONE" if the consumer has not (yet) joined a group, else it will
+ * match the rebalance protocol ("EAGER", "COOPERATIVE") of the
+ * configured and selected assignor(s). All configured
+ * assignors must have the same protocol type, meaning
+ * online migration of a consumer group from using one
+ * protocol to another (in particular upgading from EAGER
+ * to COOPERATIVE) without a restart is not currently
+ * supported.
+ *
+ * @returns NULL on error, or one of "NONE", "EAGER", "COOPERATIVE" on success.
+ */
+RD_EXPORT
+const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk);
+
+
+/**
+ * @brief Atomic assignment of partitions to consume.
+ *
+ * The new \p partitions will replace the existing assignment.
+ *
+ * A zero-length \p partitions will treat the partitions as a valid,
+ * albeit empty assignment, and maintain internal state, while a \c NULL
+ * value for \p partitions will reset and clear the internal state.
+ *
+ * When used from a rebalance callback, the application should pass the
+ * partition list passed to the callback (or a copy of it) even if the list
+ * is empty (i.e. should not pass NULL in this case) so as to maintain
+ * internal join state. This is not strictly required - the application
+ * may adjust the assignment provided by the group. However, this is rarely
+ * useful in practice.
+ *
+ * @returns An error code indicating if the new assignment was applied or not.
+ * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
+ * a fatal error.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_assign(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *partitions);
+
+/**
+ * @brief Returns the current partition assignment as set by rd_kafka_assign()
+ * or rd_kafka_incremental_assign().
+ *
+ * @returns An error code on failure, otherwise \p partitions is updated
+ * to point to a newly allocated partition list (possibly empty).
+ *
+ * @remark The application is responsible for calling
+ * rd_kafka_topic_partition_list_destroy on the returned list.
+ *
+ * @remark This assignment represents the partitions assigned through the
+ * assign functions and not the partitions assigned to this consumer
+ * instance by the consumer group leader.
+ * They are usually the same following a rebalance but not necessarily
+ * since an application is free to assign any partitions.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_assignment(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t **partitions);
+
+
+/**
+ * @brief Check whether the consumer considers the current assignment to
+ * have been lost involuntarily. This method is only applicable for
+ * use with a high level subscribing consumer. Assignments are revoked
+ * immediately when determined to have been lost, so this method
+ * is only useful when reacting to a RD_KAFKA_EVENT_REBALANCE event
+ * or from within a rebalance_cb. Partitions that have been lost may
+ * already be owned by other members in the group and therefore
+ * commiting offsets, for example, may fail.
+ *
+ * @remark Calling rd_kafka_assign(), rd_kafka_incremental_assign() or
+ * rd_kafka_incremental_unassign() resets this flag.
+ *
+ * @returns Returns 1 if the current partition assignment is considered
+ * lost, 0 otherwise.
+ */
+RD_EXPORT int rd_kafka_assignment_lost(rd_kafka_t *rk);
+
+
+/**
+ * @brief Commit offsets on broker for the provided list of partitions.
+ *
+ * \p offsets should contain \c topic, \c partition, \c offset and possibly
+ * \c metadata. The \c offset should be the offset where consumption will
+ * resume, i.e., the last processed offset + 1.
+ * If \p offsets is NULL the current partition assignment will be used instead.
+ *
+ * If \p async is false this operation will block until the broker offset commit
+ * is done, returning the resulting success or error code.
+ *
+ * If a rd_kafka_conf_set_offset_commit_cb() offset commit callback has been
+ * configured the callback will be enqueued for a future call to
+ * rd_kafka_poll(), rd_kafka_consumer_poll() or similar.
+ *
+ * @returns An error code indiciating if the commit was successful,
+ * or successfully scheduled if asynchronous, or failed.
+ * RD_KAFKA_RESP_ERR__FATAL is returned if the consumer has raised
+ * a fatal error.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_commit(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ int async);
+
+
+/**
+ * @brief Commit message's offset on broker for the message's partition.
+ * The committed offset is the message's offset + 1.
+ *
+ * @sa rd_kafka_commit
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_commit_message(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ int async);
+
+
+/**
+ * @brief Commit offsets on broker for the provided list of partitions.
+ *
+ * See rd_kafka_commit for \p offsets semantics.
+ *
+ * The result of the offset commit will be posted on the provided \p rkqu queue.
+ *
+ * If the application uses one of the poll APIs (rd_kafka_poll(),
+ * rd_kafka_consumer_poll(), rd_kafka_queue_poll(), ..) to serve the queue
+ * the \p cb callback is required.
+ *
+ * The \p commit_opaque argument is passed to the callback as \p commit_opaque,
+ * or if using the event API the callback is ignored and the offset commit
+ * result will be returned as an RD_KAFKA_EVENT_COMMIT event and the
+ * \p commit_opaque value will be available with rd_kafka_event_opaque().
+ *
+ * If \p rkqu is NULL a temporary queue will be created and the callback will
+ * be served by this call.
+ *
+ * @sa rd_kafka_commit()
+ * @sa rd_kafka_conf_set_offset_commit_cb()
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_commit_queue(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_queue_t *rkqu,
+ void (*cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *commit_opaque),
+ void *commit_opaque);
+
+
+/**
+ * @brief Retrieve committed offsets for topics+partitions.
+ *
+ * The \p offset field of each requested partition will either be set to
+ * stored offset or to RD_KAFKA_OFFSET_INVALID in case there was no stored
+ * offset for that partition.
+ *
+ * Committed offsets will be returned according to the `isolation.level`
+ * configuration property, if set to `read_committed` (default) then only
+ * stable offsets for fully committed transactions will be returned, while
+ * `read_uncommitted` may return offsets for not yet committed transactions.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
+ * \p offset or \p err field of each \p partitions' element is filled
+ * in with the stored offset, or a partition specific error.
+ * Else returns an error code.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_committed(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions,
+ int timeout_ms);
+
+
+
+/**
+ * @brief Retrieve current positions (offsets) for topics+partitions.
+ *
+ * The \p offset field of each requested partition will be set to the offset
+ * of the last consumed message + 1, or RD_KAFKA_OFFSET_INVALID in case there
+ * was no previous message.
+ *
+ * @remark In this context the last consumed message is the offset consumed
+ * by the current librdkafka instance and, in case of rebalancing, not
+ * necessarily the last message fetched from the partition.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success in which case the
+ * \p offset or \p err field of each \p partitions' element is filled
+ * in with the stored offset, or a partition specific error.
+ * Else returns an error code.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_position(rd_kafka_t *rk, rd_kafka_topic_partition_list_t *partitions);
+
+
+
+/**
+ * @returns the current consumer group metadata associated with this consumer,
+ * or NULL if \p rk is not a consumer configured with a \c group.id.
+ * This metadata object should be passed to the transactional
+ * producer's rd_kafka_send_offsets_to_transaction() API.
+ *
+ * @remark The returned pointer must be freed by the application using
+ * rd_kafka_consumer_group_metadata_destroy().
+ *
+ * @sa rd_kafka_send_offsets_to_transaction()
+ */
+RD_EXPORT rd_kafka_consumer_group_metadata_t *
+rd_kafka_consumer_group_metadata(rd_kafka_t *rk);
+
+
+/**
+ * @brief Create a new consumer group metadata object.
+ * This is typically only used for writing tests.
+ *
+ * @param group_id The group id.
+ *
+ * @remark The returned pointer must be freed by the application using
+ * rd_kafka_consumer_group_metadata_destroy().
+ */
+RD_EXPORT rd_kafka_consumer_group_metadata_t *
+rd_kafka_consumer_group_metadata_new(const char *group_id);
+
+
+/**
+ * @brief Create a new consumer group metadata object.
+ * This is typically only used for writing tests.
+ *
+ * @param group_id The group id.
+ * @param generation_id The group generation id.
+ * @param member_id The group member id.
+ * @param group_instance_id The group instance id (may be NULL).
+ *
+ * @remark The returned pointer must be freed by the application using
+ * rd_kafka_consumer_group_metadata_destroy().
+ */
+RD_EXPORT rd_kafka_consumer_group_metadata_t *
+rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id,
+ int32_t generation_id,
+ const char *member_id,
+ const char *group_instance_id);
+
+
+/**
+ * @brief Frees the consumer group metadata object as returned by
+ * rd_kafka_consumer_group_metadata().
+ */
+RD_EXPORT void
+rd_kafka_consumer_group_metadata_destroy(rd_kafka_consumer_group_metadata_t *);
+
+
+/**
+ * @brief Serialize the consumer group metadata to a binary format.
+ * This is mainly for client binding use and not for application use.
+ *
+ * @remark The serialized metadata format is private and is not compatible
+ * across different versions or even builds of librdkafka.
+ * It should only be used in the same process runtime and must only
+ * be passed to rd_kafka_consumer_group_metadata_read().
+ *
+ * @param cgmd Metadata to be serialized.
+ * @param bufferp On success this pointer will be updated to point to na
+ * allocated buffer containing the serialized metadata.
+ * The buffer must be freed with rd_kafka_mem_free().
+ * @param sizep The pointed to size will be updated with the size of
+ * the serialized buffer.
+ *
+ * @returns NULL on success or an error object on failure.
+ *
+ * @sa rd_kafka_consumer_group_metadata_read()
+ */
+RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(
+ const rd_kafka_consumer_group_metadata_t *cgmd,
+ void **bufferp,
+ size_t *sizep);
+
+/**
+ * @brief Reads serialized consumer group metadata and returns a
+ * consumer group metadata object.
+ * This is mainly for client binding use and not for application use.
+ *
+ * @remark The serialized metadata format is private and is not compatible
+ * across different versions or even builds of librdkafka.
+ * It should only be used in the same process runtime and must only
+ * be passed to rd_kafka_consumer_group_metadata_read().
+ *
+ * @param cgmdp On success this pointer will be updated to point to a new
+ * consumer group metadata object which must be freed with
+ * rd_kafka_consumer_group_metadata_destroy().
+ * @param buffer Pointer to the serialized data.
+ * @param size Size of the serialized data.
+ *
+ * @returns NULL on success or an error object on failure.
+ *
+ * @sa rd_kafka_consumer_group_metadata_write()
+ */
+RD_EXPORT rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(
+ rd_kafka_consumer_group_metadata_t **cgmdp,
+ const void *buffer,
+ size_t size);
+
+/**@}*/
+
+
+
+/**
+ * @name Producer API
+ * @{
+ *
+ *
+ */
+
+
+/**
+ * @brief Producer message flags
+ */
+#define RD_KAFKA_MSG_F_FREE \
+ 0x1 /**< Delegate freeing of payload to rdkafka. \
+ */
+#define RD_KAFKA_MSG_F_COPY \
+ 0x2 /**< rdkafka will make a copy of the payload. \
+ */
+#define RD_KAFKA_MSG_F_BLOCK \
+ 0x4 /**< Block produce*() on message queue full. \
+ * WARNING: If a delivery report callback \
+ * is used, the application MUST \
+ * call rd_kafka_poll() (or equiv.) \
+ * to make sure delivered messages \
+ * are drained from the internal \
+ * delivery report queue. \
+ * Failure to do so will result \
+ * in indefinitely blocking on \
+ * the produce() call when the \
+ * message queue is full. */
+#define RD_KAFKA_MSG_F_PARTITION \
+ 0x8 /**< produce_batch() will honor \
+ * per-message partition. */
+
+
+
+/**
+ * @brief Produce and send a single message to broker.
+ *
+ * \p rkt is the target topic which must have been previously created with
+ * `rd_kafka_topic_new()`.
+ *
+ * `rd_kafka_produce()` is an asynchronous non-blocking API.
+ * See `rd_kafka_conf_set_dr_msg_cb` on how to setup a callback to be called
+ * once the delivery status (success or failure) is known. The delivery report
+ * is triggered by the application calling `rd_kafka_poll()` (at regular
+ * intervals) or `rd_kafka_flush()` (at termination).
+ *
+ * Since producing is asynchronous, you should call `rd_kafka_flush()` before
+ * you destroy the producer. Otherwise, any outstanding messages will be
+ * silently discarded.
+ *
+ * When temporary errors occur, librdkafka automatically retries to produce the
+ * messages. Retries are triggered after retry.backoff.ms and when the
+ * leader broker for the given partition is available. Otherwise, librdkafka
+ * falls back to polling the topic metadata to monitor when a new leader is
+ * elected (see the topic.metadata.refresh.fast.interval.ms and
+ * topic.metadata.refresh.interval.ms configurations) and then performs a
+ * retry. A delivery error will occur if the message could not be produced
+ * within message.timeout.ms.
+ *
+ * See the "Message reliability" chapter in INTRODUCTION.md for more
+ * information.
+ *
+ * \p partition is the target partition, either:
+ * - RD_KAFKA_PARTITION_UA (unassigned) for
+ * automatic partitioning using the topic's partitioner function, or
+ * - a fixed partition (0..N)
+ *
+ * \p msgflags is zero or more of the following flags OR:ed together:
+ * RD_KAFKA_MSG_F_BLOCK - block \p produce*() call if
+ * \p queue.buffering.max.messages or
+ * \p queue.buffering.max.kbytes are exceeded.
+ * Messages are considered in-queue from the point
+ * they are accepted by produce() until their corresponding delivery report
+ * callback/event returns. It is thus a requirement to call rd_kafka_poll() (or
+ * equiv.) from a separate thread when F_BLOCK is used. See WARNING on \c
+ * RD_KAFKA_MSG_F_BLOCK above.
+ *
+ * RD_KAFKA_MSG_F_FREE - rdkafka will free(3) \p payload when it is done
+ * with it.
+ * RD_KAFKA_MSG_F_COPY - the \p payload data will be copied and the
+ * \p payload pointer will not be used by rdkafka
+ * after the call returns.
+ * RD_KAFKA_MSG_F_PARTITION - produce_batch() will honour per-message
+ * partition, either set manually or by the
+ * configured partitioner.
+ *
+ * .._F_FREE and .._F_COPY are mutually exclusive. If neither of these are
+ * set, the caller must ensure that the memory backing \p payload remains
+ * valid and is not modified or reused until the delivery callback is
+ * invoked. Other buffers passed to `rd_kafka_produce()` don't have this
+ * restriction on reuse, i.e. the memory backing the key or the topic name
+ * may be reused as soon as `rd_kafka_produce()` returns.
+ *
+ * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
+ * the memory associated with the payload is still the caller's
+ * responsibility.
+ *
+ * \p payload is the message payload of size \p len bytes.
+ *
+ * \p key is an optional message key of size \p keylen bytes, if non-NULL it
+ * will be passed to the topic partitioner as well as be sent with the
+ * message to the broker and passed on to the consumer.
+ *
+ * \p msg_opaque is an optional application-provided per-message opaque
+ * pointer that will provided in the message's delivery report callback
+ * (\c dr_msg_cb or \c dr_cb) and the \c rd_kafka_message_t \c _private field.
+ *
+ * @remark on_send() and on_acknowledgement() interceptors may be called
+ * from this function. on_acknowledgement() will only be called if the
+ * message fails partitioning.
+ *
+ * @remark If the producer is transactional (\c transactional.id is configured)
+ * producing is only allowed during an on-going transaction, namely
+ * after rd_kafka_begin_transaction() has been called.
+ *
+ * @returns 0 on success or -1 on error in which case errno is set accordingly:
+ * - ENOBUFS - maximum number of outstanding messages has been reached:
+ * "queue.buffering.max.messages"
+ * (RD_KAFKA_RESP_ERR__QUEUE_FULL)
+ * - EMSGSIZE - message is larger than configured max size:
+ * "messages.max.bytes".
+ * (RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE)
+ * - ESRCH - requested \p partition is unknown in the Kafka cluster.
+ * (RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ * - ENOENT - topic is unknown in the Kafka cluster.
+ * (RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ * - ECANCELED - fatal error has been raised on producer, see
+ * rd_kafka_fatal_error(),
+ * (RD_KAFKA_RESP_ERR__FATAL).
+ * - ENOEXEC - transactional state forbids producing
+ * (RD_KAFKA_RESP_ERR__STATE)
+ *
+ * @sa Use rd_kafka_errno2err() to convert `errno` to rdkafka error code.
+ */
+RD_EXPORT
+int rd_kafka_produce(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t keylen,
+ void *msg_opaque);
+
+
+/**
+ * @brief Produce and send a single message to broker.
+ *
+ * The message is defined by a va-arg list using \c rd_kafka_vtype_t
+ * tag tuples which must be terminated with a single \c RD_KAFKA_V_END.
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, else an error code as
+ * described in rd_kafka_produce().
+ * \c RD_KAFKA_RESP_ERR__CONFLICT is returned if _V_HEADER and
+ * _V_HEADERS are mixed.
+ *
+ * @sa rd_kafka_produce, rd_kafka_produceva, RD_KAFKA_V_END
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...);
+
+
+/**
+ * @brief Produce and send a single message to broker.
+ *
+ * The message is defined by an array of \c rd_kafka_vu_t of
+ * count \p cnt.
+ *
+ * @returns an error object on failure or NULL on success.
+ * See rd_kafka_producev() for specific error codes.
+ *
+ * @sa rd_kafka_produce, rd_kafka_producev, RD_KAFKA_V_END
+ */
+RD_EXPORT
+rd_kafka_error_t *
+rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt);
+
+
+/**
+ * @brief Produce multiple messages.
+ *
+ * If partition is RD_KAFKA_PARTITION_UA the configured partitioner will
+ * be run for each message (slower), otherwise the messages will be enqueued
+ * to the specified partition directly (faster).
+ *
+ * The messages are provided in the array \p rkmessages of count \p message_cnt
+ * elements.
+ * The \p partition and \p msgflags are used for all provided messages.
+ *
+ * Honoured \p rkmessages[] fields are:
+ * - payload,len Message payload and length
+ * - key,key_len Optional message key
+ * - _private Message opaque pointer (msg_opaque)
+ * - err Will be set according to success or failure, see
+ * rd_kafka_produce() for possible error codes.
+ * Application only needs to check for errors if
+ * return value != \p message_cnt.
+ *
+ * @remark If \c RD_KAFKA_MSG_F_PARTITION is set in \p msgflags, the
+ * \c .partition field of the \p rkmessages is used instead of
+ * \p partition.
+ *
+ * @returns the number of messages succesfully enqueued for producing.
+ *
+ * @remark This interface does NOT support setting message headers on
+ * the provided \p rkmessages.
+ */
+RD_EXPORT
+int rd_kafka_produce_batch(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int msgflags,
+ rd_kafka_message_t *rkmessages,
+ int message_cnt);
+
+
+
+/**
+ * @brief Wait until all outstanding produce requests, et.al, are completed.
+ * This should typically be done prior to destroying a producer instance
+ * to make sure all queued and in-flight produce requests are completed
+ * before terminating.
+ *
+ * @remark This function will call rd_kafka_poll() and thus trigger callbacks.
+ *
+ * @remark The \c linger.ms time will be ignored for the duration of the call,
+ * queued messages will be sent to the broker as soon as possible.
+ *
+ * @remark If RD_KAFKA_EVENT_DR has been enabled
+ * (through rd_kafka_conf_set_events()) this function will not call
+ * rd_kafka_poll() but instead wait for the librdkafka-handled
+ * message count to reach zero. This requires the application to
+ * serve the event queue in a separate thread.
+ * In this mode only messages are counted, not other types of
+ * queued events.
+ *
+ * @returns RD_KAFKA_RESP_ERR__TIMED_OUT if \p timeout_ms was reached before all
+ * outstanding requests were completed, else RD_KAFKA_RESP_ERR_NO_ERROR
+ *
+ * @sa rd_kafka_outq_len()
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_flush(rd_kafka_t *rk, int timeout_ms);
+
+
+
+/**
+ * @brief Purge messages currently handled by the producer instance.
+ *
+ * @param rk Client instance.
+ * @param purge_flags Tells which messages to purge and how.
+ *
+ * The application will need to call rd_kafka_poll() or rd_kafka_flush()
+ * afterwards to serve the delivery report callbacks of the purged messages.
+ *
+ * Messages purged from internal queues fail with the delivery report
+ * error code set to RD_KAFKA_RESP_ERR__PURGE_QUEUE, while purged messages that
+ * are in-flight to or from the broker will fail with the error code set to
+ * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT.
+ *
+ * @warning Purging messages that are in-flight to or from the broker
+ * will ignore any subsequent acknowledgement for these messages
+ * received from the broker, effectively making it impossible
+ * for the application to know if the messages were successfully
+ * produced or not. This may result in duplicate messages if the
+ * application retries these messages at a later time.
+ *
+ * @remark This call may block for a short time while background thread
+ * queues are purged.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if the \p purge flags are invalid
+ * or unknown,
+ * RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if called on a non-producer
+ * client instance.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_purge(rd_kafka_t *rk, int purge_flags);
+
+
+/**
+ * @brief Flags for rd_kafka_purge()
+ */
+
+/*!
+ * Purge messages in internal queues.
+ */
+#define RD_KAFKA_PURGE_F_QUEUE 0x1
+
+/*!
+ * Purge messages in-flight to or from the broker.
+ * Purging these messages will void any future acknowledgements from the
+ * broker, making it impossible for the application to know if these
+ * messages were successfully delivered or not.
+ * Retrying these messages may lead to duplicates.
+ */
+#define RD_KAFKA_PURGE_F_INFLIGHT 0x2
+
+
+/*!
+ * Don't wait for background thread queue purging to finish.
+ */
+#define RD_KAFKA_PURGE_F_NON_BLOCKING 0x4
+
+
+/**@}*/
+
+
+/**
+ * @name Metadata API
+ * @{
+ *
+ *
+ */
+
+
+/**
+ * @brief Broker information
+ */
+typedef struct rd_kafka_metadata_broker {
+ int32_t id; /**< Broker Id */
+ char *host; /**< Broker hostname */
+ int port; /**< Broker listening port */
+} rd_kafka_metadata_broker_t;
+
+/**
+ * @brief Partition information
+ */
+typedef struct rd_kafka_metadata_partition {
+ int32_t id; /**< Partition Id */
+ rd_kafka_resp_err_t err; /**< Partition error reported by broker */
+ int32_t leader; /**< Leader broker */
+ int replica_cnt; /**< Number of brokers in \p replicas */
+ int32_t *replicas; /**< Replica brokers */
+ int isr_cnt; /**< Number of ISR brokers in \p isrs */
+ int32_t *isrs; /**< In-Sync-Replica brokers */
+} rd_kafka_metadata_partition_t;
+
+/**
+ * @brief Topic information
+ */
+typedef struct rd_kafka_metadata_topic {
+ char *topic; /**< Topic name */
+ int partition_cnt; /**< Number of partitions in \p partitions*/
+ struct rd_kafka_metadata_partition *partitions; /**< Partitions */
+ rd_kafka_resp_err_t err; /**< Topic error reported by broker */
+} rd_kafka_metadata_topic_t;
+
+
+/**
+ * @brief Metadata container
+ */
+typedef struct rd_kafka_metadata {
+ int broker_cnt; /**< Number of brokers in \p brokers */
+ struct rd_kafka_metadata_broker *brokers; /**< Brokers */
+
+ int topic_cnt; /**< Number of topics in \p topics */
+ struct rd_kafka_metadata_topic *topics; /**< Topics */
+
+ int32_t orig_broker_id; /**< Broker originating this metadata */
+ char *orig_broker_name; /**< Name of originating broker */
+} rd_kafka_metadata_t;
+
+/**
+ * @brief Request Metadata from broker.
+ *
+ * Parameters:
+ * - \p all_topics if non-zero: request info about all topics in cluster,
+ * if zero: only request info about locally known topics.
+ * - \p only_rkt only request info about this topic
+ * - \p metadatap pointer to hold metadata result.
+ * The \p *metadatap pointer must be released
+ * with rd_kafka_metadata_destroy().
+ * - \p timeout_ms maximum response time before failing.
+ *
+ * @remark Consumer: If \p all_topics is non-zero the Metadata response
+ * information may trigger a re-join if any subscribed topics
+ * have changed partition count or existence state.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success (in which case *metadatap)
+ * will be set, else RD_KAFKA_RESP_ERR__TIMED_OUT on timeout or
+ * other error code on error.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t
+rd_kafka_metadata(rd_kafka_t *rk,
+ int all_topics,
+ rd_kafka_topic_t *only_rkt,
+ const struct rd_kafka_metadata **metadatap,
+ int timeout_ms);
+
+/**
+ * @brief Release metadata memory.
+ */
+RD_EXPORT
+void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata);
+
+/**
+ * @brief Node (broker) information.
+ */
+typedef struct rd_kafka_Node_s rd_kafka_Node_t;
+
+/**
+ * @brief Get the id of \p node.
+ *
+ * @param node The Node instance.
+ *
+ * @return The node id.
+ */
+RD_EXPORT
+int rd_kafka_Node_id(const rd_kafka_Node_t *node);
+
+/**
+ * @brief Get the host of \p node.
+ *
+ * @param node The Node instance.
+ *
+ * @return The node host.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p node object.
+ */
+RD_EXPORT
+const char *rd_kafka_Node_host(const rd_kafka_Node_t *node);
+
+/**
+ * @brief Get the port of \p node.
+ *
+ * @param node The Node instance.
+ *
+ * @return The node port.
+ */
+RD_EXPORT
+uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node);
+
+/**@}*/
+
+
+
+/**
+ * @name Client group information
+ * @{
+ *
+ *
+ */
+
+
+/**
+ * @brief Group member information
+ *
+ * For more information on \p member_metadata format, see
+ * https://cwiki.apache.org/confluence/display/KAFKA/A+Guide+To+The+Kafka+Protocol#AGuideToTheKafkaProtocol-GroupMembershipAPI
+ *
+ */
+struct rd_kafka_group_member_info {
+ char *member_id; /**< Member id (generated by broker) */
+ char *client_id; /**< Client's \p client.id */
+ char *client_host; /**< Client's hostname */
+ void *member_metadata; /**< Member metadata (binary),
+ * format depends on \p protocol_type. */
+ int member_metadata_size; /**< Member metadata size in bytes */
+ void *member_assignment; /**< Member assignment (binary),
+ * format depends on \p protocol_type. */
+ int member_assignment_size; /**< Member assignment size in bytes */
+};
+
+/**
+ * @enum rd_kafka_consumer_group_state_t
+ *
+ * @brief Consumer group state.
+ */
+typedef enum {
+ RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN = 0,
+ RD_KAFKA_CONSUMER_GROUP_STATE_PREPARING_REBALANCE = 1,
+ RD_KAFKA_CONSUMER_GROUP_STATE_COMPLETING_REBALANCE = 2,
+ RD_KAFKA_CONSUMER_GROUP_STATE_STABLE = 3,
+ RD_KAFKA_CONSUMER_GROUP_STATE_DEAD = 4,
+ RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY = 5,
+ RD_KAFKA_CONSUMER_GROUP_STATE__CNT
+} rd_kafka_consumer_group_state_t;
+
+/**
+ * @brief Group information
+ */
+struct rd_kafka_group_info {
+ struct rd_kafka_metadata_broker broker; /**< Originating broker info */
+ char *group; /**< Group name */
+ rd_kafka_resp_err_t err; /**< Broker-originated error */
+ char *state; /**< Group state */
+ char *protocol_type; /**< Group protocol type */
+ char *protocol; /**< Group protocol */
+ struct rd_kafka_group_member_info *members; /**< Group members */
+ int member_cnt; /**< Group member count */
+};
+
+/**
+ * @brief List of groups
+ *
+ * @sa rd_kafka_group_list_destroy() to release list memory.
+ */
+struct rd_kafka_group_list {
+ struct rd_kafka_group_info *groups; /**< Groups */
+ int group_cnt; /**< Group count */
+};
+
+
+/**
+ * @brief List and describe client groups in cluster.
+ *
+ * \p group is an optional group name to describe, otherwise (\c NULL) all
+ * groups are returned.
+ *
+ * \p timeout_ms is the (approximate) maximum time to wait for response
+ * from brokers and must be a positive value.
+ *
+ * @returns \c RD_KAFKA_RESP_ERR__NO_ERROR on success and \p grplistp is
+ * updated to point to a newly allocated list of groups.
+ * \c RD_KAFKA_RESP_ERR__PARTIAL if not all brokers responded
+ * in time but at least one group is returned in \p grplistlp.
+ * \c RD_KAFKA_RESP_ERR__TIMED_OUT if no groups were returned in the
+ * given timeframe but not all brokers have yet responded, or
+ * if the list of brokers in the cluster could not be obtained within
+ * the given timeframe.
+ * \c RD_KAFKA_RESP_ERR__TRANSPORT if no brokers were found.
+ * Other error codes may also be returned from the request layer.
+ *
+ * The \p grplistp remains untouched if any error code is returned,
+ * with the exception of RD_KAFKA_RESP_ERR__PARTIAL which behaves
+ * as RD_KAFKA_RESP_ERR__NO_ERROR (success) but with an incomplete
+ * group list.
+ *
+ * @sa Use rd_kafka_group_list_destroy() to release list memory.
+ *
+ * @deprecated Use rd_kafka_ListConsumerGroups() and
+ * rd_kafka_DescribeConsumerGroups() instead.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t
+rd_kafka_list_groups(rd_kafka_t *rk,
+ const char *group,
+ const struct rd_kafka_group_list **grplistp,
+ int timeout_ms);
+
+/**
+ * @brief Returns a name for a state code.
+ *
+ * @param state The state value.
+ *
+ * @return The group state name corresponding to the provided group state value.
+ */
+RD_EXPORT
+const char *
+rd_kafka_consumer_group_state_name(rd_kafka_consumer_group_state_t state);
+
+/**
+ * @brief Returns a code for a state name.
+ *
+ * @param name The state name.
+ *
+ * @return The group state value corresponding to the provided group state name.
+ */
+RD_EXPORT
+rd_kafka_consumer_group_state_t
+rd_kafka_consumer_group_state_code(const char *name);
+
+/**
+ * @brief Release list memory
+ */
+RD_EXPORT
+void rd_kafka_group_list_destroy(const struct rd_kafka_group_list *grplist);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Miscellaneous APIs
+ * @{
+ *
+ */
+
+
+/**
+ * @brief Adds one or more brokers to the kafka handle's list of initial
+ * bootstrap brokers.
+ *
+ * Additional brokers will be discovered automatically as soon as rdkafka
+ * connects to a broker by querying the broker metadata.
+ *
+ * If a broker name resolves to multiple addresses (and possibly
+ * address families) all will be used for connection attempts in
+ * round-robin fashion.
+ *
+ * \p brokerlist is a ,-separated list of brokers in the format:
+ * \c \<broker1\>,\<broker2\>,..
+ * Where each broker is in either the host or URL based format:
+ * \c \<host\>[:\<port\>]
+ * \c \<proto\>://\<host\>[:port]
+ * \c \<proto\> is either \c PLAINTEXT, \c SSL, \c SASL, \c SASL_PLAINTEXT
+ * The two formats can be mixed but ultimately the value of the
+ * `security.protocol` config property decides what brokers are allowed.
+ *
+ * Example:
+ * brokerlist = "broker1:10000,broker2"
+ * brokerlist = "SSL://broker3:9000,ssl://broker2"
+ *
+ * @returns the number of brokers successfully added.
+ *
+ * @remark Brokers may also be defined with the \c metadata.broker.list or
+ * \c bootstrap.servers configuration property (preferred method).
+ *
+ * @deprecated Set bootstrap servers with the \c bootstrap.servers
+ * configuration property.
+ */
+RD_EXPORT
+int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist);
+
+
+
+/**
+ * @brief Set logger function.
+ *
+ * The default is to print to stderr, but a syslog logger is also available,
+ * see rd_kafka_log_(print|syslog) for the builtin alternatives.
+ * Alternatively the application may provide its own logger callback.
+ * Or pass 'func' as NULL to disable logging.
+ *
+ * @deprecated Use rd_kafka_conf_set_log_cb()
+ *
+ * @remark \p rk may be passed as NULL in the callback.
+ */
+RD_EXPORT RD_DEPRECATED void
+rd_kafka_set_logger(rd_kafka_t *rk,
+ void (*func)(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf));
+
+
+/**
+ * @brief Specifies the maximum logging level emitted by
+ * internal kafka logging and debugging.
+ *
+ * @deprecated Set the \c "log_level" configuration property instead.
+ *
+ * @remark If the \p \"debug\" configuration property is set the log level is
+ * automatically adjusted to \c LOG_DEBUG (7).
+ */
+RD_EXPORT
+void rd_kafka_set_log_level(rd_kafka_t *rk, int level);
+
+
+/**
+ * @brief Builtin (default) log sink: print to stderr
+ */
+RD_EXPORT
+void rd_kafka_log_print(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf);
+
+
+/**
+ * @brief Builtin log sink: print to syslog.
+ * @remark This logger is only available if librdkafka was built
+ * with syslog support.
+ */
+RD_EXPORT
+void rd_kafka_log_syslog(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf);
+
+
+/**
+ * @brief Returns the current out queue length.
+ *
+ * The out queue length is the sum of:
+ * - number of messages waiting to be sent to, or acknowledged by,
+ * the broker.
+ * - number of delivery reports (e.g., dr_msg_cb) waiting to be served
+ * by rd_kafka_poll() or rd_kafka_flush().
+ * - number of callbacks (e.g., error_cb, stats_cb, etc) waiting to be
+ * served by rd_kafka_poll(), rd_kafka_consumer_poll() or rd_kafka_flush().
+ * - number of events waiting to be served by background_event_cb() in
+ * the background queue (see rd_kafka_conf_set_background_event_cb).
+ *
+ * An application should wait for the return value of this function to reach
+ * zero before terminating to make sure outstanding messages,
+ * requests (such as offset commits), callbacks and events are fully processed.
+ * See rd_kafka_flush().
+ *
+ * @returns number of messages and events waiting in queues.
+ *
+ * @sa rd_kafka_flush()
+ */
+RD_EXPORT
+int rd_kafka_outq_len(rd_kafka_t *rk);
+
+
+
+/**
+ * @brief Dumps rdkafka's internal state for handle \p rk to stream \p fp
+ *
+ * This is only useful for debugging rdkafka, showing state and statistics
+ * for brokers, topics, partitions, etc.
+ */
+RD_EXPORT
+void rd_kafka_dump(FILE *fp, rd_kafka_t *rk);
+
+
+
+/**
+ * @brief Retrieve the current number of threads in use by librdkafka.
+ *
+ * Used by regression tests.
+ */
+RD_EXPORT
+int rd_kafka_thread_cnt(void);
+
+
+/**
+ * @enum rd_kafka_thread_type_t
+ *
+ * @brief librdkafka internal thread type.
+ *
+ * @sa rd_kafka_interceptor_add_on_thread_start()
+ */
+typedef enum rd_kafka_thread_type_t {
+ RD_KAFKA_THREAD_MAIN, /**< librdkafka's internal main thread */
+ RD_KAFKA_THREAD_BACKGROUND, /**< Background thread (if enabled) */
+ RD_KAFKA_THREAD_BROKER /**< Per-broker thread */
+} rd_kafka_thread_type_t;
+
+
+/**
+ * @brief Wait for all rd_kafka_t objects to be destroyed.
+ *
+ * Returns 0 if all kafka objects are now destroyed, or -1 if the
+ * timeout was reached.
+ *
+ * @remark This function is deprecated.
+ */
+RD_EXPORT
+int rd_kafka_wait_destroyed(int timeout_ms);
+
+
+/**
+ * @brief Run librdkafka's built-in unit-tests.
+ *
+ * @returns the number of failures, or 0 if all tests passed.
+ */
+RD_EXPORT
+int rd_kafka_unittest(void);
+
+
+/**@}*/
+
+
+
+/**
+ * @name Experimental APIs
+ * @{
+ */
+
+/**
+ * @brief Redirect the main (rd_kafka_poll()) queue to the KafkaConsumer's
+ * queue (rd_kafka_consumer_poll()).
+ *
+ * @warning It is not permitted to call rd_kafka_poll() after directing the
+ * main queue with rd_kafka_poll_set_consumer().
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_poll_set_consumer(rd_kafka_t *rk);
+
+
+/**@}*/
+
+/**
+ * @name Event interface
+ *
+ * @brief The event API provides an alternative pollable non-callback interface
+ * to librdkafka's message and event queues.
+ *
+ * @{
+ */
+
+
+/**
+ * @brief Event types
+ */
+typedef int rd_kafka_event_type_t;
+#define RD_KAFKA_EVENT_NONE 0x0 /**< Unset value */
+#define RD_KAFKA_EVENT_DR 0x1 /**< Producer Delivery report batch */
+#define RD_KAFKA_EVENT_FETCH 0x2 /**< Fetched message (consumer) */
+#define RD_KAFKA_EVENT_LOG 0x4 /**< Log message */
+#define RD_KAFKA_EVENT_ERROR 0x8 /**< Error */
+#define RD_KAFKA_EVENT_REBALANCE 0x10 /**< Group rebalance (consumer) */
+#define RD_KAFKA_EVENT_OFFSET_COMMIT 0x20 /**< Offset commit result */
+#define RD_KAFKA_EVENT_STATS 0x40 /**< Stats */
+#define RD_KAFKA_EVENT_CREATETOPICS_RESULT 100 /**< CreateTopics_result_t */
+#define RD_KAFKA_EVENT_DELETETOPICS_RESULT 101 /**< DeleteTopics_result_t */
+#define RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT \
+ 102 /**< CreatePartitions_result_t */
+#define RD_KAFKA_EVENT_ALTERCONFIGS_RESULT 103 /**< AlterConfigs_result_t */
+#define RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT \
+ 104 /**< DescribeConfigs_result_t */
+#define RD_KAFKA_EVENT_DELETERECORDS_RESULT 105 /**< DeleteRecords_result_t */
+#define RD_KAFKA_EVENT_DELETEGROUPS_RESULT 106 /**< DeleteGroups_result_t */
+/** DeleteConsumerGroupOffsets_result_t */
+#define RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT 107
+/** SASL/OAUTHBEARER token needs to be refreshed */
+#define RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH 0x100
+#define RD_KAFKA_EVENT_BACKGROUND 0x200 /**< Enable background thread. */
+#define RD_KAFKA_EVENT_CREATEACLS_RESULT 0x400 /**< CreateAcls_result_t */
+#define RD_KAFKA_EVENT_DESCRIBEACLS_RESULT 0x800 /**< DescribeAcls_result_t */
+#define RD_KAFKA_EVENT_DELETEACLS_RESULT 0x1000 /**< DeleteAcls_result_t */
+/** ListConsumerGroupsResult_t */
+#define RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT 0x2000
+/** DescribeConsumerGroups_result_t */
+#define RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT 0x4000
+/** ListConsumerGroupOffsets_result_t */
+#define RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT 0x8000
+/** AlterConsumerGroupOffsets_result_t */
+#define RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT 0x10000
+
+
+/**
+ * @returns the event type for the given event.
+ *
+ * @remark As a convenience it is okay to pass \p rkev as NULL in which case
+ * RD_KAFKA_EVENT_NONE is returned.
+ */
+RD_EXPORT
+rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev);
+
+/**
+ * @returns the event type's name for the given event.
+ *
+ * @remark As a convenience it is okay to pass \p rkev as NULL in which case
+ * the name for RD_KAFKA_EVENT_NONE is returned.
+ */
+RD_EXPORT
+const char *rd_kafka_event_name(const rd_kafka_event_t *rkev);
+
+
+/**
+ * @brief Destroy an event.
+ *
+ * @remark Any references to this event, such as extracted messages,
+ * will not be usable after this call.
+ *
+ * @remark As a convenience it is okay to pass \p rkev as NULL in which case
+ * no action is performed.
+ */
+RD_EXPORT
+void rd_kafka_event_destroy(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns the next message from an event.
+ *
+ * Call repeatedly until it returns NULL.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_FETCH (1 message)
+ * - RD_KAFKA_EVENT_DR (>=1 message(s))
+ *
+ * @remark The returned message(s) MUST NOT be
+ * freed with rd_kafka_message_destroy().
+ *
+ * @remark on_consume() interceptor may be called
+ * from this function prior to passing message to application.
+ */
+RD_EXPORT
+const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev);
+
+
+/**
+ * @brief Extacts \p size message(s) from the event into the
+ * pre-allocated array \p rkmessages.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_FETCH (1 message)
+ * - RD_KAFKA_EVENT_DR (>=1 message(s))
+ *
+ * @returns the number of messages extracted.
+ *
+ * @remark on_consume() interceptor may be called
+ * from this function prior to passing message to application.
+ */
+RD_EXPORT
+size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev,
+ const rd_kafka_message_t **rkmessages,
+ size_t size);
+
+
+/**
+ * @returns the number of remaining messages in the event.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_FETCH (1 message)
+ * - RD_KAFKA_EVENT_DR (>=1 message(s))
+ */
+RD_EXPORT
+size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns the associated configuration string for the event, or NULL
+ * if the configuration property is not set or if
+ * not applicable for the given event type.
+ *
+ * The returned memory is read-only and its lifetime is the same as the
+ * event object.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH: value of sasl.oauthbearer.config
+ */
+RD_EXPORT
+const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns the error code for the event.
+ *
+ * Use rd_kafka_event_error_is_fatal() to detect if this is a fatal error.
+ *
+ * Event types:
+ * - all
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns the error string (if any).
+ * An application should check that rd_kafka_event_error() returns
+ * non-zero before calling this function.
+ *
+ * Event types:
+ * - all
+ */
+RD_EXPORT
+const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns 1 if the error is a fatal error, else 0.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_ERROR
+ *
+ * @sa rd_kafka_fatal_error()
+ */
+RD_EXPORT
+int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns the event opaque (if any) as passed to rd_kafka_commit() (et.al) or
+ * rd_kafka_AdminOptions_set_opaque(), depending on event type.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_OFFSET_COMMIT
+ * - RD_KAFKA_EVENT_CREATETOPICS_RESULT
+ * - RD_KAFKA_EVENT_DELETETOPICS_RESULT
+ * - RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
+ * - RD_KAFKA_EVENT_CREATEACLS_RESULT
+ * - RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ * - RD_KAFKA_EVENT_DELETEACLS_RESULT
+ * - RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
+ * - RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
+ * - RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+ * - RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
+ * - RD_KAFKA_EVENT_DELETERECORDS_RESULT
+ * - RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
+ * - RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
+ * - RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
+ * - RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
+ */
+RD_EXPORT
+void *rd_kafka_event_opaque(rd_kafka_event_t *rkev);
+
+
+/**
+ * @brief Extract log message from the event.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_LOG
+ *
+ * @returns 0 on success or -1 if unsupported event type.
+ */
+RD_EXPORT
+int rd_kafka_event_log(rd_kafka_event_t *rkev,
+ const char **fac,
+ const char **str,
+ int *level);
+
+
+/**
+ * @brief Extract log debug context from event.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_LOG
+ *
+ * @param rkev the event to extract data from.
+ * @param dst destination string for comma separated list.
+ * @param dstsize size of provided dst buffer.
+ * @returns 0 on success or -1 if unsupported event type.
+ */
+RD_EXPORT
+int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev,
+ char *dst,
+ size_t dstsize);
+
+
+/**
+ * @brief Extract stats from the event.
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_STATS
+ *
+ * @returns stats json string.
+ *
+ * @remark the returned string will be freed automatically along with the event
+ * object
+ *
+ */
+RD_EXPORT
+const char *rd_kafka_event_stats(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns the topic partition list from the event.
+ *
+ * @remark The list MUST NOT be freed with
+ * rd_kafka_topic_partition_list_destroy()
+ *
+ * Event types:
+ * - RD_KAFKA_EVENT_REBALANCE
+ * - RD_KAFKA_EVENT_OFFSET_COMMIT
+ */
+RD_EXPORT rd_kafka_topic_partition_list_t *
+rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev);
+
+
+/**
+ * @returns a newly allocated topic_partition container, if applicable for the
+ * event type, else NULL.
+ *
+ * @remark The returned pointer MUST be freed with
+ * rd_kafka_topic_partition_destroy().
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_ERROR (for partition level errors)
+ */
+RD_EXPORT rd_kafka_topic_partition_t *
+rd_kafka_event_topic_partition(rd_kafka_event_t *rkev);
+
+
+/*! CreateTopics result type */
+typedef rd_kafka_event_t rd_kafka_CreateTopics_result_t;
+/*! DeleteTopics result type */
+typedef rd_kafka_event_t rd_kafka_DeleteTopics_result_t;
+/*! CreateAcls result type */
+typedef rd_kafka_event_t rd_kafka_CreateAcls_result_t;
+/*! DescribeAcls result type */
+typedef rd_kafka_event_t rd_kafka_DescribeAcls_result_t;
+/*! DeleteAcls result type */
+typedef rd_kafka_event_t rd_kafka_DeleteAcls_result_t;
+/*! CreatePartitions result type */
+typedef rd_kafka_event_t rd_kafka_CreatePartitions_result_t;
+/*! AlterConfigs result type */
+typedef rd_kafka_event_t rd_kafka_AlterConfigs_result_t;
+/*! CreateTopics result type */
+typedef rd_kafka_event_t rd_kafka_DescribeConfigs_result_t;
+/*! DeleteRecords result type */
+typedef rd_kafka_event_t rd_kafka_DeleteRecords_result_t;
+/*! ListConsumerGroups result type */
+typedef rd_kafka_event_t rd_kafka_ListConsumerGroups_result_t;
+/*! DescribeConsumerGroups result type */
+typedef rd_kafka_event_t rd_kafka_DescribeConsumerGroups_result_t;
+/*! DeleteGroups result type */
+typedef rd_kafka_event_t rd_kafka_DeleteGroups_result_t;
+/*! DeleteConsumerGroupOffsets result type */
+typedef rd_kafka_event_t rd_kafka_DeleteConsumerGroupOffsets_result_t;
+/*! AlterConsumerGroupOffsets result type */
+typedef rd_kafka_event_t rd_kafka_AlterConsumerGroupOffsets_result_t;
+/*! ListConsumerGroupOffsets result type */
+typedef rd_kafka_event_t rd_kafka_ListConsumerGroupOffsets_result_t;
+
+/**
+ * @brief Get CreateTopics result.
+ *
+ * @returns the result of a CreateTopics request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_CREATETOPICS_RESULT
+ */
+RD_EXPORT const rd_kafka_CreateTopics_result_t *
+rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get DeleteTopics result.
+ *
+ * @returns the result of a DeleteTopics request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DELETETOPICS_RESULT
+ */
+RD_EXPORT const rd_kafka_DeleteTopics_result_t *
+rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get CreatePartitions result.
+ *
+ * @returns the result of a CreatePartitions request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
+ */
+RD_EXPORT const rd_kafka_CreatePartitions_result_t *
+rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get AlterConfigs result.
+ *
+ * @returns the result of a AlterConfigs request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_ALTERCONFIGS_RESULT
+ */
+RD_EXPORT const rd_kafka_AlterConfigs_result_t *
+rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get DescribeConfigs result.
+ *
+ * @returns the result of a DescribeConfigs request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT
+ */
+RD_EXPORT const rd_kafka_DescribeConfigs_result_t *
+rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev);
+
+/**
+ * @returns the result of a DeleteRecords request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DELETERECORDS_RESULT
+ */
+RD_EXPORT const rd_kafka_DeleteRecords_result_t *
+rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get ListConsumerGroups result.
+ *
+ * @returns the result of a ListConsumerGroups request, or NULL if event is of
+ * different type.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p rkev object.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
+ */
+RD_EXPORT const rd_kafka_ListConsumerGroups_result_t *
+rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get DescribeConsumerGroups result.
+ *
+ * @returns the result of a DescribeConsumerGroups request, or NULL if event is
+ * of different type.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p rkev object.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
+ */
+RD_EXPORT const rd_kafka_DescribeConsumerGroups_result_t *
+rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get DeleteGroups result.
+ *
+ * @returns the result of a DeleteGroups request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+ */
+RD_EXPORT const rd_kafka_DeleteGroups_result_t *
+rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get DeleteConsumerGroupOffsets result.
+ *
+ * @returns the result of a DeleteConsumerGroupOffsets request, or NULL if
+ * event is of different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
+ */
+RD_EXPORT const rd_kafka_DeleteConsumerGroupOffsets_result_t *
+rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
+
+/**
+ * @returns the result of a CreateAcls request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_CREATEACLS_RESULT
+ */
+RD_EXPORT const rd_kafka_CreateAcls_result_t *
+rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev);
+
+/**
+ * @returns the result of a DescribeAcls request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ */
+RD_EXPORT const rd_kafka_DescribeAcls_result_t *
+rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev);
+
+/**
+ * @returns the result of a DeleteAcls request, or NULL if event is of
+ * different type.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_DELETEACLS_RESULT
+ */
+RD_EXPORT const rd_kafka_DeleteAcls_result_t *
+rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get AlterConsumerGroupOffsets result.
+ *
+ * @returns the result of a AlterConsumerGroupOffsets request, or NULL if
+ * event is of different type.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p rkev object.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
+ */
+RD_EXPORT const rd_kafka_AlterConsumerGroupOffsets_result_t *
+rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Get ListConsumerGroupOffsets result.
+ *
+ * @returns the result of a ListConsumerGroupOffsets request, or NULL if
+ * event is of different type.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p rkev object.
+ *
+ * Event types:
+ * RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
+ */
+RD_EXPORT const rd_kafka_ListConsumerGroupOffsets_result_t *
+rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev);
+
+/**
+ * @brief Poll a queue for an event for max \p timeout_ms.
+ *
+ * @returns an event, or NULL.
+ *
+ * @remark Use rd_kafka_event_destroy() to free the event.
+ *
+ * @sa rd_kafka_conf_set_background_event_cb()
+ */
+RD_EXPORT
+rd_kafka_event_t *rd_kafka_queue_poll(rd_kafka_queue_t *rkqu, int timeout_ms);
+
+/**
+ * @brief Poll a queue for events served through callbacks for max \p
+ * timeout_ms.
+ *
+ * @returns the number of events served.
+ *
+ * @remark This API must only be used for queues with callbacks registered
+ * for all expected event types. E.g., not a message queue.
+ *
+ * @remark Also see rd_kafka_conf_set_background_event_cb() for triggering
+ * event callbacks from a librdkafka-managed background thread.
+ *
+ * @sa rd_kafka_conf_set_background_event_cb()
+ */
+RD_EXPORT
+int rd_kafka_queue_poll_callback(rd_kafka_queue_t *rkqu, int timeout_ms);
+
+
+/**@}*/
+
+
+/**
+ * @name Plugin interface
+ *
+ * @brief A plugin interface that allows external runtime-loaded libraries
+ * to integrate with a client instance without modifications to
+ * the application code.
+ *
+ * Plugins are loaded when referenced through the `plugin.library.paths`
+ * configuration property and operates on the \c rd_kafka_conf_t
+ * object prior \c rd_kafka_t instance creation.
+ *
+ * @warning Plugins require the application to link librdkafka dynamically
+ * and not statically. Failure to do so will lead to missing symbols
+ * or finding symbols in another librdkafka library than the
+ * application was linked with.
+ * @{
+ */
+
+
+/**
+ * @brief Plugin's configuration initializer method called each time the
+ * library is referenced from configuration (even if previously loaded by
+ * another client instance).
+ *
+ * @remark This method MUST be implemented by plugins and have the symbol name
+ * \c conf_init
+ *
+ * @param conf Configuration set up to this point.
+ * @param plug_opaquep Plugin can set this pointer to a per-configuration
+ * opaque pointer.
+ * @param errstr String buffer of size \p errstr_size where plugin must write
+ * a human readable error string in the case the initializer
+ * fails (returns non-zero).
+ * @param errstr_size Maximum space (including \0) in \p errstr.
+ *
+ * @remark A plugin may add an on_conf_destroy() interceptor to clean up
+ * plugin-specific resources created in the plugin's conf_init() method.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code on error.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_plugin_f_conf_init_t)(
+ rd_kafka_conf_t *conf,
+ void **plug_opaquep,
+ char *errstr,
+ size_t errstr_size);
+
+/**@}*/
+
+
+
+/**
+ * @name Interceptors
+ *
+ * @{
+ *
+ * @brief A callback interface that allows message interception for both
+ * producer and consumer data pipelines.
+ *
+ * Except for the on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy()
+ * interceptors, interceptors are added to the
+ * newly created rd_kafka_t client instance. These interceptors MUST only
+ * be added from on_new() and MUST NOT be added after rd_kafka_new() returns.
+ *
+ * The on_new(), on_conf_set(), on_conf_dup() and on_conf_destroy() interceptors
+ * are added to the configuration object which is later passed to
+ * rd_kafka_new() where on_new() is called to allow addition of
+ * other interceptors.
+ *
+ * Each interceptor reference consists of a display name (ic_name),
+ * a callback function, and an application-specified opaque value that is
+ * passed as-is to the callback.
+ * The ic_name must be unique for the interceptor implementation and is used
+ * to reject duplicate interceptor methods.
+ *
+ * Any number of interceptors can be added and they are called in the order
+ * they were added, unless otherwise noted.
+ * The list of registered interceptor methods are referred to as
+ * interceptor chains.
+ *
+ * @remark Contrary to the Java client the librdkafka interceptor interface
+ * does not support message key and value modification.
+ * Message mutability is discouraged in the Java client and the
+ * combination of serializers and headers cover most use-cases.
+ *
+ * @remark Interceptors are NOT copied to the new configuration on
+ * rd_kafka_conf_dup() since it would be hard for interceptors to
+ * track usage of the interceptor's opaque value.
+ * An interceptor should rely on the plugin, which will be copied
+ * in rd_kafka_conf_conf_dup(), to set up the initial interceptors.
+ * An interceptor should implement the on_conf_dup() method
+ * to manually set up its internal configuration on the newly created
+ * configuration object that is being copied-to based on the
+ * interceptor-specific configuration properties.
+ * conf_dup() should thus be treated the same as conf_init().
+ *
+ * @remark Interceptors are keyed by the interceptor type (on_..()), the
+ * interceptor name (ic_name) and the interceptor method function.
+ * Duplicates are not allowed and the .._add_on_..() method will
+ * return RD_KAFKA_RESP_ERR__CONFLICT if attempting to add a duplicate
+ * method.
+ * The only exception is on_conf_destroy() which may be added multiple
+ * times by the same interceptor to allow proper cleanup of
+ * interceptor configuration state.
+ */
+
+
+/**
+ * @brief on_conf_set() is called from rd_kafka_*_conf_set() in the order
+ * the interceptors were added.
+ *
+ * @param conf Configuration object.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ * @param name The configuration property to set.
+ * @param val The configuration value to set, or NULL for reverting to default
+ * in which case the previous value should be freed.
+ * @param errstr A human readable error string in case the interceptor fails.
+ * @param errstr_size Maximum space (including \0) in \p errstr.
+ *
+ * @returns RD_KAFKA_CONF_OK if the property was known and successfully
+ * handled by the interceptor, RD_KAFKA_CONF_INVALID if the
+ * property was handled by the interceptor but the value was invalid,
+ * or RD_KAFKA_CONF_UNKNOWN if the interceptor did not handle
+ * this property, in which case the property is passed on on the
+ * interceptor in the chain, finally ending up at the built-in
+ * configuration handler.
+ */
+typedef rd_kafka_conf_res_t(rd_kafka_interceptor_f_on_conf_set_t)(
+ rd_kafka_conf_t *conf,
+ const char *name,
+ const char *val,
+ char *errstr,
+ size_t errstr_size,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_conf_dup() is called from rd_kafka_conf_dup() in the
+ * order the interceptors were added and is used to let
+ * an interceptor re-register its conf interecptors with a new
+ * opaque value.
+ * The on_conf_dup() method is called prior to the configuration from
+ * \p old_conf being copied to \p new_conf.
+ *
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ * @param new_conf New configuration object.
+ * @param old_conf Old configuration object to copy properties from.
+ * @param filter_cnt Number of property names to filter in \p filter.
+ * @param filter Property names to filter out (ignore) when setting up
+ * \p new_conf.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an error code
+ * on failure (which is logged but otherwise ignored).
+ *
+ * @remark No on_conf_* interceptors are copied to the new configuration
+ * object on rd_kafka_conf_dup().
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_dup_t)(
+ rd_kafka_conf_t *new_conf,
+ const rd_kafka_conf_t *old_conf,
+ size_t filter_cnt,
+ const char **filter,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_conf_destroy() is called from rd_kafka_*_conf_destroy() in the
+ * order the interceptors were added.
+ *
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_conf_destroy_t)(
+ void *ic_opaque);
+
+
+/**
+ * @brief on_new() is called from rd_kafka_new() prior toreturning
+ * the newly created client instance to the application.
+ *
+ * @param rk The client instance.
+ * @param conf The client instance's final configuration.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ * @param errstr A human readable error string in case the interceptor fails.
+ * @param errstr_size Maximum space (including \0) in \p errstr.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ *
+ * @warning The \p rk client instance will not be fully set up when this
+ * interceptor is called and the interceptor MUST NOT call any
+ * other rk-specific APIs than rd_kafka_interceptor_add..().
+ *
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_new_t)(
+ rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief on_destroy() is called from rd_kafka_destroy() or (rd_kafka_new()
+ * if rd_kafka_new() fails during initialization).
+ *
+ * @param rk The client instance.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ */
+typedef rd_kafka_resp_err_t(
+ rd_kafka_interceptor_f_on_destroy_t)(rd_kafka_t *rk, void *ic_opaque);
+
+
+
+/**
+ * @brief on_send() is called from rd_kafka_produce*() (et.al) prior to
+ * the partitioner being called.
+ *
+ * @param rk The client instance.
+ * @param rkmessage The message being produced. Immutable.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @remark This interceptor is only used by producer instances.
+ *
+ * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
+ * by the interceptor.
+ *
+ * @remark If the partitioner fails or an unknown partition was specified,
+ * the on_acknowledgement() interceptor chain will be called from
+ * within the rd_kafka_produce*() call to maintain send-acknowledgement
+ * symmetry.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_send_t)(
+ rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage,
+ void *ic_opaque);
+
+/**
+ * @brief on_acknowledgement() is called to inform interceptors that a message
+ * was succesfully delivered or permanently failed delivery.
+ * The interceptor chain is called from internal librdkafka background
+ * threads, or rd_kafka_produce*() if the partitioner failed.
+ *
+ * @param rk The client instance.
+ * @param rkmessage The message being produced. Immutable.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @remark This interceptor is only used by producer instances.
+ *
+ * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
+ * by the interceptor.
+ *
+ * @warning The on_acknowledgement() method may be called from internal
+ * librdkafka threads. An on_acknowledgement() interceptor MUST NOT
+ * call any librdkafka API's associated with the \p rk, or perform
+ * any blocking or prolonged work.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_acknowledgement_t)(
+ rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_consume() is called just prior to passing the message to the
+ * application in rd_kafka_consumer_poll(), rd_kafka_consume*(),
+ * the event interface, etc.
+ *
+ * @param rk The client instance.
+ * @param rkmessage The message being consumed. Immutable.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @remark This interceptor is only used by consumer instances.
+ *
+ * @remark The \p rkmessage object is NOT mutable and MUST NOT be modified
+ * by the interceptor.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_consume_t)(
+ rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage,
+ void *ic_opaque);
+
+/**
+ * @brief on_commit() is called on completed or failed offset commit.
+ * It is called from internal librdkafka threads.
+ *
+ * @param rk The client instance.
+ * @param offsets List of topic+partition+offset+error that were committed.
+ * The error message of each partition should be checked for
+ * error.
+ * @param err The commit error, if any.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @remark This interceptor is only used by consumer instances.
+ *
+ * @warning The on_commit() interceptor is called from internal
+ * librdkafka threads. An on_commit() interceptor MUST NOT
+ * call any librdkafka API's associated with the \p rk, or perform
+ * any blocking or prolonged work.
+ *
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_commit_t)(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_request_sent() is called when a request has been fully written
+ * to a broker TCP connections socket.
+ *
+ * @param rk The client instance.
+ * @param sockfd Socket file descriptor.
+ * @param brokername Broker request is being sent to.
+ * @param brokerid Broker request is being sent to.
+ * @param ApiKey Kafka protocol request type.
+ * @param ApiVersion Kafka protocol request type version.
+ * @param CorrId Kafka protocol request correlation id.
+ * @param size Size of request.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @warning The on_request_sent() interceptor is called from internal
+ * librdkafka broker threads. An on_request_sent() interceptor MUST NOT
+ * call any librdkafka API's associated with the \p rk, or perform
+ * any blocking or prolonged work.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_request_sent_t)(
+ rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_response_received() is called when a protocol response has been
+ * fully received from a broker TCP connection socket but before the
+ * response payload is parsed.
+ *
+ * @param rk The client instance.
+ * @param sockfd Socket file descriptor (always -1).
+ * @param brokername Broker response was received from, possibly empty string
+ * on error.
+ * @param brokerid Broker response was received from.
+ * @param ApiKey Kafka protocol request type or -1 on error.
+ * @param ApiVersion Kafka protocol request type version or -1 on error.
+ * @param CorrId Kafka protocol request correlation id, possibly -1 on error.
+ * @param size Size of response, possibly 0 on error.
+ * @param rtt Request round-trip-time in microseconds, possibly -1 on error.
+ * @param err Receive error.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @warning The on_response_received() interceptor is called from internal
+ * librdkafka broker threads. An on_response_received() interceptor
+ * MUST NOT call any librdkafka API's associated with the \p rk, or
+ * perform any blocking or prolonged work.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_response_received_t)(
+ rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_thread_start() is called from a newly created librdkafka-managed
+ * thread.
+
+ * @param rk The client instance.
+ * @param thread_type Thread type.
+ * @param thread_name Human-readable thread name, may not be unique.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @warning The on_thread_start() interceptor is called from internal
+ * librdkafka threads. An on_thread_start() interceptor MUST NOT
+ * call any librdkafka API's associated with the \p rk, or perform
+ * any blocking or prolonged work.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_start_t)(
+ rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type,
+ const char *thread_name,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_thread_exit() is called just prior to a librdkafka-managed
+ * thread exiting from the exiting thread itself.
+ *
+ * @param rk The client instance.
+ * @param thread_type Thread type.n
+ * @param thread_name Human-readable thread name, may not be unique.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @remark Depending on the thread type, librdkafka may execute additional
+ * code on the thread after on_thread_exit() returns.
+ *
+ * @warning The on_thread_exit() interceptor is called from internal
+ * librdkafka threads. An on_thread_exit() interceptor MUST NOT
+ * call any librdkafka API's associated with the \p rk, or perform
+ * any blocking or prolonged work.
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_thread_exit_t)(
+ rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type,
+ const char *thread_name,
+ void *ic_opaque);
+
+
+/**
+ * @brief on_broker_state_change() is called just after a broker
+ * has been created or its state has been changed.
+ *
+ * @param rk The client instance.
+ * @param broker_id The broker id (-1 is used for bootstrap brokers).
+ * @param secproto The security protocol.
+ * @param name The original name of the broker.
+ * @param port The port of the broker.
+ * @param state Broker state name.
+ * @param ic_opaque The interceptor's opaque pointer specified in ..add..().
+ *
+ * @returns an error code on failure, the error is logged but otherwise ignored.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_interceptor_f_on_broker_state_change_t)(
+ rd_kafka_t *rk,
+ int32_t broker_id,
+ const char *secproto,
+ const char *name,
+ int port,
+ const char *state,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_conf_set() interceptor.
+ *
+ * @param conf Configuration object.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_conf_set Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(
+ rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_conf_set_t *on_conf_set,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_conf_dup() interceptor.
+ *
+ * @param conf Configuration object.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_conf_dup Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(
+ rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup,
+ void *ic_opaque);
+
+/**
+ * @brief Append an on_conf_destroy() interceptor.
+ *
+ * @param conf Configuration object.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_conf_destroy Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR
+ *
+ * @remark Multiple on_conf_destroy() interceptors are allowed to be added
+ * to the same configuration object.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(
+ rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_new() interceptor.
+ *
+ * @param conf Configuration object.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_new Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @remark Since the on_new() interceptor is added to the configuration object
+ * it may be copied by rd_kafka_conf_dup().
+ * An interceptor implementation must thus be able to handle
+ * the same interceptor,ic_opaque tuple to be used by multiple
+ * client instances.
+ *
+ * @remark An interceptor plugin should check the return value to make sure it
+ * has not already been added.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_new_t *on_new,
+ void *ic_opaque);
+
+
+
+/**
+ * @brief Append an on_destroy() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_destroy Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_destroy_t *on_destroy,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_send() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_send Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing intercepted with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_interceptor_add_on_send(rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_send_t *on_send,
+ void *ic_opaque);
+
+/**
+ * @brief Append an on_acknowledgement() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_acknowledgement Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_consume() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_consume Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_consume_t *on_consume,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_commit() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_commit() Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_commit_t *on_commit,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_request_sent() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_request_sent() Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_request_sent_t *on_request_sent,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_response_received() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_response_received() Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_response_received_t *on_response_received,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_thread_start() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_thread_start() Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_thread_start_t *on_thread_start,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_thread_exit() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_thread_exit() Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit,
+ void *ic_opaque);
+
+
+/**
+ * @brief Append an on_broker_state_change() interceptor.
+ *
+ * @param rk Client instance.
+ * @param ic_name Interceptor name, used in logging.
+ * @param on_broker_state_change() Function pointer.
+ * @param ic_opaque Opaque value that will be passed to the function.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or RD_KAFKA_RESP_ERR__CONFLICT
+ * if an existing interceptor with the same \p ic_name and function
+ * has already been added to \p conf.
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change,
+ void *ic_opaque);
+
+
+
+/**@}*/
+
+
+
+/**
+ * @name Auxiliary types
+ *
+ * @{
+ */
+
+
+
+/**
+ * @brief Topic result provides per-topic operation result information.
+ *
+ */
+
+/**
+ * @returns the error code for the given topic result.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres);
+
+/**
+ * @returns the human readable error string for the given topic result,
+ * or NULL if there was no error.
+ *
+ * @remark lifetime of the returned string is the same as the \p topicres.
+ */
+RD_EXPORT const char *
+rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres);
+
+/**
+ * @returns the name of the topic for the given topic result.
+ * @remark lifetime of the returned string is the same as the \p topicres.
+ *
+ */
+RD_EXPORT const char *
+rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres);
+
+/**
+ * @brief Group result provides per-group operation result information.
+ *
+ */
+
+/**
+ * @returns the error for the given group result, or NULL on success.
+ * @remark lifetime of the returned error is the same as the \p groupres.
+ */
+RD_EXPORT const rd_kafka_error_t *
+rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres);
+
+/**
+ * @returns the name of the group for the given group result.
+ * @remark lifetime of the returned string is the same as the \p groupres.
+ *
+ */
+RD_EXPORT const char *
+rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres);
+
+/**
+ * @returns the partitions/offsets for the given group result, if applicable
+ * to the request type, else NULL.
+ * @remark lifetime of the returned list is the same as the \p groupres.
+ */
+RD_EXPORT const rd_kafka_topic_partition_list_t *
+rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres);
+
+
+/**@}*/
+
+
+/**
+ * @name Admin API
+ * @{
+ *
+ * @brief The Admin API enables applications to perform administrative
+ * Apache Kafka tasks, such as creating and deleting topics,
+ * altering and reading broker configuration, etc.
+ *
+ * The Admin API is asynchronous and makes use of librdkafka's standard
+ * \c rd_kafka_queue_t queues to propagate the result of an admin operation
+ * back to the application.
+ * The supplied queue may be any queue, such as a temporary single-call queue,
+ * a shared queue used for multiple requests, or even the main queue or
+ * consumer queues.
+ *
+ * Use \c rd_kafka_queue_poll() to collect the result of an admin operation
+ * from the queue of your choice, then extract the admin API-specific result
+ * type by using the corresponding \c rd_kafka_event_CreateTopics_result,
+ * \c rd_kafka_event_DescribeConfigs_result, etc, methods.
+ * Use the getter methods on the \c .._result_t type to extract response
+ * information and finally destroy the result and event by calling
+ * \c rd_kafka_event_destroy().
+ *
+ * Use rd_kafka_event_error() and rd_kafka_event_error_string() to acquire
+ * the request-level error/success for an Admin API request.
+ * Even if the returned value is \c RD_KAFKA_RESP_ERR_NO_ERROR there
+ * may be individual objects (topics, resources, etc) that have failed.
+ * Extract per-object error information with the corresponding
+ * \c rd_kafka_..._result_topics|resources|..() to check per-object errors.
+ *
+ * Locally triggered errors:
+ * - \c RD_KAFKA_RESP_ERR__TIMED_OUT - (Controller) broker connection did not
+ * become available in the time allowed by AdminOption_set_request_timeout.
+ */
+
+
+/**
+ * @enum rd_kafka_admin_op_t
+ *
+ * @brief Admin operation enum name for use with rd_kafka_AdminOptions_new()
+ *
+ * @sa rd_kafka_AdminOptions_new()
+ */
+typedef enum rd_kafka_admin_op_t {
+ RD_KAFKA_ADMIN_OP_ANY = 0, /**< Default value */
+ RD_KAFKA_ADMIN_OP_CREATETOPICS, /**< CreateTopics */
+ RD_KAFKA_ADMIN_OP_DELETETOPICS, /**< DeleteTopics */
+ RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, /**< CreatePartitions */
+ RD_KAFKA_ADMIN_OP_ALTERCONFIGS, /**< AlterConfigs */
+ RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, /**< DescribeConfigs */
+ RD_KAFKA_ADMIN_OP_DELETERECORDS, /**< DeleteRecords */
+ RD_KAFKA_ADMIN_OP_DELETEGROUPS, /**< DeleteGroups */
+ /** DeleteConsumerGroupOffsets */
+ RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS,
+ RD_KAFKA_ADMIN_OP_CREATEACLS, /**< CreateAcls */
+ RD_KAFKA_ADMIN_OP_DESCRIBEACLS, /**< DescribeAcls */
+ RD_KAFKA_ADMIN_OP_DELETEACLS, /**< DeleteAcls */
+ RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, /**< ListConsumerGroups */
+ RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, /**< DescribeConsumerGroups */
+ /** ListConsumerGroupOffsets */
+ RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS,
+ /** AlterConsumerGroupOffsets */
+ RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS,
+ RD_KAFKA_ADMIN_OP__CNT /**< Number of ops defined */
+} rd_kafka_admin_op_t;
+
+/**
+ * @brief AdminOptions provides a generic mechanism for setting optional
+ * parameters for the Admin API requests.
+ *
+ * @remark Since AdminOptions is decoupled from the actual request type
+ * there is no enforcement to prevent setting unrelated properties,
+ * e.g. setting validate_only on a DescribeConfigs request is allowed
+ * but is silently ignored by DescribeConfigs.
+ * Future versions may introduce such enforcement.
+ */
+
+
+typedef struct rd_kafka_AdminOptions_s rd_kafka_AdminOptions_t;
+
+/**
+ * @brief Create a new AdminOptions object.
+ *
+ * The options object is not modified by the Admin API request APIs,
+ * (e.g. CreateTopics) and may be reused for multiple calls.
+ *
+ * @param rk Client instance.
+ * @param for_api Specifies what Admin API this AdminOptions object will be used
+ * for, which will enforce what AdminOptions_set_..() calls may
+ * be used based on the API, causing unsupported set..() calls
+ * to fail.
+ * Specifying RD_KAFKA_ADMIN_OP_ANY disables the enforcement
+ * allowing any option to be set, even if the option
+ * is not used in a future call to an Admin API method.
+ *
+ * @returns a new AdminOptions object (which must be freed with
+ * rd_kafka_AdminOptions_destroy()), or NULL if \p for_api was set to
+ * an unknown API op type.
+ */
+RD_EXPORT rd_kafka_AdminOptions_t *
+rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api);
+
+
+/**
+ * @brief Destroy a AdminOptions object.
+ */
+RD_EXPORT void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options);
+
+
+/**
+ * @brief Sets the overall request timeout, including broker lookup,
+ * request transmission, operation time on broker, and response.
+ *
+ * @param options Admin options.
+ * @param timeout_ms Timeout in milliseconds, use -1 for indefinite timeout.
+ * Defaults to `socket.timeout.ms`.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
+ * case an error string will be written \p errstr.
+ *
+ * @remark This option is valid for all Admin API requests.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options,
+ int timeout_ms,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Sets the broker's operation timeout, such as the timeout for
+ * CreateTopics to complete the creation of topics on the controller
+ * before returning a result to the application.
+ *
+ * CreateTopics: values <= 0 will return immediately after triggering topic
+ * creation, while > 0 will wait this long for topic creation to propagate
+ * in cluster. Default: 60 seconds.
+ *
+ * DeleteTopics: same semantics as CreateTopics.
+ * CreatePartitions: same semantics as CreateTopics.
+ *
+ * @param options Admin options.
+ * @param timeout_ms Timeout in milliseconds.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if timeout was out of range in which
+ * case an error string will be written \p errstr.
+ *
+ * @remark This option is valid for CreateTopics, DeleteTopics,
+ * CreatePartitions, and DeleteRecords.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options,
+ int timeout_ms,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Tell broker to only validate the request, without performing
+ * the requested operation (create topics, etc).
+ *
+ * @param options Admin options.
+ * @param true_or_false Defaults to false.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
+ * error code on failure in which case an error string will
+ * be written \p errstr.
+ *
+ * @remark This option is valid for CreateTopics,
+ * CreatePartitions, AlterConfigs.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options,
+ int true_or_false,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Override what broker the Admin request will be sent to.
+ *
+ * By default, Admin requests are sent to the controller broker, with
+ * the following exceptions:
+ * - AlterConfigs with a BROKER resource are sent to the broker id set
+ * as the resource name.
+ * - DescribeConfigs with a BROKER resource are sent to the broker id set
+ * as the resource name.
+ *
+ * @param options Admin Options.
+ * @param broker_id The broker to send the request to.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or an
+ * error code on failure in which case an error string will
+ * be written \p errstr.
+ *
+ * @remark This API should typically not be used, but serves as a workaround
+ * if new resource types are to the broker that the client
+ * does not know where to send.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options,
+ int32_t broker_id,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Whether broker should return stable offsets
+ * (transaction-committed).
+ *
+ * @param options Admin options.
+ * @param true_or_false Defaults to false.
+ *
+ * @return NULL on success, a new error instance that must be
+ * released with rd_kafka_error_destroy() in case of error.
+ *
+ * @remark This option is valid for ListConsumerGroupOffsets.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(
+ rd_kafka_AdminOptions_t *options,
+ int true_or_false);
+
+/**
+ * @brief Set consumer groups states to query for.
+ *
+ * @param options Admin options.
+ * @param consumer_group_states Array of consumer group states.
+ * @param consumer_group_states_cnt Size of the \p consumer_group_states array.
+ *
+ * @return NULL on success, a new error instance that must be
+ * released with rd_kafka_error_destroy() in case of error.
+ *
+ * @remark This option is valid for ListConsumerGroups.
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(
+ rd_kafka_AdminOptions_t *options,
+ const rd_kafka_consumer_group_state_t *consumer_group_states,
+ size_t consumer_group_states_cnt);
+
+/**
+ * @brief Set application opaque value that can be extracted from the
+ * result event using rd_kafka_event_opaque()
+ */
+RD_EXPORT void
+rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options,
+ void *ev_opaque);
+
+/**@}*/
+
+/**
+ * @name Admin API - Topics
+ * @brief Topic related operations.
+ * @{
+ *
+ */
+
+
+/*! Defines a new topic to be created. */
+typedef struct rd_kafka_NewTopic_s rd_kafka_NewTopic_t;
+
+/**
+ * @brief Create a new NewTopic object. This object is later passed to
+ * rd_kafka_CreateTopics().
+ *
+ * @param topic Topic name to create.
+ * @param num_partitions Number of partitions in topic, or -1 to use the
+ * broker's default partition count (>= 2.4.0).
+ * @param replication_factor Default replication factor for the topic's
+ * partitions, or -1 to use the broker's default
+ * replication factor (>= 2.4.0) or if
+ * set_replica_assignment() will be used.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ *
+ * @returns a new allocated NewTopic object, or NULL if the input parameters
+ * are invalid.
+ * Use rd_kafka_NewTopic_destroy() to free object when done.
+ */
+RD_EXPORT rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic,
+ int num_partitions,
+ int replication_factor,
+ char *errstr,
+ size_t errstr_size);
+
+/**
+ * @brief Destroy and free a NewTopic object previously created with
+ * rd_kafka_NewTopic_new()
+ */
+RD_EXPORT void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic);
+
+
+/**
+ * @brief Helper function to destroy all NewTopic objects in the \p new_topics
+ * array (of \p new_topic_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics,
+ size_t new_topic_cnt);
+
+
+/**
+ * @brief Set the replica (broker) assignment for \p partition to the
+ * replica set in \p broker_ids (of \p broker_id_cnt elements).
+ *
+ * @remark When this method is used, rd_kafka_NewTopic_new() must have
+ * been called with a \c replication_factor of -1.
+ *
+ * @remark An application must either set the replica assignment for
+ * all new partitions, or none.
+ *
+ * @remark If called, this function must be called consecutively for each
+ * partition, starting at 0.
+ *
+ * @remark Use rd_kafka_metadata() to retrieve the list of brokers
+ * in the cluster.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
+ * if the arguments were invalid.
+ *
+ * @sa rd_kafka_AdminOptions_set_validate_only()
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic,
+ int32_t partition,
+ int32_t *broker_ids,
+ size_t broker_id_cnt,
+ char *errstr,
+ size_t errstr_size);
+
+/**
+ * @brief Set (broker-side) topic configuration name/value pair.
+ *
+ * @remark The name and value are not validated by the client, the validation
+ * takes place on the broker.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
+ * if the arguments were invalid.
+ *
+ * @sa rd_kafka_AdminOptions_set_validate_only()
+ * @sa http://kafka.apache.org/documentation.html#topicconfigs
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic,
+ const char *name,
+ const char *value);
+
+
+/**
+ * @brief Create topics in cluster as specified by the \p new_topics
+ * array of size \p new_topic_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param new_topics Array of new topics to create.
+ * @param new_topic_cnt Number of elements in \p new_topics array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_validate_only() - default false
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
+ * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_CREATETOPICS_RESULT
+ */
+RD_EXPORT void rd_kafka_CreateTopics(rd_kafka_t *rk,
+ rd_kafka_NewTopic_t **new_topics,
+ size_t new_topic_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+/*
+ * CreateTopics result type and methods
+ */
+
+/**
+ * @brief Get an array of topic results from a CreateTopics result.
+ *
+ * The returned \p topics life-time is the same as the \p result object.
+ *
+ * @param result Result to get topics from.
+ * @param cntp Updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(
+ const rd_kafka_CreateTopics_result_t *result,
+ size_t *cntp);
+
+
+
+/*
+ * DeleteTopics - delete topics from cluster
+ *
+ */
+
+/*! Represents a topic to be deleted. */
+typedef struct rd_kafka_DeleteTopic_s rd_kafka_DeleteTopic_t;
+
+/**
+ * @brief Create a new DeleteTopic object. This object is later passed to
+ * rd_kafka_DeleteTopics().
+ *
+ * @param topic Topic name to delete.
+ *
+ * @returns a new allocated DeleteTopic object.
+ * Use rd_kafka_DeleteTopic_destroy() to free object when done.
+ */
+RD_EXPORT rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic);
+
+/**
+ * @brief Destroy and free a DeleteTopic object previously created with
+ * rd_kafka_DeleteTopic_new()
+ */
+RD_EXPORT void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic);
+
+/**
+ * @brief Helper function to destroy all DeleteTopic objects in
+ * the \p del_topics array (of \p del_topic_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics,
+ size_t del_topic_cnt);
+
+/**
+ * @brief Delete topics from cluster as specified by the \p topics
+ * array of size \p topic_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param del_topics Array of topics to delete.
+ * @param del_topic_cnt Number of elements in \p topics array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DELETETOPICS_RESULT
+ */
+RD_EXPORT
+void rd_kafka_DeleteTopics(rd_kafka_t *rk,
+ rd_kafka_DeleteTopic_t **del_topics,
+ size_t del_topic_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * DeleteTopics result type and methods
+ */
+
+/**
+ * @brief Get an array of topic results from a DeleteTopics result.
+ *
+ * The returned \p topics life-time is the same as the \p result object.
+ *
+ * @param result Result to get topic results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(
+ const rd_kafka_DeleteTopics_result_t *result,
+ size_t *cntp);
+
+
+/**@}*/
+
+/**
+ * @name Admin API - Partitions
+ * @brief Partition related operations.
+ * @{
+ *
+ */
+
+/*! Defines a new partition to be created. */
+typedef struct rd_kafka_NewPartitions_s rd_kafka_NewPartitions_t;
+
+/**
+ * @brief Create a new NewPartitions. This object is later passed to
+ * rd_kafka_CreatePartitions() to increase the number of partitions
+ * to \p new_total_cnt for an existing topic.
+ *
+ * @param topic Topic name to create more partitions for.
+ * @param new_total_cnt Increase the topic's partition count to this value.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * @returns a new allocated NewPartitions object, or NULL if the
+ * input parameters are invalid.
+ * Use rd_kafka_NewPartitions_destroy() to free object when done.
+ */
+RD_EXPORT rd_kafka_NewPartitions_t *
+rd_kafka_NewPartitions_new(const char *topic,
+ size_t new_total_cnt,
+ char *errstr,
+ size_t errstr_size);
+
+/**
+ * @brief Destroy and free a NewPartitions object previously created with
+ * rd_kafka_NewPartitions_new()
+ */
+RD_EXPORT void
+rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *new_parts);
+
+/**
+ * @brief Helper function to destroy all NewPartitions objects in the
+ * \p new_parts array (of \p new_parts_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **new_parts,
+ size_t new_parts_cnt);
+
+/**
+ * @brief Set the replica (broker id) assignment for \p new_partition_idx to the
+ * replica set in \p broker_ids (of \p broker_id_cnt elements).
+ *
+ * @remark An application must either set the replica assignment for
+ * all new partitions, or none.
+ *
+ * @remark If called, this function must be called consecutively for each
+ * new partition being created,
+ * where \p new_partition_idx 0 is the first new partition,
+ * 1 is the second, and so on.
+ *
+ * @remark \p broker_id_cnt should match the topic's replication factor.
+ *
+ * @remark Use rd_kafka_metadata() to retrieve the list of brokers
+ * in the cluster.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success, or an error code
+ * if the arguments were invalid.
+ *
+ * @sa rd_kafka_AdminOptions_set_validate_only()
+ */
+RD_EXPORT rd_kafka_resp_err_t rd_kafka_NewPartitions_set_replica_assignment(
+ rd_kafka_NewPartitions_t *new_parts,
+ int32_t new_partition_idx,
+ int32_t *broker_ids,
+ size_t broker_id_cnt,
+ char *errstr,
+ size_t errstr_size);
+
+
+/**
+ * @brief Create additional partitions for the given topics, as specified
+ * by the \p new_parts array of size \p new_parts_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param new_parts Array of topics for which new partitions are to be created.
+ * @param new_parts_cnt Number of elements in \p new_parts array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_validate_only() - default false
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds
+ * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT
+ */
+RD_EXPORT void rd_kafka_CreatePartitions(rd_kafka_t *rk,
+ rd_kafka_NewPartitions_t **new_parts,
+ size_t new_parts_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * CreatePartitions result type and methods
+ */
+
+/**
+ * @brief Get an array of topic results from a CreatePartitions result.
+ *
+ * The returned \p topics life-time is the same as the \p result object.
+ *
+ * @param result Result o get topic results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_topic_result_t **
+rd_kafka_CreatePartitions_result_topics(
+ const rd_kafka_CreatePartitions_result_t *result,
+ size_t *cntp);
+
+/**@}*/
+
+/**
+ * @name Admin API - Configuration
+ * @brief Cluster, broker, topic configuration entries, sources, etc.
+ * @{
+ *
+ */
+
+/**
+ * @enum rd_kafka_ConfigSource_t
+ *
+ * @brief Apache Kafka config sources.
+ *
+ * @remark These entities relate to the cluster, not the local client.
+ *
+ * @sa rd_kafka_conf_set(), et.al. for local client configuration.
+ */
+typedef enum rd_kafka_ConfigSource_t {
+ /** Source unknown, e.g., in the ConfigEntry used for alter requests
+ * where source is not set */
+ RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG = 0,
+ /** Dynamic topic config that is configured for a specific topic */
+ RD_KAFKA_CONFIG_SOURCE_DYNAMIC_TOPIC_CONFIG = 1,
+ /** Dynamic broker config that is configured for a specific broker */
+ RD_KAFKA_CONFIG_SOURCE_DYNAMIC_BROKER_CONFIG = 2,
+ /** Dynamic broker config that is configured as default for all
+ * brokers in the cluster */
+ RD_KAFKA_CONFIG_SOURCE_DYNAMIC_DEFAULT_BROKER_CONFIG = 3,
+ /** Static broker config provided as broker properties at startup
+ * (e.g. from server.properties file) */
+ RD_KAFKA_CONFIG_SOURCE_STATIC_BROKER_CONFIG = 4,
+ /** Built-in default configuration for configs that have a
+ * default value */
+ RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG = 5,
+
+ /** Number of source types defined */
+ RD_KAFKA_CONFIG_SOURCE__CNT,
+} rd_kafka_ConfigSource_t;
+
+
+/**
+ * @returns a string representation of the \p confsource.
+ */
+RD_EXPORT const char *
+rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource);
+
+
+/*! Apache Kafka configuration entry. */
+typedef struct rd_kafka_ConfigEntry_s rd_kafka_ConfigEntry_t;
+
+/**
+ * @returns the configuration property name
+ */
+RD_EXPORT const char *
+rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry);
+
+/**
+ * @returns the configuration value, may be NULL for sensitive or unset
+ * properties.
+ */
+RD_EXPORT const char *
+rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry);
+
+/**
+ * @returns the config source.
+ */
+RD_EXPORT rd_kafka_ConfigSource_t
+rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry);
+
+/**
+ * @returns 1 if the config property is read-only on the broker, else 0.
+ * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
+ */
+RD_EXPORT int
+rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry);
+
+/**
+ * @returns 1 if the config property is set to its default value on the broker,
+ * else 0.
+ * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
+ */
+RD_EXPORT int
+rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry);
+
+/**
+ * @returns 1 if the config property contains sensitive information (such as
+ * security configuration), else 0.
+ * @remark An application should take care not to include the value of
+ * sensitive configuration entries in its output.
+ * @remark Shall only be used on a DescribeConfigs result, otherwise returns -1.
+ */
+RD_EXPORT int
+rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry);
+
+/**
+ * @returns 1 if this entry is a synonym, else 0.
+ */
+RD_EXPORT int
+rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry);
+
+
+/**
+ * @returns the synonym config entry array.
+ *
+ * @param entry Entry to get synonyms for.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned entry is the same as \p conf .
+ * @remark Shall only be used on a DescribeConfigs result,
+ * otherwise returns NULL.
+ */
+RD_EXPORT const rd_kafka_ConfigEntry_t **
+rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry,
+ size_t *cntp);
+
+
+
+/**
+ * @enum rd_kafka_ResourceType_t
+ * @brief Apache Kafka resource types
+ */
+typedef enum rd_kafka_ResourceType_t {
+ RD_KAFKA_RESOURCE_UNKNOWN = 0, /**< Unknown */
+ RD_KAFKA_RESOURCE_ANY = 1, /**< Any (used for lookups) */
+ RD_KAFKA_RESOURCE_TOPIC = 2, /**< Topic */
+ RD_KAFKA_RESOURCE_GROUP = 3, /**< Group */
+ RD_KAFKA_RESOURCE_BROKER = 4, /**< Broker */
+ RD_KAFKA_RESOURCE__CNT, /**< Number of resource types defined */
+} rd_kafka_ResourceType_t;
+
+/**
+ * @enum rd_kafka_ResourcePatternType_t
+ * @brief Apache Kafka pattern types
+ */
+typedef enum rd_kafka_ResourcePatternType_t {
+ /** Unknown */
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN = 0,
+ /** Any (used for lookups) */
+ RD_KAFKA_RESOURCE_PATTERN_ANY = 1,
+ /** Match: will perform pattern matching */
+ RD_KAFKA_RESOURCE_PATTERN_MATCH = 2,
+ /** Literal: A literal resource name */
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL = 3,
+ /** Prefixed: A prefixed resource name */
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED = 4,
+ RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT,
+} rd_kafka_ResourcePatternType_t;
+
+/**
+ * @returns a string representation of the \p resource_pattern_type
+ */
+RD_EXPORT const char *rd_kafka_ResourcePatternType_name(
+ rd_kafka_ResourcePatternType_t resource_pattern_type);
+
+/**
+ * @returns a string representation of the \p restype
+ */
+RD_EXPORT const char *
+rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype);
+
+/*! Apache Kafka configuration resource. */
+typedef struct rd_kafka_ConfigResource_s rd_kafka_ConfigResource_t;
+
+
+/**
+ * @brief Create new ConfigResource object.
+ *
+ * @param restype The resource type (e.g., RD_KAFKA_RESOURCE_TOPIC)
+ * @param resname The resource name (e.g., the topic name)
+ *
+ * @returns a newly allocated object
+ */
+RD_EXPORT rd_kafka_ConfigResource_t *
+rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype,
+ const char *resname);
+
+/**
+ * @brief Destroy and free a ConfigResource object previously created with
+ * rd_kafka_ConfigResource_new()
+ */
+RD_EXPORT void
+rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config);
+
+
+/**
+ * @brief Helper function to destroy all ConfigResource objects in
+ * the \p configs array (of \p config_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config,
+ size_t config_cnt);
+
+
+/**
+ * @brief Set configuration name value pair.
+ *
+ * @param config ConfigResource to set config property on.
+ * @param name Configuration name, depends on resource type.
+ * @param value Configuration value, depends on resource type and \p name.
+ * Set to \c NULL to revert configuration value to default.
+ *
+ * This will overwrite the current value.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if config was added to resource,
+ * or RD_KAFKA_RESP_ERR__INVALID_ARG on invalid input.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config,
+ const char *name,
+ const char *value);
+
+
+/**
+ * @brief Get an array of config entries from a ConfigResource object.
+ *
+ * The returned object life-times are the same as the \p config object.
+ *
+ * @param config ConfigResource to get configs from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_ConfigEntry_t **
+rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config,
+ size_t *cntp);
+
+
+
+/**
+ * @returns the ResourceType for \p config
+ */
+RD_EXPORT rd_kafka_ResourceType_t
+rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config);
+
+/**
+ * @returns the name for \p config
+ */
+RD_EXPORT const char *
+rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config);
+
+/**
+ * @returns the error for this resource from an AlterConfigs request
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config);
+
+/**
+ * @returns the error string for this resource from an AlterConfigs
+ * request, or NULL if no error.
+ */
+RD_EXPORT const char *
+rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config);
+
+
+/*
+ * AlterConfigs - alter cluster configuration.
+ *
+ */
+
+
+/**
+ * @brief Update the configuration for the specified resources.
+ * Updates are not transactional so they may succeed for a subset
+ * of the provided resources while the others fail.
+ * The configuration for a particular resource is updated atomically,
+ * replacing values using the provided ConfigEntrys and reverting
+ * unspecified ConfigEntrys to their default values.
+ *
+ * @remark Requires broker version >=0.11.0.0
+ *
+ * @warning AlterConfigs will replace all existing configuration for
+ * the provided resources with the new configuration given,
+ * reverting all other configuration to their default values.
+ *
+ * @remark Multiple resources and resource types may be set, but at most one
+ * resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call
+ * since these resource requests must be sent to the broker specified
+ * in the resource.
+ *
+ */
+RD_EXPORT
+void rd_kafka_AlterConfigs(rd_kafka_t *rk,
+ rd_kafka_ConfigResource_t **configs,
+ size_t config_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+/*
+ * AlterConfigs result type and methods
+ */
+
+/**
+ * @brief Get an array of resource results from a AlterConfigs result.
+ *
+ * Use \c rd_kafka_ConfigResource_error() and
+ * \c rd_kafka_ConfigResource_error_string() to extract per-resource error
+ * results on the returned array elements.
+ *
+ * The returned object life-times are the same as the \p result object.
+ *
+ * @param result Result object to get resource results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @returns an array of ConfigResource elements, or NULL if not available.
+ */
+RD_EXPORT const rd_kafka_ConfigResource_t **
+rd_kafka_AlterConfigs_result_resources(
+ const rd_kafka_AlterConfigs_result_t *result,
+ size_t *cntp);
+
+
+
+/*
+ * DescribeConfigs - retrieve cluster configuration.
+ *
+ */
+
+
+/**
+ * @brief Get configuration for the specified resources in \p configs.
+ *
+ * The returned configuration includes default values and the
+ * rd_kafka_ConfigEntry_is_default() or rd_kafka_ConfigEntry_source()
+ * methods may be used to distinguish them from user supplied values.
+ *
+ * The value of config entries where rd_kafka_ConfigEntry_is_sensitive()
+ * is true will always be NULL to avoid disclosing sensitive
+ * information, such as security settings.
+ *
+ * Configuration entries where rd_kafka_ConfigEntry_is_read_only()
+ * is true can't be updated (with rd_kafka_AlterConfigs()).
+ *
+ * Synonym configuration entries are returned if the broker supports
+ * it (broker version >= 1.1.0). See rd_kafka_ConfigEntry_synonyms().
+ *
+ * @remark Requires broker version >=0.11.0.0
+ *
+ * @remark Multiple resources and resource types may be requested, but at most
+ * one resource of type \c RD_KAFKA_RESOURCE_BROKER is allowed per call
+ * since these resource requests must be sent to the broker specified
+ * in the resource.
+ */
+RD_EXPORT
+void rd_kafka_DescribeConfigs(rd_kafka_t *rk,
+ rd_kafka_ConfigResource_t **configs,
+ size_t config_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * DescribeConfigs result type and methods
+ */
+
+/**
+ * @brief Get an array of resource results from a DescribeConfigs result.
+ *
+ * The returned \p resources life-time is the same as the \p result object.
+ *
+ * @param result Result object to get resource results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_ConfigResource_t **
+rd_kafka_DescribeConfigs_result_resources(
+ const rd_kafka_DescribeConfigs_result_t *result,
+ size_t *cntp);
+
+
+/**@}*/
+
+/**
+ * @name Admin API - DeleteRecords
+ * @brief delete records (messages) from partitions.
+ * @{
+ *
+ */
+
+/**! Represents records to be deleted */
+typedef struct rd_kafka_DeleteRecords_s rd_kafka_DeleteRecords_t;
+
+/**
+ * @brief Create a new DeleteRecords object. This object is later passed to
+ * rd_kafka_DeleteRecords().
+ *
+ * \p before_offsets must contain \c topic, \c partition, and
+ * \c offset is the offset before which the messages will
+ * be deleted (exclusive).
+ * Set \c offset to RD_KAFKA_OFFSET_END (high-watermark) in order to
+ * delete all data in the partition.
+ *
+ * @param before_offsets For each partition delete all messages up to but not
+ * including the specified offset.
+ *
+ * @returns a new allocated DeleteRecords object.
+ * Use rd_kafka_DeleteRecords_destroy() to free object when done.
+ */
+RD_EXPORT rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(
+ const rd_kafka_topic_partition_list_t *before_offsets);
+
+/**
+ * @brief Destroy and free a DeleteRecords object previously created with
+ * rd_kafka_DeleteRecords_new()
+ */
+RD_EXPORT void
+rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records);
+
+/**
+ * @brief Helper function to destroy all DeleteRecords objects in
+ * the \p del_groups array (of \p del_group_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_DeleteRecords_destroy_array(rd_kafka_DeleteRecords_t **del_records,
+ size_t del_record_cnt);
+
+/**
+ * @brief Delete records (messages) in topic partitions older than the
+ * offsets provided.
+ *
+ * @param rk Client instance.
+ * @param del_records The offsets to delete (up to).
+ * Currently only one DeleteRecords_t (but containing
+ * multiple offsets) is supported.
+ * @param del_record_cnt The number of elements in del_records, must be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 60 seconds.
+ * Controls how long the brokers will wait for records to be deleted.
+ * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms.
+ * Controls how long \c rdkafka will wait for the request to complete.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DELETERECORDS_RESULT
+ */
+RD_EXPORT void rd_kafka_DeleteRecords(rd_kafka_t *rk,
+ rd_kafka_DeleteRecords_t **del_records,
+ size_t del_record_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+/*
+ * DeleteRecords result type and methods
+ */
+
+/**
+ * @brief Get a list of topic and partition results from a DeleteRecords result.
+ * The returned objects will contain \c topic, \c partition, \c offset
+ * and \c err. \c offset will be set to the post-deletion low-watermark
+ * (smallest available offset of all live replicas). \c err will be set
+ * per-partition if deletion failed.
+ *
+ * The returned object's life-time is the same as the \p result object.
+ */
+RD_EXPORT const rd_kafka_topic_partition_list_t *
+rd_kafka_DeleteRecords_result_offsets(
+ const rd_kafka_DeleteRecords_result_t *result);
+
+/**@}*/
+
+/**
+ * @name Admin API - ListConsumerGroups
+ * @{
+ */
+
+
+/**
+ * @brief ListConsumerGroups result for a single group
+ */
+
+/**! ListConsumerGroups result for a single group */
+typedef struct rd_kafka_ConsumerGroupListing_s rd_kafka_ConsumerGroupListing_t;
+
+/**! ListConsumerGroups results and errors */
+typedef struct rd_kafka_ListConsumerGroupsResult_s
+ rd_kafka_ListConsumerGroupsResult_t;
+
+/**
+ * @brief List the consumer groups available in the cluster.
+ *
+ * @param rk Client instance.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT
+ */
+RD_EXPORT
+void rd_kafka_ListConsumerGroups(rd_kafka_t *rk,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief Gets the group id for the \p grplist group.
+ *
+ * @param grplist The group listing.
+ *
+ * @return The group id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p grplist object.
+ */
+RD_EXPORT
+const char *rd_kafka_ConsumerGroupListing_group_id(
+ const rd_kafka_ConsumerGroupListing_t *grplist);
+
+/**
+ * @brief Is the \p grplist group a simple consumer group.
+ *
+ * @param grplist The group listing.
+ *
+ * @return 1 if the group is a simple consumer group,
+ * else 0.
+ */
+RD_EXPORT
+int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
+ const rd_kafka_ConsumerGroupListing_t *grplist);
+
+/**
+ * @brief Gets state for the \p grplist group.
+ *
+ * @param grplist The group listing.
+ *
+ * @return A group state.
+ */
+RD_EXPORT
+rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(
+ const rd_kafka_ConsumerGroupListing_t *grplist);
+
+/**
+ * @brief Get an array of valid list groups from a ListConsumerGroups result.
+ *
+ * The returned groups life-time is the same as the \p result object.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_ConsumerGroupListing_t **
+rd_kafka_ListConsumerGroups_result_valid(
+ const rd_kafka_ListConsumerGroups_result_t *result,
+ size_t *cntp);
+
+/**
+ * @brief Get an array of errors from a ListConsumerGroups call result.
+ *
+ * The returned errors life-time is the same as the \p result object.
+ *
+ * @param result ListConsumerGroups result.
+ * @param cntp Is updated to the number of elements in the array.
+ *
+ * @return Array of errors in \p result.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(
+ const rd_kafka_ListConsumerGroups_result_t *result,
+ size_t *cntp);
+
+/**@}*/
+
+/**
+ * @name Admin API - DescribeConsumerGroups
+ * @{
+ */
+
+/**
+ * @brief DescribeConsumerGroups result type.
+ *
+ */
+typedef struct rd_kafka_ConsumerGroupDescription_s
+ rd_kafka_ConsumerGroupDescription_t;
+
+/**
+ * @brief Member description included in ConsumerGroupDescription.
+ *
+ */
+typedef struct rd_kafka_MemberDescription_s rd_kafka_MemberDescription_t;
+
+/**
+ * @brief Member assignment included in MemberDescription.
+ *
+ */
+typedef struct rd_kafka_MemberAssignment_s rd_kafka_MemberAssignment_t;
+
+/**
+ * @brief Describe groups from cluster as specified by the \p groups
+ * array of size \p groups_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param groups Array of groups to describe.
+ * @param groups_cnt Number of elements in \p groups array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT
+ */
+RD_EXPORT
+void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk,
+ const char **groups,
+ size_t groups_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**
+ * @brief Get an array of group results from a DescribeConsumerGroups result.
+ *
+ * The returned groups life-time is the same as the \p result object.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p result object.
+ */
+RD_EXPORT
+const rd_kafka_ConsumerGroupDescription_t **
+rd_kafka_DescribeConsumerGroups_result_groups(
+ const rd_kafka_DescribeConsumerGroups_result_t *result,
+ size_t *cntp);
+
+
+/**
+ * @brief Gets the group id for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The group id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const char *rd_kafka_ConsumerGroupDescription_group_id(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets the error for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The group description error.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Is the \p grpdesc group a simple consumer group.
+ *
+ * @param grpdesc The group description.
+ * @return 1 if the group is a simple consumer group,
+ * else 0.
+ */
+RD_EXPORT
+int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+
+/**
+ * @brief Gets the partition assignor for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The partition assignor.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const char *rd_kafka_ConsumerGroupDescription_partition_assignor(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+
+/**
+ * @brief Gets state for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return A group state.
+ */
+RD_EXPORT
+rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets the coordinator for the \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The group coordinator.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets the members count of \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ *
+ * @return The member count.
+ */
+RD_EXPORT
+size_t rd_kafka_ConsumerGroupDescription_member_count(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc);
+
+/**
+ * @brief Gets a member of \p grpdesc group.
+ *
+ * @param grpdesc The group description.
+ * @param idx The member idx.
+ *
+ * @return A member at index \p idx, or NULL if
+ * \p idx is out of range.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p grpdesc object.
+ */
+RD_EXPORT
+const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc,
+ size_t idx);
+
+/**
+ * @brief Gets client id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The client id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_client_id(
+ const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets group instance id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The group instance id, or NULL if not available.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_group_instance_id(
+ const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets consumer id of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The consumer id.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *rd_kafka_MemberDescription_consumer_id(
+ const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets host of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The host.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const char *
+rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets assignment of \p member.
+ *
+ * @param member The group member.
+ *
+ * @return The member assignment.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p member object.
+ */
+RD_EXPORT
+const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(
+ const rd_kafka_MemberDescription_t *member);
+
+/**
+ * @brief Gets assigned partitions of a member \p assignment.
+ *
+ * @param assignment The group member assignment.
+ *
+ * @return The assigned partitions.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p assignment object.
+ */
+RD_EXPORT
+const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(
+ const rd_kafka_MemberAssignment_t *assignment);
+
+/**@}*/
+
+/**
+ * @name Admin API - DeleteGroups
+ * @brief Delete groups from cluster
+ * @{
+ *
+ *
+ */
+
+/*! Represents a group to be deleted. */
+typedef struct rd_kafka_DeleteGroup_s rd_kafka_DeleteGroup_t;
+
+/**
+ * @brief Create a new DeleteGroup object. This object is later passed to
+ * rd_kafka_DeleteGroups().
+ *
+ * @param group Name of group to delete.
+ *
+ * @returns a new allocated DeleteGroup object.
+ * Use rd_kafka_DeleteGroup_destroy() to free object when done.
+ */
+RD_EXPORT
+rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group);
+
+/**
+ * @brief Destroy and free a DeleteGroup object previously created with
+ * rd_kafka_DeleteGroup_new()
+ */
+RD_EXPORT
+void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group);
+
+/**
+ * @brief Helper function to destroy all DeleteGroup objects in
+ * the \p del_groups array (of \p del_group_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups,
+ size_t del_group_cnt);
+
+/**
+ * @brief Delete groups from cluster as specified by the \p del_groups
+ * array of size \p del_group_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param del_groups Array of groups to delete.
+ * @param del_group_cnt Number of elements in \p del_groups array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DELETEGROUPS_RESULT
+ *
+ * @remark This function in called deleteConsumerGroups in the Java client.
+ */
+RD_EXPORT
+void rd_kafka_DeleteGroups(rd_kafka_t *rk,
+ rd_kafka_DeleteGroup_t **del_groups,
+ size_t del_group_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * DeleteGroups result type and methods
+ */
+
+/**
+ * @brief Get an array of group results from a DeleteGroups result.
+ *
+ * The returned groups life-time is the same as the \p result object.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(
+ const rd_kafka_DeleteGroups_result_t *result,
+ size_t *cntp);
+
+/**@}*/
+
+/**
+ * @name Admin API - ListConsumerGroupOffsets
+ * @{
+ *
+ *
+ */
+
+/*! Represents consumer group committed offsets to be listed. */
+typedef struct rd_kafka_ListConsumerGroupOffsets_s
+ rd_kafka_ListConsumerGroupOffsets_t;
+
+/**
+ * @brief Create a new ListConsumerGroupOffsets object.
+ * This object is later passed to rd_kafka_ListConsumerGroupOffsets().
+ *
+ * @param group_id Consumer group id.
+ * @param partitions Partitions to list committed offsets for.
+ * Only the topic and partition fields are used.
+ *
+ * @returns a new allocated ListConsumerGroupOffsets object.
+ * Use rd_kafka_ListConsumerGroupOffsets_destroy() to free
+ * object when done.
+ */
+RD_EXPORT rd_kafka_ListConsumerGroupOffsets_t *
+rd_kafka_ListConsumerGroupOffsets_new(
+ const char *group_id,
+ const rd_kafka_topic_partition_list_t *partitions);
+
+/**
+ * @brief Destroy and free a ListConsumerGroupOffsets object previously
+ * created with rd_kafka_ListConsumerGroupOffsets_new()
+ */
+RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy(
+ rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets);
+
+/**
+ * @brief Helper function to destroy all ListConsumerGroupOffsets objects in
+ * the \p list_grpoffsets array (of \p list_grpoffsets_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void rd_kafka_ListConsumerGroupOffsets_destroy_array(
+ rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
+ size_t list_grpoffset_cnt);
+
+/**
+ * @brief List committed offsets for a set of partitions in a consumer
+ * group.
+ *
+ * @param rk Client instance.
+ * @param list_grpoffsets Array of group committed offsets to list.
+ * MUST only be one single element.
+ * @param list_grpoffsets_cnt Number of elements in \p list_grpoffsets array.
+ * MUST always be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT
+ *
+ * @remark The current implementation only supports one group per invocation.
+ */
+RD_EXPORT
+void rd_kafka_ListConsumerGroupOffsets(
+ rd_kafka_t *rk,
+ rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
+ size_t list_grpoffsets_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * ListConsumerGroupOffsets result type and methods
+ */
+
+/**
+ * @brief Get an array of results from a ListConsumerGroupOffsets result.
+ *
+ * The returned groups life-time is the same as the \p result object.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p result object.
+ */
+RD_EXPORT const rd_kafka_group_result_t **
+rd_kafka_ListConsumerGroupOffsets_result_groups(
+ const rd_kafka_ListConsumerGroupOffsets_result_t *result,
+ size_t *cntp);
+
+
+
+/**@}*/
+
+/**
+ * @name Admin API - AlterConsumerGroupOffsets
+ * @{
+ *
+ *
+ */
+
+/*! Represents consumer group committed offsets to be altered. */
+typedef struct rd_kafka_AlterConsumerGroupOffsets_s
+ rd_kafka_AlterConsumerGroupOffsets_t;
+
+/**
+ * @brief Create a new AlterConsumerGroupOffsets object.
+ * This object is later passed to rd_kafka_AlterConsumerGroupOffsets().
+ *
+ * @param group_id Consumer group id.
+ * @param partitions Partitions to alter committed offsets for.
+ * Only the topic and partition fields are used.
+ *
+ * @returns a new allocated AlterConsumerGroupOffsets object.
+ * Use rd_kafka_AlterConsumerGroupOffsets_destroy() to free
+ * object when done.
+ */
+RD_EXPORT rd_kafka_AlterConsumerGroupOffsets_t *
+rd_kafka_AlterConsumerGroupOffsets_new(
+ const char *group_id,
+ const rd_kafka_topic_partition_list_t *partitions);
+
+/**
+ * @brief Destroy and free a AlterConsumerGroupOffsets object previously
+ * created with rd_kafka_AlterConsumerGroupOffsets_new()
+ */
+RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy(
+ rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets);
+
+/**
+ * @brief Helper function to destroy all AlterConsumerGroupOffsets objects in
+ * the \p alter_grpoffsets array (of \p alter_grpoffsets_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void rd_kafka_AlterConsumerGroupOffsets_destroy_array(
+ rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
+ size_t alter_grpoffset_cnt);
+
+/**
+ * @brief Alter committed offsets for a set of partitions in a consumer
+ * group. This will succeed at the partition level only if the group
+ * is not actively subscribed to the corresponding topic.
+ *
+ * @param rk Client instance.
+ * @param alter_grpoffsets Array of group committed offsets to alter.
+ * MUST only be one single element.
+ * @param alter_grpoffsets_cnt Number of elements in \p alter_grpoffsets array.
+ * MUST always be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT
+ *
+ * @remark The current implementation only supports one group per invocation.
+ */
+RD_EXPORT
+void rd_kafka_AlterConsumerGroupOffsets(
+ rd_kafka_t *rk,
+ rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
+ size_t alter_grpoffsets_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * AlterConsumerGroupOffsets result type and methods
+ */
+
+/**
+ * @brief Get an array of results from a AlterConsumerGroupOffsets result.
+ *
+ * The returned groups life-time is the same as the \p result object.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ *
+ * @remark The lifetime of the returned memory is the same
+ * as the lifetime of the \p result object.
+ */
+RD_EXPORT const rd_kafka_group_result_t **
+rd_kafka_AlterConsumerGroupOffsets_result_groups(
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *result,
+ size_t *cntp);
+
+
+
+/**@}*/
+
+/**
+ * @name Admin API - DeleteConsumerGroupOffsets
+ * @{
+ *
+ *
+ */
+
+/*! Represents consumer group committed offsets to be deleted. */
+typedef struct rd_kafka_DeleteConsumerGroupOffsets_s
+ rd_kafka_DeleteConsumerGroupOffsets_t;
+
+/**
+ * @brief Create a new DeleteConsumerGroupOffsets object.
+ * This object is later passed to rd_kafka_DeleteConsumerGroupOffsets().
+ *
+ * @param group Consumer group id.
+ * @param partitions Partitions to delete committed offsets for.
+ * Only the topic and partition fields are used.
+ *
+ * @returns a new allocated DeleteConsumerGroupOffsets object.
+ * Use rd_kafka_DeleteConsumerGroupOffsets_destroy() to free
+ * object when done.
+ */
+RD_EXPORT rd_kafka_DeleteConsumerGroupOffsets_t *
+rd_kafka_DeleteConsumerGroupOffsets_new(
+ const char *group,
+ const rd_kafka_topic_partition_list_t *partitions);
+
+/**
+ * @brief Destroy and free a DeleteConsumerGroupOffsets object previously
+ * created with rd_kafka_DeleteConsumerGroupOffsets_new()
+ */
+RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy(
+ rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets);
+
+/**
+ * @brief Helper function to destroy all DeleteConsumerGroupOffsets objects in
+ * the \p del_grpoffsets array (of \p del_grpoffsets_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(
+ rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
+ size_t del_grpoffset_cnt);
+
+/**
+ * @brief Delete committed offsets for a set of partitions in a consumer
+ * group. This will succeed at the partition level only if the group
+ * is not actively subscribed to the corresponding topic.
+ *
+ * @param rk Client instance.
+ * @param del_grpoffsets Array of group committed offsets to delete.
+ * MUST only be one single element.
+ * @param del_grpoffsets_cnt Number of elements in \p del_grpoffsets array.
+ * MUST always be 1.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT
+ *
+ * @remark The current implementation only supports one group per invocation.
+ */
+RD_EXPORT
+void rd_kafka_DeleteConsumerGroupOffsets(
+ rd_kafka_t *rk,
+ rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
+ size_t del_grpoffsets_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+
+
+/*
+ * DeleteConsumerGroupOffsets result type and methods
+ */
+
+/**
+ * @brief Get an array of results from a DeleteConsumerGroupOffsets result.
+ *
+ * The returned groups life-time is the same as the \p result object.
+ *
+ * @param result Result to get group results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_group_result_t **
+rd_kafka_DeleteConsumerGroupOffsets_result_groups(
+ const rd_kafka_DeleteConsumerGroupOffsets_result_t *result,
+ size_t *cntp);
+
+/**@}*/
+
+/**
+ * @name Admin API - ACL operations
+ * @{
+ */
+
+/**
+ * @brief ACL Binding is used to create access control lists.
+ *
+ *
+ */
+typedef struct rd_kafka_AclBinding_s rd_kafka_AclBinding_t;
+
+/**
+ * @brief ACL Binding filter is used to filter access control lists.
+ *
+ */
+typedef rd_kafka_AclBinding_t rd_kafka_AclBindingFilter_t;
+
+/**
+ * @returns the error object for the given acl result, or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *
+rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres);
+
+
+/**
+ * @enum rd_kafka_AclOperation_t
+ * @brief Apache Kafka ACL operation types.
+ */
+typedef enum rd_kafka_AclOperation_t {
+ RD_KAFKA_ACL_OPERATION_UNKNOWN = 0, /**< Unknown */
+ RD_KAFKA_ACL_OPERATION_ANY =
+ 1, /**< In a filter, matches any AclOperation */
+ RD_KAFKA_ACL_OPERATION_ALL = 2, /**< ALL operation */
+ RD_KAFKA_ACL_OPERATION_READ = 3, /**< READ operation */
+ RD_KAFKA_ACL_OPERATION_WRITE = 4, /**< WRITE operation */
+ RD_KAFKA_ACL_OPERATION_CREATE = 5, /**< CREATE operation */
+ RD_KAFKA_ACL_OPERATION_DELETE = 6, /**< DELETE operation */
+ RD_KAFKA_ACL_OPERATION_ALTER = 7, /**< ALTER operation */
+ RD_KAFKA_ACL_OPERATION_DESCRIBE = 8, /**< DESCRIBE operation */
+ RD_KAFKA_ACL_OPERATION_CLUSTER_ACTION =
+ 9, /**< CLUSTER_ACTION operation */
+ RD_KAFKA_ACL_OPERATION_DESCRIBE_CONFIGS =
+ 10, /**< DESCRIBE_CONFIGS operation */
+ RD_KAFKA_ACL_OPERATION_ALTER_CONFIGS =
+ 11, /**< ALTER_CONFIGS operation */
+ RD_KAFKA_ACL_OPERATION_IDEMPOTENT_WRITE =
+ 12, /**< IDEMPOTENT_WRITE operation */
+ RD_KAFKA_ACL_OPERATION__CNT
+} rd_kafka_AclOperation_t;
+
+/**
+ * @returns a string representation of the \p acl_operation
+ */
+RD_EXPORT const char *
+rd_kafka_AclOperation_name(rd_kafka_AclOperation_t acl_operation);
+
+/**
+ * @enum rd_kafka_AclPermissionType_t
+ * @brief Apache Kafka ACL permission types.
+ */
+typedef enum rd_kafka_AclPermissionType_t {
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN = 0, /**< Unknown */
+ RD_KAFKA_ACL_PERMISSION_TYPE_ANY =
+ 1, /**< In a filter, matches any AclPermissionType */
+ RD_KAFKA_ACL_PERMISSION_TYPE_DENY = 2, /**< Disallows access */
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW = 3, /**< Grants access. */
+ RD_KAFKA_ACL_PERMISSION_TYPE__CNT
+} rd_kafka_AclPermissionType_t;
+
+/**
+ * @returns a string representation of the \p acl_permission_type
+ */
+RD_EXPORT const char *rd_kafka_AclPermissionType_name(
+ rd_kafka_AclPermissionType_t acl_permission_type);
+
+/**
+ * @brief Create a new AclBinding object. This object is later passed to
+ * rd_kafka_CreateAcls().
+ *
+ * @param restype The ResourceType.
+ * @param name The resource name.
+ * @param resource_pattern_type The pattern type.
+ * @param principal A principal, following the kafka specification.
+ * @param host An hostname or ip.
+ * @param operation A Kafka operation.
+ * @param permission_type A Kafka permission type.
+ * @param errstr An error string for returning errors or NULL to not use it.
+ * @param errstr_size The \p errstr size or 0 to not use it.
+ *
+ * @returns a new allocated AclBinding object, or NULL if the input parameters
+ * are invalid.
+ * Use rd_kafka_AclBinding_destroy() to free object when done.
+ */
+RD_EXPORT rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size);
+
+/**
+ * @brief Create a new AclBindingFilter object. This object is later passed to
+ * rd_kafka_DescribeAcls() or
+ * rd_kafka_DeletesAcls() in order to filter
+ * the acls to retrieve or to delete.
+ * Use the same rd_kafka_AclBinding functions to query or destroy it.
+ *
+ * @param restype The ResourceType or \c RD_KAFKA_RESOURCE_ANY if
+ * not filtering by this field.
+ * @param name The resource name or NULL if not filtering by this field.
+ * @param resource_pattern_type The pattern type or \c
+ * RD_KAFKA_RESOURCE_PATTERN_ANY if not filtering by this field.
+ * @param principal A principal or NULL if not filtering by this field.
+ * @param host An hostname or ip or NULL if not filtering by this field.
+ * @param operation A Kafka operation or \c RD_KAFKA_ACL_OPERATION_ANY if not
+ * filtering by this field.
+ * @param permission_type A Kafka permission type or \c
+ * RD_KAFKA_ACL_PERMISSION_TYPE_ANY if not filtering by this field.
+ * @param errstr An error string for returning errors or NULL to not use it.
+ * @param errstr_size The \p errstr size or 0 to not use it.
+ *
+ * @returns a new allocated AclBindingFilter object, or NULL if the input
+ * parameters are invalid. Use rd_kafka_AclBinding_destroy() to free object when
+ * done.
+ */
+RD_EXPORT rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
+ rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size);
+
+/**
+ * @returns the resource type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_ResourceType_t
+rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the resource name for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the principal for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the host for the given acl binding.
+ *
+ * @remark lifetime of the returned string is the same as the \p acl.
+ */
+RD_EXPORT const char *
+rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the acl operation for the given acl binding.
+ */
+RD_EXPORT rd_kafka_AclOperation_t
+rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the permission type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_AclPermissionType_t
+rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the resource pattern type for the given acl binding.
+ */
+RD_EXPORT rd_kafka_ResourcePatternType_t
+rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl);
+
+/**
+ * @returns the error object for the given acl binding, or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *
+rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl);
+
+
+/**
+ * @brief Destroy and free an AclBinding object previously created with
+ * rd_kafka_AclBinding_new()
+ */
+RD_EXPORT void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding);
+
+
+/**
+ * @brief Helper function to destroy all AclBinding objects in
+ * the \p acl_bindings array (of \p acl_bindings_cnt elements).
+ * The array itself is not freed.
+ */
+RD_EXPORT void
+rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
+ size_t acl_bindings_cnt);
+
+/**
+ * @brief Get an array of acl results from a CreateAcls result.
+ *
+ * The returned \p acl result life-time is the same as the \p result object.
+ * @param result CreateAcls result to get acl results from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_acl_result_t **
+rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @brief Create acls as specified by the \p new_acls
+ * array of size \p new_topic_cnt elements.
+ *
+ * @param rk Client instance.
+ * @param new_acls Array of new acls to create.
+ * @param new_acls_cnt Number of elements in \p new_acls array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_request_timeout() - default socket.timeout.ms
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_CREATEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_CreateAcls(rd_kafka_t *rk,
+ rd_kafka_AclBinding_t **new_acls,
+ size_t new_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**
+ * DescribeAcls - describe access control lists.
+ *
+ *
+ */
+
+/**
+ * @brief Get an array of resource results from a DescribeAcls result.
+ *
+ * The returned \p resources life-time is the same as the \p result object.
+ * @param result DescribeAcls result to get acls from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @brief Describe acls matching the filter provided in \p acl_filter
+ *
+ * @param rk Client instance.
+ * @param acl_filter Filter for the returned acls.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DESCRIBEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DescribeAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t *acl_filter,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**
+ * DeleteAcls - delete access control lists.
+ *
+ *
+ */
+
+typedef struct rd_kafka_DeleteAcls_result_response_s
+ rd_kafka_DeleteAcls_result_response_t;
+
+/**
+ * @brief Get an array of DeleteAcls result responses from a DeleteAcls result.
+ *
+ * The returned \p responses life-time is the same as the \p result object.
+ * @param result DeleteAcls result to get responses from.
+ * @param cntp is updated to the number of elements in the array.
+ */
+RD_EXPORT const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
+ size_t *cntp);
+
+/**
+ * @returns the error object for the given DeleteAcls result response,
+ * or NULL on success.
+ */
+RD_EXPORT const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
+ const rd_kafka_DeleteAcls_result_response_t *result_response);
+
+
+/**
+ * @returns the matching acls array for the given DeleteAcls result response.
+ *
+ * @remark lifetime of the returned acl bindings is the same as the \p
+ * result_response.
+ */
+RD_EXPORT const rd_kafka_AclBinding_t **
+rd_kafka_DeleteAcls_result_response_matching_acls(
+ const rd_kafka_DeleteAcls_result_response_t *result_response,
+ size_t *matching_acls_cntp);
+
+/**
+ * @brief Delete acls matching the filteres provided in \p del_acls
+ * array of size \p del_acls_cnt.
+ *
+ * @param rk Client instance.
+ * @param del_acls Filters for the acls to delete.
+ * @param del_acls_cnt Number of elements in \p del_acls array.
+ * @param options Optional admin options, or NULL for defaults.
+ * @param rkqu Queue to emit result on.
+ *
+ * Supported admin options:
+ * - rd_kafka_AdminOptions_set_operation_timeout() - default 0
+ *
+ * @remark The result event type emitted on the supplied queue is of type
+ * \c RD_KAFKA_EVENT_DELETEACLS_RESULT
+ */
+RD_EXPORT void rd_kafka_DeleteAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t **del_acls,
+ size_t del_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**@}*/
+
+/**
+ * @name Security APIs
+ * @{
+ *
+ */
+
+/**
+ * @brief Set SASL/OAUTHBEARER token and metadata
+ *
+ * @param rk Client instance.
+ * @param token_value the mandatory token value to set, often (but not
+ * necessarily) a JWS compact serialization as per
+ * https://tools.ietf.org/html/rfc7515#section-3.1.
+ * @param md_lifetime_ms when the token expires, in terms of the number of
+ * milliseconds since the epoch.
+ * @param md_principal_name the mandatory Kafka principal name associated
+ * with the token.
+ * @param extensions optional SASL extensions key-value array with
+ * \p extensions_size elements (number of keys * 2), where [i] is the key and
+ * [i+1] is the key's value, to be communicated to the broker
+ * as additional key-value pairs during the initial client response as per
+ * https://tools.ietf.org/html/rfc7628#section-3.1. The key-value pairs are
+ * copied.
+ * @param extension_size the number of SASL extension keys plus values,
+ * which must be a non-negative multiple of 2.
+ * @param errstr A human readable error string (nul-terminated) is written to
+ * this location that must be of at least \p errstr_size bytes.
+ * The \p errstr is only written in case of error.
+ * @param errstr_size Writable size in \p errstr.
+ *
+ * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
+ * this method upon success. The extension keys must not include the reserved
+ * key "`auth`", and all extension keys and values must conform to the required
+ * format as per https://tools.ietf.org/html/rfc7628#section-3.1:
+ *
+ * key = 1*(ALPHA)
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise \p errstr set
+ * and:<br>
+ * \c RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are
+ * invalid;<br>
+ * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
+ * supported by this build;<br>
+ * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is
+ * not configured as the client's authentication mechanism.<br>
+ *
+ * @sa rd_kafka_oauthbearer_set_token_failure
+ * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb
+ */
+RD_EXPORT
+rd_kafka_resp_err_t
+rd_kafka_oauthbearer_set_token(rd_kafka_t *rk,
+ const char *token_value,
+ int64_t md_lifetime_ms,
+ const char *md_principal_name,
+ const char **extensions,
+ size_t extension_size,
+ char *errstr,
+ size_t errstr_size);
+
+/**
+ * @brief SASL/OAUTHBEARER token refresh failure indicator.
+ *
+ * @param rk Client instance.
+ * @param errstr mandatory human readable error reason for failing to acquire
+ * a token.
+ *
+ * The SASL/OAUTHBEARER token refresh callback or event handler should invoke
+ * this method upon failure.
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise:<br>
+ * \c RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED if SASL/OAUTHBEARER is not
+ * supported by this build;<br>
+ * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is supported but is
+ * not configured as the client's authentication mechanism,<br>
+ * \c RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied.
+ *
+ * @sa rd_kafka_oauthbearer_set_token
+ * @sa rd_kafka_conf_set_oauthbearer_token_refresh_cb
+ */
+RD_EXPORT
+rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure(rd_kafka_t *rk,
+ const char *errstr);
+
+/**@}*/
+
+
+/**
+ * @name Transactional producer API
+ *
+ * The transactional producer operates on top of the idempotent producer,
+ * and provides full exactly-once semantics (EOS) for Apache Kafka when used
+ * with the transaction aware consumer (\c isolation.level=read_committed).
+ *
+ * A producer instance is configured for transactions by setting the
+ * \c transactional.id to an identifier unique for the application. This
+ * id will be used to fence stale transactions from previous instances of
+ * the application, typically following an outage or crash.
+ *
+ * After creating the transactional producer instance using rd_kafka_new()
+ * the transactional state must be initialized by calling
+ * rd_kafka_init_transactions(). This is a blocking call that will
+ * acquire a runtime producer id from the transaction coordinator broker
+ * as well as abort any stale transactions and fence any still running producer
+ * instances with the same \c transactional.id.
+ *
+ * Once transactions are initialized the application may begin a new
+ * transaction by calling rd_kafka_begin_transaction().
+ * A producer instance may only have one single on-going transaction.
+ *
+ * Any messages produced after the transaction has been started will
+ * belong to the ongoing transaction and will be committed or aborted
+ * atomically.
+ * It is not permitted to produce messages outside a transaction
+ * boundary, e.g., before rd_kafka_begin_transaction() or after
+ * rd_kafka_commit_transaction(), rd_kafka_abort_transaction(), or after
+ * the current transaction has failed.
+ *
+ * If consumed messages are used as input to the transaction, the consumer
+ * instance must be configured with \c enable.auto.commit set to \c false.
+ * To commit the consumed offsets along with the transaction pass the
+ * list of consumed partitions and the last offset processed + 1 to
+ * rd_kafka_send_offsets_to_transaction() prior to committing the transaction.
+ * This allows an aborted transaction to be restarted using the previously
+ * committed offsets.
+ *
+ * To commit the produced messages, and any consumed offsets, to the
+ * current transaction, call rd_kafka_commit_transaction().
+ * This call will block until the transaction has been fully committed or
+ * failed (typically due to fencing by a newer producer instance).
+ *
+ * Alternatively, if processing fails, or an abortable transaction error is
+ * raised, the transaction needs to be aborted by calling
+ * rd_kafka_abort_transaction() which marks any produced messages and
+ * offset commits as aborted.
+ *
+ * After the current transaction has been committed or aborted a new
+ * transaction may be started by calling rd_kafka_begin_transaction() again.
+ *
+ * @par Retriable errors
+ * Some error cases allow the attempted operation to be retried, this is
+ * indicated by the error object having the retriable flag set which can
+ * be detected by calling rd_kafka_error_is_retriable().
+ * When this flag is set the application may retry the operation immediately
+ * or preferably after a shorter grace period (to avoid busy-looping).
+ * Retriable errors include timeouts, broker transport failures, etc.
+ *
+ * @par Abortable errors
+ * An ongoing transaction may fail permanently due to various errors,
+ * such as transaction coordinator becoming unavailable, write failures to the
+ * Apache Kafka log, under-replicated partitions, etc.
+ * At this point the producer application must abort the current transaction
+ * using rd_kafka_abort_transaction() and optionally start a new transaction
+ * by calling rd_kafka_begin_transaction().
+ * Whether an error is abortable or not is detected by calling
+ * rd_kafka_error_txn_requires_abort() on the returned error object.
+ *
+ * @par Fatal errors
+ * While the underlying idempotent producer will typically only raise
+ * fatal errors for unrecoverable cluster errors where the idempotency
+ * guarantees can't be maintained, most of these are treated as abortable by
+ * the transactional producer since transactions may be aborted and retried
+ * in their entirety;
+ * The transactional producer on the other hand introduces a set of additional
+ * fatal errors which the application needs to handle by shutting down the
+ * producer and terminate. There is no way for a producer instance to recover
+ * from fatal errors.
+ * Whether an error is fatal or not is detected by calling
+ * rd_kafka_error_is_fatal() on the returned error object or by checking
+ * the global rd_kafka_fatal_error() code.
+ * Fatal errors are raised by triggering the \c error_cb (see the
+ * Fatal error chapter in INTRODUCTION.md for more information), and any
+ * subsequent transactional API calls will return RD_KAFKA_RESP_ERR__FATAL
+ * or have the fatal flag set (see rd_kafka_error_is_fatal()).
+ * The originating fatal error code can be retrieved by calling
+ * rd_kafka_fatal_error().
+ *
+ * @par Handling of other errors
+ * For errors that have neither retriable, abortable or the fatal flag set
+ * it is not always obvious how to handle them. While some of these errors
+ * may be indicative of bugs in the application code, such as when
+ * an invalid parameter is passed to a method, other errors might originate
+ * from the broker and be passed thru as-is to the application.
+ * The general recommendation is to treat these errors, that have
+ * neither the retriable or abortable flags set, as fatal.
+ *
+ * @par Error handling example
+ * @code
+ * retry:
+ * rd_kafka_error_t *error;
+ *
+ * error = rd_kafka_commit_transaction(producer, 10*1000);
+ * if (!error)
+ * return success;
+ * else if (rd_kafka_error_txn_requires_abort(error)) {
+ * do_abort_transaction_and_reset_inputs();
+ * } else if (rd_kafka_error_is_retriable(error)) {
+ * rd_kafka_error_destroy(error);
+ * goto retry;
+ * } else { // treat all other errors as fatal errors
+ * fatal_error(rd_kafka_error_string(error));
+ * }
+ * rd_kafka_error_destroy(error);
+ * @endcode
+ *
+ *
+ * @{
+ */
+
+
+/**
+ * @brief Initialize transactions for the producer instance.
+ *
+ * This function ensures any transactions initiated by previous instances
+ * of the producer with the same \c transactional.id are completed.
+ * If the previous instance failed with a transaction in progress the
+ * previous transaction will be aborted.
+ * This function needs to be called before any other transactional or
+ * produce functions are called when the \c transactional.id is configured.
+ *
+ * If the last transaction had begun completion (following transaction commit)
+ * but not yet finished, this function will await the previous transaction's
+ * completion.
+ *
+ * When any previous transactions have been fenced this function
+ * will acquire the internal producer id and epoch, used in all future
+ * transactional messages issued by this producer instance.
+ *
+ * @param rk Producer instance.
+ * @param timeout_ms The maximum time to block. On timeout the operation
+ * may continue in the background, depending on state,
+ * and it is okay to call init_transactions() again.
+ * If an infinite timeout (-1) is passed, the timeout will
+ * be adjusted to 2 * \c transaction.timeout.ms.
+ *
+ * @remark This function may block up to \p timeout_ms milliseconds.
+ *
+ * @remark This call is resumable when a retriable timeout error is returned.
+ * Calling the function again will resume the operation that is
+ * progressing in the background.
+ *
+ * @returns NULL on success or an error object on failure.
+ * Check whether the returned error object permits retrying
+ * by calling rd_kafka_error_is_retriable(), or whether a fatal
+ * error has been raised by calling rd_kafka_error_is_fatal().
+ * Error codes:
+ * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction coordinator
+ * could be not be contacted within \p timeout_ms (retriable),
+ * RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE if the transaction
+ * coordinator is not available (retriable),
+ * RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS if a previous transaction
+ * would not complete within \p timeout_ms (retriable),
+ * RD_KAFKA_RESP_ERR__STATE if transactions have already been started
+ * or upon fatal error,
+ * RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE if the broker(s) do not
+ * support transactions (<Apache Kafka 0.11), this also raises a
+ * fatal error,
+ * RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT if the configured
+ * \c transaction.timeout.ms is outside the broker-configured range,
+ * this also raises a fatal error,
+ * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
+ * configured for the producer instance,
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
+ * or \p timeout_ms is out of range.
+ * Other error codes not listed here may be returned, depending on
+ * broker version.
+ *
+ * @remark The returned error object (if not NULL) must be destroyed with
+ * rd_kafka_error_destroy().
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms);
+
+
+
+/**
+ * @brief Begin a new transaction.
+ *
+ * rd_kafka_init_transactions() must have been called successfully (once)
+ * before this function is called.
+ *
+ * Upon successful return from this function the application has to perform at
+ * least one of the following operations within \c transaction.timeout.ms to
+ * avoid timing out the transaction on the broker:
+ * * rd_kafka_produce() (et.al)
+ * * rd_kafka_send_offsets_to_transaction()
+ * * rd_kafka_commit_transaction()
+ * * rd_kafka_abort_transaction()
+ *
+ * Any messages produced, offsets sent (rd_kafka_send_offsets_to_transaction()),
+ * etc, after the successful return of this function will be part of
+ * the transaction and committed or aborted atomatically.
+ *
+ * Finish the transaction by calling rd_kafka_commit_transaction() or
+ * abort the transaction by calling rd_kafka_abort_transaction().
+ *
+ * @param rk Producer instance.
+ *
+ * @returns NULL on success or an error object on failure.
+ * Check whether a fatal error has been raised by
+ * calling rd_kafka_error_is_fatal().
+ * Error codes:
+ * RD_KAFKA_RESP_ERR__STATE if a transaction is already in progress
+ * or upon fatal error,
+ * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
+ * configured for the producer instance,
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance.
+ * Other error codes not listed here may be returned, depending on
+ * broker version.
+ *
+ * @remark With the transactional producer, rd_kafka_produce(),
+ * rd_kafka_producev(), et.al, are only allowed during an on-going
+ * transaction, as started with this function.
+ * Any produce call outside an on-going transaction, or for a failed
+ * transaction, will fail.
+ *
+ * @remark The returned error object (if not NULL) must be destroyed with
+ * rd_kafka_error_destroy().
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk);
+
+
+/**
+ * @brief Sends a list of topic partition offsets to the consumer group
+ * coordinator for \p cgmetadata, and marks the offsets as part
+ * part of the current transaction.
+ * These offsets will be considered committed only if the transaction is
+ * committed successfully.
+ *
+ * The offsets should be the next message your application will consume,
+ * i.e., the last processed message's offset + 1 for each partition.
+ * Either track the offsets manually during processing or use
+ * rd_kafka_position() (on the consumer) to get the current offsets for
+ * the partitions assigned to the consumer.
+ *
+ * Use this method at the end of a consume-transform-produce loop prior
+ * to committing the transaction with rd_kafka_commit_transaction().
+ *
+ * @param rk Producer instance.
+ * @param offsets List of offsets to commit to the consumer group upon
+ * successful commit of the transaction. Offsets should be
+ * the next message to consume, e.g., last processed message + 1.
+ * @param cgmetadata The current consumer group metadata as returned by
+ * rd_kafka_consumer_group_metadata() on the consumer
+ * instance the provided offsets were consumed from.
+ * @param timeout_ms Maximum time allowed to register the offsets on the broker.
+ *
+ * @remark This function must be called on the transactional producer instance,
+ * not the consumer.
+ *
+ * @remark The consumer must disable auto commits
+ * (set \c enable.auto.commit to false on the consumer).
+ *
+ * @remark Logical and invalid offsets (such as RD_KAFKA_OFFSET_INVALID) in
+ * \p offsets will be ignored, if there are no valid offsets in
+ * \p offsets the function will return NULL and no action will be taken.
+ *
+ * @remark This call is retriable but not resumable, which means a new request
+ * with a new set of provided offsets and group metadata will be
+ * sent to the transaction coordinator if the call is retried.
+ *
+ * @remark It is highly recommended to retry the call (upon retriable error)
+ * with identical \p offsets and \p cgmetadata parameters.
+ * Failure to do so risks inconsistent state between what is actually
+ * included in the transaction and what the application thinks is
+ * included in the transaction.
+ *
+ * @returns NULL on success or an error object on failure.
+ * Check whether the returned error object permits retrying
+ * by calling rd_kafka_error_is_retriable(), or whether an abortable
+ * or fatal error has been raised by calling
+ * rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal()
+ * respectively.
+ * Error codes:
+ * RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
+ * RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
+ * transaction has been fenced by a newer producer instance,
+ * RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
+ * producer is no longer authorized to perform transactional
+ * operations,
+ * RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED if the producer is
+ * not authorized to write the consumer offsets to the group
+ * coordinator,
+ * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
+ * configured for the producer instance,
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
+ * or if the \p consumer_group_id or \p offsets are empty.
+ * Other error codes not listed here may be returned, depending on
+ * broker version.
+ *
+ * @remark The returned error object (if not NULL) must be destroyed with
+ * rd_kafka_error_destroy().
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ const rd_kafka_consumer_group_metadata_t *cgmetadata,
+ int timeout_ms);
+
+
+/**
+ * @brief Commit the current transaction (as started with
+ * rd_kafka_begin_transaction()).
+ *
+ * Any outstanding messages will be flushed (delivered) before actually
+ * committing the transaction.
+ *
+ * If any of the outstanding messages fail permanently the current
+ * transaction will enter the abortable error state and this
+ * function will return an abortable error, in this case the application
+ * must call rd_kafka_abort_transaction() before attempting a new
+ * transaction with rd_kafka_begin_transaction().
+ *
+ * @param rk Producer instance.
+ * @param timeout_ms The maximum time to block. On timeout the operation
+ * may continue in the background, depending on state,
+ * and it is okay to call this function again.
+ * Pass -1 to use the remaining transaction timeout,
+ * this is the recommended use.
+ *
+ * @remark It is strongly recommended to always pass -1 (remaining transaction
+ * time) as the \p timeout_ms. Using other values risk internal
+ * state desynchronization in case any of the underlying protocol
+ * requests fail.
+ *
+ * @remark This function will block until all outstanding messages are
+ * delivered and the transaction commit request has been successfully
+ * handled by the transaction coordinator, or until \p timeout_ms
+ * expires, which ever comes first. On timeout the application may
+ * call the function again.
+ *
+ * @remark Will automatically call rd_kafka_flush() to ensure all queued
+ * messages are delivered before attempting to commit the
+ * transaction.
+ * If the application has enabled RD_KAFKA_EVENT_DR it must
+ * serve the event queue in a separate thread since rd_kafka_flush()
+ * will not serve delivery reports in this mode.
+ *
+ * @remark This call is resumable when a retriable timeout error is returned.
+ * Calling the function again will resume the operation that is
+ * progressing in the background.
+ *
+ * @returns NULL on success or an error object on failure.
+ * Check whether the returned error object permits retrying
+ * by calling rd_kafka_error_is_retriable(), or whether an abortable
+ * or fatal error has been raised by calling
+ * rd_kafka_error_txn_requires_abort() or rd_kafka_error_is_fatal()
+ * respectively.
+ * Error codes:
+ * RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
+ * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be
+ * complete commmitted within \p timeout_ms, this is a retriable
+ * error as the commit continues in the background,
+ * RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
+ * transaction has been fenced by a newer producer instance,
+ * RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
+ * producer is no longer authorized to perform transactional
+ * operations,
+ * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
+ * configured for the producer instance,
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
+ * Other error codes not listed here may be returned, depending on
+ * broker version.
+ *
+ * @remark The returned error object (if not NULL) must be destroyed with
+ * rd_kafka_error_destroy().
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms);
+
+
+/**
+ * @brief Aborts the ongoing transaction.
+ *
+ * This function should also be used to recover from non-fatal abortable
+ * transaction errors.
+ *
+ * Any outstanding messages will be purged and fail with
+ * RD_KAFKA_RESP_ERR__PURGE_INFLIGHT or RD_KAFKA_RESP_ERR__PURGE_QUEUE.
+ * See rd_kafka_purge() for details.
+ *
+ * @param rk Producer instance.
+ * @param timeout_ms The maximum time to block. On timeout the operation
+ * may continue in the background, depending on state,
+ * and it is okay to call this function again.
+ * Pass -1 to use the remaining transaction timeout,
+ * this is the recommended use.
+ *
+ * @remark It is strongly recommended to always pass -1 (remaining transaction
+ * time) as the \p timeout_ms. Using other values risk internal
+ * state desynchronization in case any of the underlying protocol
+ * requests fail.
+ *
+ * @remark This function will block until all outstanding messages are purged
+ * and the transaction abort request has been successfully
+ * handled by the transaction coordinator, or until \p timeout_ms
+ * expires, which ever comes first. On timeout the application may
+ * call the function again.
+ * If the application has enabled RD_KAFKA_EVENT_DR it must
+ * serve the event queue in a separate thread since rd_kafka_flush()
+ * will not serve delivery reports in this mode.
+ *
+ * @remark This call is resumable when a retriable timeout error is returned.
+ * Calling the function again will resume the operation that is
+ * progressing in the background.
+ *
+ * @returns NULL on success or an error object on failure.
+ * Check whether the returned error object permits retrying
+ * by calling rd_kafka_error_is_retriable(), or whether a fatal error
+ * has been raised by calling rd_kafka_error_is_fatal().
+ * Error codes:
+ * RD_KAFKA_RESP_ERR__STATE if not currently in a transaction,
+ * RD_KAFKA_RESP_ERR__TIMED_OUT if the transaction could not be
+ * complete commmitted within \p timeout_ms, this is a retriable
+ * error as the commit continues in the background,
+ * RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH if the current producer
+ * transaction has been fenced by a newer producer instance,
+ * RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED if the
+ * producer is no longer authorized to perform transactional
+ * operations,
+ * RD_KAFKA_RESP_ERR__NOT_CONFIGURED if transactions have not been
+ * configured for the producer instance,
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if \p rk is not a producer instance,
+ * Other error codes not listed here may be returned, depending on
+ * broker version.
+ *
+ * @remark The returned error object (if not NULL) must be destroyed with
+ * rd_kafka_error_destroy().
+ */
+RD_EXPORT
+rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms);
+
+
+/**@}*/
+
+/* @cond NO_DOC */
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RDKAFKA_H_ */
+/* @endcond NO_DOC */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c
new file mode 100644
index 000000000..6aaec636d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.c
@@ -0,0 +1,6668 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_admin.h"
+#include "rdkafka_request.h"
+#include "rdkafka_aux.h"
+
+#include <stdarg.h>
+
+
+
+/** @brief Descriptive strings for rko_u.admin_request.state */
+static const char *rd_kafka_admin_state_desc[] = {
+ "initializing",
+ "waiting for broker",
+ "waiting for controller",
+ "waiting for fanouts",
+ "constructing request",
+ "waiting for response from broker",
+ "waiting for a valid list of brokers to be available"};
+
+
+
+/**
+ * @brief Admin API implementation.
+ *
+ * The public Admin API in librdkafka exposes a completely asynchronous
+ * interface where the initial request API (e.g., ..CreateTopics())
+ * is non-blocking and returns immediately, and the application polls
+ * a ..queue_t for the result.
+ *
+ * The underlying handling of the request is also completely asynchronous
+ * inside librdkafka, for two reasons:
+ * - everything is async in librdkafka so adding something new that isn't
+ * would mean that existing functionality will need to be changed if
+ * it should be able to work simultaneously (such as statistics, timers,
+ * etc). There is no functional value to making the admin API
+ * synchronous internally, even if it would simplify its implementation.
+ * So making it async allows the Admin API to be used with existing
+ * client types in existing applications without breakage.
+ * - the async approach allows multiple outstanding Admin API requests
+ * simultaneously.
+ *
+ * The internal async implementation relies on the following concepts:
+ * - it uses a single rko (rd_kafka_op_t) to maintain state.
+ * - the rko has a callback attached - called the worker callback.
+ * - the worker callback is a small state machine that triggers
+ * async operations (be it controller lookups, timeout timers,
+ * protocol transmits, etc).
+ * - the worker callback is only called on the rdkafka main thread.
+ * - the callback is triggered by different events and sources by enqueuing
+ * the rko on the rdkafka main ops queue.
+ *
+ *
+ * Let's illustrate this with a DeleteTopics example. This might look
+ * daunting, but it boils down to an asynchronous state machine being
+ * triggered by enqueuing the rko op.
+ *
+ * 1. [app thread] The user constructs the input arguments,
+ * including a response rkqu queue and then calls DeleteTopics().
+ *
+ * 2. [app thread] DeleteTopics() creates a new internal op (rko) of type
+ * RD_KAFKA_OP_DELETETOPICS, makes a **copy** on the rko of all the
+ * input arguments (which allows the caller to free the originals
+ * whenever she likes). The rko op worker callback is set to the
+ * generic admin worker callback rd_kafka_admin_worker()
+ *
+ * 3. [app thread] DeleteTopics() enqueues the rko on librdkafka's main ops
+ * queue that is served by the rdkafka main thread in rd_kafka_thread_main()
+ *
+ * 4. [rdkafka main thread] The rko is dequeued by rd_kafka_q_serve and
+ * the rd_kafka_poll_cb() is called.
+ *
+ * 5. [rdkafka main thread] The rko_type switch case identifies the rko
+ * as an RD_KAFKA_OP_DELETETOPICS which is served by the op callback
+ * set in step 2.
+ *
+ * 6. [rdkafka main thread] The worker callback is called.
+ * After some initial checking of err==ERR__DESTROY events
+ * (which is used to clean up outstanding ops (etc) on termination),
+ * the code hits a state machine using rko_u.admin_request.state.
+ *
+ * 7. [rdkafka main thread] The initial state is RD_KAFKA_ADMIN_STATE_INIT
+ * where the worker validates the user input.
+ * An enqueue once (eonce) object is created - the use of this object
+ * allows having multiple outstanding async functions referencing the
+ * same underlying rko object, but only allowing the first one
+ * to trigger an event.
+ * A timeout timer is set up to trigger the eonce object when the
+ * full options.request_timeout has elapsed.
+ *
+ * 8. [rdkafka main thread] After initialization the state is updated
+ * to WAIT_BROKER or WAIT_CONTROLLER and the code falls through to
+ * looking up a specific broker or the controller broker and waiting for
+ * an active connection.
+ * Both the lookup and the waiting for an active connection are
+ * fully asynchronous, and the same eonce used for the timer is passed
+ * to the rd_kafka_broker_controller_async() or broker_async() functions
+ * which will trigger the eonce when a broker state change occurs.
+ * If the controller is already known (from metadata) and the connection
+ * is up a rkb broker object is returned and the eonce is not used,
+ * skip to step 11.
+ *
+ * 9. [rdkafka main thread] Upon metadata retrieval (which is triggered
+ * automatically by other parts of the code) the controller_id may be
+ * updated in which case the eonce is triggered.
+ * The eonce triggering enqueues the original rko on the rdkafka main
+ * ops queue again and we go to step 8 which will check if the controller
+ * connection is up.
+ *
+ * 10. [broker thread] If the controller_id is now known we wait for
+ * the corresponding broker's connection to come up. This signaling
+ * is performed from the broker thread upon broker state changes
+ * and uses the same eonce. The eonce triggering enqueues the original
+ * rko on the rdkafka main ops queue again we go to back to step 8
+ * to check if broker is now available.
+ *
+ * 11. [rdkafka main thread] Back in the worker callback we now have an
+ * rkb broker pointer (with reference count increased) for the controller
+ * with the connection up (it might go down while we're referencing it,
+ * but that does not stop us from enqueuing a protocol request).
+ *
+ * 12. [rdkafka main thread] A DeleteTopics protocol request buffer is
+ * constructed using the input parameters saved on the rko and the
+ * buffer is enqueued on the broker's transmit queue.
+ * The buffer is set up to provide the reply buffer on the rdkafka main
+ * ops queue (the same queue we are operating from) with a handler
+ * callback of rd_kafka_admin_handle_response().
+ * The state is updated to the RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE.
+ *
+ * 13. [broker thread] If the request times out, a response with error code
+ * (ERR__TIMED_OUT) is enqueued. Go to 16.
+ *
+ * 14. [broker thread] If a response is received, the response buffer
+ * is enqueued. Go to 16.
+ *
+ * 15. [rdkafka main thread] The buffer callback (..handle_response())
+ * is called, which attempts to extract the original rko from the eonce,
+ * but if the eonce has already been triggered by some other source
+ * (the timeout timer) the buffer callback simply returns and does nothing
+ * since the admin request is over and a result (probably a timeout)
+ * has been enqueued for the application.
+ * If the rko was still intact we temporarily set the reply buffer
+ * in the rko struct and call the worker callback. Go to 17.
+ *
+ * 16. [rdkafka main thread] The worker callback is called in state
+ * RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE without a response but with an error.
+ * An error result op is created and enqueued on the application's
+ * provided response rkqu queue.
+ *
+ * 17. [rdkafka main thread] The worker callback is called in state
+ * RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE with a response buffer with no
+ * error set.
+ * The worker calls the response `parse()` callback to parse the response
+ * buffer and populates a result op (rko_result) with the response
+ * information (such as per-topic error codes, etc).
+ * The result op is returned to the worker.
+ *
+ * 18. [rdkafka main thread] The worker enqueues the result op (rko_result)
+ * on the application's provided response rkqu queue.
+ *
+ * 19. [app thread] The application calls rd_kafka_queue_poll() to
+ * receive the result of the operation. The result may have been
+ * enqueued in step 18 thanks to succesful completion, or in any
+ * of the earlier stages when an error was encountered.
+ *
+ * 20. [app thread] The application uses rd_kafka_event_DeleteTopics_result()
+ * to retrieve the request-specific result type.
+ *
+ * 21. Done.
+ *
+ *
+ *
+ *
+ * Fanout (RD_KAFKA_OP_ADMIN_FANOUT) requests
+ * ------------------------------------------
+ *
+ * Certain Admin APIs may have requests that need to be sent to different
+ * brokers, for instance DeleteRecords which needs to be sent to the leader
+ * for each given partition.
+ *
+ * To achieve this we create a Fanout (RD_KAFKA_OP_ADMIN_FANOUT) op for the
+ * overall Admin API call (e.g., DeleteRecords), and then sub-ops for each
+ * of the per-broker requests. These sub-ops have the proper op type for
+ * the operation they are performing (e.g., RD_KAFKA_OP_DELETERECORDS)
+ * but their replyq does not point back to the application replyq but
+ * rk_ops which is handled by the librdkafka main thread and with the op
+ * callback set to rd_kafka_admin_fanout_worker(). This worker aggregates
+ * the results of each fanned out sub-op and merges the result into a
+ * single result op (RD_KAFKA_OP_ADMIN_RESULT) that is enqueued on the
+ * application's replyq.
+ *
+ * We rely on the timeouts on the fanned out sub-ops rather than the parent
+ * fanout op.
+ *
+ * The parent fanout op must not be destroyed until all fanned out sub-ops
+ * are done (either by success, failure or timeout) and destroyed, and this
+ * is tracked by the rko_u.admin_request.fanout.outstanding counter.
+ *
+ */
+
+
+/**
+ * @enum Admin request target broker. Must be negative values since the field
+ * used is broker_id.
+ */
+enum { RD_KAFKA_ADMIN_TARGET_CONTROLLER = -1, /**< Cluster controller */
+ RD_KAFKA_ADMIN_TARGET_COORDINATOR = -2, /**< (Group) Coordinator */
+ RD_KAFKA_ADMIN_TARGET_FANOUT = -3, /**< This rko is a fanout and
+ * and has no target broker */
+ RD_KAFKA_ADMIN_TARGET_ALL = -4, /**< All available brokers */
+};
+
+/**
+ * @brief Admin op callback types
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_admin_Request_cb_t)(
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *configs /*(ConfigResource_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) RD_WARN_UNUSED_RESULT;
+
+typedef rd_kafka_resp_err_t(rd_kafka_admin_Response_parse_cb_t)(
+ rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) RD_WARN_UNUSED_RESULT;
+
+typedef void(rd_kafka_admin_fanout_PartialResponse_cb_t)(
+ rd_kafka_op_t *rko_req,
+ const rd_kafka_op_t *rko_partial);
+
+typedef rd_list_copy_cb_t rd_kafka_admin_fanout_CopyResult_cb_t;
+
+typedef rd_list_copy_cb_t rd_kafka_admin_fanout_CopyArg_cb_t;
+
+/**
+ * @struct Request-specific worker callbacks.
+ */
+struct rd_kafka_admin_worker_cbs {
+ /**< Protocol request callback which is called
+ * to construct and send the request. */
+ rd_kafka_admin_Request_cb_t *request;
+
+ /**< Protocol response parser callback which is called
+ * to translate the response to a rko_result op. */
+ rd_kafka_admin_Response_parse_cb_t *parse;
+};
+
+/**
+ * @struct Fanout request callbacks.
+ */
+struct rd_kafka_admin_fanout_worker_cbs {
+ /** Merge results from a fanned out request into the user response. */
+ rd_kafka_admin_fanout_PartialResponse_cb_t *partial_response;
+
+ /** Copy an accumulated result for storing into the rko_result. */
+ rd_kafka_admin_fanout_CopyResult_cb_t *copy_result;
+
+ /** Copy the original arguments, used by target ALL. */
+ rd_kafka_admin_fanout_CopyArg_cb_t *copy_arg;
+};
+
+/* Forward declarations */
+static void rd_kafka_admin_common_worker_destroy(rd_kafka_t *rk,
+ rd_kafka_op_t *rko,
+ rd_bool_t do_destroy);
+static void rd_kafka_AdminOptions_init(rd_kafka_t *rk,
+ rd_kafka_AdminOptions_t *options);
+
+static void rd_kafka_AdminOptions_copy_to(rd_kafka_AdminOptions_t *dst,
+ const rd_kafka_AdminOptions_t *src);
+
+static rd_kafka_op_res_t
+rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko);
+static rd_kafka_ConfigEntry_t *
+rd_kafka_ConfigEntry_copy(const rd_kafka_ConfigEntry_t *src);
+static void rd_kafka_ConfigEntry_free(void *ptr);
+static void *rd_kafka_ConfigEntry_list_copy(const void *src, void *opaque);
+
+static void rd_kafka_admin_handle_response(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
+static rd_kafka_op_res_t
+rd_kafka_admin_fanout_worker(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko_fanout);
+
+
+/**
+ * @name Common admin request code
+ * @{
+ *
+ *
+ */
+
+/**
+ * @brief Create a new admin_result op based on the request op \p rko_req.
+ *
+ * @remark This moves the rko_req's admin_request.args list from \p rko_req
+ * to the returned rko. The \p rko_req args will be emptied.
+ */
+static rd_kafka_op_t *rd_kafka_admin_result_new(rd_kafka_op_t *rko_req) {
+ rd_kafka_op_t *rko_result;
+ rd_kafka_op_t *rko_fanout;
+
+ if ((rko_fanout = rko_req->rko_u.admin_request.fanout_parent)) {
+ /* If this is a fanned out request the rko_result needs to be
+ * handled by the fanout worker rather than the application. */
+ rko_result = rd_kafka_op_new_cb(rko_req->rko_rk,
+ RD_KAFKA_OP_ADMIN_RESULT,
+ rd_kafka_admin_fanout_worker);
+ /* Transfer fanout pointer to result */
+ rko_result->rko_u.admin_result.fanout_parent = rko_fanout;
+ rko_req->rko_u.admin_request.fanout_parent = NULL;
+ /* Set event type based on original fanout ops reqtype,
+ * e.g., ..OP_DELETERECORDS */
+ rko_result->rko_u.admin_result.reqtype =
+ rko_fanout->rko_u.admin_request.fanout.reqtype;
+
+ } else {
+ rko_result = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_RESULT);
+
+ /* If this is fanout request (i.e., the parent OP_ADMIN_FANOUT
+ * to fanned out requests) we need to use the original
+ * application request type. */
+ if (rko_req->rko_type == RD_KAFKA_OP_ADMIN_FANOUT)
+ rko_result->rko_u.admin_result.reqtype =
+ rko_req->rko_u.admin_request.fanout.reqtype;
+ else
+ rko_result->rko_u.admin_result.reqtype =
+ rko_req->rko_type;
+ }
+
+ rko_result->rko_rk = rko_req->rko_rk;
+
+ rko_result->rko_u.admin_result.opaque = rd_kafka_confval_get_ptr(
+ &rko_req->rko_u.admin_request.options.opaque);
+
+ /* Move request arguments (list) from request to result.
+ * This is mainly so that partial_response() knows what arguments
+ * were provided to the response's request it is merging. */
+ rd_list_move(&rko_result->rko_u.admin_result.args,
+ &rko_req->rko_u.admin_request.args);
+
+ rko_result->rko_evtype = rko_req->rko_u.admin_request.reply_event_type;
+
+ return rko_result;
+}
+
+
+/**
+ * @brief Set error code and error string on admin_result op \p rko.
+ */
+static void rd_kafka_admin_result_set_err0(rd_kafka_op_t *rko,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ va_list ap) {
+ char buf[512];
+
+ rd_vsnprintf(buf, sizeof(buf), fmt, ap);
+
+ rko->rko_err = err;
+
+ if (rko->rko_u.admin_result.errstr)
+ rd_free(rko->rko_u.admin_result.errstr);
+ rko->rko_u.admin_result.errstr = rd_strdup(buf);
+
+ rd_kafka_dbg(rko->rko_rk, ADMIN, "ADMINFAIL",
+ "Admin %s result error: %s",
+ rd_kafka_op2str(rko->rko_u.admin_result.reqtype),
+ rko->rko_u.admin_result.errstr);
+}
+
+/**
+ * @sa rd_kafka_admin_result_set_err0
+ */
+static RD_UNUSED RD_FORMAT(printf, 3, 4) void rd_kafka_admin_result_set_err(
+ rd_kafka_op_t *rko,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ va_list ap;
+
+ va_start(ap, fmt);
+ rd_kafka_admin_result_set_err0(rko, err, fmt, ap);
+ va_end(ap);
+}
+
+/**
+ * @brief Enqueue admin_result on application's queue.
+ */
+static RD_INLINE void rd_kafka_admin_result_enq(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t *rko_result) {
+ rd_kafka_replyq_enq(&rko_req->rko_u.admin_request.replyq, rko_result,
+ rko_req->rko_u.admin_request.replyq.version);
+}
+
+/**
+ * @brief Set request-level error code and string in reply op.
+ *
+ * @remark This function will NOT destroy the \p rko_req, so don't forget to
+ * call rd_kafka_admin_common_worker_destroy() when done with the rko.
+ */
+static RD_FORMAT(printf,
+ 3,
+ 4) void rd_kafka_admin_result_fail(rd_kafka_op_t *rko_req,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ va_list ap;
+ rd_kafka_op_t *rko_result;
+
+ if (!rko_req->rko_u.admin_request.replyq.q)
+ return;
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ va_start(ap, fmt);
+ rd_kafka_admin_result_set_err0(rko_result, err, fmt, ap);
+ va_end(ap);
+
+ rd_kafka_admin_result_enq(rko_req, rko_result);
+}
+
+
+/**
+ * @brief Send the admin request contained in \p rko upon receiving
+ * a FindCoordinator response.
+ *
+ * @param opaque Must be an admin request op's eonce (rko_u.admin_request.eonce)
+ * (i.e. created by \c rd_kafka_admin_request_op_new )
+ *
+ * @remark To be used as a callback for \c rd_kafka_coord_req
+ */
+static rd_kafka_resp_err_t
+rd_kafka_admin_coord_request(rd_kafka_broker_t *rkb,
+ rd_kafka_op_t *rko_ignore,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_enq_once_t *eonce = opaque;
+ rd_kafka_op_t *rko;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+
+
+ rko = rd_kafka_enq_once_del_source_return(eonce, "coordinator request");
+ if (!rko)
+ /* Admin request has timed out and been destroyed */
+ return RD_KAFKA_RESP_ERR__DESTROY;
+
+ rd_kafka_enq_once_add_source(eonce, "coordinator response");
+
+ err = rko->rko_u.admin_request.cbs->request(
+ rkb, &rko->rko_u.admin_request.args,
+ &rko->rko_u.admin_request.options, errstr, sizeof(errstr), replyq,
+ rd_kafka_admin_handle_response, eonce);
+ if (err) {
+ rd_kafka_enq_once_del_source(eonce, "coordinator response");
+ rd_kafka_admin_result_fail(
+ rko, err, "%s worker failed to send request: %s",
+ rd_kafka_op2str(rko->rko_type), errstr);
+ rd_kafka_admin_common_worker_destroy(rk, rko,
+ rd_true /*destroy*/);
+ }
+ return err;
+}
+
+
+/**
+ * @brief Return the topics list from a topic-related result object.
+ */
+static const rd_kafka_topic_result_t **
+rd_kafka_admin_result_ret_topics(const rd_kafka_op_t *rko, size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_CREATETOPICS ||
+ reqtype == RD_KAFKA_OP_DELETETOPICS ||
+ reqtype == RD_KAFKA_OP_CREATEPARTITIONS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_topic_result_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the ConfigResource list from a config-related result object.
+ */
+static const rd_kafka_ConfigResource_t **
+rd_kafka_admin_result_ret_resources(const rd_kafka_op_t *rko, size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_ALTERCONFIGS ||
+ reqtype == RD_KAFKA_OP_DESCRIBECONFIGS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_ConfigResource_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the acl result list from a acl-related result object.
+ */
+static const rd_kafka_acl_result_t **
+rd_kafka_admin_result_ret_acl_results(const rd_kafka_op_t *rko, size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_CREATEACLS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_acl_result_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the acl binding list from a acl-related result object.
+ */
+static const rd_kafka_AclBinding_t **
+rd_kafka_admin_result_ret_acl_bindings(const rd_kafka_op_t *rko, size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_DESCRIBEACLS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_AclBinding_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the groups list from a group-related result object.
+ */
+static const rd_kafka_group_result_t **
+rd_kafka_admin_result_ret_groups(const rd_kafka_op_t *rko, size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_DELETEGROUPS ||
+ reqtype == RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS ||
+ reqtype == RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS ||
+ reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_group_result_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Return the DeleteAcls response list from a acl-related result object.
+ */
+static const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_admin_result_ret_delete_acl_result_responses(const rd_kafka_op_t *rko,
+ size_t *cntp) {
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_DELETEACLS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_DeleteAcls_result_response_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**
+ * @brief Create a new admin_request op of type \p optype and sets up the
+ * generic (type independent files).
+ *
+ * The caller shall then populate the admin_request.args list
+ * and enqueue the op on rk_ops for further processing work.
+ *
+ * @param cbs Callbacks, must reside in .data segment.
+ * @param options Optional options, may be NULL to use defaults.
+ *
+ * @locks none
+ * @locality application thread
+ */
+static rd_kafka_op_t *
+rd_kafka_admin_request_op_new(rd_kafka_t *rk,
+ rd_kafka_op_type_t optype,
+ rd_kafka_event_type_t reply_event_type,
+ const struct rd_kafka_admin_worker_cbs *cbs,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_q_t *rkq) {
+ rd_kafka_op_t *rko;
+
+ rd_assert(rk);
+ rd_assert(rkq);
+ rd_assert(cbs);
+
+ rko = rd_kafka_op_new_cb(rk, optype, rd_kafka_admin_worker);
+
+ rko->rko_u.admin_request.reply_event_type = reply_event_type;
+
+ rko->rko_u.admin_request.cbs = (struct rd_kafka_admin_worker_cbs *)cbs;
+
+ /* Make a copy of the options */
+ if (options)
+ rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options,
+ options);
+ else
+ rd_kafka_AdminOptions_init(rk,
+ &rko->rko_u.admin_request.options);
+
+ /* Default to controller */
+ rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER;
+
+ /* Calculate absolute timeout */
+ rko->rko_u.admin_request.abs_timeout =
+ rd_timeout_init(rd_kafka_confval_get_int(
+ &rko->rko_u.admin_request.options.request_timeout));
+
+ /* Setup enq-op-once, which is triggered by either timer code
+ * or future wait-controller code. */
+ rko->rko_u.admin_request.eonce =
+ rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
+
+ /* The timer itself must be started from the rdkafka main thread,
+ * not here. */
+
+ /* Set up replyq */
+ rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, rkq, 0);
+
+ rko->rko_u.admin_request.state = RD_KAFKA_ADMIN_STATE_INIT;
+ return rko;
+}
+
+
+/**
+ * @returns the remaining request timeout in milliseconds.
+ */
+static RD_INLINE int rd_kafka_admin_timeout_remains(rd_kafka_op_t *rko) {
+ return rd_timeout_remains(rko->rko_u.admin_request.abs_timeout);
+}
+
+/**
+ * @returns the remaining request timeout in microseconds.
+ */
+static RD_INLINE rd_ts_t rd_kafka_admin_timeout_remains_us(rd_kafka_op_t *rko) {
+ return rd_timeout_remains_us(rko->rko_u.admin_request.abs_timeout);
+}
+
+
+/**
+ * @brief Timer timeout callback for the admin rko's eonce object.
+ */
+static void rd_kafka_admin_eonce_timeout_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_enq_once_t *eonce = arg;
+
+ rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "timeout timer");
+}
+
+
+
+/**
+ * @brief Common worker destroy to be called in destroy: label
+ * in worker.
+ */
+static void rd_kafka_admin_common_worker_destroy(rd_kafka_t *rk,
+ rd_kafka_op_t *rko,
+ rd_bool_t do_destroy) {
+ int timer_was_stopped;
+
+ /* Free resources for this op. */
+ timer_was_stopped = rd_kafka_timer_stop(
+ &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true);
+
+
+ if (rko->rko_u.admin_request.eonce) {
+ /* Remove the stopped timer's eonce reference since its
+ * callback will not have fired if we stopped the timer. */
+ if (timer_was_stopped)
+ rd_kafka_enq_once_del_source(
+ rko->rko_u.admin_request.eonce, "timeout timer");
+
+ /* This is thread-safe to do even if there are outstanding
+ * timers or wait-controller references to the eonce
+ * since they only hold direct reference to the eonce,
+ * not the rko (the eonce holds a reference to the rko but
+ * it is cleared here). */
+ rd_kafka_enq_once_destroy(rko->rko_u.admin_request.eonce);
+ rko->rko_u.admin_request.eonce = NULL;
+ }
+
+ if (do_destroy)
+ rd_kafka_op_destroy(rko);
+}
+
+
+
+/**
+ * @brief Asynchronously look up a broker.
+ * To be called repeatedly from each invocation of the worker
+ * when in state RD_KAFKA_ADMIN_STATE_WAIT_BROKER until
+ * a valid rkb is returned.
+ *
+ * @returns the broker rkb with refcount increased, or NULL if not yet
+ * available.
+ */
+static rd_kafka_broker_t *rd_kafka_admin_common_get_broker(rd_kafka_t *rk,
+ rd_kafka_op_t *rko,
+ int32_t broker_id) {
+ rd_kafka_broker_t *rkb;
+
+ rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up broker %" PRId32,
+ rd_kafka_op2str(rko->rko_type), broker_id);
+
+ /* Since we're iterating over this broker_async() call
+ * (asynchronously) until a broker is availabe (or timeout)
+ * we need to re-enable the eonce to be triggered again (which
+ * is not necessary the first time we get here, but there
+ * is no harm doing it then either). */
+ rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko,
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0));
+
+ /* Look up the broker asynchronously, if the broker
+ * is not available the eonce is registered for broker
+ * state changes which will cause our function to be called
+ * again as soon as (any) broker state changes.
+ * When we are called again we perform the broker lookup
+ * again and hopefully get an rkb back, otherwise defer a new
+ * async wait. Repeat until success or timeout. */
+ if (!(rkb = rd_kafka_broker_get_async(
+ rk, broker_id, RD_KAFKA_BROKER_STATE_UP,
+ rko->rko_u.admin_request.eonce))) {
+ /* Broker not available, wait asynchronously
+ * for broker metadata code to trigger eonce. */
+ return NULL;
+ }
+
+ rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: broker %" PRId32 " is %s",
+ rd_kafka_op2str(rko->rko_type), broker_id, rkb->rkb_name);
+
+ return rkb;
+}
+
+
+/**
+ * @brief Asynchronously look up the controller.
+ * To be called repeatedly from each invocation of the worker
+ * when in state RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER until
+ * a valid rkb is returned.
+ *
+ * @returns the controller rkb with refcount increased, or NULL if not yet
+ * available.
+ */
+static rd_kafka_broker_t *
+rd_kafka_admin_common_get_controller(rd_kafka_t *rk, rd_kafka_op_t *rko) {
+ rd_kafka_broker_t *rkb;
+
+ rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up controller",
+ rd_kafka_op2str(rko->rko_type));
+
+ /* Since we're iterating over this controller_async() call
+ * (asynchronously) until a controller is availabe (or timeout)
+ * we need to re-enable the eonce to be triggered again (which
+ * is not necessary the first time we get here, but there
+ * is no harm doing it then either). */
+ rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko,
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0));
+
+ /* Look up the controller asynchronously, if the controller
+ * is not available the eonce is registered for broker
+ * state changes which will cause our function to be called
+ * again as soon as (any) broker state changes.
+ * When we are called again we perform the controller lookup
+ * again and hopefully get an rkb back, otherwise defer a new
+ * async wait. Repeat until success or timeout. */
+ if (!(rkb = rd_kafka_broker_controller_async(
+ rk, RD_KAFKA_BROKER_STATE_UP,
+ rko->rko_u.admin_request.eonce))) {
+ /* Controller not available, wait asynchronously
+ * for controller code to trigger eonce. */
+ return NULL;
+ }
+
+ rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: controller %s",
+ rd_kafka_op2str(rko->rko_type), rkb->rkb_name);
+
+ return rkb;
+}
+
+
+/**
+ * @brief Asynchronously look up current list of broker ids until available.
+ * Bootstrap and logical brokers are excluded from the list.
+ *
+ * To be called repeatedly from each invocation of the worker
+ * when in state RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST until
+ * a not-NULL rd_list_t * is returned.
+ *
+ * @param rk Client instance.
+ * @param rko Op containing the admin request eonce to use for the
+ * async callback.
+ * @return List of int32_t with broker nodeids when ready, NULL when
+ * the eonce callback will be called.
+ */
+static rd_list_t *
+rd_kafka_admin_common_brokers_get_nodeids(rd_kafka_t *rk, rd_kafka_op_t *rko) {
+ rd_list_t *broker_ids;
+
+ rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: looking up brokers",
+ rd_kafka_op2str(rko->rko_type));
+
+ /* Since we're iterating over this rd_kafka_brokers_get_nodeids_async()
+ * call (asynchronously) until a nodeids list is available (or timeout),
+ * we need to re-enable the eonce to be triggered again (which
+ * is not necessary the first time we get here, but there
+ * is no harm doing it then either). */
+ rd_kafka_enq_once_reenable(rko->rko_u.admin_request.eonce, rko,
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0));
+
+ /* Look up the nodeids list asynchronously, if it's
+ * not available the eonce is registered for broker
+ * state changes which will cause our function to be called
+ * again as soon as (any) broker state changes.
+ * When we are called again we perform the same lookup
+ * again and hopefully get a list of nodeids again,
+ * otherwise defer a new async wait.
+ * Repeat until success or timeout. */
+ if (!(broker_ids = rd_kafka_brokers_get_nodeids_async(
+ rk, rko->rko_u.admin_request.eonce))) {
+ /* nodeids list not available, wait asynchronously
+ * for the eonce to be triggered. */
+ return NULL;
+ }
+
+ rd_kafka_dbg(rk, ADMIN, "ADMIN", "%s: %d broker(s)",
+ rd_kafka_op2str(rko->rko_type), rd_list_cnt(broker_ids));
+
+ return broker_ids;
+}
+
+
+
+/**
+ * @brief Handle response from broker by triggering worker callback.
+ *
+ * @param opaque is the eonce from the worker protocol request call.
+ */
+static void rd_kafka_admin_handle_response(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_enq_once_t *eonce = opaque;
+ rd_kafka_op_t *rko;
+
+ /* From ...add_source("send") */
+ rko = rd_kafka_enq_once_disable(eonce);
+
+ if (!rko) {
+ /* The operation timed out and the worker was
+ * dismantled while we were waiting for broker response,
+ * do nothing - everything has been cleaned up. */
+ rd_kafka_dbg(
+ rk, ADMIN, "ADMIN",
+ "Dropping outdated %sResponse with return code %s",
+ request ? rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey)
+ : "???",
+ rd_kafka_err2str(err));
+ return;
+ }
+
+ /* Attach reply buffer to rko for parsing in the worker. */
+ rd_assert(!rko->rko_u.admin_request.reply_buf);
+ rko->rko_u.admin_request.reply_buf = reply;
+ rko->rko_err = err;
+
+ if (rko->rko_op_cb(rk, NULL, rko) == RD_KAFKA_OP_RES_HANDLED)
+ rd_kafka_op_destroy(rko);
+}
+
+/**
+ * @brief Generic handler for protocol responses, calls the admin ops'
+ * Response_parse_cb and enqueues the result to the caller's queue.
+ */
+static void rd_kafka_admin_response_parse(rd_kafka_op_t *rko) {
+ rd_kafka_resp_err_t err;
+ rd_kafka_op_t *rko_result = NULL;
+ char errstr[512];
+
+ if (rko->rko_err) {
+ rd_kafka_admin_result_fail(rko, rko->rko_err,
+ "%s worker request failed: %s",
+ rd_kafka_op2str(rko->rko_type),
+ rd_kafka_err2str(rko->rko_err));
+ return;
+ }
+
+ /* Response received.
+ * Let callback parse response and provide result in rko_result
+ * which is then enqueued on the reply queue. */
+ err = rko->rko_u.admin_request.cbs->parse(
+ rko, &rko_result, rko->rko_u.admin_request.reply_buf, errstr,
+ sizeof(errstr));
+ if (err) {
+ rd_kafka_admin_result_fail(
+ rko, err, "%s worker failed to parse response: %s",
+ rd_kafka_op2str(rko->rko_type), errstr);
+ return;
+ }
+
+ rd_assert(rko_result);
+
+ /* Enqueue result on application queue, we're done. */
+ rd_kafka_admin_result_enq(rko, rko_result);
+}
+
+/**
+ * @brief Generic handler for coord_req() responses.
+ */
+static void rd_kafka_admin_coord_response_parse(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_op_t *rko_result;
+ rd_kafka_enq_once_t *eonce = opaque;
+ rd_kafka_op_t *rko;
+ char errstr[512];
+
+ rko =
+ rd_kafka_enq_once_del_source_return(eonce, "coordinator response");
+ if (!rko)
+ /* Admin request has timed out and been destroyed */
+ return;
+
+ if (err) {
+ rd_kafka_admin_result_fail(
+ rko, err, "%s worker coordinator request failed: %s",
+ rd_kafka_op2str(rko->rko_type), rd_kafka_err2str(err));
+ rd_kafka_admin_common_worker_destroy(rk, rko,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ err = rko->rko_u.admin_request.cbs->parse(rko, &rko_result, rkbuf,
+ errstr, sizeof(errstr));
+ if (err) {
+ rd_kafka_admin_result_fail(
+ rko, err,
+ "%s worker failed to parse coordinator %sResponse: %s",
+ rd_kafka_op2str(rko->rko_type),
+ rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey), errstr);
+ rd_kafka_admin_common_worker_destroy(rk, rko,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ rd_assert(rko_result);
+
+ /* Enqueue result on application queue, we're done. */
+ rd_kafka_admin_result_enq(rko, rko_result);
+}
+
+static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk,
+ rd_kafka_op_t *rko,
+ rd_list_t *nodeids);
+
+
+/**
+ * @brief Common worker state machine handling regardless of request type.
+ *
+ * Tasks:
+ * - Sets up timeout on first call.
+ * - Checks for timeout.
+ * - Checks for and fails on errors.
+ * - Async Controller and broker lookups
+ * - Calls the Request callback
+ * - Calls the parse callback
+ * - Result reply
+ * - Destruction of rko
+ *
+ * rko->rko_err may be one of:
+ * RD_KAFKA_RESP_ERR_NO_ERROR, or
+ * RD_KAFKA_RESP_ERR__DESTROY for queue destruction cleanup, or
+ * RD_KAFKA_RESP_ERR__TIMED_OUT if request has timed out,
+ * or any other error code triggered by other parts of the code.
+ *
+ * @returns a hint to the op code whether the rko should be destroyed or not.
+ */
+static rd_kafka_op_res_t
+rd_kafka_admin_worker(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
+ const char *name = rd_kafka_op2str(rko->rko_type);
+ rd_ts_t timeout_in;
+ rd_kafka_broker_t *rkb = NULL;
+ rd_kafka_resp_err_t err;
+ rd_list_t *nodeids = NULL;
+ char errstr[512];
+
+ /* ADMIN_FANOUT handled by fanout_worker() */
+ rd_assert((rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) !=
+ RD_KAFKA_OP_ADMIN_FANOUT);
+
+ if (rd_kafka_terminating(rk)) {
+ rd_kafka_dbg(
+ rk, ADMIN, name,
+ "%s worker called in state %s: "
+ "handle is terminating: %s",
+ name,
+ rd_kafka_admin_state_desc[rko->rko_u.admin_request.state],
+ rd_kafka_err2str(rko->rko_err));
+ rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__DESTROY,
+ "Handle is terminating: %s",
+ rd_kafka_err2str(rko->rko_err));
+ goto destroy;
+ }
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY) {
+ rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__DESTROY,
+ "Destroyed");
+ goto destroy; /* rko being destroyed (silent) */
+ }
+
+ rd_kafka_dbg(rk, ADMIN, name, "%s worker called in state %s: %s", name,
+ rd_kafka_admin_state_desc[rko->rko_u.admin_request.state],
+ rd_kafka_err2str(rko->rko_err));
+
+ rd_assert(thrd_is_current(rko->rko_rk->rk_thread));
+
+ /* Check for errors raised asynchronously (e.g., by timer) */
+ if (rko->rko_err) {
+ rd_kafka_admin_result_fail(
+ rko, rko->rko_err, "Failed while %s: %s",
+ rd_kafka_admin_state_desc[rko->rko_u.admin_request.state],
+ rd_kafka_err2str(rko->rko_err));
+ goto destroy;
+ }
+
+ /* Check for timeout */
+ timeout_in = rd_kafka_admin_timeout_remains_us(rko);
+ if (timeout_in <= 0) {
+ rd_kafka_admin_result_fail(
+ rko, RD_KAFKA_RESP_ERR__TIMED_OUT, "Timed out %s",
+ rd_kafka_admin_state_desc[rko->rko_u.admin_request.state]);
+ goto destroy;
+ }
+
+redo:
+ switch (rko->rko_u.admin_request.state) {
+ case RD_KAFKA_ADMIN_STATE_INIT: {
+ int32_t broker_id;
+
+ /* First call. */
+
+ /* Set up timeout timer. */
+ rd_kafka_enq_once_add_source(rko->rko_u.admin_request.eonce,
+ "timeout timer");
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rko->rko_u.admin_request.tmr, rd_true,
+ timeout_in, rd_kafka_admin_eonce_timeout_cb,
+ rko->rko_u.admin_request.eonce);
+
+ /* Use explicitly specified broker_id, if available. */
+ broker_id = (int32_t)rd_kafka_confval_get_int(
+ &rko->rko_u.admin_request.options.broker);
+
+ if (broker_id != -1) {
+ rd_kafka_dbg(rk, ADMIN, name,
+ "%s using explicitly "
+ "set broker id %" PRId32
+ " rather than %" PRId32,
+ name, broker_id,
+ rko->rko_u.admin_request.broker_id);
+ rko->rko_u.admin_request.broker_id = broker_id;
+ } else {
+ /* Default to controller */
+ broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER;
+ }
+
+ /* Resolve target broker(s) */
+ switch (rko->rko_u.admin_request.broker_id) {
+ case RD_KAFKA_ADMIN_TARGET_CONTROLLER:
+ /* Controller */
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER;
+ goto redo; /* Trigger next state immediately */
+
+ case RD_KAFKA_ADMIN_TARGET_COORDINATOR:
+ /* Group (or other) coordinator */
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE;
+ rd_kafka_enq_once_add_source(
+ rko->rko_u.admin_request.eonce,
+ "coordinator request");
+ rd_kafka_coord_req(
+ rk, rko->rko_u.admin_request.coordtype,
+ rko->rko_u.admin_request.coordkey,
+ rd_kafka_admin_coord_request, NULL, 0 /* no delay*/,
+ rd_kafka_admin_timeout_remains(rko),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_admin_coord_response_parse,
+ rko->rko_u.admin_request.eonce);
+ /* Wait asynchronously for broker response, which will
+ * trigger the eonce and worker to be called again. */
+ return RD_KAFKA_OP_RES_KEEP;
+ case RD_KAFKA_ADMIN_TARGET_ALL:
+ /* All brokers */
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST;
+ goto redo; /* Trigger next state immediately */
+
+ case RD_KAFKA_ADMIN_TARGET_FANOUT:
+ /* Shouldn't come here, fanouts are handled by
+ * fanout_worker() */
+ RD_NOTREACHED();
+ return RD_KAFKA_OP_RES_KEEP;
+
+ default:
+ /* Specific broker */
+ rd_assert(rko->rko_u.admin_request.broker_id >= 0);
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_WAIT_BROKER;
+ goto redo; /* Trigger next state immediately */
+ }
+ }
+
+
+ case RD_KAFKA_ADMIN_STATE_WAIT_BROKER:
+ /* Broker lookup */
+ if (!(rkb = rd_kafka_admin_common_get_broker(
+ rk, rko, rko->rko_u.admin_request.broker_id))) {
+ /* Still waiting for broker to become available */
+ return RD_KAFKA_OP_RES_KEEP;
+ }
+
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST;
+ goto redo;
+
+ case RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER:
+ if (!(rkb = rd_kafka_admin_common_get_controller(rk, rko))) {
+ /* Still waiting for controller to become available. */
+ return RD_KAFKA_OP_RES_KEEP;
+ }
+
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST;
+ goto redo;
+
+ case RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST:
+ /* Wait for a valid list of brokers to be available. */
+ if (!(nodeids =
+ rd_kafka_admin_common_brokers_get_nodeids(rk, rko))) {
+ /* Still waiting for brokers to become available. */
+ return RD_KAFKA_OP_RES_KEEP;
+ }
+
+ rd_kafka_admin_fanout_op_distribute(rk, rko, nodeids);
+ rd_list_destroy(nodeids);
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS;
+ goto redo;
+
+ case RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS:
+ /* This op can be destroyed, as a new fanout op has been
+ * sent, and the response will be enqueued there. */
+ goto destroy;
+
+ case RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST:
+ /* Got broker, send protocol request. */
+
+ /* Make sure we're called from a 'goto redo' where
+ * the rkb was set. */
+ rd_assert(rkb);
+
+ /* Still need to use the eonce since this worker may
+ * time out while waiting for response from broker, in which
+ * case the broker response will hit an empty eonce (ok). */
+ rd_kafka_enq_once_add_source(rko->rko_u.admin_request.eonce,
+ "send");
+
+ /* Send request (async) */
+ err = rko->rko_u.admin_request.cbs->request(
+ rkb, &rko->rko_u.admin_request.args,
+ &rko->rko_u.admin_request.options, errstr, sizeof(errstr),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_admin_handle_response,
+ rko->rko_u.admin_request.eonce);
+
+ /* Loose broker refcount from get_broker(), get_controller() */
+ rd_kafka_broker_destroy(rkb);
+
+ if (err) {
+ rd_kafka_enq_once_del_source(
+ rko->rko_u.admin_request.eonce, "send");
+ rd_kafka_admin_result_fail(rko, err, "%s", errstr);
+ goto destroy;
+ }
+
+ rko->rko_u.admin_request.state =
+ RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE;
+
+ /* Wait asynchronously for broker response, which will
+ * trigger the eonce and worker to be called again. */
+ return RD_KAFKA_OP_RES_KEEP;
+
+
+ case RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE:
+ rd_kafka_admin_response_parse(rko);
+ goto destroy;
+ }
+
+ return RD_KAFKA_OP_RES_KEEP;
+
+destroy:
+ rd_kafka_admin_common_worker_destroy(rk, rko,
+ rd_false /*don't destroy*/);
+ return RD_KAFKA_OP_RES_HANDLED; /* trigger's op_destroy() */
+}
+
+/**
+ * @brief Create a new admin_fanout op of type \p req_type and sets up the
+ * generic (type independent files).
+ *
+ * The caller shall then populate the \c admin_fanout.requests list,
+ * initialize the \c admin_fanout.responses list,
+ * set the initial \c admin_fanout.outstanding value,
+ * and enqueue the op on rk_ops for further processing work.
+ *
+ * @param cbs Callbacks, must reside in .data segment.
+ * @param options Optional options, may be NULL to use defaults.
+ * @param rkq is the application reply queue.
+ *
+ * @locks none
+ * @locality application thread
+ */
+static rd_kafka_op_t *
+rd_kafka_admin_fanout_op_new(rd_kafka_t *rk,
+ rd_kafka_op_type_t req_type,
+ rd_kafka_event_type_t reply_event_type,
+ const struct rd_kafka_admin_fanout_worker_cbs *cbs,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_q_t *rkq) {
+ rd_kafka_op_t *rko;
+
+ rd_assert(rk);
+ rd_assert(rkq);
+ rd_assert(cbs);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_ADMIN_FANOUT);
+ rko->rko_rk = rk;
+
+ rko->rko_u.admin_request.reply_event_type = reply_event_type;
+
+ rko->rko_u.admin_request.fanout.cbs =
+ (struct rd_kafka_admin_fanout_worker_cbs *)cbs;
+
+ /* Make a copy of the options */
+ if (options)
+ rd_kafka_AdminOptions_copy_to(&rko->rko_u.admin_request.options,
+ options);
+ else
+ rd_kafka_AdminOptions_init(rk,
+ &rko->rko_u.admin_request.options);
+
+ rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_FANOUT;
+
+ /* Calculate absolute timeout */
+ rko->rko_u.admin_request.abs_timeout =
+ rd_timeout_init(rd_kafka_confval_get_int(
+ &rko->rko_u.admin_request.options.request_timeout));
+
+ /* Set up replyq */
+ rd_kafka_set_replyq(&rko->rko_u.admin_request.replyq, rkq, 0);
+
+ rko->rko_u.admin_request.state = RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS;
+
+ rko->rko_u.admin_request.fanout.reqtype = req_type;
+
+ return rko;
+}
+
+/**
+ * @brief Duplicate the fanout operation for each nodeid passed and
+ * enqueue each new operation. Use the same fanout_parent as
+ * the passed \p rko.
+ *
+ * @param rk Client instance.
+ * @param rko Operation to distribute to each broker.
+ * @param nodeids List of int32_t with the broker nodeids.
+ * @param rkq
+ * @return rd_kafka_op_t*
+ */
+static void rd_kafka_admin_fanout_op_distribute(rd_kafka_t *rk,
+ rd_kafka_op_t *rko,
+ rd_list_t *nodeids) {
+ int i, nodeids_cnt, timeout_remains;
+ rd_kafka_op_t *rko_fanout;
+ rd_kafka_AdminOptions_t *options = &rko->rko_u.admin_request.options;
+ timeout_remains = rd_kafka_admin_timeout_remains(rko);
+ rd_kafka_AdminOptions_set_request_timeout(options, timeout_remains,
+ NULL, 0);
+
+ nodeids_cnt = rd_list_cnt(nodeids);
+ rko_fanout = rko->rko_u.admin_request.fanout_parent;
+ rko_fanout->rko_u.admin_request.fanout.outstanding = (int)nodeids_cnt;
+ rko->rko_u.admin_request.fanout_parent = NULL;
+
+ /* Create individual request ops for each node */
+ for (i = 0; i < nodeids_cnt; i++) {
+ rd_kafka_op_t *rko_dup = rd_kafka_admin_request_op_new(
+ rk, rko->rko_type,
+ rko->rko_u.admin_request.reply_event_type,
+ rko->rko_u.admin_request.cbs, options, rk->rk_ops);
+
+ rko_dup->rko_u.admin_request.fanout_parent = rko_fanout;
+ rko_dup->rko_u.admin_request.broker_id =
+ rd_list_get_int32(nodeids, i);
+
+ rd_list_init_copy(&rko_dup->rko_u.admin_request.args,
+ &rko->rko_u.admin_request.args);
+ rd_list_copy_to(
+ &rko_dup->rko_u.admin_request.args,
+ &rko->rko_u.admin_request.args,
+ rko_fanout->rko_u.admin_request.fanout.cbs->copy_arg, NULL);
+
+ rd_kafka_q_enq(rk->rk_ops, rko_dup);
+ }
+}
+
+
+/**
+ * @brief Common fanout worker state machine handling regardless of request type
+ *
+ * @param rko Result of a fanned out operation, e.g., DELETERECORDS result.
+ *
+ * Tasks:
+ * - Checks for and responds to client termination
+ * - Polls for fanned out responses
+ * - Calls the partial response callback
+ * - Calls the merge responses callback upon receipt of all partial responses
+ * - Destruction of rko
+ *
+ * rko->rko_err may be one of:
+ * RD_KAFKA_RESP_ERR_NO_ERROR, or
+ * RD_KAFKA_RESP_ERR__DESTROY for queue destruction cleanup.
+ *
+ * @returns a hint to the op code whether the rko should be destroyed or not.
+ */
+static rd_kafka_op_res_t rd_kafka_admin_fanout_worker(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_op_t *rko_fanout = rko->rko_u.admin_result.fanout_parent;
+ const char *name =
+ rd_kafka_op2str(rko_fanout->rko_u.admin_request.fanout.reqtype);
+ rd_kafka_op_t *rko_result;
+
+ RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_ADMIN_RESULT);
+ RD_KAFKA_OP_TYPE_ASSERT(rko_fanout, RD_KAFKA_OP_ADMIN_FANOUT);
+
+ rd_assert(rko_fanout->rko_u.admin_request.fanout.outstanding > 0);
+ rko_fanout->rko_u.admin_request.fanout.outstanding--;
+
+ rko->rko_u.admin_result.fanout_parent = NULL;
+
+ if (rd_kafka_terminating(rk)) {
+ rd_kafka_dbg(rk, ADMIN, name,
+ "%s fanout worker called for fanned out op %s: "
+ "handle is terminating: %s",
+ name, rd_kafka_op2str(rko->rko_type),
+ rd_kafka_err2str(rko_fanout->rko_err));
+ if (!rko->rko_err)
+ rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
+ }
+
+ rd_kafka_dbg(rk, ADMIN, name,
+ "%s fanout worker called for %s with %d request(s) "
+ "outstanding: %s",
+ name, rd_kafka_op2str(rko->rko_type),
+ rko_fanout->rko_u.admin_request.fanout.outstanding,
+ rd_kafka_err2str(rko_fanout->rko_err));
+
+ /* Add partial response to rko_fanout's result list. */
+ rko_fanout->rko_u.admin_request.fanout.cbs->partial_response(rko_fanout,
+ rko);
+
+ if (rko_fanout->rko_u.admin_request.fanout.outstanding > 0)
+ /* Wait for outstanding requests to finish */
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rko_result = rd_kafka_admin_result_new(rko_fanout);
+ rd_list_init_copy(&rko_result->rko_u.admin_result.results,
+ &rko_fanout->rko_u.admin_request.fanout.results);
+ rd_list_copy_to(&rko_result->rko_u.admin_result.results,
+ &rko_fanout->rko_u.admin_request.fanout.results,
+ rko_fanout->rko_u.admin_request.fanout.cbs->copy_result,
+ NULL);
+
+ /* Enqueue result on application queue, we're done. */
+ rd_kafka_replyq_enq(&rko_fanout->rko_u.admin_request.replyq, rko_result,
+ rko_fanout->rko_u.admin_request.replyq.version);
+
+ /* FALLTHRU */
+ if (rko_fanout->rko_u.admin_request.fanout.outstanding == 0)
+ rd_kafka_op_destroy(rko_fanout);
+
+ return RD_KAFKA_OP_RES_HANDLED; /* trigger's op_destroy(rko) */
+}
+
+/**
+ * @brief Create a new operation that targets all the brokers.
+ * The operation consists of a fanout parent that is reused and
+ * fanout operation that is duplicated for each broker found.
+ *
+ * @param rk Client instance-
+ * @param optype Operation type.
+ * @param reply_event_type Reply event type.
+ * @param cbs Fanned out op callbacks.
+ * @param fanout_cbs Fanout parent out op callbacks.
+ * @param result_free Callback for freeing the result list.
+ * @param options Operation options.
+ * @param rkq Result queue.
+ * @return The newly created op targeting all the brokers.
+ *
+ * @sa Use rd_kafka_op_destroy() to release it.
+ */
+static rd_kafka_op_t *rd_kafka_admin_request_op_target_all_new(
+ rd_kafka_t *rk,
+ rd_kafka_op_type_t optype,
+ rd_kafka_event_type_t reply_event_type,
+ const struct rd_kafka_admin_worker_cbs *cbs,
+ const struct rd_kafka_admin_fanout_worker_cbs *fanout_cbs,
+ void (*result_free)(void *),
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_q_t *rkq) {
+ rd_kafka_op_t *rko, *rko_fanout;
+
+ rko_fanout = rd_kafka_admin_fanout_op_new(rk, optype, reply_event_type,
+ fanout_cbs, options, rkq);
+
+ rko = rd_kafka_admin_request_op_new(rk, optype, reply_event_type, cbs,
+ options, rk->rk_ops);
+
+ rko_fanout->rko_u.admin_request.fanout.outstanding = 1;
+ rko->rko_u.admin_request.fanout_parent = rko_fanout;
+ rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_ALL;
+
+ rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, (int)1,
+ result_free);
+
+ return rko;
+}
+
+/**@}*/
+
+
+/**
+ * @name Generic AdminOptions
+ * @{
+ *
+ *
+ */
+
+rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_request_timeout(rd_kafka_AdminOptions_t *options,
+ int timeout_ms,
+ char *errstr,
+ size_t errstr_size) {
+ return rd_kafka_confval_set_type(&options->request_timeout,
+ RD_KAFKA_CONFVAL_INT, &timeout_ms,
+ errstr, errstr_size);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_operation_timeout(rd_kafka_AdminOptions_t *options,
+ int timeout_ms,
+ char *errstr,
+ size_t errstr_size) {
+ return rd_kafka_confval_set_type(&options->operation_timeout,
+ RD_KAFKA_CONFVAL_INT, &timeout_ms,
+ errstr, errstr_size);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_validate_only(rd_kafka_AdminOptions_t *options,
+ int true_or_false,
+ char *errstr,
+ size_t errstr_size) {
+ return rd_kafka_confval_set_type(&options->validate_only,
+ RD_KAFKA_CONFVAL_INT, &true_or_false,
+ errstr, errstr_size);
+}
+
+rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_incremental(rd_kafka_AdminOptions_t *options,
+ int true_or_false,
+ char *errstr,
+ size_t errstr_size) {
+ rd_snprintf(errstr, errstr_size,
+ "Incremental updates currently not supported, see KIP-248");
+ return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+
+ return rd_kafka_confval_set_type(&options->incremental,
+ RD_KAFKA_CONFVAL_INT, &true_or_false,
+ errstr, errstr_size);
+}
+
+rd_kafka_resp_err_t
+rd_kafka_AdminOptions_set_broker(rd_kafka_AdminOptions_t *options,
+ int32_t broker_id,
+ char *errstr,
+ size_t errstr_size) {
+ int ibroker_id = (int)broker_id;
+
+ return rd_kafka_confval_set_type(&options->broker, RD_KAFKA_CONFVAL_INT,
+ &ibroker_id, errstr, errstr_size);
+}
+
+rd_kafka_error_t *rd_kafka_AdminOptions_set_require_stable_offsets(
+ rd_kafka_AdminOptions_t *options,
+ int true_or_false) {
+ char errstr[512];
+ rd_kafka_resp_err_t err = rd_kafka_confval_set_type(
+ &options->require_stable_offsets, RD_KAFKA_CONFVAL_INT,
+ &true_or_false, errstr, sizeof(errstr));
+ return !err ? NULL : rd_kafka_error_new(err, "%s", errstr);
+}
+
+rd_kafka_error_t *rd_kafka_AdminOptions_set_match_consumer_group_states(
+ rd_kafka_AdminOptions_t *options,
+ const rd_kafka_consumer_group_state_t *consumer_group_states,
+ size_t consumer_group_states_cnt) {
+ size_t i;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ rd_list_t *states_list = rd_list_new(0, NULL);
+ rd_list_init_int32(states_list, consumer_group_states_cnt);
+ uint64_t states_bitmask = 0;
+
+ if (RD_KAFKA_CONSUMER_GROUP_STATE__CNT >= 64) {
+ rd_assert("BUG: cannot handle states with a bitmask anymore");
+ }
+
+ for (i = 0; i < consumer_group_states_cnt; i++) {
+ uint64_t state_bit;
+ rd_kafka_consumer_group_state_t state =
+ consumer_group_states[i];
+
+ if (state < 0 || state >= RD_KAFKA_CONSUMER_GROUP_STATE__CNT) {
+ rd_list_destroy(states_list);
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Invalid group state value");
+ }
+
+ state_bit = 1 << state;
+ if (states_bitmask & state_bit) {
+ rd_list_destroy(states_list);
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Duplicate states not allowed");
+ } else {
+ states_bitmask = states_bitmask | state_bit;
+ rd_list_set_int32(states_list, (int32_t)i, state);
+ }
+ }
+ err = rd_kafka_confval_set_type(&options->match_consumer_group_states,
+ RD_KAFKA_CONFVAL_PTR, states_list,
+ errstr, sizeof(errstr));
+ if (err) {
+ rd_list_destroy(states_list);
+ }
+ return !err ? NULL : rd_kafka_error_new(err, "%s", errstr);
+}
+
+void rd_kafka_AdminOptions_set_opaque(rd_kafka_AdminOptions_t *options,
+ void *opaque) {
+ rd_kafka_confval_set_type(&options->opaque, RD_KAFKA_CONFVAL_PTR,
+ opaque, NULL, 0);
+}
+
+
+/**
+ * @brief Initialize and set up defaults for AdminOptions
+ */
+static void rd_kafka_AdminOptions_init(rd_kafka_t *rk,
+ rd_kafka_AdminOptions_t *options) {
+ rd_kafka_confval_init_int(&options->request_timeout, "request_timeout",
+ 0, 3600 * 1000,
+ rk->rk_conf.admin.request_timeout_ms);
+
+ if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
+ options->for_api == RD_KAFKA_ADMIN_OP_CREATETOPICS ||
+ options->for_api == RD_KAFKA_ADMIN_OP_DELETETOPICS ||
+ options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS ||
+ options->for_api == RD_KAFKA_ADMIN_OP_DELETERECORDS)
+ rd_kafka_confval_init_int(&options->operation_timeout,
+ "operation_timeout", -1, 3600 * 1000,
+ rk->rk_conf.admin.request_timeout_ms);
+ else
+ rd_kafka_confval_disable(&options->operation_timeout,
+ "operation_timeout");
+
+ if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
+ options->for_api == RD_KAFKA_ADMIN_OP_CREATETOPICS ||
+ options->for_api == RD_KAFKA_ADMIN_OP_CREATEPARTITIONS ||
+ options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS)
+ rd_kafka_confval_init_int(&options->validate_only,
+ "validate_only", 0, 1, 0);
+ else
+ rd_kafka_confval_disable(&options->validate_only,
+ "validate_only");
+
+ if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
+ options->for_api == RD_KAFKA_ADMIN_OP_ALTERCONFIGS)
+ rd_kafka_confval_init_int(&options->incremental, "incremental",
+ 0, 1, 0);
+ else
+ rd_kafka_confval_disable(&options->incremental, "incremental");
+
+ if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
+ options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS)
+ rd_kafka_confval_init_int(&options->require_stable_offsets,
+ "require_stable_offsets", 0, 1, 0);
+ else
+ rd_kafka_confval_disable(&options->require_stable_offsets,
+ "require_stable_offsets");
+
+ if (options->for_api == RD_KAFKA_ADMIN_OP_ANY ||
+ options->for_api == RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS)
+ rd_kafka_confval_init_ptr(&options->match_consumer_group_states,
+ "match_consumer_group_states");
+ else
+ rd_kafka_confval_disable(&options->match_consumer_group_states,
+ "match_consumer_group_states");
+
+ rd_kafka_confval_init_int(&options->broker, "broker", 0, INT32_MAX, -1);
+ rd_kafka_confval_init_ptr(&options->opaque, "opaque");
+}
+
+/**
+ * @brief Copy contents of \p src to \p dst.
+ * Deep copy every pointer confval.
+ *
+ * @param dst The destination AdminOptions.
+ * @param src The source AdminOptions.
+ */
+static void rd_kafka_AdminOptions_copy_to(rd_kafka_AdminOptions_t *dst,
+ const rd_kafka_AdminOptions_t *src) {
+ *dst = *src;
+ if (src->match_consumer_group_states.u.PTR) {
+ char errstr[512];
+ rd_list_t *states_list_copy = rd_list_copy_preallocated(
+ src->match_consumer_group_states.u.PTR, NULL);
+
+ rd_kafka_resp_err_t err = rd_kafka_confval_set_type(
+ &dst->match_consumer_group_states, RD_KAFKA_CONFVAL_PTR,
+ states_list_copy, errstr, sizeof(errstr));
+ rd_assert(!err);
+ }
+}
+
+
+rd_kafka_AdminOptions_t *
+rd_kafka_AdminOptions_new(rd_kafka_t *rk, rd_kafka_admin_op_t for_api) {
+ rd_kafka_AdminOptions_t *options;
+
+ if ((int)for_api < 0 || for_api >= RD_KAFKA_ADMIN_OP__CNT)
+ return NULL;
+
+ options = rd_calloc(1, sizeof(*options));
+
+ options->for_api = for_api;
+
+ rd_kafka_AdminOptions_init(rk, options);
+
+ return options;
+}
+
+void rd_kafka_AdminOptions_destroy(rd_kafka_AdminOptions_t *options) {
+ if (options->match_consumer_group_states.u.PTR) {
+ rd_list_destroy(options->match_consumer_group_states.u.PTR);
+ }
+ rd_free(options);
+}
+
+/**@}*/
+
+
+
+/**
+ * @name CreateTopics
+ * @{
+ *
+ *
+ *
+ */
+
+
+
+rd_kafka_NewTopic_t *rd_kafka_NewTopic_new(const char *topic,
+ int num_partitions,
+ int replication_factor,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_NewTopic_t *new_topic;
+
+ if (!topic) {
+ rd_snprintf(errstr, errstr_size, "Invalid topic name");
+ return NULL;
+ }
+
+ if (num_partitions < -1 || num_partitions > RD_KAFKAP_PARTITIONS_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "num_partitions out of "
+ "expected range %d..%d or -1 for broker default",
+ 1, RD_KAFKAP_PARTITIONS_MAX);
+ return NULL;
+ }
+
+ if (replication_factor < -1 ||
+ replication_factor > RD_KAFKAP_BROKERS_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "replication_factor out of expected range %d..%d",
+ -1, RD_KAFKAP_BROKERS_MAX);
+ return NULL;
+ }
+
+ new_topic = rd_calloc(1, sizeof(*new_topic));
+ new_topic->topic = rd_strdup(topic);
+ new_topic->num_partitions = num_partitions;
+ new_topic->replication_factor = replication_factor;
+
+ /* List of int32 lists */
+ rd_list_init(&new_topic->replicas, 0, rd_list_destroy_free);
+ rd_list_prealloc_elems(&new_topic->replicas, 0,
+ num_partitions == -1 ? 0 : num_partitions,
+ 0 /*nozero*/);
+
+ /* List of ConfigEntrys */
+ rd_list_init(&new_topic->config, 0, rd_kafka_ConfigEntry_free);
+
+ return new_topic;
+}
+
+
+/**
+ * @brief Topic name comparator for NewTopic_t
+ */
+static int rd_kafka_NewTopic_cmp(const void *_a, const void *_b) {
+ const rd_kafka_NewTopic_t *a = _a, *b = _b;
+ return strcmp(a->topic, b->topic);
+}
+
+
+
+/**
+ * @brief Allocate a new NewTopic and make a copy of \p src
+ */
+static rd_kafka_NewTopic_t *
+rd_kafka_NewTopic_copy(const rd_kafka_NewTopic_t *src) {
+ rd_kafka_NewTopic_t *dst;
+
+ dst = rd_kafka_NewTopic_new(src->topic, src->num_partitions,
+ src->replication_factor, NULL, 0);
+ rd_assert(dst);
+
+ rd_list_destroy(&dst->replicas); /* created in .._new() */
+ rd_list_init_copy(&dst->replicas, &src->replicas);
+ rd_list_copy_to(&dst->replicas, &src->replicas,
+ rd_list_copy_preallocated, NULL);
+
+ rd_list_init_copy(&dst->config, &src->config);
+ rd_list_copy_to(&dst->config, &src->config,
+ rd_kafka_ConfigEntry_list_copy, NULL);
+
+ return dst;
+}
+
+void rd_kafka_NewTopic_destroy(rd_kafka_NewTopic_t *new_topic) {
+ rd_list_destroy(&new_topic->replicas);
+ rd_list_destroy(&new_topic->config);
+ rd_free(new_topic->topic);
+ rd_free(new_topic);
+}
+
+static void rd_kafka_NewTopic_free(void *ptr) {
+ rd_kafka_NewTopic_destroy(ptr);
+}
+
+void rd_kafka_NewTopic_destroy_array(rd_kafka_NewTopic_t **new_topics,
+ size_t new_topic_cnt) {
+ size_t i;
+ for (i = 0; i < new_topic_cnt; i++)
+ rd_kafka_NewTopic_destroy(new_topics[i]);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_NewTopic_set_replica_assignment(rd_kafka_NewTopic_t *new_topic,
+ int32_t partition,
+ int32_t *broker_ids,
+ size_t broker_id_cnt,
+ char *errstr,
+ size_t errstr_size) {
+ rd_list_t *rl;
+ int i;
+
+ if (new_topic->replication_factor != -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Specifying a replication factor and "
+ "a replica assignment are mutually exclusive");
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ } else if (new_topic->num_partitions == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Specifying a default partition count and a "
+ "replica assignment are mutually exclusive");
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ /* Replica partitions must be added consecutively starting from 0. */
+ if (partition != rd_list_cnt(&new_topic->replicas)) {
+ rd_snprintf(errstr, errstr_size,
+ "Partitions must be added in order, "
+ "starting at 0: expecting partition %d, "
+ "not %" PRId32,
+ rd_list_cnt(&new_topic->replicas), partition);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ if (broker_id_cnt > RD_KAFKAP_BROKERS_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "Too many brokers specified "
+ "(RD_KAFKAP_BROKERS_MAX=%d)",
+ RD_KAFKAP_BROKERS_MAX);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+
+ rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt);
+
+ for (i = 0; i < (int)broker_id_cnt; i++)
+ rd_list_set_int32(rl, i, broker_ids[i]);
+
+ rd_list_add(&new_topic->replicas, rl);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Generic constructor of ConfigEntry which is also added to \p rl
+ */
+static rd_kafka_resp_err_t
+rd_kafka_admin_add_config0(rd_list_t *rl,
+ const char *name,
+ const char *value,
+ rd_kafka_AlterOperation_t operation) {
+ rd_kafka_ConfigEntry_t *entry;
+
+ if (!name)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ entry = rd_calloc(1, sizeof(*entry));
+ entry->kv = rd_strtup_new(name, value);
+ entry->a.operation = operation;
+
+ rd_list_add(rl, entry);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_NewTopic_set_config(rd_kafka_NewTopic_t *new_topic,
+ const char *name,
+ const char *value) {
+ return rd_kafka_admin_add_config0(&new_topic->config, name, value,
+ RD_KAFKA_ALTER_OP_ADD);
+}
+
+
+
+/**
+ * @brief Parse CreateTopicsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_CreateTopicsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t topic_cnt;
+ int i;
+
+ if (rd_kafka_buf_ApiVersion(reply) >= 2) {
+ int32_t Throttle_Time;
+ rd_kafka_buf_read_i32(reply, &Throttle_Time);
+ rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
+ }
+
+ /* #topics */
+ rd_kafka_buf_read_i32(reply, &topic_cnt);
+
+ if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Received %" PRId32
+ " topics in response "
+ "when only %d were requested",
+ topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
+
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt,
+ rd_kafka_topic_result_free);
+
+ for (i = 0; i < (int)topic_cnt; i++) {
+ rd_kafkap_str_t ktopic;
+ int16_t error_code;
+ rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
+ char *this_errstr = NULL;
+ rd_kafka_topic_result_t *terr;
+ rd_kafka_NewTopic_t skel;
+ int orig_pos;
+
+ rd_kafka_buf_read_str(reply, &ktopic);
+ rd_kafka_buf_read_i16(reply, &error_code);
+
+ if (rd_kafka_buf_ApiVersion(reply) >= 1)
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ /* For non-blocking CreateTopicsRequests the broker
+ * will returned REQUEST_TIMED_OUT for topics
+ * that were triggered for creation -
+ * we hide this error code from the application
+ * since the topic creation is in fact in progress. */
+ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT &&
+ rd_kafka_confval_get_int(&rko_req->rko_u.admin_request
+ .options.operation_timeout) <=
+ 0) {
+ error_code = RD_KAFKA_RESP_ERR_NO_ERROR;
+ this_errstr = NULL;
+ }
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
+ RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ this_errstr =
+ (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
+ }
+
+ terr = rd_kafka_topic_result_new(ktopic.str,
+ RD_KAFKAP_STR_LEN(&ktopic),
+ error_code, this_errstr);
+
+ /* As a convenience to the application we insert topic result
+ * in the same order as they were requested. The broker
+ * does not maintain ordering unfortunately. */
+ skel.topic = terr->topic;
+ orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
+ &skel, rd_kafka_NewTopic_cmp);
+ if (orig_pos == -1) {
+ rd_kafka_topic_result_destroy(terr);
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned topic %.*s that was not "
+ "included in the original request",
+ RD_KAFKAP_STR_PR(&ktopic));
+ }
+
+ if (rd_list_elem(&rko_result->rko_u.admin_result.results,
+ orig_pos) != NULL) {
+ rd_kafka_topic_result_destroy(terr);
+ rd_kafka_buf_parse_fail(
+ reply, "Broker returned topic %.*s multiple times",
+ RD_KAFKAP_STR_PR(&ktopic));
+ }
+
+ rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
+ terr);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "CreateTopics response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+
+void rd_kafka_CreateTopics(rd_kafka_t *rk,
+ rd_kafka_NewTopic_t **new_topics,
+ size_t new_topic_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_CreateTopicsRequest,
+ rd_kafka_CreateTopicsResponse_parse,
+ };
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATETOPICS,
+ RD_KAFKA_EVENT_CREATETOPICS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)new_topic_cnt,
+ rd_kafka_NewTopic_free);
+
+ for (i = 0; i < new_topic_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_NewTopic_copy(new_topics[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+
+/**
+ * @brief Get an array of topic results from a CreateTopics result.
+ *
+ * The returned \p topics life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_topic_result_t **rd_kafka_CreateTopics_result_topics(
+ const rd_kafka_CreateTopics_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result,
+ cntp);
+}
+
+/**@}*/
+
+
+
+/**
+ * @name Delete topics
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_DeleteTopic_t *rd_kafka_DeleteTopic_new(const char *topic) {
+ size_t tsize = strlen(topic) + 1;
+ rd_kafka_DeleteTopic_t *del_topic;
+
+ /* Single allocation */
+ del_topic = rd_malloc(sizeof(*del_topic) + tsize);
+ del_topic->topic = del_topic->data;
+ memcpy(del_topic->topic, topic, tsize);
+
+ return del_topic;
+}
+
+void rd_kafka_DeleteTopic_destroy(rd_kafka_DeleteTopic_t *del_topic) {
+ rd_free(del_topic);
+}
+
+static void rd_kafka_DeleteTopic_free(void *ptr) {
+ rd_kafka_DeleteTopic_destroy(ptr);
+}
+
+
+void rd_kafka_DeleteTopic_destroy_array(rd_kafka_DeleteTopic_t **del_topics,
+ size_t del_topic_cnt) {
+ size_t i;
+ for (i = 0; i < del_topic_cnt; i++)
+ rd_kafka_DeleteTopic_destroy(del_topics[i]);
+}
+
+
+/**
+ * @brief Topic name comparator for DeleteTopic_t
+ */
+static int rd_kafka_DeleteTopic_cmp(const void *_a, const void *_b) {
+ const rd_kafka_DeleteTopic_t *a = _a, *b = _b;
+ return strcmp(a->topic, b->topic);
+}
+
+/**
+ * @brief Allocate a new DeleteTopic and make a copy of \p src
+ */
+static rd_kafka_DeleteTopic_t *
+rd_kafka_DeleteTopic_copy(const rd_kafka_DeleteTopic_t *src) {
+ return rd_kafka_DeleteTopic_new(src->topic);
+}
+
+
+
+/**
+ * @brief Parse DeleteTopicsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DeleteTopicsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t topic_cnt;
+ int i;
+
+ if (rd_kafka_buf_ApiVersion(reply) >= 1) {
+ int32_t Throttle_Time;
+ rd_kafka_buf_read_i32(reply, &Throttle_Time);
+ rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
+ }
+
+ /* #topics */
+ rd_kafka_buf_read_i32(reply, &topic_cnt);
+
+ if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Received %" PRId32
+ " topics in response "
+ "when only %d were requested",
+ topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt,
+ rd_kafka_topic_result_free);
+
+ for (i = 0; i < (int)topic_cnt; i++) {
+ rd_kafkap_str_t ktopic;
+ int16_t error_code;
+ rd_kafka_topic_result_t *terr;
+ rd_kafka_NewTopic_t skel;
+ int orig_pos;
+
+ rd_kafka_buf_read_str(reply, &ktopic);
+ rd_kafka_buf_read_i16(reply, &error_code);
+
+ /* For non-blocking DeleteTopicsRequests the broker
+ * will returned REQUEST_TIMED_OUT for topics
+ * that were triggered for creation -
+ * we hide this error code from the application
+ * since the topic creation is in fact in progress. */
+ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT &&
+ rd_kafka_confval_get_int(&rko_req->rko_u.admin_request
+ .options.operation_timeout) <=
+ 0) {
+ error_code = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ terr = rd_kafka_topic_result_new(
+ ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code,
+ error_code ? rd_kafka_err2str(error_code) : NULL);
+
+ /* As a convenience to the application we insert topic result
+ * in the same order as they were requested. The broker
+ * does not maintain ordering unfortunately. */
+ skel.topic = terr->topic;
+ orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
+ &skel, rd_kafka_DeleteTopic_cmp);
+ if (orig_pos == -1) {
+ rd_kafka_topic_result_destroy(terr);
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned topic %.*s that was not "
+ "included in the original request",
+ RD_KAFKAP_STR_PR(&ktopic));
+ }
+
+ if (rd_list_elem(&rko_result->rko_u.admin_result.results,
+ orig_pos) != NULL) {
+ rd_kafka_topic_result_destroy(terr);
+ rd_kafka_buf_parse_fail(
+ reply, "Broker returned topic %.*s multiple times",
+ RD_KAFKAP_STR_PR(&ktopic));
+ }
+
+ rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
+ terr);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "DeleteTopics response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+
+
+void rd_kafka_DeleteTopics(rd_kafka_t *rk,
+ rd_kafka_DeleteTopic_t **del_topics,
+ size_t del_topic_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DeleteTopicsRequest,
+ rd_kafka_DeleteTopicsResponse_parse,
+ };
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETETOPICS,
+ RD_KAFKA_EVENT_DELETETOPICS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)del_topic_cnt,
+ rd_kafka_DeleteTopic_free);
+
+ for (i = 0; i < del_topic_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_DeleteTopic_copy(del_topics[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+
+/**
+ * @brief Get an array of topic results from a DeleteTopics result.
+ *
+ * The returned \p topics life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_topic_result_t **rd_kafka_DeleteTopics_result_topics(
+ const rd_kafka_DeleteTopics_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result,
+ cntp);
+}
+
+
+
+/**
+ * @name Create partitions
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_NewPartitions_t *rd_kafka_NewPartitions_new(const char *topic,
+ size_t new_total_cnt,
+ char *errstr,
+ size_t errstr_size) {
+ size_t tsize = strlen(topic) + 1;
+ rd_kafka_NewPartitions_t *newps;
+
+ if (new_total_cnt < 1 || new_total_cnt > RD_KAFKAP_PARTITIONS_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "new_total_cnt out of "
+ "expected range %d..%d",
+ 1, RD_KAFKAP_PARTITIONS_MAX);
+ return NULL;
+ }
+
+ /* Single allocation */
+ newps = rd_malloc(sizeof(*newps) + tsize);
+ newps->total_cnt = new_total_cnt;
+ newps->topic = newps->data;
+ memcpy(newps->topic, topic, tsize);
+
+ /* List of int32 lists */
+ rd_list_init(&newps->replicas, 0, rd_list_destroy_free);
+ rd_list_prealloc_elems(&newps->replicas, 0, new_total_cnt,
+ 0 /*nozero*/);
+
+ return newps;
+}
+
+/**
+ * @brief Topic name comparator for NewPartitions_t
+ */
+static int rd_kafka_NewPartitions_cmp(const void *_a, const void *_b) {
+ const rd_kafka_NewPartitions_t *a = _a, *b = _b;
+ return strcmp(a->topic, b->topic);
+}
+
+
+/**
+ * @brief Allocate a new CreatePartitions and make a copy of \p src
+ */
+static rd_kafka_NewPartitions_t *
+rd_kafka_NewPartitions_copy(const rd_kafka_NewPartitions_t *src) {
+ rd_kafka_NewPartitions_t *dst;
+
+ dst = rd_kafka_NewPartitions_new(src->topic, src->total_cnt, NULL, 0);
+
+ rd_list_destroy(&dst->replicas); /* created in .._new() */
+ rd_list_init_copy(&dst->replicas, &src->replicas);
+ rd_list_copy_to(&dst->replicas, &src->replicas,
+ rd_list_copy_preallocated, NULL);
+
+ return dst;
+}
+
+void rd_kafka_NewPartitions_destroy(rd_kafka_NewPartitions_t *newps) {
+ rd_list_destroy(&newps->replicas);
+ rd_free(newps);
+}
+
+static void rd_kafka_NewPartitions_free(void *ptr) {
+ rd_kafka_NewPartitions_destroy(ptr);
+}
+
+
+void rd_kafka_NewPartitions_destroy_array(rd_kafka_NewPartitions_t **newps,
+ size_t newps_cnt) {
+ size_t i;
+ for (i = 0; i < newps_cnt; i++)
+ rd_kafka_NewPartitions_destroy(newps[i]);
+}
+
+
+
+rd_kafka_resp_err_t
+rd_kafka_NewPartitions_set_replica_assignment(rd_kafka_NewPartitions_t *newp,
+ int32_t new_partition_idx,
+ int32_t *broker_ids,
+ size_t broker_id_cnt,
+ char *errstr,
+ size_t errstr_size) {
+ rd_list_t *rl;
+ int i;
+
+ /* Replica partitions must be added consecutively starting from 0. */
+ if (new_partition_idx != rd_list_cnt(&newp->replicas)) {
+ rd_snprintf(errstr, errstr_size,
+ "Partitions must be added in order, "
+ "starting at 0: expecting partition "
+ "index %d, not %" PRId32,
+ rd_list_cnt(&newp->replicas), new_partition_idx);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ if (broker_id_cnt > RD_KAFKAP_BROKERS_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "Too many brokers specified "
+ "(RD_KAFKAP_BROKERS_MAX=%d)",
+ RD_KAFKAP_BROKERS_MAX);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ rl = rd_list_init_int32(rd_list_new(0, NULL), (int)broker_id_cnt);
+
+ for (i = 0; i < (int)broker_id_cnt; i++)
+ rd_list_set_int32(rl, i, broker_ids[i]);
+
+ rd_list_add(&newp->replicas, rl);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Parse CreatePartitionsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_CreatePartitionsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t topic_cnt;
+ int i;
+ int32_t Throttle_Time;
+
+ rd_kafka_buf_read_i32(reply, &Throttle_Time);
+ rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
+
+ /* #topics */
+ rd_kafka_buf_read_i32(reply, &topic_cnt);
+
+ if (topic_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Received %" PRId32
+ " topics in response "
+ "when only %d were requested",
+ topic_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, topic_cnt,
+ rd_kafka_topic_result_free);
+
+ for (i = 0; i < (int)topic_cnt; i++) {
+ rd_kafkap_str_t ktopic;
+ int16_t error_code;
+ char *this_errstr = NULL;
+ rd_kafka_topic_result_t *terr;
+ rd_kafka_NewTopic_t skel;
+ rd_kafkap_str_t error_msg;
+ int orig_pos;
+
+ rd_kafka_buf_read_str(reply, &ktopic);
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ /* For non-blocking CreatePartitionsRequests the broker
+ * will returned REQUEST_TIMED_OUT for topics
+ * that were triggered for creation -
+ * we hide this error code from the application
+ * since the topic creation is in fact in progress. */
+ if (error_code == RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT &&
+ rd_kafka_confval_get_int(&rko_req->rko_u.admin_request
+ .options.operation_timeout) <=
+ 0) {
+ error_code = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
+ RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ this_errstr =
+ (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
+ }
+
+ terr = rd_kafka_topic_result_new(
+ ktopic.str, RD_KAFKAP_STR_LEN(&ktopic), error_code,
+ error_code ? this_errstr : NULL);
+
+ /* As a convenience to the application we insert topic result
+ * in the same order as they were requested. The broker
+ * does not maintain ordering unfortunately. */
+ skel.topic = terr->topic;
+ orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
+ &skel, rd_kafka_NewPartitions_cmp);
+ if (orig_pos == -1) {
+ rd_kafka_topic_result_destroy(terr);
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned topic %.*s that was not "
+ "included in the original request",
+ RD_KAFKAP_STR_PR(&ktopic));
+ }
+
+ if (rd_list_elem(&rko_result->rko_u.admin_result.results,
+ orig_pos) != NULL) {
+ rd_kafka_topic_result_destroy(terr);
+ rd_kafka_buf_parse_fail(
+ reply, "Broker returned topic %.*s multiple times",
+ RD_KAFKAP_STR_PR(&ktopic));
+ }
+
+ rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
+ terr);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "CreatePartitions response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+
+
+void rd_kafka_CreatePartitions(rd_kafka_t *rk,
+ rd_kafka_NewPartitions_t **newps,
+ size_t newps_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_CreatePartitionsRequest,
+ rd_kafka_CreatePartitionsResponse_parse,
+ };
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_CREATEPARTITIONS,
+ RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, &cbs, options,
+ rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)newps_cnt,
+ rd_kafka_NewPartitions_free);
+
+ for (i = 0; i < newps_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_NewPartitions_copy(newps[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+
+/**
+ * @brief Get an array of topic results from a CreatePartitions result.
+ *
+ * The returned \p topics life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_topic_result_t **rd_kafka_CreatePartitions_result_topics(
+ const rd_kafka_CreatePartitions_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_topics((const rd_kafka_op_t *)result,
+ cntp);
+}
+
+/**@}*/
+
+
+
+/**
+ * @name ConfigEntry
+ * @{
+ *
+ *
+ *
+ */
+
+static void rd_kafka_ConfigEntry_destroy(rd_kafka_ConfigEntry_t *entry) {
+ rd_strtup_destroy(entry->kv);
+ rd_list_destroy(&entry->synonyms);
+ rd_free(entry);
+}
+
+
+static void rd_kafka_ConfigEntry_free(void *ptr) {
+ rd_kafka_ConfigEntry_destroy((rd_kafka_ConfigEntry_t *)ptr);
+}
+
+
+/**
+ * @brief Create new ConfigEntry
+ *
+ * @param name Config entry name
+ * @param name_len Length of name, or -1 to use strlen()
+ * @param value Config entry value, or NULL
+ * @param value_len Length of value, or -1 to use strlen()
+ */
+static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new0(const char *name,
+ size_t name_len,
+ const char *value,
+ size_t value_len) {
+ rd_kafka_ConfigEntry_t *entry;
+
+ if (!name)
+ return NULL;
+
+ entry = rd_calloc(1, sizeof(*entry));
+ entry->kv = rd_strtup_new0(name, name_len, value, value_len);
+
+ rd_list_init(&entry->synonyms, 0, rd_kafka_ConfigEntry_free);
+
+ entry->a.source = RD_KAFKA_CONFIG_SOURCE_UNKNOWN_CONFIG;
+
+ return entry;
+}
+
+/**
+ * @sa rd_kafka_ConfigEntry_new0
+ */
+static rd_kafka_ConfigEntry_t *rd_kafka_ConfigEntry_new(const char *name,
+ const char *value) {
+ return rd_kafka_ConfigEntry_new0(name, -1, value, -1);
+}
+
+
+
+/**
+ * @brief Allocate a new AlterConfigs and make a copy of \p src
+ */
+static rd_kafka_ConfigEntry_t *
+rd_kafka_ConfigEntry_copy(const rd_kafka_ConfigEntry_t *src) {
+ rd_kafka_ConfigEntry_t *dst;
+
+ dst = rd_kafka_ConfigEntry_new(src->kv->name, src->kv->value);
+ dst->a = src->a;
+
+ rd_list_destroy(&dst->synonyms); /* created in .._new() */
+ rd_list_init_copy(&dst->synonyms, &src->synonyms);
+ rd_list_copy_to(&dst->synonyms, &src->synonyms,
+ rd_kafka_ConfigEntry_list_copy, NULL);
+
+ return dst;
+}
+
+static void *rd_kafka_ConfigEntry_list_copy(const void *src, void *opaque) {
+ return rd_kafka_ConfigEntry_copy((const rd_kafka_ConfigEntry_t *)src);
+}
+
+
+const char *rd_kafka_ConfigEntry_name(const rd_kafka_ConfigEntry_t *entry) {
+ return entry->kv->name;
+}
+
+const char *rd_kafka_ConfigEntry_value(const rd_kafka_ConfigEntry_t *entry) {
+ return entry->kv->value;
+}
+
+rd_kafka_ConfigSource_t
+rd_kafka_ConfigEntry_source(const rd_kafka_ConfigEntry_t *entry) {
+ return entry->a.source;
+}
+
+int rd_kafka_ConfigEntry_is_read_only(const rd_kafka_ConfigEntry_t *entry) {
+ return entry->a.is_readonly;
+}
+
+int rd_kafka_ConfigEntry_is_default(const rd_kafka_ConfigEntry_t *entry) {
+ return entry->a.is_default;
+}
+
+int rd_kafka_ConfigEntry_is_sensitive(const rd_kafka_ConfigEntry_t *entry) {
+ return entry->a.is_sensitive;
+}
+
+int rd_kafka_ConfigEntry_is_synonym(const rd_kafka_ConfigEntry_t *entry) {
+ return entry->a.is_synonym;
+}
+
+const rd_kafka_ConfigEntry_t **
+rd_kafka_ConfigEntry_synonyms(const rd_kafka_ConfigEntry_t *entry,
+ size_t *cntp) {
+ *cntp = rd_list_cnt(&entry->synonyms);
+ if (!*cntp)
+ return NULL;
+ return (const rd_kafka_ConfigEntry_t **)entry->synonyms.rl_elems;
+}
+
+
+/**@}*/
+
+
+
+/**
+ * @name ConfigSource
+ * @{
+ *
+ *
+ *
+ */
+
+const char *rd_kafka_ConfigSource_name(rd_kafka_ConfigSource_t confsource) {
+ static const char *names[] = {
+ "UNKNOWN_CONFIG", "DYNAMIC_TOPIC_CONFIG",
+ "DYNAMIC_BROKER_CONFIG", "DYNAMIC_DEFAULT_BROKER_CONFIG",
+ "STATIC_BROKER_CONFIG", "DEFAULT_CONFIG",
+ };
+
+ if ((unsigned int)confsource >=
+ (unsigned int)RD_KAFKA_CONFIG_SOURCE__CNT)
+ return "UNSUPPORTED";
+
+ return names[confsource];
+}
+
+/**@}*/
+
+
+
+/**
+ * @name ConfigResource
+ * @{
+ *
+ *
+ *
+ */
+
+const char *rd_kafka_ResourcePatternType_name(
+ rd_kafka_ResourcePatternType_t resource_pattern_type) {
+ static const char *names[] = {"UNKNOWN", "ANY", "MATCH", "LITERAL",
+ "PREFIXED"};
+
+ if ((unsigned int)resource_pattern_type >=
+ (unsigned int)RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT)
+ return "UNSUPPORTED";
+
+ return names[resource_pattern_type];
+}
+
+const char *rd_kafka_ResourceType_name(rd_kafka_ResourceType_t restype) {
+ static const char *names[] = {
+ "UNKNOWN", "ANY", "TOPIC", "GROUP", "BROKER",
+ };
+
+ if ((unsigned int)restype >= (unsigned int)RD_KAFKA_RESOURCE__CNT)
+ return "UNSUPPORTED";
+
+ return names[restype];
+}
+
+
+rd_kafka_ConfigResource_t *
+rd_kafka_ConfigResource_new(rd_kafka_ResourceType_t restype,
+ const char *resname) {
+ rd_kafka_ConfigResource_t *config;
+ size_t namesz = resname ? strlen(resname) : 0;
+
+ if (!namesz || (int)restype < 0)
+ return NULL;
+
+ config = rd_calloc(1, sizeof(*config) + namesz + 1);
+ config->name = config->data;
+ memcpy(config->name, resname, namesz + 1);
+ config->restype = restype;
+
+ rd_list_init(&config->config, 8, rd_kafka_ConfigEntry_free);
+
+ return config;
+}
+
+void rd_kafka_ConfigResource_destroy(rd_kafka_ConfigResource_t *config) {
+ rd_list_destroy(&config->config);
+ if (config->errstr)
+ rd_free(config->errstr);
+ rd_free(config);
+}
+
+static void rd_kafka_ConfigResource_free(void *ptr) {
+ rd_kafka_ConfigResource_destroy((rd_kafka_ConfigResource_t *)ptr);
+}
+
+
+void rd_kafka_ConfigResource_destroy_array(rd_kafka_ConfigResource_t **config,
+ size_t config_cnt) {
+ size_t i;
+ for (i = 0; i < config_cnt; i++)
+ rd_kafka_ConfigResource_destroy(config[i]);
+}
+
+
+/**
+ * @brief Type and name comparator for ConfigResource_t
+ */
+static int rd_kafka_ConfigResource_cmp(const void *_a, const void *_b) {
+ const rd_kafka_ConfigResource_t *a = _a, *b = _b;
+ int r = RD_CMP(a->restype, b->restype);
+ if (r)
+ return r;
+ return strcmp(a->name, b->name);
+}
+
+/**
+ * @brief Allocate a new AlterConfigs and make a copy of \p src
+ */
+static rd_kafka_ConfigResource_t *
+rd_kafka_ConfigResource_copy(const rd_kafka_ConfigResource_t *src) {
+ rd_kafka_ConfigResource_t *dst;
+
+ dst = rd_kafka_ConfigResource_new(src->restype, src->name);
+
+ rd_list_destroy(&dst->config); /* created in .._new() */
+ rd_list_init_copy(&dst->config, &src->config);
+ rd_list_copy_to(&dst->config, &src->config,
+ rd_kafka_ConfigEntry_list_copy, NULL);
+
+ return dst;
+}
+
+
+static void
+rd_kafka_ConfigResource_add_ConfigEntry(rd_kafka_ConfigResource_t *config,
+ rd_kafka_ConfigEntry_t *entry) {
+ rd_list_add(&config->config, entry);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_ConfigResource_add_config(rd_kafka_ConfigResource_t *config,
+ const char *name,
+ const char *value) {
+ if (!name || !*name || !value)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ return rd_kafka_admin_add_config0(&config->config, name, value,
+ RD_KAFKA_ALTER_OP_ADD);
+}
+
+rd_kafka_resp_err_t
+rd_kafka_ConfigResource_set_config(rd_kafka_ConfigResource_t *config,
+ const char *name,
+ const char *value) {
+ if (!name || !*name || !value)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ return rd_kafka_admin_add_config0(&config->config, name, value,
+ RD_KAFKA_ALTER_OP_SET);
+}
+
+rd_kafka_resp_err_t
+rd_kafka_ConfigResource_delete_config(rd_kafka_ConfigResource_t *config,
+ const char *name) {
+ if (!name || !*name)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ return rd_kafka_admin_add_config0(&config->config, name, NULL,
+ RD_KAFKA_ALTER_OP_DELETE);
+}
+
+
+const rd_kafka_ConfigEntry_t **
+rd_kafka_ConfigResource_configs(const rd_kafka_ConfigResource_t *config,
+ size_t *cntp) {
+ *cntp = rd_list_cnt(&config->config);
+ if (!*cntp)
+ return NULL;
+ return (const rd_kafka_ConfigEntry_t **)config->config.rl_elems;
+}
+
+
+
+rd_kafka_ResourceType_t
+rd_kafka_ConfigResource_type(const rd_kafka_ConfigResource_t *config) {
+ return config->restype;
+}
+
+const char *
+rd_kafka_ConfigResource_name(const rd_kafka_ConfigResource_t *config) {
+ return config->name;
+}
+
+rd_kafka_resp_err_t
+rd_kafka_ConfigResource_error(const rd_kafka_ConfigResource_t *config) {
+ return config->err;
+}
+
+const char *
+rd_kafka_ConfigResource_error_string(const rd_kafka_ConfigResource_t *config) {
+ if (!config->err)
+ return NULL;
+ if (config->errstr)
+ return config->errstr;
+ return rd_kafka_err2str(config->err);
+}
+
+
+/**
+ * @brief Look in the provided ConfigResource_t* list for a resource of
+ * type BROKER and set its broker id in \p broker_id, returning
+ * RD_KAFKA_RESP_ERR_NO_ERROR.
+ *
+ * If multiple BROKER resources are found RD_KAFKA_RESP_ERR__CONFLICT
+ * is returned and an error string is written to errstr.
+ *
+ * If no BROKER resources are found RD_KAFKA_RESP_ERR_NO_ERROR
+ * is returned and \p broker_idp is set to use the coordinator.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_ConfigResource_get_single_broker_id(const rd_list_t *configs,
+ int32_t *broker_idp,
+ char *errstr,
+ size_t errstr_size) {
+ const rd_kafka_ConfigResource_t *config;
+ int i;
+ int32_t broker_id = RD_KAFKA_ADMIN_TARGET_CONTROLLER; /* Some default
+ * value that we
+ * can compare
+ * to below */
+
+ RD_LIST_FOREACH(config, configs, i) {
+ char *endptr;
+ long int r;
+
+ if (config->restype != RD_KAFKA_RESOURCE_BROKER)
+ continue;
+
+ if (broker_id != RD_KAFKA_ADMIN_TARGET_CONTROLLER) {
+ rd_snprintf(errstr, errstr_size,
+ "Only one ConfigResource of type BROKER "
+ "is allowed per call");
+ return RD_KAFKA_RESP_ERR__CONFLICT;
+ }
+
+ /* Convert string broker-id to int32 */
+ r = (int32_t)strtol(config->name, &endptr, 10);
+ if (r == LONG_MIN || r == LONG_MAX || config->name == endptr ||
+ r < 0) {
+ rd_snprintf(errstr, errstr_size,
+ "Expected an int32 broker_id for "
+ "ConfigResource(type=BROKER, name=%s)",
+ config->name);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ broker_id = r;
+
+ /* Keep scanning to make sure there are no duplicate
+ * BROKER resources. */
+ }
+
+ *broker_idp = broker_id;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**@}*/
+
+
+
+/**
+ * @name AlterConfigs
+ * @{
+ *
+ *
+ *
+ */
+
+
+
+/**
+ * @brief Parse AlterConfigsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_AlterConfigsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t res_cnt;
+ int i;
+ int32_t Throttle_Time;
+
+ rd_kafka_buf_read_i32(reply, &Throttle_Time);
+ rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
+
+ rd_kafka_buf_read_i32(reply, &res_cnt);
+
+ if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args)) {
+ rd_snprintf(errstr, errstr_size,
+ "Received %" PRId32
+ " ConfigResources in response "
+ "when only %d were requested",
+ res_cnt,
+ rd_list_cnt(&rko_req->rko_u.admin_request.args));
+ return RD_KAFKA_RESP_ERR__BAD_MSG;
+ }
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
+ rd_kafka_ConfigResource_free);
+
+ for (i = 0; i < (int)res_cnt; i++) {
+ int16_t error_code;
+ rd_kafkap_str_t error_msg;
+ int8_t res_type;
+ rd_kafkap_str_t kres_name;
+ char *res_name;
+ char *this_errstr = NULL;
+ rd_kafka_ConfigResource_t *config;
+ rd_kafka_ConfigResource_t skel;
+ int orig_pos;
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &error_msg);
+ rd_kafka_buf_read_i8(reply, &res_type);
+ rd_kafka_buf_read_str(reply, &kres_name);
+ RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
+ RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ this_errstr =
+ (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
+ }
+
+ config = rd_kafka_ConfigResource_new(res_type, res_name);
+ if (!config) {
+ rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN",
+ "AlterConfigs returned "
+ "unsupported ConfigResource #%d with "
+ "type %d and name \"%s\": ignoring",
+ i, res_type, res_name);
+ continue;
+ }
+
+ config->err = error_code;
+ if (this_errstr)
+ config->errstr = rd_strdup(this_errstr);
+
+ /* As a convenience to the application we insert result
+ * in the same order as they were requested. The broker
+ * does not maintain ordering unfortunately. */
+ skel.restype = config->restype;
+ skel.name = config->name;
+ orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
+ &skel, rd_kafka_ConfigResource_cmp);
+ if (orig_pos == -1) {
+ rd_kafka_ConfigResource_destroy(config);
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned ConfigResource %d,%s "
+ "that was not "
+ "included in the original request",
+ res_type, res_name);
+ }
+
+ if (rd_list_elem(&rko_result->rko_u.admin_result.results,
+ orig_pos) != NULL) {
+ rd_kafka_ConfigResource_destroy(config);
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned ConfigResource %d,%s "
+ "multiple times",
+ res_type, res_name);
+ }
+
+ rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
+ config);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "AlterConfigs response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+
+
+void rd_kafka_AlterConfigs(rd_kafka_t *rk,
+ rd_kafka_ConfigResource_t **configs,
+ size_t config_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ rd_kafka_resp_err_t err;
+ char errstr[256];
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_AlterConfigsRequest,
+ rd_kafka_AlterConfigsResponse_parse,
+ };
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_ALTERCONFIGS,
+ RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt,
+ rd_kafka_ConfigResource_free);
+
+ for (i = 0; i < config_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_ConfigResource_copy(configs[i]));
+
+ /* If there's a BROKER resource in the list we need to
+ * speak directly to that broker rather than the controller.
+ *
+ * Multiple BROKER resources are not allowed.
+ */
+ err = rd_kafka_ConfigResource_get_single_broker_id(
+ &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id,
+ errstr, sizeof(errstr));
+ if (err) {
+ rd_kafka_admin_result_fail(rko, err, "%s", errstr);
+ rd_kafka_admin_common_worker_destroy(rk, rko,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+
+const rd_kafka_ConfigResource_t **rd_kafka_AlterConfigs_result_resources(
+ const rd_kafka_AlterConfigs_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_resources(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+/**@}*/
+
+
+
+/**
+ * @name DescribeConfigs
+ * @{
+ *
+ *
+ *
+ */
+
+
+/**
+ * @brief Parse DescribeConfigsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DescribeConfigsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t res_cnt;
+ int i;
+ int32_t Throttle_Time;
+ rd_kafka_ConfigResource_t *config = NULL;
+ rd_kafka_ConfigEntry_t *entry = NULL;
+
+ rd_kafka_buf_read_i32(reply, &Throttle_Time);
+ rd_kafka_op_throttle_time(rkb, rk->rk_rep, Throttle_Time);
+
+ /* #resources */
+ rd_kafka_buf_read_i32(reply, &res_cnt);
+
+ if (res_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Received %" PRId32
+ " ConfigResources in response "
+ "when only %d were requested",
+ res_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
+ rd_kafka_ConfigResource_free);
+
+ for (i = 0; i < (int)res_cnt; i++) {
+ int16_t error_code;
+ rd_kafkap_str_t error_msg;
+ int8_t res_type;
+ rd_kafkap_str_t kres_name;
+ char *res_name;
+ char *this_errstr = NULL;
+ rd_kafka_ConfigResource_t skel;
+ int orig_pos;
+ int32_t entry_cnt;
+ int ci;
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &error_msg);
+ rd_kafka_buf_read_i8(reply, &res_type);
+ rd_kafka_buf_read_str(reply, &kres_name);
+ RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
+ RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ this_errstr =
+ (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&this_errstr, &error_msg);
+ }
+
+ config = rd_kafka_ConfigResource_new(res_type, res_name);
+ if (!config) {
+ rd_kafka_log(rko_req->rko_rk, LOG_ERR, "ADMIN",
+ "DescribeConfigs returned "
+ "unsupported ConfigResource #%d with "
+ "type %d and name \"%s\": ignoring",
+ i, res_type, res_name);
+ continue;
+ }
+
+ config->err = error_code;
+ if (this_errstr)
+ config->errstr = rd_strdup(this_errstr);
+
+ /* #config_entries */
+ rd_kafka_buf_read_i32(reply, &entry_cnt);
+
+ for (ci = 0; ci < (int)entry_cnt; ci++) {
+ rd_kafkap_str_t config_name, config_value;
+ int32_t syn_cnt;
+ int si;
+
+ rd_kafka_buf_read_str(reply, &config_name);
+ rd_kafka_buf_read_str(reply, &config_value);
+
+ entry = rd_kafka_ConfigEntry_new0(
+ config_name.str, RD_KAFKAP_STR_LEN(&config_name),
+ config_value.str, RD_KAFKAP_STR_LEN(&config_value));
+
+ rd_kafka_buf_read_bool(reply, &entry->a.is_readonly);
+
+ /* ApiVersion 0 has is_default field, while
+ * ApiVersion 1 has source field.
+ * Convert between the two so they look the same
+ * to the caller. */
+ if (rd_kafka_buf_ApiVersion(reply) == 0) {
+ rd_kafka_buf_read_bool(reply,
+ &entry->a.is_default);
+ if (entry->a.is_default)
+ entry->a.source =
+ RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG;
+ } else {
+ int8_t config_source;
+ rd_kafka_buf_read_i8(reply, &config_source);
+ entry->a.source = config_source;
+
+ if (entry->a.source ==
+ RD_KAFKA_CONFIG_SOURCE_DEFAULT_CONFIG)
+ entry->a.is_default = 1;
+ }
+
+ rd_kafka_buf_read_bool(reply, &entry->a.is_sensitive);
+
+
+ if (rd_kafka_buf_ApiVersion(reply) == 1) {
+ /* #config_synonyms (ApiVersion 1) */
+ rd_kafka_buf_read_i32(reply, &syn_cnt);
+
+ if (syn_cnt > 100000)
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned %" PRId32
+ " config synonyms for "
+ "ConfigResource %d,%s: "
+ "limit is 100000",
+ syn_cnt, config->restype,
+ config->name);
+
+ if (syn_cnt > 0)
+ rd_list_grow(&entry->synonyms, syn_cnt);
+
+ } else {
+ /* No synonyms in ApiVersion 0 */
+ syn_cnt = 0;
+ }
+
+
+
+ /* Read synonyms (ApiVersion 1) */
+ for (si = 0; si < (int)syn_cnt; si++) {
+ rd_kafkap_str_t syn_name, syn_value;
+ int8_t syn_source;
+ rd_kafka_ConfigEntry_t *syn_entry;
+
+ rd_kafka_buf_read_str(reply, &syn_name);
+ rd_kafka_buf_read_str(reply, &syn_value);
+ rd_kafka_buf_read_i8(reply, &syn_source);
+
+ syn_entry = rd_kafka_ConfigEntry_new0(
+ syn_name.str, RD_KAFKAP_STR_LEN(&syn_name),
+ syn_value.str,
+ RD_KAFKAP_STR_LEN(&syn_value));
+ if (!syn_entry)
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned invalid "
+ "synonym #%d "
+ "for ConfigEntry #%d (%s) "
+ "and ConfigResource %d,%s: "
+ "syn_name.len %d, "
+ "syn_value.len %d",
+ si, ci, entry->kv->name,
+ config->restype, config->name,
+ (int)syn_name.len,
+ (int)syn_value.len);
+
+ syn_entry->a.source = syn_source;
+ syn_entry->a.is_synonym = 1;
+
+ rd_list_add(&entry->synonyms, syn_entry);
+ }
+
+ rd_kafka_ConfigResource_add_ConfigEntry(config, entry);
+ entry = NULL;
+ }
+
+ /* As a convenience to the application we insert result
+ * in the same order as they were requested. The broker
+ * does not maintain ordering unfortunately. */
+ skel.restype = config->restype;
+ skel.name = config->name;
+ orig_pos = rd_list_index(&rko_result->rko_u.admin_result.args,
+ &skel, rd_kafka_ConfigResource_cmp);
+ if (orig_pos == -1)
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned ConfigResource %d,%s "
+ "that was not "
+ "included in the original request",
+ res_type, res_name);
+
+ if (rd_list_elem(&rko_result->rko_u.admin_result.results,
+ orig_pos) != NULL)
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Broker returned ConfigResource %d,%s "
+ "multiple times",
+ res_type, res_name);
+
+ rd_list_set(&rko_result->rko_u.admin_result.results, orig_pos,
+ config);
+ config = NULL;
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (entry)
+ rd_kafka_ConfigEntry_destroy(entry);
+ if (config)
+ rd_kafka_ConfigResource_destroy(config);
+
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "DescribeConfigs response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+
+
+void rd_kafka_DescribeConfigs(rd_kafka_t *rk,
+ rd_kafka_ConfigResource_t **configs,
+ size_t config_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ rd_kafka_resp_err_t err;
+ char errstr[256];
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DescribeConfigsRequest,
+ rd_kafka_DescribeConfigsResponse_parse,
+ };
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_DESCRIBECONFIGS,
+ RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)config_cnt,
+ rd_kafka_ConfigResource_free);
+
+ for (i = 0; i < config_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_ConfigResource_copy(configs[i]));
+
+ /* If there's a BROKER resource in the list we need to
+ * speak directly to that broker rather than the controller.
+ *
+ * Multiple BROKER resources are not allowed.
+ */
+ err = rd_kafka_ConfigResource_get_single_broker_id(
+ &rko->rko_u.admin_request.args, &rko->rko_u.admin_request.broker_id,
+ errstr, sizeof(errstr));
+ if (err) {
+ rd_kafka_admin_result_fail(rko, err, "%s", errstr);
+ rd_kafka_admin_common_worker_destroy(rk, rko,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+
+
+const rd_kafka_ConfigResource_t **rd_kafka_DescribeConfigs_result_resources(
+ const rd_kafka_DescribeConfigs_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_resources(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+/**@}*/
+
+/**
+ * @name Delete Records
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_DeleteRecords_t *rd_kafka_DeleteRecords_new(
+ const rd_kafka_topic_partition_list_t *before_offsets) {
+ rd_kafka_DeleteRecords_t *del_records;
+
+ del_records = rd_calloc(1, sizeof(*del_records));
+ del_records->offsets =
+ rd_kafka_topic_partition_list_copy(before_offsets);
+
+ return del_records;
+}
+
+void rd_kafka_DeleteRecords_destroy(rd_kafka_DeleteRecords_t *del_records) {
+ rd_kafka_topic_partition_list_destroy(del_records->offsets);
+ rd_free(del_records);
+}
+
+void rd_kafka_DeleteRecords_destroy_array(
+ rd_kafka_DeleteRecords_t **del_records,
+ size_t del_record_cnt) {
+ size_t i;
+ for (i = 0; i < del_record_cnt; i++)
+ rd_kafka_DeleteRecords_destroy(del_records[i]);
+}
+
+
+
+/** @brief Merge the DeleteRecords response from a single broker
+ * into the user response list.
+ */
+static void
+rd_kafka_DeleteRecords_response_merge(rd_kafka_op_t *rko_fanout,
+ const rd_kafka_op_t *rko_partial) {
+ rd_kafka_t *rk = rko_fanout->rko_rk;
+ const rd_kafka_topic_partition_list_t *partitions;
+ rd_kafka_topic_partition_list_t *respartitions;
+ const rd_kafka_topic_partition_t *partition;
+
+ rd_assert(rko_partial->rko_evtype ==
+ RD_KAFKA_EVENT_DELETERECORDS_RESULT);
+
+ /* All partitions (offsets) from the DeleteRecords() call */
+ respartitions =
+ rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results, 0);
+
+ if (rko_partial->rko_err) {
+ /* If there was a request-level error, set the error on
+ * all requested partitions for this request. */
+ const rd_kafka_topic_partition_list_t *reqpartitions;
+ rd_kafka_topic_partition_t *reqpartition;
+
+ /* Partitions (offsets) from this DeleteRecordsRequest */
+ reqpartitions =
+ rd_list_elem(&rko_partial->rko_u.admin_result.args, 0);
+
+ RD_KAFKA_TPLIST_FOREACH(reqpartition, reqpartitions) {
+ rd_kafka_topic_partition_t *respart;
+
+ /* Find result partition */
+ respart = rd_kafka_topic_partition_list_find(
+ respartitions, reqpartition->topic,
+ reqpartition->partition);
+
+ rd_assert(respart || !*"respart not found");
+
+ respart->err = rko_partial->rko_err;
+ }
+
+ return;
+ }
+
+ /* Partitions from the DeleteRecordsResponse */
+ partitions = rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
+
+ RD_KAFKA_TPLIST_FOREACH(partition, partitions) {
+ rd_kafka_topic_partition_t *respart;
+
+
+ /* Find result partition */
+ respart = rd_kafka_topic_partition_list_find(
+ respartitions, partition->topic, partition->partition);
+ if (unlikely(!respart)) {
+ rd_dassert(!*"partition not found");
+
+ rd_kafka_log(rk, LOG_WARNING, "DELETERECORDS",
+ "DeleteRecords response contains "
+ "unexpected %s [%" PRId32
+ "] which "
+ "was not in the request list: ignored",
+ partition->topic, partition->partition);
+ continue;
+ }
+
+ respart->offset = partition->offset;
+ respart->err = partition->err;
+ }
+}
+
+
+
+/**
+ * @brief Parse DeleteRecordsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DeleteRecordsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_op_t *rko_result;
+ rd_kafka_topic_partition_list_t *offsets;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ offsets = rd_kafka_buf_read_topic_partitions(reply, 0, fields);
+ if (!offsets)
+ rd_kafka_buf_parse_fail(reply,
+ "Failed to parse topic partitions");
+
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+ rd_list_init(&rko_result->rko_u.admin_result.results, 1,
+ rd_kafka_topic_partition_list_destroy_free);
+ rd_list_add(&rko_result->rko_u.admin_result.results, offsets);
+ *rko_resultp = rko_result;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ rd_snprintf(errstr, errstr_size,
+ "DeleteRecords response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+
+/**
+ * @brief Call when leaders have been queried to progress the DeleteRecords
+ * admin op to its next phase, sending DeleteRecords to partition
+ * leaders.
+ *
+ * @param rko Reply op (RD_KAFKA_OP_LEADERS).
+ */
+static rd_kafka_op_res_t
+rd_kafka_DeleteRecords_leaders_queried_cb(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *reply) {
+ rd_kafka_resp_err_t err = reply->rko_err;
+ const rd_list_t *leaders =
+ reply->rko_u.leaders.leaders; /* Possibly NULL (on err) */
+ rd_kafka_topic_partition_list_t *partitions =
+ reply->rko_u.leaders.partitions; /* Possibly NULL (on err) */
+ rd_kafka_op_t *rko_fanout = reply->rko_u.leaders.opaque;
+ rd_kafka_topic_partition_t *rktpar;
+ rd_kafka_topic_partition_list_t *offsets;
+ const struct rd_kafka_partition_leader *leader;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DeleteRecordsRequest,
+ rd_kafka_DeleteRecordsResponse_parse,
+ };
+ int i;
+
+ rd_assert((rko_fanout->rko_type & ~RD_KAFKA_OP_FLAGMASK) ==
+ RD_KAFKA_OP_ADMIN_FANOUT);
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ goto err;
+
+ /* Requested offsets */
+ offsets = rd_list_elem(&rko_fanout->rko_u.admin_request.args, 0);
+
+ /* Update the error field of each partition from the
+ * leader-queried partition list so that ERR_UNKNOWN_TOPIC_OR_PART
+ * and similar are propagated, since those partitions are not
+ * included in the leaders list. */
+ RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) {
+ rd_kafka_topic_partition_t *rktpar2;
+
+ if (!rktpar->err)
+ continue;
+
+ rktpar2 = rd_kafka_topic_partition_list_find(
+ offsets, rktpar->topic, rktpar->partition);
+ rd_assert(rktpar2);
+ rktpar2->err = rktpar->err;
+ }
+
+
+ if (err) {
+ err:
+ rd_kafka_admin_result_fail(
+ rko_fanout, err, "Failed to query partition leaders: %s",
+ err == RD_KAFKA_RESP_ERR__NOENT ? "No leaders found"
+ : rd_kafka_err2str(err));
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ /* The response lists is one element deep and that element is a
+ * rd_kafka_topic_partition_list_t with the results of the deletes. */
+ rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results, 1,
+ rd_kafka_topic_partition_list_destroy_free);
+ rd_list_add(&rko_fanout->rko_u.admin_request.fanout.results,
+ rd_kafka_topic_partition_list_copy(offsets));
+
+ rko_fanout->rko_u.admin_request.fanout.outstanding =
+ rd_list_cnt(leaders);
+
+ rd_assert(rd_list_cnt(leaders) > 0);
+
+ /* For each leader send a request for its partitions */
+ RD_LIST_FOREACH(leader, leaders, i) {
+ rd_kafka_op_t *rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_DELETERECORDS,
+ RD_KAFKA_EVENT_DELETERECORDS_RESULT, &cbs,
+ &rko_fanout->rko_u.admin_request.options, rk->rk_ops);
+ rko->rko_u.admin_request.fanout_parent = rko_fanout;
+ rko->rko_u.admin_request.broker_id = leader->rkb->rkb_nodeid;
+
+ rd_kafka_topic_partition_list_sort_by_topic(leader->partitions);
+
+ rd_list_init(&rko->rko_u.admin_request.args, 1,
+ rd_kafka_topic_partition_list_destroy_free);
+ rd_list_add(
+ &rko->rko_u.admin_request.args,
+ rd_kafka_topic_partition_list_copy(leader->partitions));
+
+ /* Enqueue op for admin_worker() to transition to next state */
+ rd_kafka_q_enq(rk->rk_ops, rko);
+ }
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+void rd_kafka_DeleteRecords(rd_kafka_t *rk,
+ rd_kafka_DeleteRecords_t **del_records,
+ size_t del_record_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko_fanout;
+ static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
+ rd_kafka_DeleteRecords_response_merge,
+ rd_kafka_topic_partition_list_copy_opaque,
+ };
+ const rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_topic_partition_list_t *copied_offsets;
+
+ rd_assert(rkqu);
+
+ rko_fanout = rd_kafka_admin_fanout_op_new(
+ rk, RD_KAFKA_OP_DELETERECORDS, RD_KAFKA_EVENT_DELETERECORDS_RESULT,
+ &fanout_cbs, options, rkqu->rkqu_q);
+
+ if (del_record_cnt != 1) {
+ /* We only support one DeleteRecords per call since there
+ * is no point in passing multiples, but the API still
+ * needs to be extensible/future-proof. */
+ rd_kafka_admin_result_fail(rko_fanout,
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Exactly one DeleteRecords must be "
+ "passed");
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ offsets = del_records[0]->offsets;
+
+ if (offsets == NULL || offsets->cnt == 0) {
+ rd_kafka_admin_result_fail(rko_fanout,
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "No records to delete");
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ /* Copy offsets list and store it on the request op */
+ copied_offsets = rd_kafka_topic_partition_list_copy(offsets);
+ if (rd_kafka_topic_partition_list_has_duplicates(
+ copied_offsets, rd_false /*check partition*/)) {
+ rd_kafka_topic_partition_list_destroy(copied_offsets);
+ rd_kafka_admin_result_fail(rko_fanout,
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Duplicate partitions not allowed");
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ /* Set default error on each partition so that if any of the partitions
+ * never get a request sent we have an error to indicate it. */
+ rd_kafka_topic_partition_list_set_err(copied_offsets,
+ RD_KAFKA_RESP_ERR__NOOP);
+
+ rd_list_init(&rko_fanout->rko_u.admin_request.args, 1,
+ rd_kafka_topic_partition_list_destroy_free);
+ rd_list_add(&rko_fanout->rko_u.admin_request.args, copied_offsets);
+
+ /* Async query for partition leaders */
+ rd_kafka_topic_partition_list_query_leaders_async(
+ rk, copied_offsets, rd_kafka_admin_timeout_remains(rko_fanout),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_DeleteRecords_leaders_queried_cb, rko_fanout);
+}
+
+
+/**
+ * @brief Get the list of offsets from a DeleteRecords result.
+ *
+ * The returned \p offsets life-time is the same as the \p result object.
+ */
+const rd_kafka_topic_partition_list_t *rd_kafka_DeleteRecords_result_offsets(
+ const rd_kafka_DeleteRecords_result_t *result) {
+ const rd_kafka_topic_partition_list_t *offsets;
+ const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
+ size_t cnt;
+
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_DELETERECORDS);
+
+ cnt = rd_list_cnt(&rko->rko_u.admin_result.results);
+
+ rd_assert(cnt == 1);
+
+ offsets = (const rd_kafka_topic_partition_list_t *)rd_list_elem(
+ &rko->rko_u.admin_result.results, 0);
+
+ rd_assert(offsets);
+
+ return offsets;
+}
+
+/**@}*/
+
+/**
+ * @name Delete groups
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_DeleteGroup_t *rd_kafka_DeleteGroup_new(const char *group) {
+ size_t tsize = strlen(group) + 1;
+ rd_kafka_DeleteGroup_t *del_group;
+
+ /* Single allocation */
+ del_group = rd_malloc(sizeof(*del_group) + tsize);
+ del_group->group = del_group->data;
+ memcpy(del_group->group, group, tsize);
+
+ return del_group;
+}
+
+void rd_kafka_DeleteGroup_destroy(rd_kafka_DeleteGroup_t *del_group) {
+ rd_free(del_group);
+}
+
+static void rd_kafka_DeleteGroup_free(void *ptr) {
+ rd_kafka_DeleteGroup_destroy(ptr);
+}
+
+void rd_kafka_DeleteGroup_destroy_array(rd_kafka_DeleteGroup_t **del_groups,
+ size_t del_group_cnt) {
+ size_t i;
+ for (i = 0; i < del_group_cnt; i++)
+ rd_kafka_DeleteGroup_destroy(del_groups[i]);
+}
+
+/**
+ * @brief Group name comparator for DeleteGroup_t
+ */
+static int rd_kafka_DeleteGroup_cmp(const void *_a, const void *_b) {
+ const rd_kafka_DeleteGroup_t *a = _a, *b = _b;
+ return strcmp(a->group, b->group);
+}
+
+/**
+ * @brief Allocate a new DeleteGroup and make a copy of \p src
+ */
+static rd_kafka_DeleteGroup_t *
+rd_kafka_DeleteGroup_copy(const rd_kafka_DeleteGroup_t *src) {
+ return rd_kafka_DeleteGroup_new(src->group);
+}
+
+
+/**
+ * @brief Parse DeleteGroupsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DeleteGroupsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ int32_t group_cnt;
+ int i;
+ rd_kafka_op_t *rko_result = NULL;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+ /* #group_error_codes */
+ rd_kafka_buf_read_i32(reply, &group_cnt);
+
+ if (group_cnt > rd_list_cnt(&rko_req->rko_u.admin_request.args))
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Received %" PRId32
+ " groups in response "
+ "when only %d were requested",
+ group_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+ rd_list_init(&rko_result->rko_u.admin_result.results, group_cnt,
+ rd_kafka_group_result_free);
+
+ for (i = 0; i < (int)group_cnt; i++) {
+ rd_kafkap_str_t kgroup;
+ int16_t error_code;
+ rd_kafka_group_result_t *groupres;
+
+ rd_kafka_buf_read_str(reply, &kgroup);
+ rd_kafka_buf_read_i16(reply, &error_code);
+
+ groupres = rd_kafka_group_result_new(
+ kgroup.str, RD_KAFKAP_STR_LEN(&kgroup), NULL,
+ error_code ? rd_kafka_error_new(error_code, NULL) : NULL);
+
+ rd_list_add(&rko_result->rko_u.admin_result.results, groupres);
+ }
+
+ *rko_resultp = rko_result;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "DeleteGroups response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+/** @brief Merge the DeleteGroups response from a single broker
+ * into the user response list.
+ */
+void rd_kafka_DeleteGroups_response_merge(rd_kafka_op_t *rko_fanout,
+ const rd_kafka_op_t *rko_partial) {
+ const rd_kafka_group_result_t *groupres = NULL;
+ rd_kafka_group_result_t *newgroupres;
+ const rd_kafka_DeleteGroup_t *grp =
+ rko_partial->rko_u.admin_result.opaque;
+ int orig_pos;
+
+ rd_assert(rko_partial->rko_evtype ==
+ RD_KAFKA_EVENT_DELETEGROUPS_RESULT);
+
+ if (!rko_partial->rko_err) {
+ /* Proper results.
+ * We only send one group per request, make sure it matches */
+ groupres =
+ rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
+ rd_assert(groupres);
+ rd_assert(!strcmp(groupres->group, grp->group));
+ newgroupres = rd_kafka_group_result_copy(groupres);
+ } else {
+ /* Op errored, e.g. timeout */
+ newgroupres = rd_kafka_group_result_new(
+ grp->group, -1, NULL,
+ rd_kafka_error_new(rko_partial->rko_err, NULL));
+ }
+
+ /* As a convenience to the application we insert group result
+ * in the same order as they were requested. */
+ orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp,
+ rd_kafka_DeleteGroup_cmp);
+ rd_assert(orig_pos != -1);
+
+ /* Make sure result is not already set */
+ rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results,
+ orig_pos) == NULL);
+
+ rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos,
+ newgroupres);
+}
+
+void rd_kafka_DeleteGroups(rd_kafka_t *rk,
+ rd_kafka_DeleteGroup_t **del_groups,
+ size_t del_group_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko_fanout;
+ rd_list_t dup_list;
+ size_t i;
+ static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
+ rd_kafka_DeleteGroups_response_merge,
+ rd_kafka_group_result_copy_opaque,
+ };
+
+ rd_assert(rkqu);
+
+ rko_fanout = rd_kafka_admin_fanout_op_new(
+ rk, RD_KAFKA_OP_DELETEGROUPS, RD_KAFKA_EVENT_DELETEGROUPS_RESULT,
+ &fanout_cbs, options, rkqu->rkqu_q);
+
+ if (del_group_cnt == 0) {
+ rd_kafka_admin_result_fail(rko_fanout,
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "No groups to delete");
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ /* Copy group list and store it on the request op.
+ * Maintain original ordering. */
+ rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)del_group_cnt,
+ rd_kafka_DeleteGroup_free);
+ for (i = 0; i < del_group_cnt; i++)
+ rd_list_add(&rko_fanout->rko_u.admin_request.args,
+ rd_kafka_DeleteGroup_copy(del_groups[i]));
+
+ /* Check for duplicates.
+ * Make a temporary copy of the group list and sort it to check for
+ * duplicates, we don't want the original list sorted since we want
+ * to maintain ordering. */
+ rd_list_init(&dup_list,
+ rd_list_cnt(&rko_fanout->rko_u.admin_request.args), NULL);
+ rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args, NULL,
+ NULL);
+ rd_list_sort(&dup_list, rd_kafka_DeleteGroup_cmp);
+ if (rd_list_find_duplicate(&dup_list, rd_kafka_DeleteGroup_cmp)) {
+ rd_list_destroy(&dup_list);
+ rd_kafka_admin_result_fail(rko_fanout,
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Duplicate groups not allowed");
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ rd_list_destroy(&dup_list);
+
+ /* Prepare results list where fanned out op's results will be
+ * accumulated. */
+ rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results,
+ (int)del_group_cnt, rd_kafka_group_result_free);
+ rko_fanout->rko_u.admin_request.fanout.outstanding = (int)del_group_cnt;
+
+ /* Create individual request ops for each group.
+ * FIXME: A future optimization is to coalesce all groups for a single
+ * coordinator into one op. */
+ for (i = 0; i < del_group_cnt; i++) {
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DeleteGroupsRequest,
+ rd_kafka_DeleteGroupsResponse_parse,
+ };
+ rd_kafka_DeleteGroup_t *grp =
+ rd_list_elem(&rko_fanout->rko_u.admin_request.args, (int)i);
+ rd_kafka_op_t *rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_DELETEGROUPS,
+ RD_KAFKA_EVENT_DELETEGROUPS_RESULT, &cbs, options,
+ rk->rk_ops);
+
+ rko->rko_u.admin_request.fanout_parent = rko_fanout;
+ rko->rko_u.admin_request.broker_id =
+ RD_KAFKA_ADMIN_TARGET_COORDINATOR;
+ rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
+ rko->rko_u.admin_request.coordkey = rd_strdup(grp->group);
+
+ /* Set the group name as the opaque so the fanout worker use it
+ * to fill in errors.
+ * References rko_fanout's memory, which will always outlive
+ * the fanned out op. */
+ rd_kafka_AdminOptions_set_opaque(
+ &rko->rko_u.admin_request.options, grp);
+
+ rd_list_init(&rko->rko_u.admin_request.args, 1,
+ rd_kafka_DeleteGroup_free);
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_DeleteGroup_copy(del_groups[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+ }
+}
+
+
+/**
+ * @brief Get an array of group results from a DeleteGroups result.
+ *
+ * The returned \p groups life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_group_result_t **rd_kafka_DeleteGroups_result_groups(
+ const rd_kafka_DeleteGroups_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
+ cntp);
+}
+
+
+/**@}*/
+
+
+/**
+ * @name Delete consumer group offsets (committed offsets)
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_DeleteConsumerGroupOffsets_t *rd_kafka_DeleteConsumerGroupOffsets_new(
+ const char *group,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ size_t tsize = strlen(group) + 1;
+ rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets;
+
+ rd_assert(partitions);
+
+ /* Single allocation */
+ del_grpoffsets = rd_malloc(sizeof(*del_grpoffsets) + tsize);
+ del_grpoffsets->group = del_grpoffsets->data;
+ memcpy(del_grpoffsets->group, group, tsize);
+ del_grpoffsets->partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+
+ return del_grpoffsets;
+}
+
+void rd_kafka_DeleteConsumerGroupOffsets_destroy(
+ rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets) {
+ rd_kafka_topic_partition_list_destroy(del_grpoffsets->partitions);
+ rd_free(del_grpoffsets);
+}
+
+static void rd_kafka_DeleteConsumerGroupOffsets_free(void *ptr) {
+ rd_kafka_DeleteConsumerGroupOffsets_destroy(ptr);
+}
+
+void rd_kafka_DeleteConsumerGroupOffsets_destroy_array(
+ rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
+ size_t del_grpoffsets_cnt) {
+ size_t i;
+ for (i = 0; i < del_grpoffsets_cnt; i++)
+ rd_kafka_DeleteConsumerGroupOffsets_destroy(del_grpoffsets[i]);
+}
+
+
+/**
+ * @brief Allocate a new DeleteGroup and make a copy of \p src
+ */
+static rd_kafka_DeleteConsumerGroupOffsets_t *
+rd_kafka_DeleteConsumerGroupOffsets_copy(
+ const rd_kafka_DeleteConsumerGroupOffsets_t *src) {
+ return rd_kafka_DeleteConsumerGroupOffsets_new(src->group,
+ src->partitions);
+}
+
+
+/**
+ * @brief Parse OffsetDeleteResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_OffsetDeleteResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_op_t *rko_result;
+ int16_t ErrorCode;
+ rd_kafka_topic_partition_list_t *partitions = NULL;
+ const rd_kafka_DeleteConsumerGroupOffsets_t *del_grpoffsets;
+
+ rd_kafka_buf_read_i16(reply, &ErrorCode);
+ if (ErrorCode) {
+ rd_snprintf(errstr, errstr_size,
+ "OffsetDelete response error: %s",
+ rd_kafka_err2str(ErrorCode));
+ return ErrorCode;
+ }
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ partitions = rd_kafka_buf_read_topic_partitions(reply, 16, fields);
+ if (!partitions) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to parse OffsetDeleteResponse partitions");
+ return RD_KAFKA_RESP_ERR__BAD_MSG;
+ }
+
+
+ /* Create result op and group_result_t */
+ rko_result = rd_kafka_admin_result_new(rko_req);
+ del_grpoffsets = rd_list_elem(&rko_result->rko_u.admin_result.args, 0);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, 1,
+ rd_kafka_group_result_free);
+ rd_list_add(&rko_result->rko_u.admin_result.results,
+ rd_kafka_group_result_new(del_grpoffsets->group, -1,
+ partitions, NULL));
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ rd_snprintf(errstr, errstr_size,
+ "OffsetDelete response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+ return reply->rkbuf_err;
+}
+
+
+void rd_kafka_DeleteConsumerGroupOffsets(
+ rd_kafka_t *rk,
+ rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
+ size_t del_grpoffsets_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_OffsetDeleteRequest,
+ rd_kafka_OffsetDeleteResponse_parse,
+ };
+ rd_kafka_op_t *rko;
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS,
+ RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, &cbs, options,
+ rkqu->rkqu_q);
+
+ if (del_grpoffsets_cnt != 1) {
+ /* For simplicity we only support one single group for now */
+ rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Exactly one "
+ "DeleteConsumerGroupOffsets must "
+ "be passed");
+ rd_kafka_admin_common_worker_destroy(rk, rko,
+ rd_true /*destroy*/);
+ return;
+ }
+
+
+ rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR;
+ rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
+ rko->rko_u.admin_request.coordkey = rd_strdup(del_grpoffsets[0]->group);
+
+ /* Store copy of group on request so the group name can be reached
+ * from the response parser. */
+ rd_list_init(&rko->rko_u.admin_request.args, 1,
+ rd_kafka_DeleteConsumerGroupOffsets_free);
+ rd_list_add(
+ &rko->rko_u.admin_request.args,
+ rd_kafka_DeleteConsumerGroupOffsets_copy(del_grpoffsets[0]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+
+/**
+ * @brief Get an array of group results from a DeleteGroups result.
+ *
+ * The returned \p groups life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_group_result_t **
+rd_kafka_DeleteConsumerGroupOffsets_result_groups(
+ const rd_kafka_DeleteConsumerGroupOffsets_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
+ cntp);
+}
+
+void rd_kafka_DeleteConsumerGroupOffsets(
+ rd_kafka_t *rk,
+ rd_kafka_DeleteConsumerGroupOffsets_t **del_grpoffsets,
+ size_t del_grpoffsets_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu);
+
+/**@}*/
+/**
+ * @name CreateAcls
+ * @{
+ *
+ *
+ *
+ */
+
+const char *rd_kafka_AclOperation_name(rd_kafka_AclOperation_t operation) {
+ static const char *names[] = {"UNKNOWN",
+ "ANY",
+ "ALL",
+ "READ",
+ "WRITE",
+ "CREATE",
+ "DELETE",
+ "ALTER",
+ "DESCRIBE",
+ "CLUSTER_ACTION",
+ "DESCRIBE_CONFIGS",
+ "ALTER_CONFIGS",
+ "IDEMPOTENT_WRITE"};
+
+ if ((unsigned int)operation >=
+ (unsigned int)RD_KAFKA_ACL_OPERATION__CNT)
+ return "UNSUPPORTED";
+
+ return names[operation];
+}
+
+const char *
+rd_kafka_AclPermissionType_name(rd_kafka_AclPermissionType_t permission_type) {
+ static const char *names[] = {"UNKNOWN", "ANY", "DENY", "ALLOW"};
+
+ if ((unsigned int)permission_type >=
+ (unsigned int)RD_KAFKA_ACL_PERMISSION_TYPE__CNT)
+ return "UNSUPPORTED";
+
+ return names[permission_type];
+}
+
+static rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_new0(rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ rd_kafka_resp_err_t err,
+ const char *errstr) {
+ rd_kafka_AclBinding_t *acl_binding;
+
+ acl_binding = rd_calloc(1, sizeof(*acl_binding));
+ acl_binding->name = name != NULL ? rd_strdup(name) : NULL;
+ acl_binding->principal =
+ principal != NULL ? rd_strdup(principal) : NULL;
+ acl_binding->host = host != NULL ? rd_strdup(host) : NULL;
+ acl_binding->restype = restype;
+ acl_binding->resource_pattern_type = resource_pattern_type;
+ acl_binding->operation = operation;
+ acl_binding->permission_type = permission_type;
+ if (err)
+ acl_binding->error = rd_kafka_error_new(err, "%s", errstr);
+
+ return acl_binding;
+}
+
+rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_new(rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size) {
+ if (!name) {
+ rd_snprintf(errstr, errstr_size, "Invalid resource name");
+ return NULL;
+ }
+ if (!principal) {
+ rd_snprintf(errstr, errstr_size, "Invalid principal");
+ return NULL;
+ }
+ if (!host) {
+ rd_snprintf(errstr, errstr_size, "Invalid host");
+ return NULL;
+ }
+
+ if (restype == RD_KAFKA_RESOURCE_ANY ||
+ restype <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ restype >= RD_KAFKA_RESOURCE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid resource type");
+ return NULL;
+ }
+
+ if (resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_ANY ||
+ resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_MATCH ||
+ resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid resource pattern type");
+ return NULL;
+ }
+
+ if (operation == RD_KAFKA_ACL_OPERATION_ANY ||
+ operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid operation");
+ return NULL;
+ }
+
+ if (permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ANY ||
+ permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid permission type");
+ return NULL;
+ }
+
+ return rd_kafka_AclBinding_new0(
+ restype, name, resource_pattern_type, principal, host, operation,
+ permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
+}
+
+rd_kafka_AclBindingFilter_t *rd_kafka_AclBindingFilter_new(
+ rd_kafka_ResourceType_t restype,
+ const char *name,
+ rd_kafka_ResourcePatternType_t resource_pattern_type,
+ const char *principal,
+ const char *host,
+ rd_kafka_AclOperation_t operation,
+ rd_kafka_AclPermissionType_t permission_type,
+ char *errstr,
+ size_t errstr_size) {
+
+
+ if (restype <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ restype >= RD_KAFKA_RESOURCE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid resource type");
+ return NULL;
+ }
+
+ if (resource_pattern_type <= RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid resource pattern type");
+ return NULL;
+ }
+
+ if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid operation");
+ return NULL;
+ }
+
+ if (permission_type <= RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >= RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid permission type");
+ return NULL;
+ }
+
+ return rd_kafka_AclBinding_new0(
+ restype, name, resource_pattern_type, principal, host, operation,
+ permission_type, RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
+}
+
+rd_kafka_ResourceType_t
+rd_kafka_AclBinding_restype(const rd_kafka_AclBinding_t *acl) {
+ return acl->restype;
+}
+
+const char *rd_kafka_AclBinding_name(const rd_kafka_AclBinding_t *acl) {
+ return acl->name;
+}
+
+const char *rd_kafka_AclBinding_principal(const rd_kafka_AclBinding_t *acl) {
+ return acl->principal;
+}
+
+const char *rd_kafka_AclBinding_host(const rd_kafka_AclBinding_t *acl) {
+ return acl->host;
+}
+
+rd_kafka_AclOperation_t
+rd_kafka_AclBinding_operation(const rd_kafka_AclBinding_t *acl) {
+ return acl->operation;
+}
+
+rd_kafka_AclPermissionType_t
+rd_kafka_AclBinding_permission_type(const rd_kafka_AclBinding_t *acl) {
+ return acl->permission_type;
+}
+
+rd_kafka_ResourcePatternType_t
+rd_kafka_AclBinding_resource_pattern_type(const rd_kafka_AclBinding_t *acl) {
+ return acl->resource_pattern_type;
+}
+
+const rd_kafka_error_t *
+rd_kafka_AclBinding_error(const rd_kafka_AclBinding_t *acl) {
+ return acl->error;
+}
+
+/**
+ * @brief Allocate a new AclBinding and make a copy of \p src
+ */
+static rd_kafka_AclBinding_t *
+rd_kafka_AclBinding_copy(const rd_kafka_AclBinding_t *src) {
+ rd_kafka_AclBinding_t *dst;
+
+ dst = rd_kafka_AclBinding_new(
+ src->restype, src->name, src->resource_pattern_type, src->principal,
+ src->host, src->operation, src->permission_type, NULL, 0);
+ rd_assert(dst);
+ return dst;
+}
+
+/**
+ * @brief Allocate a new AclBindingFilter and make a copy of \p src
+ */
+static rd_kafka_AclBindingFilter_t *
+rd_kafka_AclBindingFilter_copy(const rd_kafka_AclBindingFilter_t *src) {
+ rd_kafka_AclBindingFilter_t *dst;
+
+ dst = rd_kafka_AclBindingFilter_new(
+ src->restype, src->name, src->resource_pattern_type, src->principal,
+ src->host, src->operation, src->permission_type, NULL, 0);
+ rd_assert(dst);
+ return dst;
+}
+
+void rd_kafka_AclBinding_destroy(rd_kafka_AclBinding_t *acl_binding) {
+ if (acl_binding->name)
+ rd_free(acl_binding->name);
+ if (acl_binding->principal)
+ rd_free(acl_binding->principal);
+ if (acl_binding->host)
+ rd_free(acl_binding->host);
+ if (acl_binding->error)
+ rd_kafka_error_destroy(acl_binding->error);
+ rd_free(acl_binding);
+}
+
+static void rd_kafka_AclBinding_free(void *ptr) {
+ rd_kafka_AclBinding_destroy(ptr);
+}
+
+
+void rd_kafka_AclBinding_destroy_array(rd_kafka_AclBinding_t **acl_bindings,
+ size_t acl_bindings_cnt) {
+ size_t i;
+ for (i = 0; i < acl_bindings_cnt; i++)
+ rd_kafka_AclBinding_destroy(acl_bindings[i]);
+}
+
+/**
+ * @brief Parse CreateAclsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_CreateAclsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t acl_cnt;
+ int i;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+ rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000);
+
+ if (acl_cnt != rd_list_cnt(&rko_req->rko_u.admin_request.args))
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Received %" PRId32
+ " acls in response, but %d were requested",
+ acl_cnt, rd_list_cnt(&rko_req->rko_u.admin_request.args));
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, acl_cnt,
+ rd_kafka_acl_result_free);
+
+ for (i = 0; i < (int)acl_cnt; i++) {
+ int16_t error_code;
+ rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
+ rd_kafka_acl_result_t *acl_res;
+ char *errstr = NULL;
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ errstr = (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
+ }
+
+ acl_res = rd_kafka_acl_result_new(
+ error_code ? rd_kafka_error_new(error_code, "%s", errstr)
+ : NULL);
+
+ rd_list_set(&rko_result->rko_u.admin_result.results, i,
+ acl_res);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "CreateAcls response protocol parse failure: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+void rd_kafka_CreateAcls(rd_kafka_t *rk,
+ rd_kafka_AclBinding_t **new_acls,
+ size_t new_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_CreateAclsRequest, rd_kafka_CreateAclsResponse_parse};
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_CREATEACLS,
+ RD_KAFKA_EVENT_CREATEACLS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)new_acls_cnt,
+ rd_kafka_AclBinding_free);
+
+ for (i = 0; i < new_acls_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_AclBinding_copy(new_acls[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+/**
+ * @brief Get an array of rd_kafka_acl_result_t from a CreateAcls result.
+ *
+ * The returned \p rd_kafka_acl_result_t life-time is the same as the \p result
+ * object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_acl_result_t **
+rd_kafka_CreateAcls_result_acls(const rd_kafka_CreateAcls_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_acl_results(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+/**@}*/
+
+/**
+ * @name DescribeAcls
+ * @{
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Parse DescribeAclsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DescribeAclsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_op_t *rko_result = NULL;
+ int32_t res_cnt;
+ int i;
+ int j;
+ rd_kafka_AclBinding_t *acl = NULL;
+ int16_t error_code;
+ rd_kafkap_str_t error_msg;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ errstr = (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
+ }
+
+ /* #resources */
+ rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000);
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
+ rd_kafka_AclBinding_free);
+
+ for (i = 0; i < (int)res_cnt; i++) {
+ int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ rd_kafkap_str_t kres_name;
+ char *res_name;
+ int8_t resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ int32_t acl_cnt;
+
+ rd_kafka_buf_read_i8(reply, &res_type);
+ rd_kafka_buf_read_str(reply, &kres_name);
+ RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
+
+ if (rd_kafka_buf_ApiVersion(reply) >= 1) {
+ rd_kafka_buf_read_i8(reply, &resource_pattern_type);
+ }
+
+ if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ res_type >= RD_KAFKA_RESOURCE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned unknown "
+ "resource type %d",
+ res_type);
+ res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ }
+ if (resource_pattern_type <=
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >=
+ RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING, "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned unknown "
+ "resource pattern type %d",
+ resource_pattern_type);
+ resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN;
+ }
+
+ /* #resources */
+ rd_kafka_buf_read_arraycnt(reply, &acl_cnt, 100000);
+
+ for (j = 0; j < (int)acl_cnt; j++) {
+ rd_kafkap_str_t kprincipal;
+ rd_kafkap_str_t khost;
+ int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ int8_t permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ char *principal;
+ char *host;
+
+ rd_kafka_buf_read_str(reply, &kprincipal);
+ rd_kafka_buf_read_str(reply, &khost);
+ rd_kafka_buf_read_i8(reply, &operation);
+ rd_kafka_buf_read_i8(reply, &permission_type);
+ RD_KAFKAP_STR_DUPA(&principal, &kprincipal);
+ RD_KAFKAP_STR_DUPA(&host, &khost);
+
+ if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned "
+ "unknown acl operation %d",
+ operation);
+ operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ }
+ if (permission_type <=
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >=
+ RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DESCRIBEACLSRESPONSE",
+ "DescribeAclsResponse returned "
+ "unknown acl permission type %d",
+ permission_type);
+ permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ }
+
+ acl = rd_kafka_AclBinding_new0(
+ res_type, res_name, resource_pattern_type,
+ principal, host, operation, permission_type,
+ RD_KAFKA_RESP_ERR_NO_ERROR, NULL);
+
+ rd_list_add(&rko_result->rko_u.admin_result.results,
+ acl);
+ }
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "DescribeAcls response protocol parse failure: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+void rd_kafka_DescribeAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t *acl_filter,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DescribeAclsRequest,
+ rd_kafka_DescribeAclsResponse_parse,
+ };
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DESCRIBEACLS,
+ RD_KAFKA_EVENT_DESCRIBEACLS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, 1,
+ rd_kafka_AclBinding_free);
+
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_AclBindingFilter_copy(acl_filter));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+/**
+ * @brief Get an array of rd_kafka_AclBinding_t from a DescribeAcls result.
+ *
+ * The returned \p rd_kafka_AclBinding_t life-time is the same as the \p result
+ * object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_AclBinding_t **
+rd_kafka_DescribeAcls_result_acls(const rd_kafka_DescribeAcls_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_acl_bindings(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+/**@}*/
+
+/**
+ * @name DeleteAcls
+ * @{
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Allocate a new DeleteAcls result response with the given
+ * \p err error code and \p errstr error message.
+ */
+const rd_kafka_DeleteAcls_result_response_t *
+rd_kafka_DeleteAcls_result_response_new(rd_kafka_resp_err_t err, char *errstr) {
+ rd_kafka_DeleteAcls_result_response_t *result_response;
+
+ result_response = rd_calloc(1, sizeof(*result_response));
+ if (err)
+ result_response->error = rd_kafka_error_new(
+ err, "%s", errstr ? errstr : rd_kafka_err2str(err));
+
+ /* List of int32 lists */
+ rd_list_init(&result_response->matching_acls, 0,
+ rd_kafka_AclBinding_free);
+
+ return result_response;
+}
+
+static void rd_kafka_DeleteAcls_result_response_destroy(
+ rd_kafka_DeleteAcls_result_response_t *resp) {
+ if (resp->error)
+ rd_kafka_error_destroy(resp->error);
+ rd_list_destroy(&resp->matching_acls);
+ rd_free(resp);
+}
+
+static void rd_kafka_DeleteAcls_result_response_free(void *ptr) {
+ rd_kafka_DeleteAcls_result_response_destroy(
+ (rd_kafka_DeleteAcls_result_response_t *)ptr);
+}
+
+/**
+ * @brief Get an array of rd_kafka_AclBinding_t from a DescribeAcls result.
+ *
+ * The returned \p rd_kafka_AclBinding_t life-time is the same as the \p result
+ * object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_DeleteAcls_result_response_t **
+rd_kafka_DeleteAcls_result_responses(const rd_kafka_DeleteAcls_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_delete_acl_result_responses(
+ (const rd_kafka_op_t *)result, cntp);
+}
+
+const rd_kafka_error_t *rd_kafka_DeleteAcls_result_response_error(
+ const rd_kafka_DeleteAcls_result_response_t *result_response) {
+ return result_response->error;
+}
+
+const rd_kafka_AclBinding_t **rd_kafka_DeleteAcls_result_response_matching_acls(
+ const rd_kafka_DeleteAcls_result_response_t *result_response,
+ size_t *matching_acls_cntp) {
+ *matching_acls_cntp = result_response->matching_acls.rl_cnt;
+ return (const rd_kafka_AclBinding_t **)
+ result_response->matching_acls.rl_elems;
+}
+
+/**
+ * @brief Parse DeleteAclsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DeleteAclsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_op_t *rko_result = NULL;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int32_t res_cnt;
+ int i;
+ int j;
+
+ rd_kafka_buf_read_throttle_time(reply);
+
+ /* #responses */
+ rd_kafka_buf_read_arraycnt(reply, &res_cnt, 100000);
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+
+ rd_list_init(&rko_result->rko_u.admin_result.results, res_cnt,
+ rd_kafka_DeleteAcls_result_response_free);
+
+ for (i = 0; i < (int)res_cnt; i++) {
+ int16_t error_code;
+ rd_kafkap_str_t error_msg = RD_KAFKAP_STR_INITIALIZER;
+ char *errstr = NULL;
+ const rd_kafka_DeleteAcls_result_response_t *result_response;
+ int32_t matching_acls_cnt;
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &error_msg);
+
+ if (error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&error_msg) ||
+ RD_KAFKAP_STR_LEN(&error_msg) == 0)
+ errstr = (char *)rd_kafka_err2str(error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&errstr, &error_msg);
+ }
+
+ result_response =
+ rd_kafka_DeleteAcls_result_response_new(error_code, errstr);
+
+ /* #maching_acls */
+ rd_kafka_buf_read_arraycnt(reply, &matching_acls_cnt, 100000);
+ for (j = 0; j < (int)matching_acls_cnt; j++) {
+ int16_t acl_error_code;
+ int8_t res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ rd_kafkap_str_t acl_error_msg =
+ RD_KAFKAP_STR_INITIALIZER;
+ rd_kafkap_str_t kres_name;
+ rd_kafkap_str_t khost;
+ rd_kafkap_str_t kprincipal;
+ int8_t resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ int8_t operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ int8_t permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ rd_kafka_AclBinding_t *matching_acl;
+ char *acl_errstr = NULL;
+ char *res_name;
+ char *principal;
+ char *host;
+
+ rd_kafka_buf_read_i16(reply, &acl_error_code);
+ rd_kafka_buf_read_str(reply, &acl_error_msg);
+ if (acl_error_code) {
+ if (RD_KAFKAP_STR_IS_NULL(&acl_error_msg) ||
+ RD_KAFKAP_STR_LEN(&acl_error_msg) == 0)
+ acl_errstr = (char *)rd_kafka_err2str(
+ acl_error_code);
+ else
+ RD_KAFKAP_STR_DUPA(&acl_errstr,
+ &acl_error_msg);
+ }
+
+ rd_kafka_buf_read_i8(reply, &res_type);
+ rd_kafka_buf_read_str(reply, &kres_name);
+
+ if (rd_kafka_buf_ApiVersion(reply) >= 1) {
+ rd_kafka_buf_read_i8(reply,
+ &resource_pattern_type);
+ }
+
+ rd_kafka_buf_read_str(reply, &kprincipal);
+ rd_kafka_buf_read_str(reply, &khost);
+ rd_kafka_buf_read_i8(reply, &operation);
+ rd_kafka_buf_read_i8(reply, &permission_type);
+ RD_KAFKAP_STR_DUPA(&res_name, &kres_name);
+ RD_KAFKAP_STR_DUPA(&principal, &kprincipal);
+ RD_KAFKAP_STR_DUPA(&host, &khost);
+
+ if (res_type <= RD_KAFKA_RESOURCE_UNKNOWN ||
+ res_type >= RD_KAFKA_RESOURCE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown resource type %d",
+ res_type);
+ res_type = RD_KAFKA_RESOURCE_UNKNOWN;
+ }
+ if (resource_pattern_type <=
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN ||
+ resource_pattern_type >=
+ RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown resource pattern type %d",
+ resource_pattern_type);
+ resource_pattern_type =
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN;
+ }
+ if (operation <= RD_KAFKA_ACL_OPERATION_UNKNOWN ||
+ operation >= RD_KAFKA_ACL_OPERATION__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown acl operation %d",
+ operation);
+ operation = RD_KAFKA_ACL_OPERATION_UNKNOWN;
+ }
+ if (permission_type <=
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN ||
+ permission_type >=
+ RD_KAFKA_ACL_PERMISSION_TYPE__CNT) {
+ rd_rkb_log(rkb, LOG_WARNING,
+ "DELETEACLSRESPONSE",
+ "DeleteAclsResponse returned "
+ "unknown acl permission type %d",
+ permission_type);
+ permission_type =
+ RD_KAFKA_ACL_PERMISSION_TYPE_UNKNOWN;
+ }
+
+ matching_acl = rd_kafka_AclBinding_new0(
+ res_type, res_name, resource_pattern_type,
+ principal, host, operation, permission_type,
+ acl_error_code, acl_errstr);
+
+ rd_list_add(
+ (rd_list_t *)&result_response->matching_acls,
+ (void *)matching_acl);
+ }
+
+ rd_list_add(&rko_result->rko_u.admin_result.results,
+ (void *)result_response);
+ }
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(errstr, errstr_size,
+ "DeleteAcls response protocol parse failure: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+
+void rd_kafka_DeleteAcls(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t **del_acls,
+ size_t del_acls_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ size_t i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_DeleteAclsRequest, rd_kafka_DeleteAclsResponse_parse};
+
+ rko = rd_kafka_admin_request_op_new(rk, RD_KAFKA_OP_DELETEACLS,
+ RD_KAFKA_EVENT_DELETEACLS_RESULT,
+ &cbs, options, rkqu->rkqu_q);
+
+ rd_list_init(&rko->rko_u.admin_request.args, (int)del_acls_cnt,
+ rd_kafka_AclBinding_free);
+
+ for (i = 0; i < del_acls_cnt; i++)
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_AclBindingFilter_copy(del_acls[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+/**@}*/
+
+/**
+ * @name Alter consumer group offsets (committed offsets)
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_AlterConsumerGroupOffsets_t *rd_kafka_AlterConsumerGroupOffsets_new(
+ const char *group_id,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ rd_assert(group_id && partitions);
+
+ size_t tsize = strlen(group_id) + 1;
+ rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets;
+
+ /* Single allocation */
+ alter_grpoffsets = rd_malloc(sizeof(*alter_grpoffsets) + tsize);
+ alter_grpoffsets->group_id = alter_grpoffsets->data;
+ memcpy(alter_grpoffsets->group_id, group_id, tsize);
+ alter_grpoffsets->partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+
+ return alter_grpoffsets;
+}
+
+void rd_kafka_AlterConsumerGroupOffsets_destroy(
+ rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets) {
+ rd_kafka_topic_partition_list_destroy(alter_grpoffsets->partitions);
+ rd_free(alter_grpoffsets);
+}
+
+static void rd_kafka_AlterConsumerGroupOffsets_free(void *ptr) {
+ rd_kafka_AlterConsumerGroupOffsets_destroy(ptr);
+}
+
+void rd_kafka_AlterConsumerGroupOffsets_destroy_array(
+ rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
+ size_t alter_grpoffsets_cnt) {
+ size_t i;
+ for (i = 0; i < alter_grpoffsets_cnt; i++)
+ rd_kafka_AlterConsumerGroupOffsets_destroy(alter_grpoffsets[i]);
+}
+
+/**
+ * @brief Allocate a new AlterGroup and make a copy of \p src
+ */
+static rd_kafka_AlterConsumerGroupOffsets_t *
+rd_kafka_AlterConsumerGroupOffsets_copy(
+ const rd_kafka_AlterConsumerGroupOffsets_t *src) {
+ return rd_kafka_AlterConsumerGroupOffsets_new(src->group_id,
+ src->partitions);
+}
+
+/**
+ * @brief Send a OffsetCommitRequest to \p rkb with the partitions
+ * in alter_grpoffsets (AlterConsumerGroupOffsets_t*) using
+ * \p options.
+ *
+ */
+static rd_kafka_resp_err_t rd_kafka_AlterConsumerGroupOffsetsRequest(
+ rd_kafka_broker_t *rkb,
+ /* (rd_kafka_AlterConsumerGroupOffsets_t*) */
+ const rd_list_t *alter_grpoffsets,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ const rd_kafka_AlterConsumerGroupOffsets_t *grpoffsets =
+ rd_list_elem(alter_grpoffsets, 0);
+
+ rd_assert(rd_list_cnt(alter_grpoffsets) == 1);
+
+ rd_kafka_topic_partition_list_t *offsets = grpoffsets->partitions;
+ rd_kafka_consumer_group_metadata_t *cgmetadata =
+ rd_kafka_consumer_group_metadata_new(grpoffsets->group_id);
+
+ int ret = rd_kafka_OffsetCommitRequest(
+ rkb, cgmetadata, offsets, replyq, resp_cb, opaque,
+ "rd_kafka_AlterConsumerGroupOffsetsRequest");
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ if (ret == 0) {
+ rd_snprintf(errstr, errstr_size,
+ "At least one topic-partition offset must "
+ "be >= 0");
+ return RD_KAFKA_RESP_ERR__NO_OFFSET;
+ }
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parse OffsetCommitResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_AlterConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_t *rk;
+ rd_kafka_broker_t *rkb;
+ rd_kafka_op_t *rko_result;
+ rd_kafka_topic_partition_list_t *partitions = NULL;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ const rd_kafka_AlterConsumerGroupOffsets_t *alter_grpoffsets =
+ rd_list_elem(&rko_req->rko_u.admin_request.args, 0);
+ partitions =
+ rd_kafka_topic_partition_list_copy(alter_grpoffsets->partitions);
+
+ rk = rko_req->rko_rk;
+ rkb = reply->rkbuf_rkb;
+ err = rd_kafka_handle_OffsetCommit(rk, rkb, err, reply, NULL,
+ partitions, rd_true);
+
+ /* Create result op and group_result_t */
+ rko_result = rd_kafka_admin_result_new(rko_req);
+ rd_list_init(&rko_result->rko_u.admin_result.results, 1,
+ rd_kafka_group_result_free);
+ rd_list_add(&rko_result->rko_u.admin_result.results,
+ rd_kafka_group_result_new(alter_grpoffsets->group_id, -1,
+ partitions, NULL));
+ rd_kafka_topic_partition_list_destroy(partitions);
+ *rko_resultp = rko_result;
+
+ if (reply->rkbuf_err)
+ rd_snprintf(
+ errstr, errstr_size,
+ "AlterConsumerGroupOffset response parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+void rd_kafka_AlterConsumerGroupOffsets(
+ rd_kafka_t *rk,
+ rd_kafka_AlterConsumerGroupOffsets_t **alter_grpoffsets,
+ size_t alter_grpoffsets_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ int i;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_AlterConsumerGroupOffsetsRequest,
+ rd_kafka_AlterConsumerGroupOffsetsResponse_parse,
+ };
+ rd_kafka_op_t *rko;
+ rd_kafka_topic_partition_list_t *copied_offsets;
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS,
+ RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT, &cbs, options,
+ rkqu->rkqu_q);
+
+ if (alter_grpoffsets_cnt != 1) {
+ /* For simplicity we only support one single group for now */
+ rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Exactly one "
+ "AlterConsumerGroupOffsets must "
+ "be passed");
+ goto fail;
+ }
+
+ if (alter_grpoffsets[0]->partitions->cnt == 0) {
+ rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Non-empty topic partition list "
+ "must be present");
+ goto fail;
+ }
+
+ for (i = 0; i < alter_grpoffsets[0]->partitions->cnt; i++) {
+ if (alter_grpoffsets[0]->partitions->elems[i].offset < 0) {
+ rd_kafka_admin_result_fail(
+ rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "All topic-partition offsets "
+ "must be >= 0");
+ goto fail;
+ }
+ }
+
+ /* TODO: add group id duplication check if in future more than one
+ * AlterConsumerGroupOffsets can be passed */
+
+ /* Copy offsets list for checking duplicated */
+ copied_offsets =
+ rd_kafka_topic_partition_list_copy(alter_grpoffsets[0]->partitions);
+ if (rd_kafka_topic_partition_list_has_duplicates(
+ copied_offsets, rd_false /*check partition*/)) {
+ rd_kafka_topic_partition_list_destroy(copied_offsets);
+ rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Duplicate partitions not allowed");
+ goto fail;
+ }
+ rd_kafka_topic_partition_list_destroy(copied_offsets);
+
+ rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR;
+ rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
+ rko->rko_u.admin_request.coordkey =
+ rd_strdup(alter_grpoffsets[0]->group_id);
+
+ /* Store copy of group on request so the group name can be reached
+ * from the response parser. */
+ rd_list_init(&rko->rko_u.admin_request.args, 1,
+ rd_kafka_AlterConsumerGroupOffsets_free);
+ rd_list_add(&rko->rko_u.admin_request.args,
+ (void *)rd_kafka_AlterConsumerGroupOffsets_copy(
+ alter_grpoffsets[0]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+ return;
+fail:
+ rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/);
+}
+
+
+/**
+ * @brief Get an array of group results from a AlterGroups result.
+ *
+ * The returned \p groups life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_group_result_t **
+rd_kafka_AlterConsumerGroupOffsets_result_groups(
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
+ cntp);
+}
+
+/**@}*/
+
+
+/**@}*/
+
+/**
+ * @name List consumer group offsets (committed offsets)
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+rd_kafka_ListConsumerGroupOffsets_t *rd_kafka_ListConsumerGroupOffsets_new(
+ const char *group_id,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ size_t tsize = strlen(group_id) + 1;
+ rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets;
+
+ rd_assert(group_id);
+
+ /* Single allocation */
+ list_grpoffsets = rd_calloc(1, sizeof(*list_grpoffsets) + tsize);
+ list_grpoffsets->group_id = list_grpoffsets->data;
+ memcpy(list_grpoffsets->group_id, group_id, tsize);
+ if (partitions) {
+ list_grpoffsets->partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+ }
+
+ return list_grpoffsets;
+}
+
+void rd_kafka_ListConsumerGroupOffsets_destroy(
+ rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets) {
+ if (list_grpoffsets->partitions != NULL) {
+ rd_kafka_topic_partition_list_destroy(
+ list_grpoffsets->partitions);
+ }
+ rd_free(list_grpoffsets);
+}
+
+static void rd_kafka_ListConsumerGroupOffsets_free(void *ptr) {
+ rd_kafka_ListConsumerGroupOffsets_destroy(ptr);
+}
+
+void rd_kafka_ListConsumerGroupOffsets_destroy_array(
+ rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
+ size_t list_grpoffsets_cnt) {
+ size_t i;
+ for (i = 0; i < list_grpoffsets_cnt; i++)
+ rd_kafka_ListConsumerGroupOffsets_destroy(list_grpoffsets[i]);
+}
+
+/**
+ * @brief Allocate a new ListGroup and make a copy of \p src
+ */
+static rd_kafka_ListConsumerGroupOffsets_t *
+rd_kafka_ListConsumerGroupOffsets_copy(
+ const rd_kafka_ListConsumerGroupOffsets_t *src) {
+ return rd_kafka_ListConsumerGroupOffsets_new(src->group_id,
+ src->partitions);
+}
+
+/**
+ * @brief Send a OffsetFetchRequest to \p rkb with the partitions
+ * in list_grpoffsets (ListConsumerGroupOffsets_t*) using
+ * \p options.
+ *
+ */
+static rd_kafka_resp_err_t rd_kafka_ListConsumerGroupOffsetsRequest(
+ rd_kafka_broker_t *rkb,
+ /* (rd_kafka_ListConsumerGroupOffsets_t*) */
+ const rd_list_t *list_grpoffsets,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ int op_timeout;
+ rd_bool_t require_stable_offsets;
+ const rd_kafka_ListConsumerGroupOffsets_t *grpoffsets =
+ rd_list_elem(list_grpoffsets, 0);
+
+ rd_assert(rd_list_cnt(list_grpoffsets) == 1);
+
+ op_timeout = rd_kafka_confval_get_int(&options->request_timeout);
+ require_stable_offsets =
+ rd_kafka_confval_get_int(&options->require_stable_offsets);
+ rd_kafka_OffsetFetchRequest(
+ rkb, grpoffsets->group_id, grpoffsets->partitions,
+ require_stable_offsets, op_timeout, replyq, resp_cb, opaque);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parse OffsetFetchResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_ListConsumerGroupOffsetsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const rd_kafka_ListConsumerGroupOffsets_t *list_grpoffsets =
+ rd_list_elem(&rko_req->rko_u.admin_request.args, 0);
+ rd_kafka_t *rk;
+ rd_kafka_broker_t *rkb;
+ rd_kafka_topic_partition_list_t *offsets = NULL;
+ rd_kafka_op_t *rko_result;
+ rd_kafka_resp_err_t err;
+
+ rk = rko_req->rko_rk;
+ rkb = reply->rkbuf_rkb;
+ err = rd_kafka_handle_OffsetFetch(rk, rkb, RD_KAFKA_RESP_ERR_NO_ERROR,
+ reply, NULL, &offsets, rd_false,
+ rd_true, rd_false);
+
+ if (unlikely(err != RD_KAFKA_RESP_ERR_NO_ERROR)) {
+ reply->rkbuf_err = err;
+ goto err;
+ }
+
+ /* Create result op and group_result_t */
+ rko_result = rd_kafka_admin_result_new(rko_req);
+ rd_list_init(&rko_result->rko_u.admin_result.results, 1,
+ rd_kafka_group_result_free);
+ rd_list_add(&rko_result->rko_u.admin_result.results,
+ rd_kafka_group_result_new(list_grpoffsets->group_id, -1,
+ offsets, NULL));
+
+ if (likely(offsets != NULL))
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ *rko_resultp = rko_result;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+err:
+ if (likely(offsets != NULL))
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ rd_snprintf(errstr, errstr_size,
+ "ListConsumerGroupOffsetsResponse response failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+void rd_kafka_ListConsumerGroupOffsets(
+ rd_kafka_t *rk,
+ rd_kafka_ListConsumerGroupOffsets_t **list_grpoffsets,
+ size_t list_grpoffsets_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_ListConsumerGroupOffsetsRequest,
+ rd_kafka_ListConsumerGroupOffsetsResponse_parse,
+ };
+ rd_kafka_op_t *rko;
+ rd_kafka_topic_partition_list_t *copied_offsets;
+
+ rd_assert(rkqu);
+
+ rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS,
+ RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT, &cbs, options,
+ rkqu->rkqu_q);
+
+ if (list_grpoffsets_cnt != 1) {
+ /* For simplicity we only support one single group for now */
+ rd_kafka_admin_result_fail(rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Exactly one "
+ "ListConsumerGroupOffsets must "
+ "be passed");
+ goto fail;
+ }
+
+ if (list_grpoffsets[0]->partitions != NULL &&
+ list_grpoffsets[0]->partitions->cnt == 0) {
+ /* Either pass NULL for all the partitions or a non-empty list
+ */
+ rd_kafka_admin_result_fail(
+ rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "NULL or "
+ "non-empty topic partition list must "
+ "be passed");
+ goto fail;
+ }
+
+ /* TODO: add group id duplication check when implementing KIP-709 */
+ if (list_grpoffsets[0]->partitions != NULL) {
+ /* Copy offsets list for checking duplicated */
+ copied_offsets = rd_kafka_topic_partition_list_copy(
+ list_grpoffsets[0]->partitions);
+ if (rd_kafka_topic_partition_list_has_duplicates(
+ copied_offsets, rd_false /*check partition*/)) {
+ rd_kafka_topic_partition_list_destroy(copied_offsets);
+ rd_kafka_admin_result_fail(
+ rko, RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Duplicate partitions not allowed");
+ goto fail;
+ }
+ rd_kafka_topic_partition_list_destroy(copied_offsets);
+ }
+
+ rko->rko_u.admin_request.broker_id = RD_KAFKA_ADMIN_TARGET_COORDINATOR;
+ rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
+ rko->rko_u.admin_request.coordkey =
+ rd_strdup(list_grpoffsets[0]->group_id);
+
+ /* Store copy of group on request so the group name can be reached
+ * from the response parser. */
+ rd_list_init(&rko->rko_u.admin_request.args, 1,
+ rd_kafka_ListConsumerGroupOffsets_free);
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_kafka_ListConsumerGroupOffsets_copy(list_grpoffsets[0]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+ return;
+fail:
+ rd_kafka_admin_common_worker_destroy(rk, rko, rd_true /*destroy*/);
+}
+
+
+/**
+ * @brief Get an array of group results from a ListConsumerGroups result.
+ *
+ * The returned \p groups life-time is the same as the \p result object.
+ * @param cntp is updated to the number of elements in the array.
+ */
+const rd_kafka_group_result_t **rd_kafka_ListConsumerGroupOffsets_result_groups(
+ const rd_kafka_ListConsumerGroupOffsets_result_t *result,
+ size_t *cntp) {
+ return rd_kafka_admin_result_ret_groups((const rd_kafka_op_t *)result,
+ cntp);
+}
+
+/**@}*/
+
+/**
+ * @name List consumer groups
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+#define CONSUMER_PROTOCOL_TYPE "consumer"
+
+/**
+ * @brief Create a new ConsumerGroupListing object.
+ *
+ * @param group_id The group id.
+ * @param is_simple_consumer_group Is the group simple?
+ * @param state Group state.
+ */
+static rd_kafka_ConsumerGroupListing_t *
+rd_kafka_ConsumerGroupListing_new(const char *group_id,
+ rd_bool_t is_simple_consumer_group,
+ rd_kafka_consumer_group_state_t state) {
+ rd_kafka_ConsumerGroupListing_t *grplist;
+ grplist = rd_calloc(1, sizeof(*grplist));
+ grplist->group_id = rd_strdup(group_id);
+ grplist->is_simple_consumer_group = is_simple_consumer_group;
+ grplist->state = state;
+ return grplist;
+}
+
+/**
+ * @brief Copy \p grplist ConsumerGroupListing.
+ *
+ * @param grplist The group listing to copy.
+ * @return A new allocated copy of the passed ConsumerGroupListing.
+ */
+static rd_kafka_ConsumerGroupListing_t *rd_kafka_ConsumerGroupListing_copy(
+ const rd_kafka_ConsumerGroupListing_t *grplist) {
+ return rd_kafka_ConsumerGroupListing_new(
+ grplist->group_id, grplist->is_simple_consumer_group,
+ grplist->state);
+}
+
+/**
+ * @brief Same as rd_kafka_ConsumerGroupListing_copy() but suitable for
+ * rd_list_copy(). The \p opaque is ignored.
+ */
+static void *rd_kafka_ConsumerGroupListing_copy_opaque(const void *grplist,
+ void *opaque) {
+ return rd_kafka_ConsumerGroupListing_copy(grplist);
+}
+
+static void rd_kafka_ConsumerGroupListing_destroy(
+ rd_kafka_ConsumerGroupListing_t *grplist) {
+ RD_IF_FREE(grplist->group_id, rd_free);
+ rd_free(grplist);
+}
+
+static void rd_kafka_ConsumerGroupListing_free(void *ptr) {
+ rd_kafka_ConsumerGroupListing_destroy(ptr);
+}
+
+const char *rd_kafka_ConsumerGroupListing_group_id(
+ const rd_kafka_ConsumerGroupListing_t *grplist) {
+ return grplist->group_id;
+}
+
+int rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
+ const rd_kafka_ConsumerGroupListing_t *grplist) {
+ return grplist->is_simple_consumer_group;
+}
+
+rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupListing_state(
+ const rd_kafka_ConsumerGroupListing_t *grplist) {
+ return grplist->state;
+}
+
+/**
+ * @brief Create a new ListConsumerGroupsResult object.
+ *
+ * @param valid
+ * @param errors
+ */
+static rd_kafka_ListConsumerGroupsResult_t *
+rd_kafka_ListConsumerGroupsResult_new(const rd_list_t *valid,
+ const rd_list_t *errors) {
+ rd_kafka_ListConsumerGroupsResult_t *res;
+ res = rd_calloc(1, sizeof(*res));
+ rd_list_init_copy(&res->valid, valid);
+ rd_list_copy_to(&res->valid, valid,
+ rd_kafka_ConsumerGroupListing_copy_opaque, NULL);
+ rd_list_init_copy(&res->errors, errors);
+ rd_list_copy_to(&res->errors, errors, rd_kafka_error_copy_opaque, NULL);
+ return res;
+}
+
+static void rd_kafka_ListConsumerGroupsResult_destroy(
+ rd_kafka_ListConsumerGroupsResult_t *res) {
+ rd_list_destroy(&res->valid);
+ rd_list_destroy(&res->errors);
+ rd_free(res);
+}
+
+static void rd_kafka_ListConsumerGroupsResult_free(void *ptr) {
+ rd_kafka_ListConsumerGroupsResult_destroy(ptr);
+}
+
+/**
+ * @brief Copy the passed ListConsumerGroupsResult.
+ *
+ * @param res the ListConsumerGroupsResult to copy
+ * @return a newly allocated ListConsumerGroupsResult object.
+ *
+ * @sa Release the object with rd_kafka_ListConsumerGroupsResult_destroy().
+ */
+static rd_kafka_ListConsumerGroupsResult_t *
+rd_kafka_ListConsumerGroupsResult_copy(
+ const rd_kafka_ListConsumerGroupsResult_t *res) {
+ return rd_kafka_ListConsumerGroupsResult_new(&res->valid, &res->errors);
+}
+
+/**
+ * @brief Same as rd_kafka_ListConsumerGroupsResult_copy() but suitable for
+ * rd_list_copy(). The \p opaque is ignored.
+ */
+static void *rd_kafka_ListConsumerGroupsResult_copy_opaque(const void *list,
+ void *opaque) {
+ return rd_kafka_ListConsumerGroupsResult_copy(list);
+}
+
+/**
+ * @brief Send ListConsumerGroupsRequest. Admin worker compatible callback.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_admin_ListConsumerGroupsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *groups /*(char*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ int i;
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+ const char **states_str = NULL;
+ int states_str_cnt = 0;
+ rd_list_t *states =
+ rd_kafka_confval_get_ptr(&options->match_consumer_group_states);
+
+ /* Prepare list_options */
+ if (states && rd_list_cnt(states) > 0) {
+ states_str_cnt = rd_list_cnt(states);
+ states_str = rd_calloc(states_str_cnt, sizeof(*states_str));
+ for (i = 0; i < states_str_cnt; i++) {
+ states_str[i] = rd_kafka_consumer_group_state_name(
+ rd_list_get_int32(states, i));
+ }
+ }
+
+ error = rd_kafka_ListGroupsRequest(rkb, -1, states_str, states_str_cnt,
+ replyq, resp_cb, opaque);
+
+ if (states_str) {
+ rd_free(states_str);
+ }
+
+ if (error) {
+ rd_snprintf(errstr, errstr_size, "%s",
+ rd_kafka_error_string(error));
+ err = rd_kafka_error_code(error);
+ rd_kafka_error_destroy(error);
+ return err;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parse ListConsumerGroupsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_ListConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ int i, cnt;
+ int16_t error_code, api_version;
+ rd_kafka_op_t *rko_result = NULL;
+ rd_kafka_error_t *error = NULL;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_list_t valid, errors;
+ rd_kafka_ListConsumerGroupsResult_t *list_result;
+ char *group_id = NULL, *group_state = NULL, *proto_type = NULL;
+
+ api_version = rd_kafka_buf_ApiVersion(reply);
+ if (api_version >= 1) {
+ rd_kafka_buf_read_throttle_time(reply);
+ }
+ rd_kafka_buf_read_i16(reply, &error_code);
+ if (error_code) {
+ error = rd_kafka_error_new(error_code,
+ "Broker [%d"
+ "] "
+ "ListConsumerGroups: %s",
+ rd_kafka_broker_id(rkb),
+ rd_kafka_err2str(error_code));
+ }
+
+ rd_kafka_buf_read_arraycnt(reply, &cnt, RD_KAFKAP_GROUPS_MAX);
+ rd_list_init(&valid, cnt, rd_kafka_ConsumerGroupListing_free);
+ rd_list_init(&errors, 8, rd_free);
+ if (error)
+ rd_list_add(&errors, error);
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+ rd_list_init(&rko_result->rko_u.admin_result.results, 1,
+ rd_kafka_ListConsumerGroupsResult_free);
+
+ for (i = 0; i < cnt; i++) {
+ rd_kafkap_str_t GroupId, ProtocolType,
+ GroupState = RD_ZERO_INIT;
+ rd_kafka_ConsumerGroupListing_t *group_listing;
+ rd_bool_t is_simple_consumer_group, is_consumer_protocol_type;
+ rd_kafka_consumer_group_state_t state =
+ RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN;
+
+ rd_kafka_buf_read_str(reply, &GroupId);
+ rd_kafka_buf_read_str(reply, &ProtocolType);
+ if (api_version >= 4) {
+ rd_kafka_buf_read_str(reply, &GroupState);
+ }
+ rd_kafka_buf_skip_tags(reply);
+
+ group_id = RD_KAFKAP_STR_DUP(&GroupId);
+ proto_type = RD_KAFKAP_STR_DUP(&ProtocolType);
+ if (api_version >= 4) {
+ group_state = RD_KAFKAP_STR_DUP(&GroupState);
+ state = rd_kafka_consumer_group_state_code(group_state);
+ }
+
+ is_simple_consumer_group = *proto_type == '\0';
+ is_consumer_protocol_type =
+ !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE);
+ if (is_simple_consumer_group || is_consumer_protocol_type) {
+ group_listing = rd_kafka_ConsumerGroupListing_new(
+ group_id, is_simple_consumer_group, state);
+ rd_list_add(&valid, group_listing);
+ }
+
+ rd_free(group_id);
+ rd_free(group_state);
+ rd_free(proto_type);
+ group_id = NULL;
+ group_state = NULL;
+ proto_type = NULL;
+ }
+ rd_kafka_buf_skip_tags(reply);
+
+err_parse:
+ if (group_id)
+ rd_free(group_id);
+ if (group_state)
+ rd_free(group_state);
+ if (proto_type)
+ rd_free(proto_type);
+
+ if (reply->rkbuf_err) {
+ error_code = reply->rkbuf_err;
+ error = rd_kafka_error_new(
+ error_code,
+ "Broker [%d"
+ "] "
+ "ListConsumerGroups response protocol parse failure: %s",
+ rd_kafka_broker_id(rkb), rd_kafka_err2str(error_code));
+ rd_list_add(&errors, error);
+ }
+
+ list_result = rd_kafka_ListConsumerGroupsResult_new(&valid, &errors);
+ rd_list_add(&rko_result->rko_u.admin_result.results, list_result);
+
+ *rko_resultp = rko_result;
+ rd_list_destroy(&valid);
+ rd_list_destroy(&errors);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/** @brief Merge the ListConsumerGroups response from a single broker
+ * into the user response list.
+ */
+static void
+rd_kafka_ListConsumerGroups_response_merge(rd_kafka_op_t *rko_fanout,
+ const rd_kafka_op_t *rko_partial) {
+ int cnt;
+ rd_kafka_ListConsumerGroupsResult_t *res = NULL;
+ rd_kafka_ListConsumerGroupsResult_t *newres;
+ rd_list_t new_valid, new_errors;
+
+ rd_assert(rko_partial->rko_evtype ==
+ RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT);
+
+ cnt = rd_list_cnt(&rko_fanout->rko_u.admin_request.fanout.results);
+ if (cnt) {
+ res = rd_list_elem(
+ &rko_fanout->rko_u.admin_request.fanout.results, 0);
+ } else {
+ rd_list_init(&new_valid, 0, rd_kafka_ConsumerGroupListing_free);
+ rd_list_init(&new_errors, 0, rd_free);
+ res = rd_kafka_ListConsumerGroupsResult_new(&new_valid,
+ &new_errors);
+ rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, 0,
+ res);
+ rd_list_destroy(&new_valid);
+ rd_list_destroy(&new_errors);
+ }
+ if (!rko_partial->rko_err) {
+ int new_valid_count, new_errors_count;
+ const rd_list_t *new_valid_list, *new_errors_list;
+ /* Read the partial result and merge the valid groups
+ * and the errors into the fanout parent result. */
+ newres =
+ rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
+ rd_assert(newres);
+ new_valid_count = rd_list_cnt(&newres->valid);
+ new_errors_count = rd_list_cnt(&newres->errors);
+ if (new_valid_count) {
+ new_valid_list = &newres->valid;
+ rd_list_grow(&res->valid, new_valid_count);
+ rd_list_copy_to(
+ &res->valid, new_valid_list,
+ rd_kafka_ConsumerGroupListing_copy_opaque, NULL);
+ }
+ if (new_errors_count) {
+ new_errors_list = &newres->errors;
+ rd_list_grow(&res->errors, new_errors_count);
+ rd_list_copy_to(&res->errors, new_errors_list,
+ rd_kafka_error_copy_opaque, NULL);
+ }
+ } else {
+ /* Op errored, e.g. timeout */
+ rd_list_add(&res->errors,
+ rd_kafka_error_new(rko_partial->rko_err, NULL));
+ }
+}
+
+void rd_kafka_ListConsumerGroups(rd_kafka_t *rk,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko;
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_admin_ListConsumerGroupsRequest,
+ rd_kafka_ListConsumerGroupsResponse_parse};
+ static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
+ rd_kafka_ListConsumerGroups_response_merge,
+ rd_kafka_ListConsumerGroupsResult_copy_opaque,
+ };
+
+ rko = rd_kafka_admin_request_op_target_all_new(
+ rk, RD_KAFKA_OP_LISTCONSUMERGROUPS,
+ RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT, &cbs, &fanout_cbs,
+ rd_kafka_ListConsumerGroupsResult_free, options, rkqu->rkqu_q);
+ rd_kafka_q_enq(rk->rk_ops, rko);
+}
+
+const rd_kafka_ConsumerGroupListing_t **
+rd_kafka_ListConsumerGroups_result_valid(
+ const rd_kafka_ListConsumerGroups_result_t *result,
+ size_t *cntp) {
+ int list_result_cnt;
+ const rd_kafka_ListConsumerGroupsResult_t *list_result;
+ const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS);
+
+ list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results);
+ rd_assert(list_result_cnt == 1);
+ list_result = rd_list_elem(&rko->rko_u.admin_result.results, 0);
+ *cntp = rd_list_cnt(&list_result->valid);
+
+ return (const rd_kafka_ConsumerGroupListing_t **)
+ list_result->valid.rl_elems;
+}
+
+const rd_kafka_error_t **rd_kafka_ListConsumerGroups_result_errors(
+ const rd_kafka_ListConsumerGroups_result_t *result,
+ size_t *cntp) {
+ int list_result_cnt, error_cnt;
+ const rd_kafka_ListConsumerGroupsResult_t *list_result;
+ const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_LISTCONSUMERGROUPS);
+
+ list_result_cnt = rd_list_cnt(&rko->rko_u.admin_result.results);
+ rd_assert(list_result_cnt == 1);
+ list_result = rko->rko_u.admin_result.results.rl_elems[0];
+ error_cnt = rd_list_cnt(&list_result->errors);
+ if (error_cnt == 0) {
+ *cntp = 0;
+ return NULL;
+ }
+ *cntp = error_cnt;
+ return (const rd_kafka_error_t **)list_result->errors.rl_elems;
+}
+
+/**@}*/
+
+/**
+ * @name Describe consumer groups
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Create a new MemberDescription object. This object is used for
+ * creating a ConsumerGroupDescription.
+ *
+ * @param client_id The client id.
+ * @param consumer_id The consumer id (or member id).
+ * @param group_instance_id (optional) The group instance id
+ * for static membership.
+ * @param host The consumer host.
+ * @param assignment The member's assigned partitions, or NULL if none.
+ *
+ * @return A new allocated MemberDescription object.
+ * Use rd_kafka_MemberDescription_destroy() to free when done.
+ */
+static rd_kafka_MemberDescription_t *rd_kafka_MemberDescription_new(
+ const char *client_id,
+ const char *consumer_id,
+ const char *group_instance_id,
+ const char *host,
+ const rd_kafka_topic_partition_list_t *assignment) {
+ rd_kafka_MemberDescription_t *member;
+ member = rd_calloc(1, sizeof(*member));
+ member->client_id = rd_strdup(client_id);
+ member->consumer_id = rd_strdup(consumer_id);
+ if (group_instance_id)
+ member->group_instance_id = rd_strdup(group_instance_id);
+ member->host = rd_strdup(host);
+ if (assignment)
+ member->assignment.partitions =
+ rd_kafka_topic_partition_list_copy(assignment);
+ else
+ member->assignment.partitions =
+ rd_kafka_topic_partition_list_new(0);
+ return member;
+}
+
+/**
+ * @brief Allocate a new MemberDescription, copy of \p src
+ * and return it.
+ *
+ * @param src The MemberDescription to copy.
+ * @return A new allocated MemberDescription object,
+ * Use rd_kafka_MemberDescription_destroy() to free when done.
+ */
+static rd_kafka_MemberDescription_t *
+rd_kafka_MemberDescription_copy(const rd_kafka_MemberDescription_t *src) {
+ return rd_kafka_MemberDescription_new(src->client_id, src->consumer_id,
+ src->group_instance_id, src->host,
+ src->assignment.partitions);
+}
+
+/**
+ * @brief MemberDescription copy, compatible with rd_list_copy_to.
+ *
+ * @param elem The MemberDescription to copy-
+ * @param opaque Not used.
+ */
+static void *rd_kafka_MemberDescription_list_copy(const void *elem,
+ void *opaque) {
+ return rd_kafka_MemberDescription_copy(elem);
+}
+
+static void
+rd_kafka_MemberDescription_destroy(rd_kafka_MemberDescription_t *member) {
+ rd_free(member->client_id);
+ rd_free(member->consumer_id);
+ rd_free(member->host);
+ if (member->group_instance_id != NULL)
+ rd_free(member->group_instance_id);
+ if (member->assignment.partitions)
+ rd_kafka_topic_partition_list_destroy(
+ member->assignment.partitions);
+ rd_free(member);
+}
+
+static void rd_kafka_MemberDescription_free(void *member) {
+ rd_kafka_MemberDescription_destroy(member);
+}
+
+const char *rd_kafka_MemberDescription_client_id(
+ const rd_kafka_MemberDescription_t *member) {
+ return member->client_id;
+}
+
+const char *rd_kafka_MemberDescription_group_instance_id(
+ const rd_kafka_MemberDescription_t *member) {
+ return member->group_instance_id;
+}
+
+const char *rd_kafka_MemberDescription_consumer_id(
+ const rd_kafka_MemberDescription_t *member) {
+ return member->consumer_id;
+}
+
+const char *
+rd_kafka_MemberDescription_host(const rd_kafka_MemberDescription_t *member) {
+ return member->host;
+}
+
+const rd_kafka_MemberAssignment_t *rd_kafka_MemberDescription_assignment(
+ const rd_kafka_MemberDescription_t *member) {
+ return &member->assignment;
+}
+
+const rd_kafka_topic_partition_list_t *rd_kafka_MemberAssignment_partitions(
+ const rd_kafka_MemberAssignment_t *assignment) {
+ return assignment->partitions;
+}
+
+
+/**
+ * @brief Create a new ConsumerGroupDescription object.
+ *
+ * @param group_id The group id.
+ * @param is_simple_consumer_group Is the group simple?
+ * @param members List of members (rd_kafka_MemberDescription_t) of this
+ * group.
+ * @param partition_assignor (optional) Chosen assignor.
+ * @param state Group state.
+ * @param coordinator (optional) Group coordinator.
+ * @param error (optional) Error received for this group.
+ * @return A new allocated ConsumerGroupDescription object.
+ * Use rd_kafka_ConsumerGroupDescription_destroy() to free when done.
+ */
+static rd_kafka_ConsumerGroupDescription_t *
+rd_kafka_ConsumerGroupDescription_new(const char *group_id,
+ rd_bool_t is_simple_consumer_group,
+ const rd_list_t *members,
+ const char *partition_assignor,
+ rd_kafka_consumer_group_state_t state,
+ const rd_kafka_Node_t *coordinator,
+ rd_kafka_error_t *error) {
+ rd_kafka_ConsumerGroupDescription_t *grpdesc;
+ grpdesc = rd_calloc(1, sizeof(*grpdesc));
+ grpdesc->group_id = rd_strdup(group_id);
+ grpdesc->is_simple_consumer_group = is_simple_consumer_group;
+ if (members == NULL) {
+ rd_list_init(&grpdesc->members, 0,
+ rd_kafka_MemberDescription_free);
+ } else {
+ rd_list_init_copy(&grpdesc->members, members);
+ rd_list_copy_to(&grpdesc->members, members,
+ rd_kafka_MemberDescription_list_copy, NULL);
+ }
+ grpdesc->partition_assignor = !partition_assignor
+ ? (char *)partition_assignor
+ : rd_strdup(partition_assignor);
+ grpdesc->state = state;
+ if (coordinator != NULL)
+ grpdesc->coordinator = rd_kafka_Node_copy(coordinator);
+ grpdesc->error =
+ error != NULL ? rd_kafka_error_new(rd_kafka_error_code(error), "%s",
+ rd_kafka_error_string(error))
+ : NULL;
+ return grpdesc;
+}
+
+/**
+ * @brief New instance of ConsumerGroupDescription from an error.
+ *
+ * @param group_id The group id.
+ * @param error The error.
+ * @return A new allocated ConsumerGroupDescription with the passed error.
+ */
+static rd_kafka_ConsumerGroupDescription_t *
+rd_kafka_ConsumerGroupDescription_new_error(const char *group_id,
+ rd_kafka_error_t *error) {
+ return rd_kafka_ConsumerGroupDescription_new(
+ group_id, rd_false, NULL, NULL,
+ RD_KAFKA_CONSUMER_GROUP_STATE_UNKNOWN, NULL, error);
+}
+
+/**
+ * @brief Copy \p desc ConsumerGroupDescription.
+ *
+ * @param desc The group description to copy.
+ * @return A new allocated copy of the passed ConsumerGroupDescription.
+ */
+static rd_kafka_ConsumerGroupDescription_t *
+rd_kafka_ConsumerGroupDescription_copy(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return rd_kafka_ConsumerGroupDescription_new(
+ grpdesc->group_id, grpdesc->is_simple_consumer_group,
+ &grpdesc->members, grpdesc->partition_assignor, grpdesc->state,
+ grpdesc->coordinator, grpdesc->error);
+}
+
+/**
+ * @brief Same as rd_kafka_ConsumerGroupDescription_copy() but suitable for
+ * rd_list_copy(). The \p opaque is ignored.
+ */
+static void *rd_kafka_ConsumerGroupDescription_copy_opaque(const void *grpdesc,
+ void *opaque) {
+ return rd_kafka_ConsumerGroupDescription_copy(grpdesc);
+}
+
+static void rd_kafka_ConsumerGroupDescription_destroy(
+ rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ if (likely(grpdesc->group_id != NULL))
+ rd_free(grpdesc->group_id);
+ rd_list_destroy(&grpdesc->members);
+ if (likely(grpdesc->partition_assignor != NULL))
+ rd_free(grpdesc->partition_assignor);
+ if (likely(grpdesc->error != NULL))
+ rd_kafka_error_destroy(grpdesc->error);
+ if (grpdesc->coordinator)
+ rd_kafka_Node_destroy(grpdesc->coordinator);
+ rd_free(grpdesc);
+}
+
+static void rd_kafka_ConsumerGroupDescription_free(void *ptr) {
+ rd_kafka_ConsumerGroupDescription_destroy(ptr);
+}
+
+const char *rd_kafka_ConsumerGroupDescription_group_id(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return grpdesc->group_id;
+}
+
+const rd_kafka_error_t *rd_kafka_ConsumerGroupDescription_error(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return grpdesc->error;
+}
+
+
+int rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return grpdesc->is_simple_consumer_group;
+}
+
+
+const char *rd_kafka_ConsumerGroupDescription_partition_assignor(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return grpdesc->partition_assignor;
+}
+
+
+rd_kafka_consumer_group_state_t rd_kafka_ConsumerGroupDescription_state(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return grpdesc->state;
+}
+
+const rd_kafka_Node_t *rd_kafka_ConsumerGroupDescription_coordinator(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return grpdesc->coordinator;
+}
+
+size_t rd_kafka_ConsumerGroupDescription_member_count(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc) {
+ return rd_list_cnt(&grpdesc->members);
+}
+
+const rd_kafka_MemberDescription_t *rd_kafka_ConsumerGroupDescription_member(
+ const rd_kafka_ConsumerGroupDescription_t *grpdesc,
+ size_t idx) {
+ return (rd_kafka_MemberDescription_t *)rd_list_elem(&grpdesc->members,
+ idx);
+}
+
+/**
+ * @brief Group arguments comparator for DescribeConsumerGroups args
+ */
+static int rd_kafka_DescribeConsumerGroups_cmp(const void *a, const void *b) {
+ return strcmp(a, b);
+}
+
+/** @brief Merge the DescribeConsumerGroups response from a single broker
+ * into the user response list.
+ */
+static void rd_kafka_DescribeConsumerGroups_response_merge(
+ rd_kafka_op_t *rko_fanout,
+ const rd_kafka_op_t *rko_partial) {
+ rd_kafka_ConsumerGroupDescription_t *groupres = NULL;
+ rd_kafka_ConsumerGroupDescription_t *newgroupres;
+ const char *grp = rko_partial->rko_u.admin_result.opaque;
+ int orig_pos;
+
+ rd_assert(rko_partial->rko_evtype ==
+ RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT);
+
+ if (!rko_partial->rko_err) {
+ /* Proper results.
+ * We only send one group per request, make sure it matches */
+ groupres =
+ rd_list_elem(&rko_partial->rko_u.admin_result.results, 0);
+ rd_assert(groupres);
+ rd_assert(!strcmp(groupres->group_id, grp));
+ newgroupres = rd_kafka_ConsumerGroupDescription_copy(groupres);
+ } else {
+ /* Op errored, e.g. timeout */
+ rd_kafka_error_t *error =
+ rd_kafka_error_new(rko_partial->rko_err, NULL);
+ newgroupres =
+ rd_kafka_ConsumerGroupDescription_new_error(grp, error);
+ rd_kafka_error_destroy(error);
+ }
+
+ /* As a convenience to the application we insert group result
+ * in the same order as they were requested. */
+ orig_pos = rd_list_index(&rko_fanout->rko_u.admin_request.args, grp,
+ rd_kafka_DescribeConsumerGroups_cmp);
+ rd_assert(orig_pos != -1);
+
+ /* Make sure result is not already set */
+ rd_assert(rd_list_elem(&rko_fanout->rko_u.admin_request.fanout.results,
+ orig_pos) == NULL);
+
+ rd_list_set(&rko_fanout->rko_u.admin_request.fanout.results, orig_pos,
+ newgroupres);
+}
+
+
+/**
+ * @brief Construct and send DescribeConsumerGroupsRequest to \p rkb
+ * with the groups (char *) in \p groups, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+static rd_kafka_resp_err_t rd_kafka_admin_DescribeConsumerGroupsRequest(
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *groups /*(char*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ int i;
+ char *group;
+ rd_kafka_resp_err_t err;
+ int groups_cnt = rd_list_cnt(groups);
+ rd_kafka_error_t *error = NULL;
+ char **groups_arr = rd_calloc(groups_cnt, sizeof(*groups_arr));
+
+ RD_LIST_FOREACH(group, groups, i) {
+ groups_arr[i] = rd_list_elem(groups, i);
+ }
+ error = rd_kafka_DescribeGroupsRequest(rkb, -1, groups_arr, groups_cnt,
+ replyq, resp_cb, opaque);
+ rd_free(groups_arr);
+
+ if (error) {
+ rd_snprintf(errstr, errstr_size, "%s",
+ rd_kafka_error_string(error));
+ err = rd_kafka_error_code(error);
+ rd_kafka_error_destroy(error);
+ return err;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parse DescribeConsumerGroupsResponse and create ADMIN_RESULT op.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_DescribeConsumerGroupsResponse_parse(rd_kafka_op_t *rko_req,
+ rd_kafka_op_t **rko_resultp,
+ rd_kafka_buf_t *reply,
+ char *errstr,
+ size_t errstr_size) {
+ const int log_decode_errors = LOG_ERR;
+ int nodeid;
+ uint16_t port;
+ int16_t api_version;
+ int32_t cnt;
+ rd_kafka_op_t *rko_result = NULL;
+ rd_kafka_broker_t *rkb = reply->rkbuf_rkb;
+ rd_kafka_Node_t *node = NULL;
+ rd_kafka_error_t *error = NULL;
+ char *group_id = NULL, *group_state = NULL, *proto_type = NULL,
+ *proto = NULL, *host = NULL;
+
+ api_version = rd_kafka_buf_ApiVersion(reply);
+ if (api_version >= 1) {
+ rd_kafka_buf_read_throttle_time(reply);
+ }
+
+ rd_kafka_buf_read_arraycnt(reply, &cnt, 100000);
+
+ rko_result = rd_kafka_admin_result_new(rko_req);
+ rd_list_init(&rko_result->rko_u.admin_result.results, cnt,
+ rd_kafka_ConsumerGroupDescription_free);
+
+ rd_kafka_broker_lock(rkb);
+ nodeid = rkb->rkb_nodeid;
+ host = rd_strdup(rkb->rkb_origname);
+ port = rkb->rkb_port;
+ rd_kafka_broker_unlock(rkb);
+
+ node = rd_kafka_Node_new(nodeid, host, port, NULL);
+ while (cnt-- > 0) {
+ int16_t error_code;
+ rd_kafkap_str_t GroupId, GroupState, ProtocolType, ProtocolData;
+ rd_bool_t is_simple_consumer_group, is_consumer_protocol_type;
+ int32_t member_cnt;
+ rd_list_t members;
+ rd_kafka_ConsumerGroupDescription_t *grpdesc = NULL;
+
+ rd_kafka_buf_read_i16(reply, &error_code);
+ rd_kafka_buf_read_str(reply, &GroupId);
+ rd_kafka_buf_read_str(reply, &GroupState);
+ rd_kafka_buf_read_str(reply, &ProtocolType);
+ rd_kafka_buf_read_str(reply, &ProtocolData);
+ rd_kafka_buf_read_arraycnt(reply, &member_cnt, 100000);
+
+ group_id = RD_KAFKAP_STR_DUP(&GroupId);
+ group_state = RD_KAFKAP_STR_DUP(&GroupState);
+ proto_type = RD_KAFKAP_STR_DUP(&ProtocolType);
+ proto = RD_KAFKAP_STR_DUP(&ProtocolData);
+
+ if (error_code) {
+ error = rd_kafka_error_new(
+ error_code, "DescribeConsumerGroups: %s",
+ rd_kafka_err2str(error_code));
+ }
+
+ is_simple_consumer_group = *proto_type == '\0';
+ is_consumer_protocol_type =
+ !strcmp(proto_type, CONSUMER_PROTOCOL_TYPE);
+ if (error == NULL && !is_simple_consumer_group &&
+ !is_consumer_protocol_type) {
+ error = rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "GroupId %s is not a consumer group (%s).",
+ group_id, proto_type);
+ }
+
+ rd_list_init(&members, 0, rd_kafka_MemberDescription_free);
+
+ while (member_cnt-- > 0) {
+ rd_kafkap_str_t MemberId, ClientId, ClientHost,
+ GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
+ char *member_id, *client_id, *client_host,
+ *group_instance_id = NULL;
+ rd_kafkap_bytes_t MemberMetadata, MemberAssignment;
+ rd_kafka_MemberDescription_t *member;
+ rd_kafka_topic_partition_list_t *partitions = NULL;
+ rd_kafka_buf_t *rkbuf;
+
+ rd_kafka_buf_read_str(reply, &MemberId);
+ if (api_version >= 4) {
+ rd_kafka_buf_read_str(reply, &GroupInstanceId);
+ }
+ rd_kafka_buf_read_str(reply, &ClientId);
+ rd_kafka_buf_read_str(reply, &ClientHost);
+ rd_kafka_buf_read_bytes(reply, &MemberMetadata);
+ rd_kafka_buf_read_bytes(reply, &MemberAssignment);
+ if (error != NULL)
+ continue;
+
+ if (RD_KAFKAP_BYTES_LEN(&MemberAssignment) != 0) {
+ int16_t version;
+ /* Parse assignment */
+ rkbuf = rd_kafka_buf_new_shadow(
+ MemberAssignment.data,
+ RD_KAFKAP_BYTES_LEN(&MemberAssignment),
+ NULL);
+ /* Protocol parser needs a broker handle
+ * to log errors on. */
+ rkbuf->rkbuf_rkb = rkb;
+ /* Decreased in rd_kafka_buf_destroy */
+ rd_kafka_broker_keep(rkb);
+ rd_kafka_buf_read_i16(rkbuf, &version);
+ const rd_kafka_topic_partition_field_t fields[] =
+ {RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ partitions = rd_kafka_buf_read_topic_partitions(
+ rkbuf, 0, fields);
+ rd_kafka_buf_destroy(rkbuf);
+ if (!partitions)
+ rd_kafka_buf_parse_fail(
+ reply,
+ "Error reading topic partitions");
+ }
+
+ member_id = RD_KAFKAP_STR_DUP(&MemberId);
+ if (!RD_KAFKAP_STR_IS_NULL(&GroupInstanceId)) {
+ group_instance_id =
+ RD_KAFKAP_STR_DUP(&GroupInstanceId);
+ }
+ client_id = RD_KAFKAP_STR_DUP(&ClientId);
+ client_host = RD_KAFKAP_STR_DUP(&ClientHost);
+
+ member = rd_kafka_MemberDescription_new(
+ client_id, member_id, group_instance_id,
+ client_host, partitions);
+ if (partitions)
+ rd_kafka_topic_partition_list_destroy(
+ partitions);
+ rd_list_add(&members, member);
+ rd_free(member_id);
+ rd_free(group_instance_id);
+ rd_free(client_id);
+ rd_free(client_host);
+ member_id = NULL;
+ group_instance_id = NULL;
+ client_id = NULL;
+ client_host = NULL;
+ }
+
+ if (api_version >= 3) {
+ /* TODO: implement KIP-430 */
+ int32_t authorized_operations;
+ rd_kafka_buf_read_i32(reply, &authorized_operations);
+ }
+
+ if (error == NULL) {
+ grpdesc = rd_kafka_ConsumerGroupDescription_new(
+ group_id, is_simple_consumer_group, &members, proto,
+ rd_kafka_consumer_group_state_code(group_state),
+ node, error);
+ } else {
+ grpdesc = rd_kafka_ConsumerGroupDescription_new_error(
+ group_id, error);
+ }
+ rd_list_add(&rko_result->rko_u.admin_result.results, grpdesc);
+ if (error)
+ rd_kafka_error_destroy(error);
+ rd_list_destroy(&members);
+ rd_free(group_id);
+ rd_free(group_state);
+ rd_free(proto_type);
+ rd_free(proto);
+ error = NULL;
+ group_id = NULL;
+ group_state = NULL;
+ proto_type = NULL;
+ proto = NULL;
+ }
+
+ if (host)
+ rd_free(host);
+ if (node)
+ rd_kafka_Node_destroy(node);
+ *rko_resultp = rko_result;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (group_id)
+ rd_free(group_id);
+ if (group_state)
+ rd_free(group_state);
+ if (proto_type)
+ rd_free(proto_type);
+ if (proto)
+ rd_free(proto);
+ if (error)
+ rd_kafka_error_destroy(error);
+ if (host)
+ rd_free(host);
+ if (node)
+ rd_kafka_Node_destroy(node);
+ if (rko_result)
+ rd_kafka_op_destroy(rko_result);
+
+ rd_snprintf(
+ errstr, errstr_size,
+ "DescribeConsumerGroups response protocol parse failure: %s",
+ rd_kafka_err2str(reply->rkbuf_err));
+
+ return reply->rkbuf_err;
+}
+
+void rd_kafka_DescribeConsumerGroups(rd_kafka_t *rk,
+ const char **groups,
+ size_t groups_cnt,
+ const rd_kafka_AdminOptions_t *options,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_op_t *rko_fanout;
+ rd_list_t dup_list;
+ size_t i;
+ static const struct rd_kafka_admin_fanout_worker_cbs fanout_cbs = {
+ rd_kafka_DescribeConsumerGroups_response_merge,
+ rd_kafka_ConsumerGroupDescription_copy_opaque};
+
+ rd_assert(rkqu);
+
+ rko_fanout = rd_kafka_admin_fanout_op_new(
+ rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS,
+ RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &fanout_cbs, options,
+ rkqu->rkqu_q);
+
+ if (groups_cnt == 0) {
+ rd_kafka_admin_result_fail(rko_fanout,
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "No groups to describe");
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ /* Copy group list and store it on the request op.
+ * Maintain original ordering. */
+ rd_list_init(&rko_fanout->rko_u.admin_request.args, (int)groups_cnt,
+ rd_free);
+ for (i = 0; i < groups_cnt; i++)
+ rd_list_add(&rko_fanout->rko_u.admin_request.args,
+ rd_strdup(groups[i]));
+
+ /* Check for duplicates.
+ * Make a temporary copy of the group list and sort it to check for
+ * duplicates, we don't want the original list sorted since we want
+ * to maintain ordering. */
+ rd_list_init(&dup_list,
+ rd_list_cnt(&rko_fanout->rko_u.admin_request.args), NULL);
+ rd_list_copy_to(&dup_list, &rko_fanout->rko_u.admin_request.args, NULL,
+ NULL);
+ rd_list_sort(&dup_list, rd_kafka_DescribeConsumerGroups_cmp);
+ if (rd_list_find_duplicate(&dup_list,
+ rd_kafka_DescribeConsumerGroups_cmp)) {
+ rd_list_destroy(&dup_list);
+ rd_kafka_admin_result_fail(rko_fanout,
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Duplicate groups not allowed");
+ rd_kafka_admin_common_worker_destroy(rk, rko_fanout,
+ rd_true /*destroy*/);
+ return;
+ }
+
+ rd_list_destroy(&dup_list);
+
+ /* Prepare results list where fanned out op's results will be
+ * accumulated. */
+ rd_list_init(&rko_fanout->rko_u.admin_request.fanout.results,
+ (int)groups_cnt, rd_kafka_ConsumerGroupDescription_free);
+ rko_fanout->rko_u.admin_request.fanout.outstanding = (int)groups_cnt;
+
+ /* Create individual request ops for each group.
+ * FIXME: A future optimization is to coalesce all groups for a single
+ * coordinator into one op. */
+ for (i = 0; i < groups_cnt; i++) {
+ static const struct rd_kafka_admin_worker_cbs cbs = {
+ rd_kafka_admin_DescribeConsumerGroupsRequest,
+ rd_kafka_DescribeConsumerGroupsResponse_parse,
+ };
+ char *grp =
+ rd_list_elem(&rko_fanout->rko_u.admin_request.args, (int)i);
+ rd_kafka_op_t *rko = rd_kafka_admin_request_op_new(
+ rk, RD_KAFKA_OP_DESCRIBECONSUMERGROUPS,
+ RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT, &cbs, options,
+ rk->rk_ops);
+
+ rko->rko_u.admin_request.fanout_parent = rko_fanout;
+ rko->rko_u.admin_request.broker_id =
+ RD_KAFKA_ADMIN_TARGET_COORDINATOR;
+ rko->rko_u.admin_request.coordtype = RD_KAFKA_COORD_GROUP;
+ rko->rko_u.admin_request.coordkey = rd_strdup(grp);
+
+ /* Set the group name as the opaque so the fanout worker use it
+ * to fill in errors.
+ * References rko_fanout's memory, which will always outlive
+ * the fanned out op. */
+ rd_kafka_AdminOptions_set_opaque(
+ &rko->rko_u.admin_request.options, grp);
+
+ rd_list_init(&rko->rko_u.admin_request.args, 1, rd_free);
+ rd_list_add(&rko->rko_u.admin_request.args,
+ rd_strdup(groups[i]));
+
+ rd_kafka_q_enq(rk->rk_ops, rko);
+ }
+}
+
+const rd_kafka_ConsumerGroupDescription_t **
+rd_kafka_DescribeConsumerGroups_result_groups(
+ const rd_kafka_DescribeConsumerGroups_result_t *result,
+ size_t *cntp) {
+ const rd_kafka_op_t *rko = (const rd_kafka_op_t *)result;
+ rd_kafka_op_type_t reqtype =
+ rko->rko_u.admin_result.reqtype & ~RD_KAFKA_OP_FLAGMASK;
+ rd_assert(reqtype == RD_KAFKA_OP_DESCRIBECONSUMERGROUPS);
+
+ *cntp = rd_list_cnt(&rko->rko_u.admin_result.results);
+ return (const rd_kafka_ConsumerGroupDescription_t **)
+ rko->rko_u.admin_result.results.rl_elems;
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h
new file mode 100644
index 000000000..62fe9e87a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_admin.h
@@ -0,0 +1,482 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_ADMIN_H_
+#define _RDKAFKA_ADMIN_H_
+
+
+#include "rdstring.h"
+#include "rdkafka_error.h"
+#include "rdkafka_confval.h"
+
+
+
+/**
+ * @brief Common AdminOptions type used for all admin APIs.
+ *
+ * @remark Visit AdminOptions_use() when you change this struct
+ * to make sure it is copied properly.
+ */
+struct rd_kafka_AdminOptions_s {
+ rd_kafka_admin_op_t for_api; /**< Limit allowed options to
+ * this API (optional) */
+
+ /* Generic */
+ rd_kafka_confval_t request_timeout; /**< I32: Full request timeout,
+ * includes looking up leader
+ * broker,
+ * waiting for req/response,
+ * etc. */
+ rd_ts_t abs_timeout; /**< Absolute timeout calculated
+ * from .timeout */
+
+ /* Specific for one or more APIs */
+ rd_kafka_confval_t operation_timeout; /**< I32: Timeout on broker.
+ * Valid for:
+ * CreateParititons
+ * CreateTopics
+ * DeleteRecords
+ * DeleteTopics
+ */
+ rd_kafka_confval_t validate_only; /**< BOOL: Only validate (on broker),
+ * but don't perform action.
+ * Valid for:
+ * CreateTopics
+ * CreatePartitions
+ * AlterConfigs
+ */
+
+ rd_kafka_confval_t incremental; /**< BOOL: Incremental rather than
+ * absolute application
+ * of config.
+ * Valid for:
+ * AlterConfigs
+ */
+
+ rd_kafka_confval_t broker; /**< INT: Explicitly override
+ * broker id to send
+ * requests to.
+ * Valid for:
+ * all
+ */
+
+ rd_kafka_confval_t
+ require_stable_offsets; /**< BOOL: Whether broker should return
+ * stable offsets (transaction-committed).
+ * Valid for:
+ * ListConsumerGroupOffsets
+ */
+
+ rd_kafka_confval_t
+ match_consumer_group_states; /**< PTR: list of consumer group states
+ * to query for.
+ * Valid for: ListConsumerGroups.
+ */
+
+ rd_kafka_confval_t opaque; /**< PTR: Application opaque.
+ * Valid for all. */
+};
+
+
+/**
+ * @name CreateTopics
+ * @{
+ */
+
+/**
+ * @brief NewTopic type, used with CreateTopics.
+ */
+struct rd_kafka_NewTopic_s {
+ /* Required */
+ char *topic; /**< Topic to be created */
+ int num_partitions; /**< Number of partitions to create */
+ int replication_factor; /**< Replication factor */
+
+ /* Optional */
+ rd_list_t replicas; /**< Type (rd_list_t (int32_t)):
+ * Array of replica lists indexed by
+ * partition, size num_partitions. */
+ rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *):
+ * List of configuration entries */
+};
+
+/**@}*/
+
+
+/**
+ * @name DeleteTopics
+ * @{
+ */
+
+/**
+ * @brief DeleteTopics result
+ */
+struct rd_kafka_DeleteTopics_result_s {
+ rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */
+};
+
+struct rd_kafka_DeleteTopic_s {
+ char *topic; /**< Points to data */
+ char data[1]; /**< The topic name is allocated along with
+ * the struct here. */
+};
+
+/**@}*/
+
+
+
+/**
+ * @name CreatePartitions
+ * @{
+ */
+
+
+/**
+ * @brief CreatePartitions result
+ */
+struct rd_kafka_CreatePartitions_result_s {
+ rd_list_t topics; /**< Type (rd_kafka_topic_result_t *) */
+};
+
+struct rd_kafka_NewPartitions_s {
+ char *topic; /**< Points to data */
+ size_t total_cnt; /**< New total partition count */
+
+ /* Optional */
+ rd_list_t replicas; /**< Type (rd_list_t (int32_t)):
+ * Array of replica lists indexed by
+ * new partition relative index.
+ * Size is dynamic since we don't
+ * know how many partitions are actually
+ * being added by total_cnt */
+
+ char data[1]; /**< The topic name is allocated along with
+ * the struct here. */
+};
+
+/**@}*/
+
+
+
+/**
+ * @name ConfigEntry
+ * @{
+ */
+
+/* KIP-248 */
+typedef enum rd_kafka_AlterOperation_t {
+ RD_KAFKA_ALTER_OP_ADD = 0,
+ RD_KAFKA_ALTER_OP_SET = 1,
+ RD_KAFKA_ALTER_OP_DELETE = 2,
+} rd_kafka_AlterOperation_t;
+
+struct rd_kafka_ConfigEntry_s {
+ rd_strtup_t *kv; /**< Name/Value pair */
+
+ /* Response */
+
+ /* Attributes: this is a struct for easy copying */
+ struct {
+ rd_kafka_AlterOperation_t operation; /**< Operation */
+ rd_kafka_ConfigSource_t source; /**< Config source */
+ rd_bool_t is_readonly; /**< Value is read-only (on broker) */
+ rd_bool_t is_default; /**< Value is at its default */
+ rd_bool_t is_sensitive; /**< Value is sensitive */
+ rd_bool_t is_synonym; /**< Value is synonym */
+ } a;
+
+ rd_list_t synonyms; /**< Type (rd_kafka_configEntry *) */
+};
+
+/**
+ * @brief A cluster ConfigResource constisting of:
+ * - resource type (BROKER, TOPIC)
+ * - configuration property name
+ * - configuration property value
+ *
+ * https://cwiki.apache.org/confluence/display/KAFKA/KIP-133%3A+Describe+and+Alter+Configs+Admin+APIs
+ */
+struct rd_kafka_ConfigResource_s {
+ rd_kafka_ResourceType_t restype; /**< Resource type */
+ char *name; /**< Resource name, points to .data*/
+ rd_list_t config; /**< Type (rd_kafka_ConfigEntry_t *):
+ * List of config props */
+
+ /* Response */
+ rd_kafka_resp_err_t err; /**< Response error code */
+ char *errstr; /**< Response error string */
+
+ char data[1]; /**< The name is allocated along with
+ * the struct here. */
+};
+
+
+
+/**@}*/
+
+/**
+ * @name AlterConfigs
+ * @{
+ */
+
+
+
+struct rd_kafka_AlterConfigs_result_s {
+ rd_list_t resources; /**< Type (rd_kafka_ConfigResource_t *) */
+};
+
+struct rd_kafka_ConfigResource_result_s {
+ rd_list_t resources; /**< Type (struct rd_kafka_ConfigResource *):
+ * List of config resources, sans config
+ * but with response error values. */
+};
+
+/**@}*/
+
+
+
+/**
+ * @name DescribeConfigs
+ * @{
+ */
+
+struct rd_kafka_DescribeConfigs_result_s {
+ rd_list_t configs; /**< Type (rd_kafka_ConfigResource_t *) */
+};
+
+/**@}*/
+
+
+/**
+ * @name DeleteGroups
+ * @{
+ */
+
+
+struct rd_kafka_DeleteGroup_s {
+ char *group; /**< Points to data */
+ char data[1]; /**< The group name is allocated along with
+ * the struct here. */
+};
+
+/**@}*/
+
+
+/**
+ * @name DeleteRecords
+ * @{
+ */
+
+struct rd_kafka_DeleteRecords_s {
+ rd_kafka_topic_partition_list_t *offsets;
+};
+
+/**@}*/
+
+
+/**
+ * @name DeleteConsumerGroupOffsets
+ * @{
+ */
+
+/**
+ * @brief DeleteConsumerGroupOffsets result
+ */
+struct rd_kafka_DeleteConsumerGroupOffsets_result_s {
+ rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
+};
+
+struct rd_kafka_DeleteConsumerGroupOffsets_s {
+ char *group; /**< Points to data */
+ rd_kafka_topic_partition_list_t *partitions;
+ char data[1]; /**< The group name is allocated along with
+ * the struct here. */
+};
+
+/**@}*/
+
+/**
+ * @name CreateAcls
+ * @{
+ */
+
+/**
+ * @brief AclBinding type, used with CreateAcls.
+ */
+struct rd_kafka_AclBinding_s {
+ rd_kafka_ResourceType_t restype; /**< Resource type */
+ char *name; /**< Resource name, points to .data */
+ rd_kafka_ResourcePatternType_t
+ resource_pattern_type; /**< Resource pattern type */
+ char *principal; /**< Access Control Entry principal */
+ char *host; /**< Access Control Entry host */
+ rd_kafka_AclOperation_t operation; /**< AclOperation enumeration */
+ rd_kafka_AclPermissionType_t
+ permission_type; /**< AclPermissionType enumeration */
+ rd_kafka_error_t *error; /**< Response error, or NULL on success. */
+};
+/**@}*/
+
+/**
+ * @name DeleteAcls
+ * @{
+ */
+
+/**
+ * @brief DeleteAcls_result type, used with DeleteAcls.
+ */
+struct rd_kafka_DeleteAcls_result_response_s {
+ rd_kafka_error_t *error; /**< Response error object, or NULL */
+ rd_list_t matching_acls; /**< Type (rd_kafka_AclBinding_t *) */
+};
+
+/**@}*/
+
+
+/**
+ * @name AlterConsumerGroupOffsets
+ * @{
+ */
+
+/**
+ * @brief AlterConsumerGroupOffsets result
+ */
+struct rd_kafka_AlterConsumerGroupOffsets_result_s {
+ rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
+};
+
+struct rd_kafka_AlterConsumerGroupOffsets_s {
+ char *group_id; /**< Points to data */
+ rd_kafka_topic_partition_list_t *partitions;
+ char data[1]; /**< The group id is allocated along with
+ * the struct here. */
+};
+
+/**@}*/
+
+
+/**
+ * @name ListConsumerGroupOffsets
+ * @{
+ */
+
+/**
+ * @brief ListConsumerGroupOffsets result
+ */
+struct rd_kafka_ListConsumerGroupOffsets_result_s {
+ rd_list_t groups; /**< Type (rd_kafka_group_result_t *) */
+};
+
+struct rd_kafka_ListConsumerGroupOffsets_s {
+ char *group_id; /**< Points to data */
+ rd_kafka_topic_partition_list_t *partitions;
+ char data[1]; /**< The group id is allocated along with
+ * the struct here. */
+};
+
+/**@}*/
+
+/**
+ * @name ListConsumerGroups
+ * @{
+ */
+
+/**
+ * @struct ListConsumerGroups result for a single group
+ */
+struct rd_kafka_ConsumerGroupListing_s {
+ char *group_id; /**< Group id */
+ /** Is it a simple consumer group? That means empty protocol_type. */
+ rd_bool_t is_simple_consumer_group;
+ rd_kafka_consumer_group_state_t state; /**< Consumer group state. */
+};
+
+
+/**
+ * @struct ListConsumerGroups results and errors
+ */
+struct rd_kafka_ListConsumerGroupsResult_s {
+ rd_list_t valid; /**< List of valid ConsumerGroupListing
+ (rd_kafka_ConsumerGroupListing_t *) */
+ rd_list_t errors; /**< List of errors (rd_kafka_error_t *) */
+};
+
+/**@}*/
+
+/**
+ * @name DescribeConsumerGroups
+ * @{
+ */
+
+/**
+ * @struct Assignment of a consumer group member.
+ *
+ */
+struct rd_kafka_MemberAssignment_s {
+ /** Partitions assigned to current member. */
+ rd_kafka_topic_partition_list_t *partitions;
+};
+
+/**
+ * @struct Description of a consumer group member.
+ *
+ */
+struct rd_kafka_MemberDescription_s {
+ char *client_id; /**< Client id */
+ char *consumer_id; /**< Consumer id */
+ char *group_instance_id; /**< Group instance id */
+ char *host; /**< Group member host */
+ rd_kafka_MemberAssignment_t assignment; /**< Member assignment */
+};
+
+/**
+ * @struct DescribeConsumerGroups result
+ */
+struct rd_kafka_ConsumerGroupDescription_s {
+ /** Group id */
+ char *group_id;
+ /** Is it a simple consumer group? That means empty protocol_type. */
+ rd_bool_t is_simple_consumer_group;
+ /** List of members.
+ * Type (rd_kafka_MemberDescription_t *): members list */
+ rd_list_t members;
+ /** Protocol type */
+ char *protocol_type;
+ /** Partition assignor identifier. */
+ char *partition_assignor;
+ /** Consumer group state. */
+ rd_kafka_consumer_group_state_t state;
+ /** Consumer group coordinator. */
+ rd_kafka_Node_t *coordinator;
+ /** Group specific error. */
+ rd_kafka_error_t *error;
+};
+
+/**@}*/
+
+#endif /* _RDKAFKA_ADMIN_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c
new file mode 100644
index 000000000..dc4bdae94
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.c
@@ -0,0 +1,968 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name Consumer assignment state.
+ *
+ * Responsible for managing the state of assigned partitions.
+ *
+ *
+ ******************************************************************************
+ * rd_kafka_assignment_serve()
+ * ---------------------------
+ *
+ * It is important to call rd_kafka_assignment_serve() after each change
+ * to the assignment through assignment_add, assignment_subtract or
+ * assignment_clear as those functions only modify the assignment but does
+ * not take any action to transition partitions to or from the assignment
+ * states.
+ *
+ * The reason assignment_serve() is not automatically called from these
+ * functions is for the caller to be able to set the current state before
+ * the side-effects of serve() kick in, such as the call to
+ * rd_kafka_cgrp_assignment_done() that in turn will set the cgrp state.
+ *
+ *
+ *
+ ******************************************************************************
+ * Querying for committed offsets (.queried list)
+ * ----------------------------------------------
+ *
+ * We only allow one outstanding query (fetch committed offset), this avoids
+ * complex handling of partitions that are assigned, unassigned and reassigned
+ * all within the window of a OffsetFetch request.
+ * Consider the following case:
+ *
+ * 1. tp1 and tp2 are incrementally assigned.
+ * 2. An OffsetFetchRequest is sent for tp1 and tp2
+ * 3. tp2 is incremental unassigned.
+ * 4. Broker sends OffsetFetchResponse with offsets tp1=10, tp2=20.
+ * 4. Some other consumer commits offsets 30 for tp2.
+ * 5. tp2 is incrementally assigned again.
+ * 6. The OffsetFetchResponse is received.
+ *
+ * Without extra handling the consumer would start fetching tp1 at offset 10
+ * (which is correct) and tp2 at offset 20 (which is incorrect, the last
+ * committed offset is now 30).
+ *
+ * To alleviate this situation we remove unassigned partitions from the
+ * .queried list, and in the OffsetFetch response handler we only use offsets
+ * for partitions that are on the .queried list.
+ *
+ * To make sure the tp1 offset is used and not re-queried we only allow
+ * one outstanding OffsetFetch request at the time, meaning that at step 5
+ * a new OffsetFetch request will not be sent and tp2 will remain in the
+ * .pending list until the outstanding OffsetFetch response is received in
+ * step 6. At this point tp2 will transition to .queried and a new
+ * OffsetFetch request will be sent.
+ *
+ * This explanation is more verbose than the code involved.
+ *
+ ******************************************************************************
+ *
+ *
+ * @remark Try to keep any cgrp state out of this file.
+ *
+ * FIXME: There are some pretty obvious optimizations that needs to be done here
+ * with regards to partition_list_t lookups. But we can do that when
+ * we know the current implementation works correctly.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_request.h"
+
+
+static void rd_kafka_assignment_dump(rd_kafka_t *rk) {
+ rd_kafka_dbg(rk, CGRP, "DUMP",
+ "Assignment dump (started_cnt=%d, wait_stop_cnt=%d)",
+ rk->rk_consumer.assignment.started_cnt,
+ rk->rk_consumer.assignment.wait_stop_cnt);
+
+ rd_kafka_topic_partition_list_log(rk, "DUMP_ALL", RD_KAFKA_DBG_CGRP,
+ rk->rk_consumer.assignment.all);
+
+ rd_kafka_topic_partition_list_log(rk, "DUMP_PND", RD_KAFKA_DBG_CGRP,
+ rk->rk_consumer.assignment.pending);
+
+ rd_kafka_topic_partition_list_log(rk, "DUMP_QRY", RD_KAFKA_DBG_CGRP,
+ rk->rk_consumer.assignment.queried);
+
+ rd_kafka_topic_partition_list_log(rk, "DUMP_REM", RD_KAFKA_DBG_CGRP,
+ rk->rk_consumer.assignment.removed);
+}
+
+/**
+ * @brief Apply the fetched committed offsets to the current assignment's
+ * queried partitions.
+ *
+ * @param err is the request-level error, if any. The caller is responsible
+ * for raising this error to the application. It is only used here
+ * to avoid taking actions.
+ *
+ * Called from the FetchOffsets response handler below.
+ */
+static void
+rd_kafka_assignment_apply_offsets(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_topic_partition_t *rktpar;
+
+ RD_KAFKA_TPLIST_FOREACH(rktpar, offsets) {
+ /* May be NULL, borrow ref. */
+ rd_kafka_toppar_t *rktp =
+ rd_kafka_topic_partition_toppar(rk, rktpar);
+
+ if (!rd_kafka_topic_partition_list_del(
+ rk->rk_consumer.assignment.queried, rktpar->topic,
+ rktpar->partition)) {
+ rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
+ "Ignoring OffsetFetch "
+ "response for %s [%" PRId32
+ "] which is no "
+ "longer in the queried list "
+ "(possibly unassigned?)",
+ rktpar->topic, rktpar->partition);
+ continue;
+ }
+
+ if (err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT ||
+ rktpar->err == RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT) {
+ /* Ongoing transactions are blocking offset retrieval.
+ * This is typically retried from the OffsetFetch
+ * handler but we can come here if the assignment
+ * (and thus the assignment.version) was changed while
+ * the OffsetFetch request was in-flight, in which case
+ * we put this partition back on the pending list for
+ * later handling by the assignment state machine. */
+
+ rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
+ "Adding %s [%" PRId32
+ "] back to pending "
+ "list because on-going transaction is "
+ "blocking offset retrieval",
+ rktpar->topic, rktpar->partition);
+
+ rd_kafka_topic_partition_list_add_copy(
+ rk->rk_consumer.assignment.pending, rktpar);
+
+ } else if (rktpar->err) {
+ /* Partition-level error */
+ rd_kafka_consumer_err(
+ rk->rk_consumer.q, RD_KAFKA_NODEID_UA, rktpar->err,
+ 0, rktpar->topic, rktp, RD_KAFKA_OFFSET_INVALID,
+ "Failed to fetch committed offset for "
+ "group \"%s\" topic %s [%" PRId32 "]: %s",
+ rk->rk_group_id->str, rktpar->topic,
+ rktpar->partition, rd_kafka_err2str(rktpar->err));
+
+ /* The partition will not be added back to .pending
+ * and thus only reside on .all until the application
+ * unassigns it and possible re-assigns it. */
+
+ } else if (!err) {
+ /* If rktpar->offset is RD_KAFKA_OFFSET_INVALID it means
+ * there was no committed offset for this partition.
+ * serve_pending() will now start this partition
+ * since the offset is set to INVALID (rather than
+ * STORED) and the partition fetcher will employ
+ * auto.offset.reset to know what to do. */
+
+ /* Add partition to pending list where serve()
+ * will start the fetcher. */
+ rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
+ "Adding %s [%" PRId32
+ "] back to pending "
+ "list with offset %s",
+ rktpar->topic, rktpar->partition,
+ rd_kafka_offset2str(rktpar->offset));
+
+ rd_kafka_topic_partition_list_add_copy(
+ rk->rk_consumer.assignment.pending, rktpar);
+ }
+ /* Do nothing for request-level errors (err is set). */
+ }
+
+ if (offsets->cnt > 0)
+ rd_kafka_assignment_serve(rk);
+}
+
+
+
+/**
+ * @brief Reply handler for OffsetFetch queries from the assignment code.
+ *
+ * @param opaque Is a malloced int64_t* containing the assignment version at the
+ * time of the request.
+ *
+ * @locality rdkafka main thread
+ */
+static void rd_kafka_assignment_handle_OffsetFetch(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_topic_partition_list_t *offsets = NULL;
+ int64_t *req_assignment_version = (int64_t *)opaque;
+ /* Only allow retries if there's been no change to the assignment,
+ * otherwise rely on assignment state machine to retry. */
+ rd_bool_t allow_retry =
+ *req_assignment_version == rk->rk_consumer.assignment.version;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ /* Termination, quick cleanup. */
+ rd_free(req_assignment_version);
+ return;
+ }
+
+ err = rd_kafka_handle_OffsetFetch(
+ rk, rkb, err, reply, request, &offsets,
+ rd_true /* Update toppars */, rd_true /* Add parts */, allow_retry);
+ if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
+ if (offsets)
+ rd_kafka_topic_partition_list_destroy(offsets);
+ return; /* retrying */
+ }
+
+ rd_free(req_assignment_version);
+
+ /* offsets may be NULL for certain errors, such
+ * as ERR__TRANSPORT. */
+ if (!offsets && !allow_retry) {
+ rd_dassert(err);
+ if (!err)
+ err = RD_KAFKA_RESP_ERR__NO_OFFSET;
+
+ rd_kafka_dbg(rk, CGRP, "OFFSET", "Offset fetch error: %s",
+ rd_kafka_err2str(err));
+ rd_kafka_consumer_err(
+ rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
+ NULL, RD_KAFKA_OFFSET_INVALID,
+ "Failed to fetch committed "
+ "offsets for partitions "
+ "in group \"%s\": %s",
+ rk->rk_group_id->str, rd_kafka_err2str(err));
+
+ return;
+ }
+
+
+
+ if (err) {
+ rd_kafka_dbg(rk, CGRP, "OFFSET",
+ "Offset fetch error for %d partition(s): %s",
+ offsets->cnt, rd_kafka_err2str(err));
+ rd_kafka_consumer_err(
+ rk->rk_consumer.q, rd_kafka_broker_id(rkb), err, 0, NULL,
+ NULL, RD_KAFKA_OFFSET_INVALID,
+ "Failed to fetch committed offsets for "
+ "%d partition(s) in group \"%s\": %s",
+ offsets->cnt, rk->rk_group_id->str, rd_kafka_err2str(err));
+ }
+
+ /* Apply the fetched offsets to the assignment */
+ rd_kafka_assignment_apply_offsets(rk, offsets, err);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+}
+
+
+/**
+ * @brief Decommission all partitions in the removed list.
+ *
+ * @returns >0 if there are removal operations in progress, else 0.
+ */
+static int rd_kafka_assignment_serve_removals(rd_kafka_t *rk) {
+ rd_kafka_topic_partition_t *rktpar;
+ int valid_offsets = 0;
+
+ RD_KAFKA_TPLIST_FOREACH(rktpar, rk->rk_consumer.assignment.removed) {
+ rd_kafka_toppar_t *rktp =
+ rd_kafka_topic_partition_ensure_toppar(
+ rk, rktpar, rd_true); /* Borrow ref */
+ int was_pending, was_queried;
+
+ /* Remove partition from pending and querying lists,
+ * if it happens to be there.
+ * Outstanding OffsetFetch query results will be ignored
+ * for partitions that are no longer on the .queried list. */
+ was_pending = rd_kafka_topic_partition_list_del(
+ rk->rk_consumer.assignment.pending, rktpar->topic,
+ rktpar->partition);
+ was_queried = rd_kafka_topic_partition_list_del(
+ rk->rk_consumer.assignment.queried, rktpar->topic,
+ rktpar->partition);
+
+ if (rktp->rktp_started) {
+ /* Partition was started, stop the fetcher. */
+ rd_assert(rk->rk_consumer.assignment.started_cnt > 0);
+
+ rd_kafka_toppar_op_fetch_stop(
+ rktp, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
+ rk->rk_consumer.assignment.wait_stop_cnt++;
+ }
+
+ /* Reset the (lib) pause flag which may have been set by
+ * the cgrp when scheduling the rebalance callback. */
+ rd_kafka_toppar_op_pause_resume(rktp, rd_false /*resume*/,
+ RD_KAFKA_TOPPAR_F_LIB_PAUSE,
+ RD_KAFKA_NO_REPLYQ);
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Save the currently stored offset and epoch on .removed
+ * so it will be committed below. */
+ rd_kafka_topic_partition_set_from_fetch_pos(
+ rktpar, rktp->rktp_stored_pos);
+ valid_offsets += !RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset);
+
+ /* Reset the stored offset to invalid so that
+ * a manual offset-less commit() or the auto-committer
+ * will not commit a stored offset from a previous
+ * assignment (issue #2782). */
+ rd_kafka_offset_store0(
+ rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1),
+ rd_true, RD_DONT_LOCK);
+
+ /* Partition is no longer desired */
+ rd_kafka_toppar_desired_del(rktp);
+
+ rd_assert((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED));
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ASSIGNED;
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_dbg(rk, CGRP, "REMOVE",
+ "Removing %s [%" PRId32
+ "] from assignment "
+ "(started=%s, pending=%s, queried=%s, "
+ "stored offset=%s)",
+ rktpar->topic, rktpar->partition,
+ RD_STR_ToF(rktp->rktp_started),
+ RD_STR_ToF(was_pending), RD_STR_ToF(was_queried),
+ rd_kafka_offset2str(rktpar->offset));
+ }
+
+ rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REMOVE",
+ "Served %d removed partition(s), "
+ "with %d offset(s) to commit",
+ rk->rk_consumer.assignment.removed->cnt, valid_offsets);
+
+ /* If enable.auto.commit=true:
+ * Commit final offsets to broker for the removed partitions,
+ * unless this is a consumer destruction with a close() call. */
+ if (valid_offsets > 0 &&
+ rk->rk_conf.offset_store_method == RD_KAFKA_OFFSET_METHOD_BROKER &&
+ rk->rk_cgrp && rk->rk_conf.enable_auto_commit &&
+ !rd_kafka_destroy_flags_no_consumer_close(rk))
+ rd_kafka_cgrp_assigned_offsets_commit(
+ rk->rk_cgrp, rk->rk_consumer.assignment.removed,
+ rd_false /* use offsets from .removed */,
+ "unassigned partitions");
+
+ rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.removed);
+
+ return rk->rk_consumer.assignment.wait_stop_cnt +
+ rk->rk_consumer.wait_commit_cnt;
+}
+
+
+/**
+ * @brief Serve all partitions in the pending list.
+ *
+ * This either (asynchronously) queries the partition's committed offset, or
+ * if the start offset is known, starts the partition fetcher.
+ *
+ * @returns >0 if there are pending operations in progress for the current
+ * assignment, else 0.
+ */
+static int rd_kafka_assignment_serve_pending(rd_kafka_t *rk) {
+ rd_kafka_topic_partition_list_t *partitions_to_query = NULL;
+ /* We can query committed offsets only if all of the following are true:
+ * - We have a group coordinator.
+ * - There are no outstanding commits (since we might need to
+ * read back those commits as our starting position).
+ * - There are no outstanding queries already (since we want to
+ * avoid using a earlier queries response for a partition that
+ * is unassigned and then assigned again).
+ */
+ rd_kafka_broker_t *coord =
+ rk->rk_cgrp ? rd_kafka_cgrp_get_coord(rk->rk_cgrp) : NULL;
+ rd_bool_t can_query_offsets =
+ coord && rk->rk_consumer.wait_commit_cnt == 0 &&
+ rk->rk_consumer.assignment.queried->cnt == 0;
+ int i;
+
+ if (can_query_offsets)
+ partitions_to_query = rd_kafka_topic_partition_list_new(
+ rk->rk_consumer.assignment.pending->cnt);
+
+ /* Scan the list backwards so removals are cheap (no array shuffle) */
+ for (i = rk->rk_consumer.assignment.pending->cnt - 1; i >= 0; i--) {
+ rd_kafka_topic_partition_t *rktpar =
+ &rk->rk_consumer.assignment.pending->elems[i];
+ /* Borrow ref */
+ rd_kafka_toppar_t *rktp =
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
+
+ rd_assert(!rktp->rktp_started);
+
+ if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) ||
+ rktpar->offset == RD_KAFKA_OFFSET_BEGINNING ||
+ rktpar->offset == RD_KAFKA_OFFSET_END ||
+ rktpar->offset == RD_KAFKA_OFFSET_INVALID ||
+ rktpar->offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
+ /* The partition fetcher can handle absolute
+ * as well as beginning/end/tail start offsets, so we're
+ * ready to start the fetcher now.
+ * The INVALID offset means there was no committed
+ * offset and the partition fetcher will employ
+ * auto.offset.reset.
+ *
+ * Start fetcher for partition and forward partition's
+ * fetchq to consumer group's queue. */
+
+ rd_kafka_dbg(rk, CGRP, "SRVPEND",
+ "Starting pending assigned partition "
+ "%s [%" PRId32 "] at %s",
+ rktpar->topic, rktpar->partition,
+ rd_kafka_fetch_pos2str(
+ rd_kafka_topic_partition_get_fetch_pos(
+ rktpar)));
+
+ /* Reset the (lib) pause flag which may have been set by
+ * the cgrp when scheduling the rebalance callback. */
+ rd_kafka_toppar_op_pause_resume(
+ rktp, rd_false /*resume*/,
+ RD_KAFKA_TOPPAR_F_LIB_PAUSE, RD_KAFKA_NO_REPLYQ);
+
+ /* Start the fetcher */
+ rktp->rktp_started = rd_true;
+ rk->rk_consumer.assignment.started_cnt++;
+
+ rd_kafka_toppar_op_fetch_start(
+ rktp,
+ rd_kafka_topic_partition_get_fetch_pos(rktpar),
+ rk->rk_consumer.q, RD_KAFKA_NO_REPLYQ);
+
+
+ } else if (can_query_offsets) {
+ /* Else use the last committed offset for partition.
+ * We can't rely on any internal cached committed offset
+ * so we'll accumulate a list of partitions that need
+ * to be queried and then send FetchOffsetsRequest
+ * to the group coordinator. */
+
+ rd_dassert(!rd_kafka_topic_partition_list_find(
+ rk->rk_consumer.assignment.queried, rktpar->topic,
+ rktpar->partition));
+
+ rd_kafka_topic_partition_list_add_copy(
+ partitions_to_query, rktpar);
+
+ rd_kafka_topic_partition_list_add_copy(
+ rk->rk_consumer.assignment.queried, rktpar);
+
+ rd_kafka_dbg(rk, CGRP, "SRVPEND",
+ "Querying committed offset for pending "
+ "assigned partition %s [%" PRId32 "]",
+ rktpar->topic, rktpar->partition);
+
+
+ } else {
+ rd_kafka_dbg(
+ rk, CGRP, "SRVPEND",
+ "Pending assignment partition "
+ "%s [%" PRId32
+ "] can't fetch committed "
+ "offset yet "
+ "(cgrp state %s, awaiting %d commits, "
+ "%d partition(s) already being queried)",
+ rktpar->topic, rktpar->partition,
+ rk->rk_cgrp
+ ? rd_kafka_cgrp_state_names[rk->rk_cgrp
+ ->rkcg_state]
+ : "n/a",
+ rk->rk_consumer.wait_commit_cnt,
+ rk->rk_consumer.assignment.queried->cnt);
+
+ continue; /* Keep rktpar on pending list */
+ }
+
+ /* Remove rktpar from the pending list */
+ rd_kafka_topic_partition_list_del_by_idx(
+ rk->rk_consumer.assignment.pending, i);
+ }
+
+
+ if (!can_query_offsets) {
+ if (coord)
+ rd_kafka_broker_destroy(coord);
+ return rk->rk_consumer.assignment.pending->cnt +
+ rk->rk_consumer.assignment.queried->cnt;
+ }
+
+
+ if (partitions_to_query->cnt > 0) {
+ int64_t *req_assignment_version = rd_malloc(sizeof(int64_t));
+ *req_assignment_version = rk->rk_consumer.assignment.version;
+
+ rd_kafka_dbg(rk, CGRP, "OFFSETFETCH",
+ "Fetching committed offsets for "
+ "%d pending partition(s) in assignment",
+ partitions_to_query->cnt);
+
+ rd_kafka_OffsetFetchRequest(
+ coord, rk->rk_group_id->str, partitions_to_query,
+ rk->rk_conf.isolation_level ==
+ RD_KAFKA_READ_COMMITTED /*require_stable_offsets*/,
+ 0, /* Timeout */
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_assignment_handle_OffsetFetch,
+ /* Must be freed by handler */
+ (void *)req_assignment_version);
+ }
+
+ if (coord)
+ rd_kafka_broker_destroy(coord);
+
+ rd_kafka_topic_partition_list_destroy(partitions_to_query);
+
+ return rk->rk_consumer.assignment.pending->cnt +
+ rk->rk_consumer.assignment.queried->cnt;
+}
+
+
+
+/**
+ * @brief Serve updates to the assignment.
+ *
+ * Call on:
+ * - assignment changes
+ * - wait_commit_cnt reaches 0
+ * - partition fetcher is stopped
+ */
+void rd_kafka_assignment_serve(rd_kafka_t *rk) {
+ int inp_removals = 0;
+ int inp_pending = 0;
+
+ rd_kafka_assignment_dump(rk);
+
+ /* Serve any partitions that should be removed */
+ if (rk->rk_consumer.assignment.removed->cnt > 0)
+ inp_removals = rd_kafka_assignment_serve_removals(rk);
+
+ /* Serve any partitions in the pending list that need further action,
+ * unless we're waiting for a previous assignment change (an unassign
+ * in some form) to propagate, or outstanding offset commits
+ * to finish (since we might need the committed offsets as start
+ * offsets). */
+ if (rk->rk_consumer.assignment.wait_stop_cnt == 0 &&
+ rk->rk_consumer.wait_commit_cnt == 0 && inp_removals == 0 &&
+ rk->rk_consumer.assignment.pending->cnt > 0)
+ inp_pending = rd_kafka_assignment_serve_pending(rk);
+
+ if (inp_removals + inp_pending +
+ rk->rk_consumer.assignment.queried->cnt +
+ rk->rk_consumer.assignment.wait_stop_cnt +
+ rk->rk_consumer.wait_commit_cnt ==
+ 0) {
+ /* No assignment operations in progress,
+ * signal assignment done back to cgrp to let it
+ * transition to its next state if necessary.
+ * We may emit this signalling more than necessary and it is
+ * up to the cgrp to only take action if needed, based on its
+ * state. */
+ rd_kafka_cgrp_assignment_done(rk->rk_cgrp);
+ } else {
+ rd_kafka_dbg(rk, CGRP, "ASSIGNMENT",
+ "Current assignment of %d partition(s) "
+ "with %d pending adds, %d offset queries, "
+ "%d partitions awaiting stop and "
+ "%d offset commits in progress",
+ rk->rk_consumer.assignment.all->cnt, inp_pending,
+ rk->rk_consumer.assignment.queried->cnt,
+ rk->rk_consumer.assignment.wait_stop_cnt,
+ rk->rk_consumer.wait_commit_cnt);
+ }
+}
+
+
+/**
+ * @returns true if the current or previous assignment has operations in
+ * progress, such as waiting for partition fetchers to stop.
+ */
+rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk) {
+ return rk->rk_consumer.wait_commit_cnt > 0 ||
+ rk->rk_consumer.assignment.wait_stop_cnt > 0 ||
+ rk->rk_consumer.assignment.pending->cnt > 0 ||
+ rk->rk_consumer.assignment.queried->cnt > 0 ||
+ rk->rk_consumer.assignment.removed->cnt > 0;
+}
+
+
+/**
+ * @brief Clear the current assignment.
+ *
+ * @remark Make sure to call rd_kafka_assignment_serve() after successful
+ * return from this function.
+ *
+ * @returns the number of partitions removed.
+ */
+int rd_kafka_assignment_clear(rd_kafka_t *rk) {
+ int cnt = rk->rk_consumer.assignment.all->cnt;
+
+ if (cnt == 0) {
+ rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN",
+ "No current assignment to clear");
+ return 0;
+ }
+
+ rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "CLEARASSIGN",
+ "Clearing current assignment of %d partition(s)",
+ rk->rk_consumer.assignment.all->cnt);
+
+ rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.pending);
+ rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.queried);
+
+ rd_kafka_topic_partition_list_add_list(
+ rk->rk_consumer.assignment.removed, rk->rk_consumer.assignment.all);
+ rd_kafka_topic_partition_list_clear(rk->rk_consumer.assignment.all);
+
+ rk->rk_consumer.assignment.version++;
+
+ return cnt;
+}
+
+
+/**
+ * @brief Adds \p partitions to the current assignment.
+ *
+ * Will return error if trying to add a partition that is already in the
+ * assignment.
+ *
+ * @remark Make sure to call rd_kafka_assignment_serve() after successful
+ * return from this function.
+ */
+rd_kafka_error_t *
+rd_kafka_assignment_add(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions) {
+ rd_bool_t was_empty = rk->rk_consumer.assignment.all->cnt == 0;
+ int i;
+
+ /* Make sure there are no duplicates, invalid partitions, or
+ * invalid offsets in the input partitions. */
+ rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);
+
+ for (i = 0; i < partitions->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
+ const rd_kafka_topic_partition_t *prev =
+ i > 0 ? &partitions->elems[i - 1] : NULL;
+
+ if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset) &&
+ rktpar->offset != RD_KAFKA_OFFSET_BEGINNING &&
+ rktpar->offset != RD_KAFKA_OFFSET_END &&
+ rktpar->offset != RD_KAFKA_OFFSET_STORED &&
+ rktpar->offset != RD_KAFKA_OFFSET_INVALID &&
+ rktpar->offset > RD_KAFKA_OFFSET_TAIL_BASE)
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "%s [%" PRId32
+ "] has invalid start offset %" PRId64,
+ rktpar->topic, rktpar->partition, rktpar->offset);
+
+ if (prev && !rd_kafka_topic_partition_cmp(rktpar, prev))
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Duplicate %s [%" PRId32 "] in input list",
+ rktpar->topic, rktpar->partition);
+
+ if (rd_kafka_topic_partition_list_find(
+ rk->rk_consumer.assignment.all, rktpar->topic,
+ rktpar->partition))
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__CONFLICT,
+ "%s [%" PRId32
+ "] is already part of the "
+ "current assignment",
+ rktpar->topic,
+ rktpar->partition);
+
+ /* Translate RD_KAFKA_OFFSET_INVALID to RD_KAFKA_OFFSET_STORED,
+ * i.e., read from committed offset, since we use INVALID
+ * internally to differentiate between querying for
+ * committed offset (STORED) and no committed offset (INVALID).
+ */
+ if (rktpar->offset == RD_KAFKA_OFFSET_INVALID)
+ rktpar->offset = RD_KAFKA_OFFSET_STORED;
+
+ /* Get toppar object for each partition.
+ * This is to make sure the rktp stays alive while unassigning
+ * any previous assignment in the call to
+ * assignment_clear() below. */
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
+ }
+
+ /* Mark all partition objects as assigned and reset the stored
+ * offsets back to invalid in case it was explicitly stored during
+ * the time the partition was not assigned. */
+ for (i = 0; i < partitions->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
+ rd_kafka_toppar_t *rktp =
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
+
+ rd_kafka_toppar_lock(rktp);
+
+ rd_assert(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED));
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ASSIGNED;
+
+ /* Reset the stored offset to INVALID to avoid the race
+ * condition described in rdkafka_offset.h */
+ rd_kafka_offset_store0(
+ rktp, RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1),
+ rd_true /* force */, RD_DONT_LOCK);
+
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+
+ /* Add the new list of partitions to the current assignment.
+ * Only need to sort the final assignment if it was non-empty
+ * to begin with since \p partitions is sorted above. */
+ rd_kafka_topic_partition_list_add_list(rk->rk_consumer.assignment.all,
+ partitions);
+ if (!was_empty)
+ rd_kafka_topic_partition_list_sort(
+ rk->rk_consumer.assignment.all, NULL, NULL);
+
+ /* And add to .pending for serve_pending() to handle. */
+ rd_kafka_topic_partition_list_add_list(
+ rk->rk_consumer.assignment.pending, partitions);
+
+
+ rd_kafka_dbg(rk, CONSUMER | RD_KAFKA_DBG_CGRP, "ASSIGNMENT",
+ "Added %d partition(s) to assignment which "
+ "now consists of %d partition(s) where of %d are in "
+ "pending state and %d are being queried",
+ partitions->cnt, rk->rk_consumer.assignment.all->cnt,
+ rk->rk_consumer.assignment.pending->cnt,
+ rk->rk_consumer.assignment.queried->cnt);
+
+ rk->rk_consumer.assignment.version++;
+
+ return NULL;
+}
+
+
+/**
+ * @brief Remove \p partitions from the current assignment.
+ *
+ * Will return error if trying to remove a partition that is not in the
+ * assignment.
+ *
+ * @remark Make sure to call rd_kafka_assignment_serve() after successful
+ * return from this function.
+ */
+rd_kafka_error_t *
+rd_kafka_assignment_subtract(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions) {
+ int i;
+ int matched_queried_partitions = 0;
+ int assignment_pre_cnt;
+
+ if (rk->rk_consumer.assignment.all->cnt == 0 && partitions->cnt > 0)
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Can't subtract from empty assignment");
+
+ /* Verify that all partitions in \p partitions are in the assignment
+ * before starting to modify the assignment. */
+ rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);
+
+ for (i = 0; i < partitions->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
+
+ if (!rd_kafka_topic_partition_list_find(
+ rk->rk_consumer.assignment.all, rktpar->topic,
+ rktpar->partition))
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "%s [%" PRId32
+ "] can't be unassigned since "
+ "it is not in the current assignment",
+ rktpar->topic, rktpar->partition);
+
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar, rd_true);
+ }
+
+
+ assignment_pre_cnt = rk->rk_consumer.assignment.all->cnt;
+
+ /* Remove partitions in reverse order to avoid excessive
+ * array shuffling of .all.
+ * Add the removed partitions to .pending for serve() to handle. */
+ for (i = partitions->cnt - 1; i >= 0; i--) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &partitions->elems[i];
+
+ if (!rd_kafka_topic_partition_list_del(
+ rk->rk_consumer.assignment.all, rktpar->topic,
+ rktpar->partition))
+ RD_BUG("Removed partition %s [%" PRId32
+ "] not found "
+ "in assignment.all",
+ rktpar->topic, rktpar->partition);
+
+ if (rd_kafka_topic_partition_list_del(
+ rk->rk_consumer.assignment.queried, rktpar->topic,
+ rktpar->partition))
+ matched_queried_partitions++;
+ else
+ rd_kafka_topic_partition_list_del(
+ rk->rk_consumer.assignment.pending, rktpar->topic,
+ rktpar->partition);
+
+ /* Add to .removed list which will be served by
+ * serve_removals(). */
+ rd_kafka_topic_partition_list_add_copy(
+ rk->rk_consumer.assignment.removed, rktpar);
+ }
+
+ rd_kafka_dbg(rk, CGRP, "REMOVEASSIGN",
+ "Removed %d partition(s) "
+ "(%d with outstanding offset queries) from assignment "
+ "of %d partition(s)",
+ partitions->cnt, matched_queried_partitions,
+ assignment_pre_cnt);
+
+ if (rk->rk_consumer.assignment.all->cnt == 0) {
+ /* Some safe checking */
+ rd_assert(rk->rk_consumer.assignment.pending->cnt == 0);
+ rd_assert(rk->rk_consumer.assignment.queried->cnt == 0);
+ }
+
+ rk->rk_consumer.assignment.version++;
+
+ return NULL;
+}
+
+
+/**
+ * @brief Call when partition fetcher has stopped.
+ */
+void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp) {
+ rd_assert(rk->rk_consumer.assignment.wait_stop_cnt > 0);
+ rk->rk_consumer.assignment.wait_stop_cnt--;
+
+ rd_assert(rktp->rktp_started);
+ rktp->rktp_started = rd_false;
+
+ rd_assert(rk->rk_consumer.assignment.started_cnt > 0);
+ rk->rk_consumer.assignment.started_cnt--;
+
+ /* If this was the last partition we awaited stop for, serve the
+ * assignment to transition any existing assignment to the next state */
+ if (rk->rk_consumer.assignment.wait_stop_cnt == 0) {
+ rd_kafka_dbg(rk, CGRP, "STOPSERVE",
+ "All partitions awaiting stop are now "
+ "stopped: serving assignment");
+ rd_kafka_assignment_serve(rk);
+ }
+}
+
+
+/**
+ * @brief Pause fetching of the currently assigned partitions.
+ *
+ * Partitions will be resumed by calling rd_kafka_assignment_resume() or
+ * from either serve_removals() or serve_pending() above.
+ */
+void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason) {
+
+ if (rk->rk_consumer.assignment.all->cnt == 0)
+ return;
+
+ rd_kafka_dbg(rk, CGRP, "PAUSE",
+ "Pausing fetchers for %d assigned partition(s): %s",
+ rk->rk_consumer.assignment.all->cnt, reason);
+
+ rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_ASYNC,
+ RD_KAFKA_TOPPAR_F_LIB_PAUSE,
+ rk->rk_consumer.assignment.all);
+}
+
+/**
+ * @brief Resume fetching of the currently assigned partitions which have
+ * previously been paused by rd_kafka_assignment_pause().
+ */
+void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason) {
+
+ if (rk->rk_consumer.assignment.all->cnt == 0)
+ return;
+
+ rd_kafka_dbg(rk, CGRP, "PAUSE",
+ "Resuming fetchers for %d assigned partition(s): %s",
+ rk->rk_consumer.assignment.all->cnt, reason);
+
+ rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_ASYNC,
+ RD_KAFKA_TOPPAR_F_LIB_PAUSE,
+ rk->rk_consumer.assignment.all);
+}
+
+
+
+/**
+ * @brief Destroy assignment state (but not \p assignment itself)
+ */
+void rd_kafka_assignment_destroy(rd_kafka_t *rk) {
+ if (!rk->rk_consumer.assignment.all)
+ return; /* rd_kafka_assignment_init() not called */
+ rd_kafka_topic_partition_list_destroy(rk->rk_consumer.assignment.all);
+ rd_kafka_topic_partition_list_destroy(
+ rk->rk_consumer.assignment.pending);
+ rd_kafka_topic_partition_list_destroy(
+ rk->rk_consumer.assignment.queried);
+ rd_kafka_topic_partition_list_destroy(
+ rk->rk_consumer.assignment.removed);
+}
+
+
+/**
+ * @brief Initialize the assignment struct.
+ */
+void rd_kafka_assignment_init(rd_kafka_t *rk) {
+ rk->rk_consumer.assignment.all = rd_kafka_topic_partition_list_new(100);
+ rk->rk_consumer.assignment.pending =
+ rd_kafka_topic_partition_list_new(100);
+ rk->rk_consumer.assignment.queried =
+ rd_kafka_topic_partition_list_new(100);
+ rk->rk_consumer.assignment.removed =
+ rd_kafka_topic_partition_list_new(100);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h
new file mode 100644
index 000000000..fa51bb10c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignment.h
@@ -0,0 +1,73 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDKAFKA_ASSIGNMENT_H_
+#define _RDKAFKA_ASSIGNMENT_H_
+
+typedef struct rd_kafka_assignment_s {
+ /** All currently assigned partitions. */
+ rd_kafka_topic_partition_list_t *all;
+ /** Partitions in need of action (subset of .all) */
+ rd_kafka_topic_partition_list_t *pending;
+ /** Partitions that are being queried for committed
+ * offsets (subset of .all) */
+ rd_kafka_topic_partition_list_t *queried;
+ /** Partitions that have been removed from the assignment
+ * but not yet decommissioned. (not included in .all) */
+ rd_kafka_topic_partition_list_t *removed;
+ /** Number of started partitions */
+ int started_cnt;
+ /** Number of partitions being stopped. */
+ int wait_stop_cnt;
+ /** Assignment version: any change to the assignment will bump this
+ * version by one. This is used to know if a protocol response is
+ * outdated or not.
+ * @locks_required none
+ * @locality rdkafka main thread */
+ int64_t version;
+} rd_kafka_assignment_t;
+
+
+int rd_kafka_assignment_clear(rd_kafka_t *rk);
+rd_kafka_error_t *
+rd_kafka_assignment_add(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions);
+rd_kafka_error_t *
+rd_kafka_assignment_subtract(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions);
+void rd_kafka_assignment_partition_stopped(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp);
+void rd_kafka_assignment_pause(rd_kafka_t *rk, const char *reason);
+void rd_kafka_assignment_resume(rd_kafka_t *rk, const char *reason);
+void rd_kafka_assignment_serve(rd_kafka_t *rk);
+rd_bool_t rd_kafka_assignment_in_progress(rd_kafka_t *rk);
+void rd_kafka_assignment_destroy(rd_kafka_t *rk);
+void rd_kafka_assignment_init(rd_kafka_t *rk);
+
+#endif /* _RDKAFKA_ASSIGNMENT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c
new file mode 100644
index 000000000..792573845
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.c
@@ -0,0 +1,1065 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_assignor.h"
+#include "rdkafka_request.h"
+#include "rdunittest.h"
+
+#include <ctype.h>
+
+/**
+ * Clear out and free any memory used by the member, but not the rkgm itself.
+ */
+void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm) {
+ if (rkgm->rkgm_owned)
+ rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);
+
+ if (rkgm->rkgm_subscription)
+ rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription);
+
+ if (rkgm->rkgm_assignment)
+ rd_kafka_topic_partition_list_destroy(rkgm->rkgm_assignment);
+
+ rd_list_destroy(&rkgm->rkgm_eligible);
+
+ if (rkgm->rkgm_member_id)
+ rd_kafkap_str_destroy(rkgm->rkgm_member_id);
+
+ if (rkgm->rkgm_group_instance_id)
+ rd_kafkap_str_destroy(rkgm->rkgm_group_instance_id);
+
+ if (rkgm->rkgm_userdata)
+ rd_kafkap_bytes_destroy(rkgm->rkgm_userdata);
+
+ if (rkgm->rkgm_member_metadata)
+ rd_kafkap_bytes_destroy(rkgm->rkgm_member_metadata);
+
+ memset(rkgm, 0, sizeof(*rkgm));
+}
+
+
+/**
+ * @brief Group member comparator (takes rd_kafka_group_member_t *)
+ */
+int rd_kafka_group_member_cmp(const void *_a, const void *_b) {
+ const rd_kafka_group_member_t *a = (const rd_kafka_group_member_t *)_a;
+ const rd_kafka_group_member_t *b = (const rd_kafka_group_member_t *)_b;
+
+ /* Use the group instance id to compare static group members */
+ if (!RD_KAFKAP_STR_IS_NULL(a->rkgm_group_instance_id) &&
+ !RD_KAFKAP_STR_IS_NULL(b->rkgm_group_instance_id))
+ return rd_kafkap_str_cmp(a->rkgm_group_instance_id,
+ b->rkgm_group_instance_id);
+
+ return rd_kafkap_str_cmp(a->rkgm_member_id, b->rkgm_member_id);
+}
+
+
+/**
+ * Returns true if member subscribes to topic, else false.
+ */
+int rd_kafka_group_member_find_subscription(rd_kafka_t *rk,
+ const rd_kafka_group_member_t *rkgm,
+ const char *topic) {
+ int i;
+
+ /* Match against member's subscription. */
+ for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rkgm->rkgm_subscription->elems[i];
+
+ if (rd_kafka_topic_partition_match(rk, rkgm, rktpar, topic,
+ NULL))
+ return 1;
+ }
+
+ return 0;
+}
+
+
+rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
+ const rd_list_t *topics,
+ const void *userdata,
+ size_t userdata_size,
+ const rd_kafka_topic_partition_list_t *owned_partitions) {
+
+ rd_kafka_buf_t *rkbuf;
+ rd_kafkap_bytes_t *kbytes;
+ int i;
+ int topic_cnt = rd_list_cnt(topics);
+ const rd_kafka_topic_info_t *tinfo;
+ size_t len;
+
+ /*
+ * MemberMetadata => Version Subscription AssignmentStrategies
+ * Version => int16
+ * Subscription => Topics UserData
+ * Topics => [String]
+ * UserData => Bytes
+ * OwnedPartitions => [Topic Partitions] // added in v1
+ * Topic => string
+ * Partitions => [int32]
+ */
+
+ rkbuf = rd_kafka_buf_new(1, 100 + (topic_cnt * 100) + userdata_size);
+
+ /* Version */
+ rd_kafka_buf_write_i16(rkbuf, 1);
+ rd_kafka_buf_write_i32(rkbuf, topic_cnt);
+ RD_LIST_FOREACH(tinfo, topics, i)
+ rd_kafka_buf_write_str(rkbuf, tinfo->topic, -1);
+ if (userdata)
+ rd_kafka_buf_write_bytes(rkbuf, userdata, userdata_size);
+ else /* Kafka 0.9.0.0 can't parse NULL bytes, so we provide empty,
+ * which is compatible with all of the built-in Java client
+ * assignors at the present time (up to and including v2.5) */
+ rd_kafka_buf_write_bytes(rkbuf, "", 0);
+ /* Following data is ignored by v0 consumers */
+ if (!owned_partitions)
+ /* If there are no owned partitions, this is specified as an
+ * empty array, not NULL. */
+ rd_kafka_buf_write_i32(rkbuf, 0); /* Topic count */
+ else {
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ rd_kafka_buf_write_topic_partitions(
+ rkbuf, owned_partitions,
+ rd_false /*don't skip invalid offsets*/,
+ rd_false /*any offset*/, fields);
+ }
+
+ /* Get binary buffer and allocate a new Kafka Bytes with a copy. */
+ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
+ len = rd_slice_remains(&rkbuf->rkbuf_reader);
+ kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len);
+ rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len);
+ rd_kafka_buf_destroy(rkbuf);
+
+ return kbytes;
+}
+
+
+
+rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
+ const rd_kafka_assignor_t *rkas,
+ void *assignor_state,
+ const rd_list_t *topics,
+ const rd_kafka_topic_partition_list_t *owned_partitions) {
+ return rd_kafka_consumer_protocol_member_metadata_new(topics, NULL, 0,
+ owned_partitions);
+}
+
+
+
+/**
+ * Returns 1 if all subscriptions are satifised for this member, else 0.
+ */
+static int rd_kafka_member_subscription_match(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_group_member_t *rkgm,
+ const rd_kafka_metadata_topic_t *topic_metadata,
+ rd_kafka_assignor_topic_t *eligible_topic) {
+ int i;
+ int has_regex = 0;
+ int matched = 0;
+
+ /* Match against member's subscription. */
+ for (i = 0; i < rkgm->rkgm_subscription->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rkgm->rkgm_subscription->elems[i];
+ int matched_by_regex = 0;
+
+ if (rd_kafka_topic_partition_match(rkcg->rkcg_rk, rkgm, rktpar,
+ topic_metadata->topic,
+ &matched_by_regex)) {
+ rd_list_add(&rkgm->rkgm_eligible,
+ (void *)topic_metadata);
+ matched++;
+ has_regex += matched_by_regex;
+ }
+ }
+
+ if (matched)
+ rd_list_add(&eligible_topic->members, rkgm);
+
+ if (!has_regex &&
+ rd_list_cnt(&rkgm->rkgm_eligible) == rkgm->rkgm_subscription->cnt)
+ return 1; /* All subscriptions matched */
+ else
+ return 0;
+}
+
+
+static void rd_kafka_assignor_topic_destroy(rd_kafka_assignor_topic_t *at) {
+ rd_list_destroy(&at->members);
+ rd_free(at);
+}
+
+int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b) {
+ const rd_kafka_assignor_topic_t *a =
+ *(const rd_kafka_assignor_topic_t *const *)_a;
+ const rd_kafka_assignor_topic_t *b =
+ *(const rd_kafka_assignor_topic_t *const *)_b;
+
+ return strcmp(a->metadata->topic, b->metadata->topic);
+}
+
+/**
+ * Determine the complete set of topics that match at least one of
+ * the group member subscriptions. Associate with each of these the
+ * complete set of members that are subscribed to it. The result is
+ * returned in `eligible_topics`.
+ */
+static void
+rd_kafka_member_subscriptions_map(rd_kafka_cgrp_t *rkcg,
+ rd_list_t *eligible_topics,
+ const rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ int member_cnt) {
+ int ti;
+ rd_kafka_assignor_topic_t *eligible_topic = NULL;
+
+ rd_list_init(eligible_topics, RD_MIN(metadata->topic_cnt, 10),
+ (void *)rd_kafka_assignor_topic_destroy);
+
+ /* For each topic in the cluster, scan through the member list
+ * to find matching subscriptions. */
+ for (ti = 0; ti < metadata->topic_cnt; ti++) {
+ int i;
+
+ /* Ignore topics in blacklist */
+ if (rkcg->rkcg_rk->rk_conf.topic_blacklist &&
+ rd_kafka_pattern_match(
+ rkcg->rkcg_rk->rk_conf.topic_blacklist,
+ metadata->topics[ti].topic)) {
+ rd_kafka_dbg(rkcg->rkcg_rk,
+ TOPIC | RD_KAFKA_DBG_ASSIGNOR, "BLACKLIST",
+ "Assignor ignoring blacklisted "
+ "topic \"%s\"",
+ metadata->topics[ti].topic);
+ continue;
+ }
+
+ if (!eligible_topic)
+ eligible_topic = rd_calloc(1, sizeof(*eligible_topic));
+
+ rd_list_init(&eligible_topic->members, member_cnt, NULL);
+
+ /* For each member: scan through its topic subscription */
+ for (i = 0; i < member_cnt; i++) {
+ /* Match topic against existing metadata,
+ incl regex matching. */
+ rd_kafka_member_subscription_match(
+ rkcg, &members[i], &metadata->topics[ti],
+ eligible_topic);
+ }
+
+ if (rd_list_empty(&eligible_topic->members)) {
+ rd_list_destroy(&eligible_topic->members);
+ continue;
+ }
+
+ eligible_topic->metadata = &metadata->topics[ti];
+ rd_list_add(eligible_topics, eligible_topic);
+ eligible_topic = NULL;
+ }
+
+ if (eligible_topic)
+ rd_free(eligible_topic);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_assignor_run(rd_kafka_cgrp_t *rkcg,
+ const rd_kafka_assignor_t *rkas,
+ rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ int member_cnt,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_resp_err_t err;
+ rd_ts_t ts_start = rd_clock();
+ int i;
+ rd_list_t eligible_topics;
+ int j;
+
+ /* Construct eligible_topics, a map of:
+ * topic -> set of members that are subscribed to it. */
+ rd_kafka_member_subscriptions_map(rkcg, &eligible_topics, metadata,
+ members, member_cnt);
+
+
+ if (rkcg->rkcg_rk->rk_conf.debug &
+ (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
+ "Group \"%s\" running %s assignor for "
+ "%d member(s) and "
+ "%d eligible subscribed topic(s):",
+ rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
+ member_cnt, eligible_topics.rl_cnt);
+
+ for (i = 0; i < member_cnt; i++) {
+ const rd_kafka_group_member_t *member = &members[i];
+
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR,
+ "ASSIGN",
+ " Member \"%.*s\"%s with "
+ "%d owned partition(s) and "
+ "%d subscribed topic(s):",
+ RD_KAFKAP_STR_PR(member->rkgm_member_id),
+ !rd_kafkap_str_cmp(member->rkgm_member_id,
+ rkcg->rkcg_member_id)
+ ? " (me)"
+ : "",
+ member->rkgm_owned ? member->rkgm_owned->cnt : 0,
+ member->rkgm_subscription->cnt);
+ for (j = 0; j < member->rkgm_subscription->cnt; j++) {
+ const rd_kafka_topic_partition_t *p =
+ &member->rkgm_subscription->elems[j];
+ rd_kafka_dbg(rkcg->rkcg_rk,
+ CGRP | RD_KAFKA_DBG_ASSIGNOR,
+ "ASSIGN", " %s [%" PRId32 "]",
+ p->topic, p->partition);
+ }
+ }
+ }
+
+ /* Call assignors assign callback */
+ err = rkas->rkas_assign_cb(
+ rkcg->rkcg_rk, rkas, rkcg->rkcg_member_id->str, metadata, members,
+ member_cnt, (rd_kafka_assignor_topic_t **)eligible_topics.rl_elems,
+ eligible_topics.rl_cnt, errstr, errstr_size, rkas->rkas_opaque);
+
+ if (err) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
+ "Group \"%s\" %s assignment failed "
+ "for %d member(s): %s",
+ rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
+ (int)member_cnt, errstr);
+ } else if (rkcg->rkcg_rk->rk_conf.debug &
+ (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_ASSIGNOR)) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
+ "Group \"%s\" %s assignment for %d member(s) "
+ "finished in %.3fms:",
+ rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
+ (int)member_cnt, (float)(rd_clock() - ts_start) / 1000.0f);
+ for (i = 0; i < member_cnt; i++) {
+ const rd_kafka_group_member_t *member = &members[i];
+
+ rd_kafka_dbg(rkcg->rkcg_rk,
+ CGRP | RD_KAFKA_DBG_ASSIGNOR, "ASSIGN",
+ " Member \"%.*s\"%s assigned "
+ "%d partition(s):",
+ RD_KAFKAP_STR_PR(member->rkgm_member_id),
+ !rd_kafkap_str_cmp(member->rkgm_member_id,
+ rkcg->rkcg_member_id)
+ ? " (me)"
+ : "",
+ member->rkgm_assignment->cnt);
+ for (j = 0; j < member->rkgm_assignment->cnt; j++) {
+ const rd_kafka_topic_partition_t *p =
+ &member->rkgm_assignment->elems[j];
+ rd_kafka_dbg(rkcg->rkcg_rk,
+ CGRP | RD_KAFKA_DBG_ASSIGNOR,
+ "ASSIGN", " %s [%" PRId32 "]",
+ p->topic, p->partition);
+ }
+ }
+ }
+
+ rd_list_destroy(&eligible_topics);
+
+ return err;
+}
+
+
+/**
+ * Assignor protocol string comparator
+ */
+static int rd_kafka_assignor_cmp_str(const void *_a, const void *_b) {
+ const char *a = _a;
+ const rd_kafka_assignor_t *b = _b;
+
+ return rd_kafkap_str_cmp_str2(a, b->rkas_protocol_name);
+}
+
+/**
+ * Find assignor by protocol name.
+ *
+ * Locality: any
+ * Locks: none
+ */
+rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk,
+ const char *protocol) {
+ return (rd_kafka_assignor_t *)rd_list_find(
+ &rk->rk_conf.partition_assignors, protocol,
+ rd_kafka_assignor_cmp_str);
+}
+
+
+/**
+ * Destroys an assignor (but does not unlink).
+ */
+static void rd_kafka_assignor_destroy(rd_kafka_assignor_t *rkas) {
+ rd_kafkap_str_destroy(rkas->rkas_protocol_type);
+ rd_kafkap_str_destroy(rkas->rkas_protocol_name);
+ rd_free(rkas);
+}
+
+
+/**
+ * @brief Check that the rebalance protocol of all enabled assignors is
+ * the same.
+ */
+rd_kafka_resp_err_t
+rd_kafka_assignor_rebalance_protocol_check(const rd_kafka_conf_t *conf) {
+ int i;
+ rd_kafka_assignor_t *rkas;
+ rd_kafka_rebalance_protocol_t rebalance_protocol =
+ RD_KAFKA_REBALANCE_PROTOCOL_NONE;
+
+ RD_LIST_FOREACH(rkas, &conf->partition_assignors, i) {
+ if (!rkas->rkas_enabled)
+ continue;
+
+ if (rebalance_protocol == RD_KAFKA_REBALANCE_PROTOCOL_NONE)
+ rebalance_protocol = rkas->rkas_protocol;
+ else if (rebalance_protocol != rkas->rkas_protocol)
+ return RD_KAFKA_RESP_ERR__CONFLICT;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Add an assignor.
+ */
+rd_kafka_resp_err_t rd_kafka_assignor_add(
+ rd_kafka_t *rk,
+ const char *protocol_type,
+ const char *protocol_name,
+ rd_kafka_rebalance_protocol_t rebalance_protocol,
+ rd_kafka_resp_err_t (*assign_cb)(
+ rd_kafka_t *rk,
+ const struct rd_kafka_assignor_s *rkas,
+ const char *member_id,
+ const rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ rd_kafka_assignor_topic_t **eligible_topics,
+ size_t eligible_topic_cnt,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque),
+ rd_kafkap_bytes_t *(*get_metadata_cb)(
+ const struct rd_kafka_assignor_s *rkas,
+ void *assignor_state,
+ const rd_list_t *topics,
+ const rd_kafka_topic_partition_list_t *owned_partitions),
+ void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
+ void **assignor_state,
+ const rd_kafka_topic_partition_list_t *assignment,
+ const rd_kafkap_bytes_t *userdata,
+ const rd_kafka_consumer_group_metadata_t *rkcgm),
+ void (*destroy_state_cb)(void *assignor_state),
+ int (*unittest_cb)(void),
+ void *opaque) {
+ rd_kafka_assignor_t *rkas;
+
+ if (rd_kafkap_str_cmp_str(rk->rk_conf.group_protocol_type,
+ protocol_type))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
+
+ if (rebalance_protocol != RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
+ rebalance_protocol != RD_KAFKA_REBALANCE_PROTOCOL_EAGER)
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
+
+ /* Dont overwrite application assignors */
+ if ((rkas = rd_kafka_assignor_find(rk, protocol_name)))
+ return RD_KAFKA_RESP_ERR__CONFLICT;
+
+ rkas = rd_calloc(1, sizeof(*rkas));
+
+ rkas->rkas_protocol_name = rd_kafkap_str_new(protocol_name, -1);
+ rkas->rkas_protocol_type = rd_kafkap_str_new(protocol_type, -1);
+ rkas->rkas_protocol = rebalance_protocol;
+ rkas->rkas_assign_cb = assign_cb;
+ rkas->rkas_get_metadata_cb = get_metadata_cb;
+ rkas->rkas_on_assignment_cb = on_assignment_cb;
+ rkas->rkas_destroy_state_cb = destroy_state_cb;
+ rkas->rkas_unittest = unittest_cb;
+ rkas->rkas_opaque = opaque;
+ rkas->rkas_index = INT_MAX;
+
+ rd_list_add(&rk->rk_conf.partition_assignors, rkas);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/* Right trim string of whitespaces */
+static void rtrim(char *s) {
+ char *e = s + strlen(s);
+
+ if (e == s)
+ return;
+
+ while (e >= s && isspace(*e))
+ e--;
+
+ *e = '\0';
+}
+
+
+static int rd_kafka_assignor_cmp_idx(const void *ptr1, const void *ptr2) {
+ const rd_kafka_assignor_t *rkas1 = (const rd_kafka_assignor_t *)ptr1;
+ const rd_kafka_assignor_t *rkas2 = (const rd_kafka_assignor_t *)ptr2;
+ return rkas1->rkas_index - rkas2->rkas_index;
+}
+
+
+/**
+ * Initialize assignor list based on configuration.
+ */
+int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
+ char *wanted;
+ char *s;
+ int idx = 0;
+
+ rd_list_init(&rk->rk_conf.partition_assignors, 3,
+ (void *)rd_kafka_assignor_destroy);
+
+ /* Initialize builtin assignors (ignore errors) */
+ rd_kafka_range_assignor_init(rk);
+ rd_kafka_roundrobin_assignor_init(rk);
+ rd_kafka_sticky_assignor_init(rk);
+
+ rd_strdupa(&wanted, rk->rk_conf.partition_assignment_strategy);
+
+ s = wanted;
+ while (*s) {
+ rd_kafka_assignor_t *rkas = NULL;
+ char *t;
+
+ /* Left trim */
+ while (*s == ' ' || *s == ',')
+ s++;
+
+ if ((t = strchr(s, ','))) {
+ *t = '\0';
+ t++;
+ } else {
+ t = s + strlen(s);
+ }
+
+ /* Right trim */
+ rtrim(s);
+
+ rkas = rd_kafka_assignor_find(rk, s);
+ if (!rkas) {
+ rd_snprintf(errstr, errstr_size,
+ "Unsupported partition.assignment.strategy:"
+ " %s",
+ s);
+ return -1;
+ }
+
+ if (!rkas->rkas_enabled) {
+ rkas->rkas_enabled = 1;
+ rk->rk_conf.enabled_assignor_cnt++;
+ rkas->rkas_index = idx;
+ idx++;
+ }
+
+ s = t;
+ }
+
+ /* Sort the assignors according to the input strategy order
+ * since assignors will be scaned from the list sequentially
+ * and the strategies earlier in the list have higher priority. */
+ rd_list_sort(&rk->rk_conf.partition_assignors,
+ rd_kafka_assignor_cmp_idx);
+
+ /* Clear the SORTED flag because the list is sorted according to the
+ * rkas_index, but will do the search using rkas_protocol_name. */
+ rk->rk_conf.partition_assignors.rl_flags &= ~RD_LIST_F_SORTED;
+
+ if (rd_kafka_assignor_rebalance_protocol_check(&rk->rk_conf)) {
+ rd_snprintf(errstr, errstr_size,
+ "All partition.assignment.strategy (%s) assignors "
+ "must have the same protocol type, "
+ "online migration between assignors with "
+ "different protocol types is not supported",
+ rk->rk_conf.partition_assignment_strategy);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+/**
+ * Free assignors
+ */
+void rd_kafka_assignors_term(rd_kafka_t *rk) {
+ rd_list_destroy(&rk->rk_conf.partition_assignors);
+}
+
+
+
+/**
+ * @brief Unittest for assignors
+ */
+static int ut_assignors(void) {
+ const struct {
+ const char *name;
+ int topic_cnt;
+ struct {
+ const char *name;
+ int partition_cnt;
+ } topics[12];
+ int member_cnt;
+ struct {
+ const char *name;
+ int topic_cnt;
+ const char *topics[12];
+ } members[3];
+ int expect_cnt;
+ struct {
+ const char *protocol_name;
+ struct {
+ int partition_cnt;
+ const char *partitions[12]; /* "topic:part" */
+ } members[3];
+ } expect[2];
+ } tests[] = {
+ /*
+ * Test cases
+ */
+ {
+ .name = "Symmetrical subscription",
+ .topic_cnt = 4,
+ .topics =
+ {
+ {"a", 3}, /* a:0 a:1 a:2 */
+ {
+ "b",
+ 4,
+ }, /* b:0 b:1 b:2 b:3 */
+ {"c", 2}, /* c:0 c:1 */
+ {"d", 1}, /* d:0 */
+ },
+ .member_cnt = 2,
+ .members =
+ {
+ {.name = "consumer1",
+ .topic_cnt = 4,
+ .topics = {"d", "b", "a", "c"}},
+ {.name = "consumer2",
+ .topic_cnt = 4,
+ .topics = {"a", "b", "c", "d"}},
+ },
+ .expect_cnt = 2,
+ .expect =
+ {
+ {
+ .protocol_name = "range",
+ .members =
+ {
+ /* Consumer1 */
+ {6,
+ {"a:0", "a:1", "b:0", "b:1", "c:0",
+ "d:0"}},
+ /* Consumer2 */
+ {4, {"a:2", "b:2", "b:3", "c:1"}},
+ },
+ },
+ {
+ .protocol_name = "roundrobin",
+ .members =
+ {
+ /* Consumer1 */
+ {5, {"a:0", "a:2", "b:1", "b:3", "c:1"}},
+ /* Consumer2 */
+ {5, {"a:1", "b:0", "b:2", "c:0", "d:0"}},
+ },
+ },
+ },
+ },
+ {
+ .name = "1*3 partitions (asymmetrical)",
+ .topic_cnt = 1,
+ .topics =
+ {
+ {"a", 3},
+ },
+ .member_cnt = 2,
+ .members =
+ {
+ {.name = "consumer1",
+ .topic_cnt = 3,
+ .topics = {"a", "b", "c"}},
+ {.name = "consumer2", .topic_cnt = 1, .topics = {"a"}},
+ },
+ .expect_cnt = 2,
+ .expect =
+ {
+ {
+ .protocol_name = "range",
+ .members =
+ {
+ /* Consumer1.
+ * range assignor applies
+ * per topic. */
+ {2, {"a:0", "a:1"}},
+ /* Consumer2 */
+ {1, {"a:2"}},
+ },
+ },
+ {
+ .protocol_name = "roundrobin",
+ .members =
+ {
+ /* Consumer1 */
+ {2, {"a:0", "a:2"}},
+ /* Consumer2 */
+ {1, {"a:1"}},
+ },
+ },
+ },
+ },
+ {
+ .name = "#2121 (asymmetrical)",
+ .topic_cnt = 12,
+ .topics =
+ {
+ {"a", 1},
+ {"b", 1},
+ {"c", 1},
+ {"d", 1},
+ {"e", 1},
+ {"f", 1},
+ {"g", 1},
+ {"h", 1},
+ {"i", 1},
+ {"j", 1},
+ {"k", 1},
+ {"l", 1},
+ },
+ .member_cnt = 2,
+ .members =
+ {
+ {
+ .name = "consumer1",
+ .topic_cnt = 12,
+ .topics =
+ {
+ "a",
+ "b",
+ "c",
+ "d",
+ "e",
+ "f",
+ "g",
+ "h",
+ "i",
+ "j",
+ "k",
+ "l",
+ },
+ },
+ {
+ .name = "consumer2", /* must be second */
+ .topic_cnt = 5,
+ .topics =
+ {
+ "b",
+ "d",
+ "f",
+ "h",
+ "l",
+ },
+ },
+ },
+ .expect_cnt = 2,
+ .expect =
+ {
+ {
+ .protocol_name = "range",
+ .members =
+ {
+ /* Consumer1.
+ * All partitions. */
+ {12,
+ {
+ "a:0",
+ "b:0",
+ "c:0",
+ "d:0",
+ "e:0",
+ "f:0",
+ "g:0",
+ "h:0",
+ "i:0",
+ "j:0",
+ "k:0",
+ "l:0",
+ }},
+ /* Consumer2 */
+ {0},
+ },
+ },
+ {
+ .protocol_name = "roundrobin",
+ .members =
+ {
+ /* Consumer1 */
+ {
+ 7,
+ {
+ "a:0",
+ "c:0",
+ "e:0",
+ "g:0",
+ "i:0",
+ "j:0",
+ "k:0",
+ },
+ },
+ /* Consumer2 */
+ {5, {"b:0", "d:0", "f:0", "h:0", "l:0"}},
+ },
+ },
+ },
+ },
+ {NULL},
+ };
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ const rd_kafka_assignor_t *rkas;
+ int fails = 0;
+ int i;
+
+ conf = rd_kafka_conf_new();
+ rd_kafka_conf_set(conf, "group.id", "group", NULL, 0);
+ rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL,
+ 0);
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, NULL, 0);
+ RD_UT_ASSERT(rk != NULL, "Failed to create consumer");
+
+ /* Run through test cases */
+ for (i = 0; tests[i].name; i++) {
+ int ie, it, im;
+ rd_kafka_metadata_t metadata;
+ rd_kafka_group_member_t *members;
+
+ /* Create topic metadata */
+ metadata.topic_cnt = tests[i].topic_cnt;
+ metadata.topics =
+ rd_alloca(sizeof(*metadata.topics) * metadata.topic_cnt);
+ memset(metadata.topics, 0,
+ sizeof(*metadata.topics) * metadata.topic_cnt);
+ for (it = 0; it < metadata.topic_cnt; it++) {
+ metadata.topics[it].topic =
+ (char *)tests[i].topics[it].name;
+ metadata.topics[it].partition_cnt =
+ tests[i].topics[it].partition_cnt;
+ metadata.topics[it].partitions = NULL; /* Not used */
+ }
+
+ /* Create members */
+ members = rd_alloca(sizeof(*members) * tests[i].member_cnt);
+ memset(members, 0, sizeof(*members) * tests[i].member_cnt);
+
+ for (im = 0; im < tests[i].member_cnt; im++) {
+ rd_kafka_group_member_t *rkgm = &members[im];
+ rkgm->rkgm_member_id =
+ rd_kafkap_str_new(tests[i].members[im].name, -1);
+ rkgm->rkgm_group_instance_id =
+ rd_kafkap_str_new(tests[i].members[im].name, -1);
+ rd_list_init(&rkgm->rkgm_eligible,
+ tests[i].members[im].topic_cnt, NULL);
+
+ rkgm->rkgm_subscription =
+ rd_kafka_topic_partition_list_new(
+ tests[i].members[im].topic_cnt);
+ for (it = 0; it < tests[i].members[im].topic_cnt; it++)
+ rd_kafka_topic_partition_list_add(
+ rkgm->rkgm_subscription,
+ tests[i].members[im].topics[it],
+ RD_KAFKA_PARTITION_UA);
+
+ rkgm->rkgm_userdata = NULL;
+
+ rkgm->rkgm_assignment =
+ rd_kafka_topic_partition_list_new(
+ rkgm->rkgm_subscription->size);
+ }
+
+ /* For each assignor verify that the assignment
+ * matches the expection set out in the test case. */
+ for (ie = 0; ie < tests[i].expect_cnt; ie++) {
+ rd_kafka_resp_err_t err;
+ char errstr[256];
+
+ RD_UT_SAY("Test case %s: %s assignor", tests[i].name,
+ tests[i].expect[ie].protocol_name);
+
+ if (!(rkas = rd_kafka_assignor_find(
+ rk, tests[i].expect[ie].protocol_name))) {
+ RD_UT_FAIL(
+ "Assignor test case %s for %s failed: "
+ "assignor not found",
+ tests[i].name,
+ tests[i].expect[ie].protocol_name);
+ }
+
+ /* Run assignor */
+ err = rd_kafka_assignor_run(
+ rk->rk_cgrp, rkas, &metadata, members,
+ tests[i].member_cnt, errstr, sizeof(errstr));
+
+ RD_UT_ASSERT(!err, "Assignor case %s for %s failed: %s",
+ tests[i].name,
+ tests[i].expect[ie].protocol_name, errstr);
+
+ /* Verify assignments */
+ for (im = 0; im < tests[i].member_cnt; im++) {
+ rd_kafka_group_member_t *rkgm = &members[im];
+ int ia;
+
+ if (rkgm->rkgm_assignment->cnt !=
+ tests[i]
+ .expect[ie]
+ .members[im]
+ .partition_cnt) {
+ RD_UT_WARN(
+ " Member %.*s assignment count "
+ "mismatch: %d != %d",
+ RD_KAFKAP_STR_PR(
+ rkgm->rkgm_member_id),
+ rkgm->rkgm_assignment->cnt,
+ tests[i]
+ .expect[ie]
+ .members[im]
+ .partition_cnt);
+ fails++;
+ }
+
+ if (rkgm->rkgm_assignment->cnt > 0)
+ rd_kafka_topic_partition_list_sort_by_topic(
+ rkgm->rkgm_assignment);
+
+ for (ia = 0; ia < rkgm->rkgm_assignment->cnt;
+ ia++) {
+ rd_kafka_topic_partition_t *p =
+ &rkgm->rkgm_assignment->elems[ia];
+ char part[64];
+ const char *exp =
+ ia < tests[i]
+ .expect[ie]
+ .members[im]
+ .partition_cnt
+ ? tests[i]
+ .expect[ie]
+ .members[im]
+ .partitions[ia]
+ : "(none)";
+
+ rd_snprintf(part, sizeof(part), "%s:%d",
+ p->topic,
+ (int)p->partition);
+
+#if 0 /* Enable to print actual assignment */
+ RD_UT_SAY(" Member %.*s assignment "
+ "%d/%d %s =? %s",
+ RD_KAFKAP_STR_PR(
+ rkgm->rkgm_member_id),
+ ia,
+ rkgm->rkgm_assignment->cnt-1,
+ part, exp);
+#endif
+
+ if (strcmp(part, exp)) {
+ RD_UT_WARN(
+ " Member %.*s "
+ "assignment %d/%d "
+ "mismatch: %s != %s",
+ RD_KAFKAP_STR_PR(
+ rkgm->rkgm_member_id),
+ ia,
+ rkgm->rkgm_assignment->cnt -
+ 1,
+ part, exp);
+ fails++;
+ }
+ }
+
+ /* Reset assignment for next loop */
+ rd_kafka_topic_partition_list_destroy(
+ rkgm->rkgm_assignment);
+ rkgm->rkgm_assignment =
+ rd_kafka_topic_partition_list_new(
+ rkgm->rkgm_subscription->size);
+ }
+ }
+
+ for (im = 0; im < tests[i].member_cnt; im++) {
+ rd_kafka_group_member_t *rkgm = &members[im];
+ rd_kafka_group_member_clear(rkgm);
+ }
+ }
+
+
+ /* Run assignor-specific unittests */
+ RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) {
+ if (rkas->rkas_unittest)
+ fails += rkas->rkas_unittest();
+ }
+
+ rd_kafka_destroy(rk);
+
+ if (fails)
+ return 1;
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Unit tests for assignors
+ */
+int unittest_assignors(void) {
+ return ut_assignors();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h
new file mode 100644
index 000000000..b90e7dc98
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_assignor.h
@@ -0,0 +1,212 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_ASSIGNOR_H_
+#define _RDKAFKA_ASSIGNOR_H_
+
+
+
+/*!
+ * Enumerates the different rebalance protocol types.
+ *
+ * @sa rd_kafka_rebalance_protocol()
+ */
+typedef enum rd_kafka_rebalance_protocol_t {
+ RD_KAFKA_REBALANCE_PROTOCOL_NONE, /**< Rebalance protocol is
+ unknown */
+ RD_KAFKA_REBALANCE_PROTOCOL_EAGER, /**< Eager rebalance
+ protocol */
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE /**< Cooperative
+ rebalance protocol*/
+} rd_kafka_rebalance_protocol_t;
+
+
+
+typedef struct rd_kafka_group_member_s {
+ /** Subscribed topics (partition field is ignored). */
+ rd_kafka_topic_partition_list_t *rkgm_subscription;
+ /** Partitions assigned to this member after running the assignor.
+ * E.g., the current assignment coming out of the rebalance. */
+ rd_kafka_topic_partition_list_t *rkgm_assignment;
+ /** Partitions reported as currently owned by the member, read
+ * from consumer metadata. E.g., the current assignment going into
+ * the rebalance. */
+ rd_kafka_topic_partition_list_t *rkgm_owned;
+ /** List of eligible topics in subscription. E.g., subscribed topics
+ * that exist. */
+ rd_list_t rkgm_eligible;
+ /** Member id (e.g., client.id-some-uuid). */
+ rd_kafkap_str_t *rkgm_member_id;
+ /** Group instance id. */
+ rd_kafkap_str_t *rkgm_group_instance_id;
+ /** Member-specific opaque userdata. */
+ rd_kafkap_bytes_t *rkgm_userdata;
+ /** Member metadata, e.g., the currently owned partitions. */
+ rd_kafkap_bytes_t *rkgm_member_metadata;
+ /** Group generation id. */
+ int rkgm_generation;
+} rd_kafka_group_member_t;
+
+
+int rd_kafka_group_member_cmp(const void *_a, const void *_b);
+
+int rd_kafka_group_member_find_subscription(rd_kafka_t *rk,
+ const rd_kafka_group_member_t *rkgm,
+ const char *topic);
+
+
+/**
+ * Structure to hold metadata for a single topic and all its
+ * subscribing members.
+ */
+typedef struct rd_kafka_assignor_topic_s {
+ const rd_kafka_metadata_topic_t *metadata;
+ rd_list_t members; /* rd_kafka_group_member_t * */
+} rd_kafka_assignor_topic_t;
+
+
+int rd_kafka_assignor_topic_cmp(const void *_a, const void *_b);
+
+
+typedef struct rd_kafka_assignor_s {
+ rd_kafkap_str_t *rkas_protocol_type;
+ rd_kafkap_str_t *rkas_protocol_name;
+
+ int rkas_enabled;
+
+ /** Order for strategies. */
+ int rkas_index;
+
+ rd_kafka_rebalance_protocol_t rkas_protocol;
+
+ rd_kafka_resp_err_t (*rkas_assign_cb)(
+ rd_kafka_t *rk,
+ const struct rd_kafka_assignor_s *rkas,
+ const char *member_id,
+ const rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ rd_kafka_assignor_topic_t **eligible_topics,
+ size_t eligible_topic_cnt,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque);
+
+ rd_kafkap_bytes_t *(*rkas_get_metadata_cb)(
+ const struct rd_kafka_assignor_s *rkas,
+ void *assignor_state,
+ const rd_list_t *topics,
+ const rd_kafka_topic_partition_list_t *owned_partitions);
+
+ void (*rkas_on_assignment_cb)(
+ const struct rd_kafka_assignor_s *rkas,
+ void **assignor_state,
+ const rd_kafka_topic_partition_list_t *assignment,
+ const rd_kafkap_bytes_t *assignment_userdata,
+ const rd_kafka_consumer_group_metadata_t *rkcgm);
+
+ void (*rkas_destroy_state_cb)(void *assignor_state);
+
+ int (*rkas_unittest)(void);
+
+ void *rkas_opaque;
+} rd_kafka_assignor_t;
+
+
+rd_kafka_resp_err_t rd_kafka_assignor_add(
+ rd_kafka_t *rk,
+ const char *protocol_type,
+ const char *protocol_name,
+ rd_kafka_rebalance_protocol_t rebalance_protocol,
+ rd_kafka_resp_err_t (*assign_cb)(
+ rd_kafka_t *rk,
+ const struct rd_kafka_assignor_s *rkas,
+ const char *member_id,
+ const rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ rd_kafka_assignor_topic_t **eligible_topics,
+ size_t eligible_topic_cnt,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque),
+ rd_kafkap_bytes_t *(*get_metadata_cb)(
+ const struct rd_kafka_assignor_s *rkas,
+ void *assignor_state,
+ const rd_list_t *topics,
+ const rd_kafka_topic_partition_list_t *owned_partitions),
+ void (*on_assignment_cb)(const struct rd_kafka_assignor_s *rkas,
+ void **assignor_state,
+ const rd_kafka_topic_partition_list_t *assignment,
+ const rd_kafkap_bytes_t *userdata,
+ const rd_kafka_consumer_group_metadata_t *rkcgm),
+ void (*destroy_state_cb)(void *assignor_state),
+ int (*unittest_cb)(void),
+ void *opaque);
+
+rd_kafkap_bytes_t *rd_kafka_consumer_protocol_member_metadata_new(
+ const rd_list_t *topics,
+ const void *userdata,
+ size_t userdata_size,
+ const rd_kafka_topic_partition_list_t *owned_partitions);
+
+rd_kafkap_bytes_t *rd_kafka_assignor_get_metadata_with_empty_userdata(
+ const rd_kafka_assignor_t *rkas,
+ void *assignor_state,
+ const rd_list_t *topics,
+ const rd_kafka_topic_partition_list_t *owned_partitions);
+
+
+void rd_kafka_assignor_update_subscription(
+ const rd_kafka_assignor_t *rkas,
+ const rd_kafka_topic_partition_list_t *subscription);
+
+
+rd_kafka_resp_err_t rd_kafka_assignor_run(struct rd_kafka_cgrp_s *rkcg,
+ const rd_kafka_assignor_t *rkas,
+ rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ int member_cnt,
+ char *errstr,
+ size_t errstr_size);
+
+rd_kafka_assignor_t *rd_kafka_assignor_find(rd_kafka_t *rk,
+ const char *protocol);
+
+int rd_kafka_assignors_init(rd_kafka_t *rk, char *errstr, size_t errstr_size);
+void rd_kafka_assignors_term(rd_kafka_t *rk);
+
+
+
+void rd_kafka_group_member_clear(rd_kafka_group_member_t *rkgm);
+
+
+rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk);
+rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk);
+rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk);
+
+#endif /* _RDKAFKA_ASSIGNOR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c
new file mode 100644
index 000000000..753f03d67
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.c
@@ -0,0 +1,278 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rdkafka_int.h"
+#include "rdkafka_aux.h"
+#include "rdkafka_error.h"
+
+rd_kafka_resp_err_t
+rd_kafka_topic_result_error(const rd_kafka_topic_result_t *topicres) {
+ return topicres->err;
+}
+
+const char *
+rd_kafka_topic_result_error_string(const rd_kafka_topic_result_t *topicres) {
+ return topicres->errstr;
+}
+
+const char *
+rd_kafka_topic_result_name(const rd_kafka_topic_result_t *topicres) {
+ return topicres->topic;
+}
+
+/**
+ * @brief Create new topic_result (single allocation).
+ *
+ * @param topic Topic string, if topic_size is != -1 it does not have to
+ * be nul-terminated.
+ * @param topic_size Size of topic, or -1 to perform automatic strlen()
+ * @param err Error code
+ * @param errstr Optional error string.
+ *
+ * All input arguments are copied.
+ */
+
+rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic,
+ ssize_t topic_size,
+ rd_kafka_resp_err_t err,
+ const char *errstr) {
+ size_t tlen = topic_size != -1 ? (size_t)topic_size : strlen(topic);
+ size_t elen = errstr ? strlen(errstr) + 1 : 0;
+ rd_kafka_topic_result_t *terr;
+
+ terr = rd_malloc(sizeof(*terr) + tlen + 1 + elen);
+
+ terr->err = err;
+
+ terr->topic = terr->data;
+ memcpy(terr->topic, topic, tlen);
+ terr->topic[tlen] = '\0';
+
+ if (errstr) {
+ terr->errstr = terr->topic + tlen + 1;
+ memcpy(terr->errstr, errstr, elen);
+ } else {
+ terr->errstr = NULL;
+ }
+
+ return terr;
+}
+
+
+/**
+ * @brief Destroy topic_result
+ */
+void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr) {
+ rd_free(terr);
+}
+
+/**
+ * @brief Destroy-variant suitable for rd_list free_cb use.
+ */
+void rd_kafka_topic_result_free(void *ptr) {
+ rd_kafka_topic_result_destroy((rd_kafka_topic_result_t *)ptr);
+}
+
+const rd_kafka_error_t *
+rd_kafka_group_result_error(const rd_kafka_group_result_t *groupres) {
+ return groupres->error;
+}
+
+const char *
+rd_kafka_group_result_name(const rd_kafka_group_result_t *groupres) {
+ return groupres->group;
+}
+
+const rd_kafka_topic_partition_list_t *
+rd_kafka_group_result_partitions(const rd_kafka_group_result_t *groupres) {
+ return groupres->partitions;
+}
+
+rd_kafka_group_result_t *
+rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres) {
+ return rd_kafka_group_result_new(
+ groupres->group, -1, groupres->partitions,
+ groupres->error ? rd_kafka_error_copy(groupres->error) : NULL);
+}
+
+/**
+ * @brief Same as rd_kafka_group_result_copy() but suitable for
+ * rd_list_copy(). The \p opaque is ignored.
+ */
+void *rd_kafka_group_result_copy_opaque(const void *src_groupres,
+ void *opaque) {
+ return rd_kafka_group_result_copy(src_groupres);
+}
+
+
+/**
+ * @brief Create new group_result (single allocation).
+ *
+ * @param group Group string, if group_size is != -1 it does not have to
+ * be nul-terminated.
+ * @param group_size Size of group, or -1 to perform automatic strlen()
+ * @param error Error object, or NULL on success. Takes ownership of \p error.
+ *
+ * All input arguments are copied.
+ */
+
+rd_kafka_group_result_t *
+rd_kafka_group_result_new(const char *group,
+ ssize_t group_size,
+ const rd_kafka_topic_partition_list_t *partitions,
+ rd_kafka_error_t *error) {
+ size_t glen = group_size != -1 ? (size_t)group_size : strlen(group);
+ rd_kafka_group_result_t *groupres;
+
+ groupres = rd_calloc(1, sizeof(*groupres) + glen + 1);
+
+
+ groupres->group = groupres->data;
+ memcpy(groupres->group, group, glen);
+ groupres->group[glen] = '\0';
+
+ if (partitions)
+ groupres->partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+
+ groupres->error = error;
+
+ return groupres;
+}
+
+
+/**
+ * @brief Destroy group_result
+ */
+void rd_kafka_group_result_destroy(rd_kafka_group_result_t *groupres) {
+ if (groupres->partitions)
+ rd_kafka_topic_partition_list_destroy(groupres->partitions);
+ if (groupres->error)
+ rd_kafka_error_destroy(groupres->error);
+ rd_free(groupres);
+}
+
+/**
+ * @brief Destroy-variant suitable for rd_list free_cb use.
+ */
+void rd_kafka_group_result_free(void *ptr) {
+ rd_kafka_group_result_destroy((rd_kafka_group_result_t *)ptr);
+}
+
+
+const rd_kafka_error_t *
+rd_kafka_acl_result_error(const rd_kafka_acl_result_t *aclres) {
+ return aclres->error;
+}
+
+/**
+ * @brief Allocates and return an acl result, takes ownership of \p error
+ * (unless NULL).
+ *
+ * @returns The new acl result.
+ */
+rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error) {
+ rd_kafka_acl_result_t *acl_res;
+
+ acl_res = rd_calloc(1, sizeof(*acl_res));
+
+ acl_res->error = error;
+
+ return acl_res;
+}
+
+/**
+ * @brief Destroy acl_result
+ */
+void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res) {
+ if (acl_res->error)
+ rd_kafka_error_destroy(acl_res->error);
+ rd_free(acl_res);
+}
+
+/**
+ * @brief Destroy-variant suitable for rd_list free_cb use.
+ */
+void rd_kafka_acl_result_free(void *ptr) {
+ rd_kafka_acl_result_destroy((rd_kafka_acl_result_t *)ptr);
+}
+
+
+/**
+ * @brief Create a new Node object.
+ *
+ * @param id The node id.
+ * @param host The node host.
+ * @param port The node port.
+ * @param rack_id (optional) The node rack id.
+ * @return A new allocated Node object.
+ * Use rd_kafka_Node_destroy() to free when done.
+ */
+rd_kafka_Node_t *rd_kafka_Node_new(int id,
+ const char *host,
+ uint16_t port,
+ const char *rack_id) {
+ rd_kafka_Node_t *ret = rd_calloc(1, sizeof(*ret));
+ ret->id = id;
+ ret->port = port;
+ ret->host = rd_strdup(host);
+ if (rack_id != NULL)
+ ret->rack_id = rd_strdup(rack_id);
+ return ret;
+}
+
+/**
+ * @brief Copy \p src Node object
+ *
+ * @param src The Node to copy.
+ * @return A new allocated Node object.
+ * Use rd_kafka_Node_destroy() to free when done.
+ */
+rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src) {
+ return rd_kafka_Node_new(src->id, src->host, src->port, src->rack_id);
+}
+
+void rd_kafka_Node_destroy(rd_kafka_Node_t *node) {
+ rd_free(node->host);
+ if (node->rack_id)
+ rd_free(node->rack_id);
+ rd_free(node);
+}
+
+int rd_kafka_Node_id(const rd_kafka_Node_t *node) {
+ return node->id;
+}
+
+const char *rd_kafka_Node_host(const rd_kafka_Node_t *node) {
+ return node->host;
+}
+
+uint16_t rd_kafka_Node_port(const rd_kafka_Node_t *node) {
+ return node->port;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h
new file mode 100644
index 000000000..ccf18e91e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_aux.h
@@ -0,0 +1,120 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_AUX_H_
+#define _RDKAFKA_AUX_H_
+
+/**
+ * @name Auxiliary types
+ */
+
+#include "rdkafka_conf.h"
+
+/**
+ * @brief Topic [ + Error code + Error string ]
+ *
+ * @remark Public type.
+ * @remark Single allocation.
+ */
+struct rd_kafka_topic_result_s {
+ char *topic; /**< Points to data */
+ rd_kafka_resp_err_t err; /**< Error code */
+ char *errstr; /**< Points to data after topic, unless NULL */
+ char data[1]; /**< topic followed by errstr */
+};
+
+void rd_kafka_topic_result_destroy(rd_kafka_topic_result_t *terr);
+void rd_kafka_topic_result_free(void *ptr);
+
+rd_kafka_topic_result_t *rd_kafka_topic_result_new(const char *topic,
+ ssize_t topic_size,
+ rd_kafka_resp_err_t err,
+ const char *errstr);
+
+/**
+ * @brief Group [ + Error object ]
+ *
+ * @remark Public type.
+ * @remark Single allocation.
+ */
+struct rd_kafka_group_result_s {
+ char *group; /**< Points to data */
+ rd_kafka_error_t *error; /**< Error object, or NULL on success */
+ /** Partitions, used by DeleteConsumerGroupOffsets. */
+ rd_kafka_topic_partition_list_t *partitions;
+ char data[1]; /**< Group name */
+};
+
+void rd_kafka_group_result_destroy(rd_kafka_group_result_t *terr);
+void rd_kafka_group_result_free(void *ptr);
+
+rd_kafka_group_result_t *
+rd_kafka_group_result_new(const char *group,
+ ssize_t group_size,
+ const rd_kafka_topic_partition_list_t *partitions,
+ rd_kafka_error_t *error);
+
+/**
+ * @brief Acl creation result [ Error code + Error string ]
+ *
+ * @remark Public type.
+ * @remark Single allocation.
+ */
+struct rd_kafka_acl_result_s {
+ rd_kafka_error_t *error; /**< Error object, or NULL on success. */
+};
+
+void rd_kafka_acl_result_destroy(rd_kafka_acl_result_t *acl_res);
+void rd_kafka_acl_result_free(void *ptr);
+
+rd_kafka_acl_result_t *rd_kafka_acl_result_new(rd_kafka_error_t *error);
+
+rd_kafka_group_result_t *
+rd_kafka_group_result_copy(const rd_kafka_group_result_t *groupres);
+void *rd_kafka_group_result_copy_opaque(const void *src_groupres, void *opaque);
+/**@}*/
+
+/**
+ * @struct Node represents a broker.
+ * It's the public type.
+ */
+typedef struct rd_kafka_Node_s {
+ int id; /*< Node id */
+ char *host; /*< Node host */
+ uint16_t port; /*< Node port */
+ char *rack_id; /*< (optional) Node rack id */
+} rd_kafka_Node_t;
+
+rd_kafka_Node_t *
+rd_kafka_Node_new(int id, const char *host, uint16_t port, const char *rack_id);
+
+rd_kafka_Node_t *rd_kafka_Node_copy(const rd_kafka_Node_t *src);
+
+void rd_kafka_Node_destroy(rd_kafka_Node_t *node);
+
+#endif /* _RDKAFKA_AUX_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c
new file mode 100644
index 000000000..c69ec1767
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_background.c
@@ -0,0 +1,221 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Background queue thread and event handling.
+ *
+ * See rdkafka.h's rd_kafka_conf_set_background_event_cb() for details.
+ */
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_event.h"
+#include "rdkafka_interceptor.h"
+
+#include <signal.h>
+
+/**
+ * @brief Call the registered background_event_cb.
+ * @locality rdkafka background queue thread
+ */
+static RD_INLINE void rd_kafka_call_background_event_cb(rd_kafka_t *rk,
+ rd_kafka_op_t *rko) {
+ rd_assert(!rk->rk_background.calling);
+ rk->rk_background.calling = 1;
+
+ rk->rk_conf.background_event_cb(rk, rko, rk->rk_conf.opaque);
+
+ rk->rk_background.calling = 0;
+}
+
+
+/**
+ * @brief Background queue handler.
+ *
+ * Triggers the background_event_cb for all event:able ops,
+ * for non-event:able ops:
+ * - call op callback if set, else
+ * - log and discard the op. This is a user error, forwarding non-event
+ * APIs to the background queue.
+ */
+static rd_kafka_op_res_t
+rd_kafka_background_queue_serve(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque) {
+ rd_kafka_op_res_t res;
+
+ /*
+ * Dispatch Event:able ops to background_event_cb()
+ */
+ if (likely(rk->rk_conf.background_event_cb &&
+ rd_kafka_event_setup(rk, rko))) {
+ rd_kafka_call_background_event_cb(rk, rko);
+ /* Event must be destroyed by application. */
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ /*
+ * Handle non-event:able ops through the standard poll_cb that
+ * will trigger type-specific callbacks (and return OP_RES_HANDLED)
+ * or do no handling and return OP_RES_PASS.
+ * Also signal yield to q_serve() (which implies that op was handled).
+ */
+ res = rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_CALLBACK, opaque);
+ if (res == RD_KAFKA_OP_RES_HANDLED || res == RD_KAFKA_OP_RES_YIELD)
+ return res;
+
+ /* Op was not handled, log and destroy it. */
+ rd_kafka_log(rk, LOG_NOTICE, "BGQUEUE",
+ "No support for handling "
+ "non-event op %s in background queue: discarding",
+ rd_kafka_op2str(rko->rko_type));
+ rd_kafka_op_destroy(rko);
+
+ /* Indicate that the op was handled. */
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Main loop for background queue thread.
+ */
+int rd_kafka_background_thread_main(void *arg) {
+ rd_kafka_t *rk = arg;
+
+ rd_kafka_set_thread_name("background");
+ rd_kafka_set_thread_sysname("rdk:bg");
+
+ rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_BACKGROUND);
+
+ (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
+
+ /* Acquire lock (which was held by thread creator during creation)
+ * to synchronise state. */
+ rd_kafka_wrlock(rk);
+ rd_kafka_wrunlock(rk);
+
+ mtx_lock(&rk->rk_init_lock);
+ rk->rk_init_wait_cnt--;
+ cnd_broadcast(&rk->rk_init_cnd);
+ mtx_unlock(&rk->rk_init_lock);
+
+ while (likely(!rd_kafka_terminating(rk))) {
+ rd_kafka_q_serve(rk->rk_background.q, 10 * 1000, 0,
+ RD_KAFKA_Q_CB_RETURN,
+ rd_kafka_background_queue_serve, NULL);
+ }
+
+ /* Inform the user that they terminated the client before
+ * all outstanding events were handled. */
+ if (rd_kafka_q_len(rk->rk_background.q) > 0)
+ rd_kafka_log(rk, LOG_INFO, "BGQUEUE",
+ "Purging %d unserved events from background queue",
+ rd_kafka_q_len(rk->rk_background.q));
+ rd_kafka_q_disable(rk->rk_background.q);
+ rd_kafka_q_purge(rk->rk_background.q);
+
+ rd_kafka_dbg(rk, GENERIC, "BGQUEUE", "Background queue thread exiting");
+
+ rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BACKGROUND);
+
+ rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
+
+ return 0;
+}
+
+
+/**
+ * @brief Create the background thread.
+ *
+ * @locks_acquired rk_init_lock
+ * @locks_required rd_kafka_wrlock()
+ */
+rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+#ifndef _WIN32
+ sigset_t newset, oldset;
+#endif
+
+ if (rk->rk_background.q) {
+ rd_snprintf(errstr, errstr_size,
+ "Background thread already created");
+ return RD_KAFKA_RESP_ERR__CONFLICT;
+ }
+
+ rk->rk_background.q = rd_kafka_q_new(rk);
+
+ mtx_lock(&rk->rk_init_lock);
+ rk->rk_init_wait_cnt++;
+
+#ifndef _WIN32
+ /* Block all signals in newly created threads.
+ * To avoid race condition we block all signals in the calling
+ * thread, which the new thread will inherit its sigmask from,
+ * and then restore the original sigmask of the calling thread when
+ * we're done creating the thread. */
+ sigemptyset(&oldset);
+ sigfillset(&newset);
+ if (rk->rk_conf.term_sig) {
+ struct sigaction sa_term = {.sa_handler =
+ rd_kafka_term_sig_handler};
+ sigaction(rk->rk_conf.term_sig, &sa_term, NULL);
+ }
+ pthread_sigmask(SIG_SETMASK, &newset, &oldset);
+#endif
+
+
+ if ((thrd_create(&rk->rk_background.thread,
+ rd_kafka_background_thread_main, rk)) !=
+ thrd_success) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to create background thread: %s",
+ rd_strerror(errno));
+ rd_kafka_q_destroy_owner(rk->rk_background.q);
+ rk->rk_background.q = NULL;
+ rk->rk_init_wait_cnt--;
+ mtx_unlock(&rk->rk_init_lock);
+
+#ifndef _WIN32
+ /* Restore sigmask of caller */
+ pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+#endif
+ return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ }
+
+ mtx_unlock(&rk->rk_init_lock);
+
+#ifndef _WIN32
+ /* Restore sigmask of caller */
+ pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+#endif
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c
new file mode 100644
index 000000000..e8fc27b11
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.c
@@ -0,0 +1,5867 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#if defined(__MINGW32__)
+#include <ws2tcpip.h>
+#endif
+
+#ifndef _WIN32
+#define _GNU_SOURCE
+/*
+ * AIX defines this and the value needs to be set correctly. For Solaris,
+ * src/rd.h defines _POSIX_SOURCE to be 200809L, which corresponds to XPG7,
+ * which itself is not compatible with _XOPEN_SOURCE on that platform.
+ */
+#if !defined(_AIX) && !defined(__sun)
+#define _XOPEN_SOURCE
+#endif
+#include <signal.h>
+#endif
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_msg.h"
+#include "rdkafka_msgset.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_proto.h"
+#include "rdkafka_buf.h"
+#include "rdkafka_request.h"
+#include "rdkafka_sasl.h"
+#include "rdkafka_interceptor.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_txnmgr.h"
+#include "rdkafka_fetcher.h"
+#include "rdtime.h"
+#include "rdcrc32.h"
+#include "rdrand.h"
+#include "rdkafka_lz4.h"
+#if WITH_SSL
+#include <openssl/err.h>
+#endif
+#include "rdendian.h"
+#include "rdunittest.h"
+
+
+static const int rd_kafka_max_block_ms = 1000;
+
+const char *rd_kafka_broker_state_names[] = {
+ "INIT", "DOWN", "TRY_CONNECT", "CONNECT", "SSL_HANDSHAKE",
+ "AUTH_LEGACY", "UP", "UPDATE", "APIVERSION_QUERY", "AUTH_HANDSHAKE",
+ "AUTH_REQ"};
+
+const char *rd_kafka_secproto_names[] = {
+ [RD_KAFKA_PROTO_PLAINTEXT] = "plaintext",
+ [RD_KAFKA_PROTO_SSL] = "ssl",
+ [RD_KAFKA_PROTO_SASL_PLAINTEXT] = "sasl_plaintext",
+ [RD_KAFKA_PROTO_SASL_SSL] = "sasl_ssl",
+ NULL};
+
+
+/**
+ * @returns true for logical brokers (e.g., coordinators) without an address set
+ *
+ * @locks_required rkb_lock
+ */
+#define rd_kafka_broker_is_addrless(rkb) (*(rkb)->rkb_nodename == '\0')
+
+/**
+ * @returns true if the broker needs a persistent connection
+ * @locaility broker thread
+ */
+static RD_INLINE rd_bool_t
+rd_kafka_broker_needs_persistent_connection(rd_kafka_broker_t *rkb) {
+ return rkb->rkb_persistconn.internal ||
+ rd_atomic32_get(&rkb->rkb_persistconn.coord);
+}
+
+
+/**
+ * @returns > 0 if a connection to this broker is needed, else 0.
+ * @locality broker thread
+ * @locks none
+ */
+static RD_INLINE int rd_kafka_broker_needs_connection(rd_kafka_broker_t *rkb) {
+ return rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT &&
+ !rd_kafka_terminating(rkb->rkb_rk) &&
+ !rd_kafka_fatal_error_code(rkb->rkb_rk) &&
+ (!rkb->rkb_rk->rk_conf.sparse_connections ||
+ rd_kafka_broker_needs_persistent_connection(rkb));
+}
+
+
+static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb,
+ rd_kafka_op_t *rko);
+static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t *rkb);
+
+
+#define rd_kafka_broker_terminating(rkb) \
+ (rd_refcnt_get(&(rkb)->rkb_refcnt) <= 1)
+
+
+/**
+ * Construct broker nodename.
+ */
+static void rd_kafka_mk_nodename(char *dest,
+ size_t dsize,
+ const char *name,
+ uint16_t port) {
+ rd_snprintf(dest, dsize, "%s:%hu", name, port);
+}
+
+/**
+ * Construct descriptive broker name
+ */
+static void rd_kafka_mk_brokername(char *dest,
+ size_t dsize,
+ rd_kafka_secproto_t proto,
+ const char *nodename,
+ int32_t nodeid,
+ rd_kafka_confsource_t source) {
+
+ /* Prepend protocol name to brokername, unless it is a
+ * standard plaintext or logical broker in which case we
+ * omit the protocol part. */
+ if (proto != RD_KAFKA_PROTO_PLAINTEXT && source != RD_KAFKA_LOGICAL) {
+ int r = rd_snprintf(dest, dsize, "%s://",
+ rd_kafka_secproto_names[proto]);
+ if (r >= (int)dsize) /* Skip proto name if it wont fit.. */
+ r = 0;
+
+ dest += r;
+ dsize -= r;
+ }
+
+ if (nodeid == RD_KAFKA_NODEID_UA)
+ rd_snprintf(dest, dsize, "%s%s", nodename,
+ source == RD_KAFKA_LOGICAL
+ ? ""
+ : (source == RD_KAFKA_INTERNAL ? "/internal"
+ : "/bootstrap"));
+ else
+ rd_snprintf(dest, dsize, "%s/%" PRId32, nodename, nodeid);
+}
+
+
+/**
+ * @brief Enable protocol feature(s) for the current broker.
+ *
+ * @locks broker_lock MUST be held
+ * @locality broker thread
+ */
+static void rd_kafka_broker_feature_enable(rd_kafka_broker_t *rkb,
+ int features) {
+ if (features & rkb->rkb_features)
+ return;
+
+ rkb->rkb_features |= features;
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE,
+ "FEATURE", "Updated enabled protocol features +%s to %s",
+ rd_kafka_features2str(features),
+ rd_kafka_features2str(rkb->rkb_features));
+}
+
+
+/**
+ * @brief Disable protocol feature(s) for the current broker.
+ *
+ * @locks broker_lock MUST be held
+ * @locality broker thread
+ */
+static void rd_kafka_broker_feature_disable(rd_kafka_broker_t *rkb,
+ int features) {
+ if (!(features & rkb->rkb_features))
+ return;
+
+ rkb->rkb_features &= ~features;
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FEATURE,
+ "FEATURE", "Updated enabled protocol features -%s to %s",
+ rd_kafka_features2str(features),
+ rd_kafka_features2str(rkb->rkb_features));
+}
+
+
+/**
+ * @brief Set protocol feature(s) for the current broker.
+ *
+ * @remark This replaces the previous feature set.
+ *
+ * @locality broker thread
+ * @locks rd_kafka_broker_lock()
+ */
+static void rd_kafka_broker_features_set(rd_kafka_broker_t *rkb, int features) {
+ if (rkb->rkb_features == features)
+ return;
+
+ rkb->rkb_features = features;
+ rd_rkb_dbg(rkb, BROKER, "FEATURE",
+ "Updated enabled protocol features to %s",
+ rd_kafka_features2str(rkb->rkb_features));
+}
+
+
+/**
+ * @brief Check and return supported ApiVersion for \p ApiKey.
+ *
+ * @returns the highest supported ApiVersion in the specified range (inclusive)
+ * or -1 if the ApiKey is not supported or no matching ApiVersion.
+ * The current feature set is also returned in \p featuresp
+ * @locks none
+ * @locality any
+ */
+int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
+ int16_t ApiKey,
+ int16_t minver,
+ int16_t maxver,
+ int *featuresp) {
+ struct rd_kafka_ApiVersion skel = {.ApiKey = ApiKey};
+ struct rd_kafka_ApiVersion ret = RD_ZERO_INIT, *retp;
+
+ rd_kafka_broker_lock(rkb);
+ if (featuresp)
+ *featuresp = rkb->rkb_features;
+
+ if (rkb->rkb_features & RD_KAFKA_FEATURE_UNITTEST) {
+ /* For unit tests let the broker support everything. */
+ rd_kafka_broker_unlock(rkb);
+ return maxver;
+ }
+
+ retp =
+ bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt,
+ sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp);
+ if (retp)
+ ret = *retp;
+ rd_kafka_broker_unlock(rkb);
+
+ if (!retp)
+ return -1;
+
+ if (ret.MaxVer < maxver) {
+ if (ret.MaxVer < minver)
+ return -1;
+ else
+ return ret.MaxVer;
+ } else if (ret.MinVer > maxver)
+ return -1;
+ else
+ return maxver;
+}
+
+
+/**
+ * @brief Set broker state.
+ *
+ * \c rkb->rkb_state is the previous state, while
+ * \p state is the new state.
+ *
+ * @locks rd_kafka_broker_lock() MUST be held.
+ * @locality broker thread
+ */
+void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state) {
+ rd_bool_t trigger_monitors = rd_false;
+
+ if ((int)rkb->rkb_state == state)
+ return;
+
+ rd_kafka_dbg(rkb->rkb_rk, BROKER, "STATE",
+ "%s: Broker changed state %s -> %s", rkb->rkb_name,
+ rd_kafka_broker_state_names[rkb->rkb_state],
+ rd_kafka_broker_state_names[state]);
+
+ if (rkb->rkb_source == RD_KAFKA_INTERNAL) {
+ /* no-op */
+ } else if (state == RD_KAFKA_BROKER_STATE_DOWN &&
+ !rkb->rkb_down_reported) {
+ /* Propagate ALL_BROKERS_DOWN event if all brokers are
+ * now down, unless we're terminating.
+ * Only trigger for brokers that has an address set,
+ * e.g., not logical brokers that lost their address. */
+ if (rd_atomic32_add(&rkb->rkb_rk->rk_broker_down_cnt, 1) ==
+ rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) -
+ rd_atomic32_get(
+ &rkb->rkb_rk->rk_broker_addrless_cnt) &&
+ !rd_kafka_broker_is_addrless(rkb) &&
+ !rd_kafka_terminating(rkb->rkb_rk))
+ rd_kafka_op_err(
+ rkb->rkb_rk, RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN,
+ "%i/%i brokers are down",
+ rd_atomic32_get(&rkb->rkb_rk->rk_broker_down_cnt),
+ rd_atomic32_get(&rkb->rkb_rk->rk_broker_cnt) -
+ rd_atomic32_get(
+ &rkb->rkb_rk->rk_broker_addrless_cnt));
+ rkb->rkb_down_reported = 1;
+
+ } else if (rd_kafka_broker_state_is_up(state) &&
+ rkb->rkb_down_reported) {
+ rd_atomic32_sub(&rkb->rkb_rk->rk_broker_down_cnt, 1);
+ rkb->rkb_down_reported = 0;
+ }
+
+ if (rkb->rkb_source != RD_KAFKA_INTERNAL) {
+ if (rd_kafka_broker_state_is_up(state) &&
+ !rd_kafka_broker_state_is_up(rkb->rkb_state)) {
+ /* Up -> Down */
+ rd_atomic32_add(&rkb->rkb_rk->rk_broker_up_cnt, 1);
+
+ trigger_monitors = rd_true;
+
+ if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
+ rd_atomic32_add(
+ &rkb->rkb_rk->rk_logical_broker_up_cnt, 1);
+
+ } else if (rd_kafka_broker_state_is_up(rkb->rkb_state) &&
+ !rd_kafka_broker_state_is_up(state)) {
+ /* ~Down(!Up) -> Up */
+ rd_atomic32_sub(&rkb->rkb_rk->rk_broker_up_cnt, 1);
+
+ trigger_monitors = rd_true;
+
+ if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
+ rd_atomic32_sub(
+ &rkb->rkb_rk->rk_logical_broker_up_cnt, 1);
+ }
+
+ /* If the connection or connection attempt failed and there
+ * are coord_reqs or cgrp awaiting this coordinator to come up
+ * then trigger the monitors so that rd_kafka_coord_req_fsm()
+ * is triggered, which in turn may trigger a new coordinator
+ * query. */
+ if (state == RD_KAFKA_BROKER_STATE_DOWN &&
+ rd_atomic32_get(&rkb->rkb_persistconn.coord) > 0)
+ trigger_monitors = rd_true;
+ }
+
+ rkb->rkb_state = state;
+ rkb->rkb_ts_state = rd_clock();
+
+ if (trigger_monitors)
+ rd_kafka_broker_trigger_monitors(rkb);
+
+ /* Call on_broker_state_change interceptors */
+ rd_kafka_interceptors_on_broker_state_change(
+ rkb->rkb_rk, rkb->rkb_nodeid,
+ rd_kafka_secproto_names[rkb->rkb_proto], rkb->rkb_origname,
+ rkb->rkb_port, rd_kafka_broker_state_names[rkb->rkb_state]);
+
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+}
+
+
+/**
+ * @brief Set, log and propagate broker fail error.
+ *
+ * @param rkb Broker connection that failed.
+ * @param level Syslog level. LOG_DEBUG will not be logged unless debugging
+ * is enabled.
+ * @param err The type of error that occurred.
+ * @param fmt Format string.
+ * @param ap Format string arguments.
+ *
+ * @locks none
+ * @locality broker thread
+ */
+static void rd_kafka_broker_set_error(rd_kafka_broker_t *rkb,
+ int level,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ va_list ap) {
+ char errstr[512];
+ char extra[128];
+ size_t of = 0, ofe;
+ rd_bool_t identical, suppress;
+ int state_duration_ms = (int)((rd_clock() - rkb->rkb_ts_state) / 1000);
+
+
+ /* If this is a logical broker we include its current nodename/address
+ * in the log message. */
+ rd_kafka_broker_lock(rkb);
+ if (rkb->rkb_source == RD_KAFKA_LOGICAL &&
+ !rd_kafka_broker_is_addrless(rkb)) {
+ of = (size_t)rd_snprintf(errstr, sizeof(errstr),
+ "%s: ", rkb->rkb_nodename);
+ if (of > sizeof(errstr))
+ of = 0; /* If nodename overflows the entire buffer we
+ * skip it completely since the error message
+ * itself is more important. */
+ }
+ rd_kafka_broker_unlock(rkb);
+
+ ofe = (size_t)rd_vsnprintf(errstr + of, sizeof(errstr) - of, fmt, ap);
+ if (ofe > sizeof(errstr) - of)
+ ofe = sizeof(errstr) - of;
+ of += ofe;
+
+ /* Provide more meaningful error messages in certain cases */
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT &&
+ !strcmp(errstr, "Disconnected")) {
+ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY) {
+ /* A disconnect while requesting ApiVersion typically
+ * means we're connecting to a SSL-listener as
+ * PLAINTEXT, but may also be caused by connecting to
+ * a broker that does not support ApiVersion (<0.10). */
+
+ if (rkb->rkb_proto != RD_KAFKA_PROTO_SSL &&
+ rkb->rkb_proto != RD_KAFKA_PROTO_SASL_SSL)
+ rd_kafka_broker_set_error(
+ rkb, level, err,
+ "Disconnected while requesting "
+ "ApiVersion: "
+ "might be caused by incorrect "
+ "security.protocol configuration "
+ "(connecting to a SSL listener?) or "
+ "broker version is < 0.10 "
+ "(see api.version.request)",
+ ap /*ignored*/);
+ else
+ rd_kafka_broker_set_error(
+ rkb, level, err,
+ "Disconnected while requesting "
+ "ApiVersion: "
+ "might be caused by broker version "
+ "< 0.10 (see api.version.request)",
+ ap /*ignored*/);
+ return;
+
+ } else if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP &&
+ state_duration_ms < 2000 /*2s*/ &&
+ rkb->rkb_rk->rk_conf.security_protocol !=
+ RD_KAFKA_PROTO_SASL_SSL &&
+ rkb->rkb_rk->rk_conf.security_protocol !=
+ RD_KAFKA_PROTO_SASL_PLAINTEXT) {
+ /* If disconnected shortly after transitioning to UP
+ * state it typically means the broker listener is
+ * configured for SASL authentication but the client
+ * is not. */
+ rd_kafka_broker_set_error(
+ rkb, level, err,
+ "Disconnected: verify that security.protocol "
+ "is correctly configured, broker might "
+ "require SASL authentication",
+ ap /*ignored*/);
+ return;
+ }
+ }
+
+ /* Check if error is identical to last error (prior to appending
+ * the variable suffix "after Xms in state Y"), if so we should
+ * suppress it. */
+ identical = err == rkb->rkb_last_err.err &&
+ !strcmp(rkb->rkb_last_err.errstr, errstr);
+ suppress = identical && rd_interval(&rkb->rkb_suppress.fail_error,
+ 30 * 1000 * 1000 /*30s*/, 0) <= 0;
+
+ /* Copy last error prior to adding extras */
+ rkb->rkb_last_err.err = err;
+ rd_strlcpy(rkb->rkb_last_err.errstr, errstr,
+ sizeof(rkb->rkb_last_err.errstr));
+
+ /* Time since last state change to help debug connection issues */
+ ofe = rd_snprintf(extra, sizeof(extra), "after %dms in state %s",
+ state_duration_ms,
+ rd_kafka_broker_state_names[rkb->rkb_state]);
+
+ /* Number of suppressed identical logs */
+ if (identical && !suppress && rkb->rkb_last_err.cnt >= 1 &&
+ ofe + 30 < sizeof(extra)) {
+ size_t r =
+ (size_t)rd_snprintf(extra + ofe, sizeof(extra) - ofe,
+ ", %d identical error(s) suppressed",
+ rkb->rkb_last_err.cnt);
+ if (r < sizeof(extra) - ofe)
+ ofe += r;
+ else
+ ofe = sizeof(extra);
+ }
+
+ /* Append the extra info if there is enough room */
+ if (ofe > 0 && of + ofe + 4 < sizeof(errstr))
+ rd_snprintf(errstr + of, sizeof(errstr) - of, " (%s)", extra);
+
+ /* Don't log interrupt-wakeups when terminating */
+ if (err == RD_KAFKA_RESP_ERR__INTR && rd_kafka_terminating(rkb->rkb_rk))
+ suppress = rd_true;
+
+ if (!suppress)
+ rkb->rkb_last_err.cnt = 1;
+ else
+ rkb->rkb_last_err.cnt++;
+
+ rd_rkb_dbg(rkb, BROKER, "FAIL", "%s (%s)%s%s", errstr,
+ rd_kafka_err2name(err),
+ identical ? ": identical to last error" : "",
+ suppress ? ": error log suppressed" : "");
+
+ if (level != LOG_DEBUG && (level <= LOG_CRIT || !suppress)) {
+ rd_kafka_log(rkb->rkb_rk, level, "FAIL", "%s: %s",
+ rkb->rkb_name, errstr);
+
+ /* Send ERR op to application for processing. */
+ rd_kafka_q_op_err(rkb->rkb_rk->rk_rep, err, "%s: %s",
+ rkb->rkb_name, errstr);
+ }
+}
+
+
+/**
+ * @brief Failure propagation to application.
+ *
+ * Will tear down connection to broker and trigger a reconnect.
+ *
+ * \p level is the log level, <=LOG_INFO will be logged while =LOG_DEBUG will
+ * be debug-logged.
+ *
+ * @locality broker thread
+ */
+void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
+ int level,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ va_list ap;
+ rd_kafka_bufq_t tmpq_waitresp, tmpq;
+ int old_state;
+ rd_kafka_toppar_t *rktp;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ if (rkb->rkb_transport) {
+ rd_kafka_transport_close(rkb->rkb_transport);
+ rkb->rkb_transport = NULL;
+
+ if (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP)
+ rd_atomic32_add(&rkb->rkb_c.disconnects, 1);
+ }
+
+ rkb->rkb_req_timeouts = 0;
+
+ if (rkb->rkb_recv_buf) {
+ rd_kafka_buf_destroy(rkb->rkb_recv_buf);
+ rkb->rkb_recv_buf = NULL;
+ }
+
+ va_start(ap, fmt);
+ rd_kafka_broker_set_error(rkb, level, err, fmt, ap);
+ va_end(ap);
+
+ rd_kafka_broker_lock(rkb);
+
+ /* If we're currently asking for ApiVersion and the connection
+ * went down it probably means the broker does not support that request
+ * and tore down the connection. In this case we disable that feature
+ * flag. */
+ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_APIVERSION_QUERY)
+ rd_kafka_broker_feature_disable(rkb,
+ RD_KAFKA_FEATURE_APIVERSION);
+
+ /* Set broker state */
+ old_state = rkb->rkb_state;
+ rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_DOWN);
+
+ /* Unlock broker since a requeue will try to lock it. */
+ rd_kafka_broker_unlock(rkb);
+
+ rd_atomic64_set(&rkb->rkb_c.ts_send, 0);
+ rd_atomic64_set(&rkb->rkb_c.ts_recv, 0);
+
+ /*
+ * Purge all buffers
+ * (put bufs on a temporary queue since bufs may be requeued,
+ * make sure outstanding requests are re-enqueued before
+ * bufs on outbufs queue.)
+ */
+ rd_kafka_bufq_init(&tmpq_waitresp);
+ rd_kafka_bufq_init(&tmpq);
+ rd_kafka_bufq_concat(&tmpq_waitresp, &rkb->rkb_waitresps);
+ rd_kafka_bufq_concat(&tmpq, &rkb->rkb_outbufs);
+ rd_atomic32_init(&rkb->rkb_blocking_request_cnt, 0);
+
+ /* Purge the in-flight buffers (might get re-enqueued in case
+ * of retries). */
+ rd_kafka_bufq_purge(rkb, &tmpq_waitresp, err);
+
+ /* Purge the waiting-in-output-queue buffers,
+ * might also get re-enqueued. */
+ rd_kafka_bufq_purge(rkb, &tmpq,
+ /* If failure was caused by a timeout,
+ * adjust the error code for in-queue requests. */
+ err == RD_KAFKA_RESP_ERR__TIMED_OUT
+ ? RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE
+ : err);
+
+ /* Update bufq for connection reset:
+ * - Purge connection-setup requests from outbufs since they will be
+ * reissued on the next connect.
+ * - Reset any partially sent buffer's offset.
+ */
+ rd_kafka_bufq_connection_reset(rkb, &rkb->rkb_outbufs);
+
+ /* Extra debugging for tracking termination-hang issues:
+ * show what is keeping this broker from decommissioning. */
+ if (rd_kafka_terminating(rkb->rkb_rk) &&
+ !rd_kafka_broker_terminating(rkb)) {
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "BRKTERM",
+ "terminating: broker still has %d refcnt(s), "
+ "%" PRId32 " buffer(s), %d partition(s)",
+ rd_refcnt_get(&rkb->rkb_refcnt),
+ rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
+ rkb->rkb_toppar_cnt);
+ rd_kafka_bufq_dump(rkb, "BRKOUTBUFS", &rkb->rkb_outbufs);
+ }
+
+ /* If this broker acts as the preferred (follower) replica for any
+ * partition, delegate the partition back to the leader. */
+ TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
+ rd_kafka_toppar_lock(rktp);
+ if (unlikely(rktp->rktp_broker != rkb)) {
+ /* Currently migrating away from this
+ * broker, skip. */
+ rd_kafka_toppar_unlock(rktp);
+ continue;
+ }
+ rd_kafka_toppar_unlock(rktp);
+
+ if (rktp->rktp_leader_id != rktp->rktp_broker_id) {
+ rd_kafka_toppar_delegate_to_leader(rktp);
+ }
+ }
+
+ /* Query for topic leaders to quickly pick up on failover. */
+ if (err != RD_KAFKA_RESP_ERR__DESTROY &&
+ old_state >= RD_KAFKA_BROKER_STATE_UP)
+ rd_kafka_metadata_refresh_known_topics(
+ rkb->rkb_rk, NULL, rd_true /*force*/, "broker down");
+}
+
+
+
+/**
+ * @brief Handle broker connection close.
+ *
+ * @locality broker thread
+ */
+void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ const char *errstr) {
+ int log_level = LOG_ERR;
+
+ if (!rkb->rkb_rk->rk_conf.log_connection_close) {
+ /* Silence all connection closes */
+ log_level = LOG_DEBUG;
+
+ } else {
+ /* Silence close logs for connections that are idle,
+ * it is most likely the broker's idle connection
+ * reaper kicking in.
+ *
+ * Indications there might be an error and not an
+ * idle disconnect:
+ * - If the connection age is low a disconnect
+ * typically indicates a failure, such as protocol mismatch.
+ * - If the connection hasn't been idle long enough.
+ * - There are outstanding requests, or requests enqueued.
+ *
+ * For non-idle connections, adjust log level:
+ * - requests in-flight: LOG_WARNING
+ * - else: LOG_INFO
+ */
+ rd_ts_t now = rd_clock();
+ rd_ts_t minidle =
+ RD_MAX(60 * 1000 /*60s*/,
+ rkb->rkb_rk->rk_conf.socket_timeout_ms) *
+ 1000;
+ int inflight = rd_kafka_bufq_cnt(&rkb->rkb_waitresps);
+ int inqueue = rd_kafka_bufq_cnt(&rkb->rkb_outbufs);
+
+ if (rkb->rkb_ts_state + minidle < now &&
+ rd_atomic64_get(&rkb->rkb_c.ts_send) + minidle < now &&
+ inflight + inqueue == 0)
+ log_level = LOG_DEBUG;
+ else if (inflight > 1)
+ log_level = LOG_WARNING;
+ else
+ log_level = LOG_INFO;
+ }
+
+ rd_kafka_broker_fail(rkb, log_level, err, "%s", errstr);
+}
+
+
+/**
+ * @brief Purge requests in \p rkbq matching request \p ApiKey
+ * and partition \p rktp.
+ *
+ * @warning ApiKey must be RD_KAFKAP_Produce
+ *
+ * @returns the number of purged buffers.
+ *
+ * @locality broker thread
+ */
+static int rd_kafka_broker_bufq_purge_by_toppar(rd_kafka_broker_t *rkb,
+ rd_kafka_bufq_t *rkbq,
+ int64_t ApiKey,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_buf_t *rkbuf, *tmp;
+ int cnt = 0;
+
+ rd_assert(ApiKey == RD_KAFKAP_Produce);
+
+ TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) {
+
+ if (rkbuf->rkbuf_reqhdr.ApiKey != ApiKey ||
+ rkbuf->rkbuf_u.Produce.batch.rktp != rktp ||
+ /* Skip partially sent buffers and let them transmit.
+ * The alternative would be to kill the connection here,
+ * which is more drastic and costly. */
+ rd_slice_offset(&rkbuf->rkbuf_reader) > 0)
+ continue;
+
+ rd_kafka_bufq_deq(rkbq, rkbuf);
+
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+
+/**
+ * Scan bufq for buffer timeouts, trigger buffer callback on timeout.
+ *
+ * If \p partial_cntp is non-NULL any partially sent buffers will increase
+ * the provided counter by 1.
+ *
+ * @param ApiKey Only match requests with this ApiKey, or -1 for all.
+ * @param now If 0, all buffers will time out, else the current clock.
+ * @param description "N requests timed out <description>", e.g., "in flight".
+ * Only used if log_first_n > 0.
+ * @param log_first_n Log the first N request timeouts.
+ *
+ * @returns the number of timed out buffers.
+ *
+ * @locality broker thread
+ */
+static int rd_kafka_broker_bufq_timeout_scan(rd_kafka_broker_t *rkb,
+ int is_waitresp_q,
+ rd_kafka_bufq_t *rkbq,
+ int *partial_cntp,
+ int16_t ApiKey,
+ rd_kafka_resp_err_t err,
+ rd_ts_t now,
+ const char *description,
+ int log_first_n) {
+ rd_kafka_buf_t *rkbuf, *tmp;
+ int cnt = 0;
+ int idx = -1;
+ const rd_kafka_buf_t *holb;
+
+restart:
+ holb = TAILQ_FIRST(&rkbq->rkbq_bufs);
+
+ TAILQ_FOREACH_SAFE(rkbuf, &rkbq->rkbq_bufs, rkbuf_link, tmp) {
+ rd_kafka_broker_state_t pre_state, post_state;
+
+ idx++;
+
+ if (likely(now && rkbuf->rkbuf_ts_timeout > now))
+ continue;
+
+ if (ApiKey != -1 && rkbuf->rkbuf_reqhdr.ApiKey != ApiKey)
+ continue;
+
+ if (partial_cntp && rd_slice_offset(&rkbuf->rkbuf_reader) > 0)
+ (*partial_cntp)++;
+
+ /* Convert rkbuf_ts_sent to elapsed time since request */
+ if (rkbuf->rkbuf_ts_sent)
+ rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent;
+ else
+ rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_enq;
+
+ rd_kafka_bufq_deq(rkbq, rkbuf);
+
+ if (now && cnt < log_first_n) {
+ char holbstr[256];
+ /* Head of line blocking:
+ * If this is not the first request in queue, but the
+ * initial first request did not time out,
+ * it typically means the first request is a
+ * long-running blocking one, holding up the
+ * sub-sequent requests.
+ * In this case log what is likely holding up the
+ * requests and what caused this request to time out. */
+ if (holb && holb == TAILQ_FIRST(&rkbq->rkbq_bufs)) {
+ rd_snprintf(
+ holbstr, sizeof(holbstr),
+ ": possibly held back by "
+ "preceeding%s %sRequest with "
+ "timeout in %dms",
+ (holb->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING)
+ ? " blocking"
+ : "",
+ rd_kafka_ApiKey2str(
+ holb->rkbuf_reqhdr.ApiKey),
+ (int)((holb->rkbuf_ts_timeout - now) /
+ 1000));
+ /* Only log the HOLB once */
+ holb = NULL;
+ } else {
+ *holbstr = '\0';
+ }
+
+ rd_rkb_log(
+ rkb, LOG_NOTICE, "REQTMOUT",
+ "Timed out %sRequest %s "
+ "(after %" PRId64 "ms, timeout #%d)%s",
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ description, rkbuf->rkbuf_ts_sent / 1000, cnt,
+ holbstr);
+ }
+
+ if (is_waitresp_q &&
+ rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
+ rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 0)
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+
+ pre_state = rd_kafka_broker_get_state(rkb);
+
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
+ cnt++;
+
+ /* If the buf_callback() triggered a broker state change
+ * (typically through broker_fail()) we can't trust the
+ * queue we are scanning to not have been touched, so we
+ * either restart the scan or bail out (if broker is now down),
+ * depending on the new state. #2326 */
+ post_state = rd_kafka_broker_get_state(rkb);
+ if (pre_state != post_state) {
+ /* If the new state is DOWN it means broker_fail()
+ * was called which may have modified the queues,
+ * to keep things safe we stop scanning this queue. */
+ if (post_state == RD_KAFKA_BROKER_STATE_DOWN)
+ break;
+ /* Else start scanning the queue from the beginning. */
+ goto restart;
+ }
+ }
+
+ return cnt;
+}
+
+
+/**
+ * Scan the wait-response and outbuf queues for message timeouts.
+ *
+ * Locality: Broker thread
+ */
+static void rd_kafka_broker_timeout_scan(rd_kafka_broker_t *rkb, rd_ts_t now) {
+ int inflight_cnt, retry_cnt, outq_cnt;
+ int partial_cnt = 0;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ /* In-flight requests waiting for response */
+ inflight_cnt = rd_kafka_broker_bufq_timeout_scan(
+ rkb, 1, &rkb->rkb_waitresps, NULL, -1, RD_KAFKA_RESP_ERR__TIMED_OUT,
+ now, "in flight", 5);
+ /* Requests in retry queue */
+ retry_cnt = rd_kafka_broker_bufq_timeout_scan(
+ rkb, 0, &rkb->rkb_retrybufs, NULL, -1,
+ RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in retry queue", 0);
+ /* Requests in local queue not sent yet.
+ * partial_cnt is included in outq_cnt and denotes a request
+ * that has been partially transmitted. */
+ outq_cnt = rd_kafka_broker_bufq_timeout_scan(
+ rkb, 0, &rkb->rkb_outbufs, &partial_cnt, -1,
+ RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE, now, "in output queue", 0);
+
+ if (inflight_cnt + retry_cnt + outq_cnt + partial_cnt > 0) {
+ rd_rkb_log(rkb, LOG_WARNING, "REQTMOUT",
+ "Timed out %i in-flight, %i retry-queued, "
+ "%i out-queue, %i partially-sent requests",
+ inflight_cnt, retry_cnt, outq_cnt, partial_cnt);
+
+ rkb->rkb_req_timeouts += inflight_cnt + outq_cnt;
+ rd_atomic64_add(&rkb->rkb_c.req_timeouts,
+ inflight_cnt + outq_cnt);
+
+ /* If this was a partially sent request that timed out, or the
+ * number of timed out requests have reached the
+ * socket.max.fails threshold, we need to take down the
+ * connection. */
+ if (partial_cnt > 0 ||
+ (rkb->rkb_rk->rk_conf.socket_max_fails &&
+ rkb->rkb_req_timeouts >=
+ rkb->rkb_rk->rk_conf.socket_max_fails &&
+ rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP)) {
+ char rttinfo[32];
+ /* Print average RTT (if avail) to help diagnose. */
+ rd_avg_calc(&rkb->rkb_avg_rtt, now);
+ if (rkb->rkb_avg_rtt.ra_v.avg)
+ rd_snprintf(rttinfo, sizeof(rttinfo),
+ " (average rtt %.3fms)",
+ (float)(rkb->rkb_avg_rtt.ra_v.avg /
+ 1000.0f));
+ else
+ rttinfo[0] = 0;
+ rd_kafka_broker_fail(rkb, LOG_ERR,
+ RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "%i request(s) timed out: "
+ "disconnect%s",
+ rkb->rkb_req_timeouts, rttinfo);
+ }
+ }
+}
+
+
+
+static ssize_t rd_kafka_broker_send(rd_kafka_broker_t *rkb, rd_slice_t *slice) {
+ ssize_t r;
+ char errstr[128];
+
+ rd_kafka_assert(rkb->rkb_rk,
+ rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP);
+ rd_kafka_assert(rkb->rkb_rk, rkb->rkb_transport);
+
+ r = rd_kafka_transport_send(rkb->rkb_transport, slice, errstr,
+ sizeof(errstr));
+
+ if (r == -1) {
+ rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
+ "Send failed: %s", errstr);
+ rd_atomic64_add(&rkb->rkb_c.tx_err, 1);
+ return -1;
+ }
+
+ rd_atomic64_add(&rkb->rkb_c.tx_bytes, r);
+ rd_atomic64_add(&rkb->rkb_c.tx, 1);
+ return r;
+}
+
+
+
+static int rd_kafka_broker_resolve(rd_kafka_broker_t *rkb,
+ const char *nodename,
+ rd_bool_t reset_cached_addr) {
+ const char *errstr;
+ int save_idx = 0;
+
+ if (!*nodename && rkb->rkb_source == RD_KAFKA_LOGICAL) {
+ rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__RESOLVE,
+ "Logical broker has no address yet");
+ return -1;
+ }
+
+ if (rkb->rkb_rsal &&
+ (reset_cached_addr ||
+ rkb->rkb_ts_rsal_last +
+ (rkb->rkb_rk->rk_conf.broker_addr_ttl * 1000) <
+ rd_clock())) {
+ /* Address list has expired. */
+
+ /* Save the address index to make sure we still round-robin
+ * if we get the same address list back */
+ save_idx = rkb->rkb_rsal->rsal_curr;
+
+ rd_sockaddr_list_destroy(rkb->rkb_rsal);
+ rkb->rkb_rsal = NULL;
+ }
+
+ if (!rkb->rkb_rsal) {
+ /* Resolve */
+ rkb->rkb_rsal = rd_getaddrinfo(
+ nodename, RD_KAFKA_PORT_STR, AI_ADDRCONFIG,
+ rkb->rkb_rk->rk_conf.broker_addr_family, SOCK_STREAM,
+ IPPROTO_TCP, rkb->rkb_rk->rk_conf.resolve_cb,
+ rkb->rkb_rk->rk_conf.opaque, &errstr);
+
+ if (!rkb->rkb_rsal) {
+ rd_kafka_broker_fail(
+ rkb, LOG_ERR, RD_KAFKA_RESP_ERR__RESOLVE,
+ "Failed to resolve '%s': %s", nodename, errstr);
+ return -1;
+ } else {
+ rkb->rkb_ts_rsal_last = rd_clock();
+ /* Continue at previous round-robin position */
+ if (rkb->rkb_rsal->rsal_cnt > save_idx)
+ rkb->rkb_rsal->rsal_curr = save_idx;
+ }
+ }
+
+ return 0;
+}
+
+
+static void rd_kafka_broker_buf_enq0(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf) {
+ rd_ts_t now;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ if (rkb->rkb_rk->rk_conf.sparse_connections &&
+ rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT) {
+ /* Sparse connections:
+ * Trigger connection when a new request is enqueued. */
+ rkb->rkb_persistconn.internal++;
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(rkb,
+ RD_KAFKA_BROKER_STATE_TRY_CONNECT);
+ rd_kafka_broker_unlock(rkb);
+ }
+
+ now = rd_clock();
+ rkbuf->rkbuf_ts_enq = now;
+ rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_SENT;
+
+ /* Calculate request attempt timeout */
+ rd_kafka_buf_calc_timeout(rkb->rkb_rk, rkbuf, now);
+
+ if (likely(rkbuf->rkbuf_prio == RD_KAFKA_PRIO_NORMAL)) {
+ /* Insert request at tail of queue */
+ TAILQ_INSERT_TAIL(&rkb->rkb_outbufs.rkbq_bufs, rkbuf,
+ rkbuf_link);
+
+ } else {
+ /* Insert request after any requests with a higher or
+ * equal priority.
+ * Also make sure the request is after added any partially
+ * sent request (of any prio).
+ * We need to check if buf corrid is set rather than
+ * rkbuf_of since SSL_write may return 0 and expect the
+ * exact same arguments the next call. */
+ rd_kafka_buf_t *prev, *after = NULL;
+
+ TAILQ_FOREACH(prev, &rkb->rkb_outbufs.rkbq_bufs, rkbuf_link) {
+ if (prev->rkbuf_prio < rkbuf->rkbuf_prio &&
+ prev->rkbuf_corrid == 0)
+ break;
+ after = prev;
+ }
+
+ if (after)
+ TAILQ_INSERT_AFTER(&rkb->rkb_outbufs.rkbq_bufs, after,
+ rkbuf, rkbuf_link);
+ else
+ TAILQ_INSERT_HEAD(&rkb->rkb_outbufs.rkbq_bufs, rkbuf,
+ rkbuf_link);
+ }
+
+ rd_atomic32_add(&rkb->rkb_outbufs.rkbq_cnt, 1);
+ if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
+ rd_atomic32_add(&rkb->rkb_outbufs.rkbq_msg_cnt,
+ rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
+}
+
+
+/**
+ * Finalize a stuffed rkbuf for sending to broker.
+ */
+static void rd_kafka_buf_finalize(rd_kafka_t *rk, rd_kafka_buf_t *rkbuf) {
+ size_t totsize;
+
+ rd_assert(!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE));
+
+ if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
+ /* Empty struct tags */
+ rd_kafka_buf_write_i8(rkbuf, 0);
+ }
+
+ /* Calculate total request buffer length. */
+ totsize = rd_buf_len(&rkbuf->rkbuf_buf) - 4;
+
+ /* Set up a buffer reader for sending the buffer. */
+ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
+
+ /**
+ * Update request header fields
+ */
+ /* Total request length */
+ rd_kafka_buf_update_i32(rkbuf, 0, (int32_t)totsize);
+
+ /* ApiVersion */
+ rd_kafka_buf_update_i16(rkbuf, 4 + 2, rkbuf->rkbuf_reqhdr.ApiVersion);
+}
+
+
+void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+
+
+ rkbuf->rkbuf_cb = resp_cb;
+ rkbuf->rkbuf_opaque = opaque;
+
+ rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
+
+ rd_kafka_broker_buf_enq0(rkb, rkbuf);
+}
+
+
+/**
+ * Enqueue buffer on broker's xmit queue, but fail buffer immediately
+ * if broker is not up.
+ *
+ * Locality: broker thread
+ */
+static int rd_kafka_broker_buf_enq2(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf) {
+ if (unlikely(rkb->rkb_source == RD_KAFKA_INTERNAL)) {
+ /* Fail request immediately if this is the internal broker. */
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb,
+ RD_KAFKA_RESP_ERR__TRANSPORT, NULL,
+ rkbuf);
+ return -1;
+ }
+
+ rd_kafka_broker_buf_enq0(rkb, rkbuf);
+
+ return 0;
+}
+
+
+
+/**
+ * Enqueue buffer for tranmission.
+ * Responses are enqueued on 'replyq' (RD_KAFKA_OP_RECV_BUF)
+ *
+ * Locality: any thread
+ */
+void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+
+ assert(rkbuf->rkbuf_rkb == rkb);
+ if (resp_cb) {
+ rkbuf->rkbuf_replyq = replyq;
+ rkbuf->rkbuf_cb = resp_cb;
+ rkbuf->rkbuf_opaque = opaque;
+ } else {
+ rd_dassert(!replyq.q);
+ }
+
+ /* Unmaked buffers will be finalized after the make callback. */
+ if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE))
+ rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
+
+ if (thrd_is_current(rkb->rkb_thread)) {
+ rd_kafka_broker_buf_enq2(rkb, rkbuf);
+
+ } else {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_BUF);
+ rko->rko_u.xbuf.rkbuf = rkbuf;
+ rd_kafka_q_enq(rkb->rkb_ops, rko);
+ }
+}
+
+
+
+/**
+ * @returns the current broker state change version.
+ * Pass this value to future rd_kafka_brokers_wait_state_change() calls
+ * to avoid the race condition where a state-change happens between
+ * an initial call to some API that fails and the sub-sequent
+ * .._wait_state_change() call.
+ */
+int rd_kafka_brokers_get_state_version(rd_kafka_t *rk) {
+ int version;
+ mtx_lock(&rk->rk_broker_state_change_lock);
+ version = rk->rk_broker_state_change_version;
+ mtx_unlock(&rk->rk_broker_state_change_lock);
+ return version;
+}
+
+/**
+ * @brief Wait at most \p timeout_ms for any state change for any broker.
+ * \p stored_version is the value previously returned by
+ * rd_kafka_brokers_get_state_version() prior to another API call
+ * that failed due to invalid state.
+ *
+ * Triggers:
+ * - broker state changes
+ * - broker transitioning from blocking to non-blocking
+ * - partition leader changes
+ * - group state changes
+ *
+ * @remark There is no guarantee that a state change actually took place.
+ *
+ * @returns 1 if a state change was signaled (maybe), else 0 (timeout)
+ *
+ * @locality any thread
+ */
+int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk,
+ int stored_version,
+ int timeout_ms) {
+ int r;
+ mtx_lock(&rk->rk_broker_state_change_lock);
+ if (stored_version != rk->rk_broker_state_change_version)
+ r = 1;
+ else
+ r = cnd_timedwait_ms(&rk->rk_broker_state_change_cnd,
+ &rk->rk_broker_state_change_lock,
+ timeout_ms) == thrd_success;
+ mtx_unlock(&rk->rk_broker_state_change_lock);
+ return r;
+}
+
+
+/**
+ * @brief Same as rd_kafka_brokers_wait_state_change() but will trigger
+ * the wakeup asynchronously through the provided \p eonce.
+ *
+ * If the eonce was added to the wait list its reference count
+ * will have been updated, this reference is later removed by
+ * rd_kafka_broker_state_change_trigger_eonce() by calling trigger().
+ *
+ * @returns 1 if the \p eonce was added to the wait-broker-state-changes list,
+ * or 0 if the \p stored_version is outdated in which case the
+ * caller should redo the broker lookup.
+ */
+int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk,
+ int stored_version,
+ rd_kafka_enq_once_t *eonce) {
+ int r = 1;
+ mtx_lock(&rk->rk_broker_state_change_lock);
+
+ if (stored_version != rk->rk_broker_state_change_version)
+ r = 0;
+ else {
+ rd_kafka_enq_once_add_source(eonce, "wait broker state change");
+ rd_list_add(&rk->rk_broker_state_change_waiters, eonce);
+ }
+
+ mtx_unlock(&rk->rk_broker_state_change_lock);
+ return r;
+}
+
+
+/**
+ * @brief eonce trigger callback for rd_list_apply() call in
+ * rd_kafka_brokers_broadcast_state_change()
+ */
+static int rd_kafka_broker_state_change_trigger_eonce(void *elem,
+ void *opaque) {
+ rd_kafka_enq_once_t *eonce = elem;
+ rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR,
+ "broker state change");
+ return 0; /* remove eonce from list */
+}
+
+
+/**
+ * @brief Broadcast broker state change to listeners, if any.
+ *
+ * @locality any thread
+ */
+void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk) {
+
+ rd_kafka_dbg(rk, GENERIC, "BROADCAST", "Broadcasting state change");
+
+ mtx_lock(&rk->rk_broker_state_change_lock);
+
+ /* Bump version */
+ rk->rk_broker_state_change_version++;
+
+ /* Trigger waiters */
+ rd_list_apply(&rk->rk_broker_state_change_waiters,
+ rd_kafka_broker_state_change_trigger_eonce, NULL);
+
+ /* Broadcast to listeners */
+ cnd_broadcast(&rk->rk_broker_state_change_cnd);
+
+ mtx_unlock(&rk->rk_broker_state_change_lock);
+}
+
+
+/**
+ * @returns a random broker (with refcnt increased) with matching \p state
+ * and where the \p filter function returns 0.
+ *
+ * Uses reservoir sampling.
+ *
+ * @param is_up Any broker that is up (UP or UPDATE state), \p state is ignored.
+ * @param filtered_cnt Optional pointer to integer which will be set to the
+ * number of brokers that matches the \p state or \p is_up but
+ * were filtered out by \p filter.
+ * @param filter is an optional callback used to filter out undesired brokers.
+ * The filter function should return 1 to filter out a broker,
+ * or 0 to keep it in the list of eligible brokers to return.
+ * rd_kafka_broker_lock() is held during the filter callback.
+ *
+ *
+ * @locks rd_kafka_*lock() MUST be held
+ * @locality any
+ */
+static rd_kafka_broker_t *
+rd_kafka_broker_random0(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ rd_bool_t is_up,
+ int state,
+ int *filtered_cnt,
+ int (*filter)(rd_kafka_broker_t *rk, void *opaque),
+ void *opaque) {
+ rd_kafka_broker_t *rkb, *good = NULL;
+ int cnt = 0;
+ int fcnt = 0;
+
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
+ continue;
+
+ rd_kafka_broker_lock(rkb);
+ if ((is_up && rd_kafka_broker_state_is_up(rkb->rkb_state)) ||
+ (!is_up && (int)rkb->rkb_state == state)) {
+ if (filter && filter(rkb, opaque)) {
+ /* Filtered out */
+ fcnt++;
+ } else {
+ if (cnt < 1 || rd_jitter(0, cnt) < 1) {
+ if (good)
+ rd_kafka_broker_destroy(good);
+ rd_kafka_broker_keep_fl(func, line,
+ rkb);
+ good = rkb;
+ }
+ cnt += 1;
+ }
+ }
+ rd_kafka_broker_unlock(rkb);
+ }
+
+ if (filtered_cnt)
+ *filtered_cnt = fcnt;
+
+ return good;
+}
+
+#define rd_kafka_broker_random(rk, state, filter, opaque) \
+ rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk, rd_false, state, \
+ NULL, filter, opaque)
+
+
+/**
+ * @returns the broker (with refcnt increased) with the highest weight based
+ * based on the provided weighing function.
+ *
+ * If multiple brokers share the same weight reservoir sampling will be used
+ * to randomly select one.
+ *
+ * @param weight_cb Weighing function that should return the sort weight
+ * for the given broker.
+ * Higher weight is better.
+ * A weight of <= 0 will filter out the broker.
+ * The passed broker object is locked.
+ * @param features (optional) Required broker features.
+ *
+ * @locks_required rk(read)
+ * @locality any
+ */
+static rd_kafka_broker_t *
+rd_kafka_broker_weighted(rd_kafka_t *rk,
+ int (*weight_cb)(rd_kafka_broker_t *rkb),
+ int features) {
+ rd_kafka_broker_t *rkb, *good = NULL;
+ int highest = 0;
+ int cnt = 0;
+
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ int weight;
+
+ rd_kafka_broker_lock(rkb);
+ if (features && (rkb->rkb_features & features) != features)
+ weight = 0;
+ else
+ weight = weight_cb(rkb);
+ rd_kafka_broker_unlock(rkb);
+
+ if (weight <= 0 || weight < highest)
+ continue;
+
+ if (weight > highest) {
+ highest = weight;
+ cnt = 0;
+ }
+
+ /* If same weight (cnt > 0), use reservoir sampling */
+ if (cnt < 1 || rd_jitter(0, cnt) < 1) {
+ if (good)
+ rd_kafka_broker_destroy(good);
+ rd_kafka_broker_keep(rkb);
+ good = rkb;
+ }
+ cnt++;
+ }
+
+ return good;
+}
+
+/**
+ * @brief Weighing function to select a usable broker connections,
+ * promoting connections according to the scoring below.
+ *
+ * Priority order:
+ * - is not a bootstrap broker
+ * - least idle last 10 minutes (unless blocking)
+ * - least idle hours (if above 10 minutes idle)
+ * - is not a logical broker (these connections have dedicated use and should
+ * preferably not be used for other purposes)
+ * - is not blocking
+ *
+ * Will prefer the most recently used broker connection for two reasons:
+ * - this connection is most likely to function properly.
+ * - allows truly idle connections to be killed by the broker's/LB's
+ * idle connection reaper.
+ *
+ * Connection must be up.
+ *
+ * @locks_required rkb
+ */
+static int rd_kafka_broker_weight_usable(rd_kafka_broker_t *rkb) {
+ int weight = 0;
+
+ if (!rd_kafka_broker_state_is_up(rkb->rkb_state))
+ return 0;
+
+ weight +=
+ 2000 * (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb));
+ weight += 10 * !RD_KAFKA_BROKER_IS_LOGICAL(rkb);
+
+ if (likely(!rd_atomic32_get(&rkb->rkb_blocking_request_cnt))) {
+ rd_ts_t tx_last = rd_atomic64_get(&rkb->rkb_c.ts_send);
+ int idle = (int)((rd_clock() -
+ (tx_last > 0 ? tx_last : rkb->rkb_ts_state)) /
+ 1000000);
+
+ weight += 1; /* is not blocking */
+
+ /* Prefer least idle broker (based on last 10 minutes use) */
+ if (idle < 0)
+ ; /*clock going backwards? do nothing */
+ else if (idle < 600 /*10 minutes*/)
+ weight += 1000 + (600 - idle);
+ else /* Else least idle hours (capped to 100h) */
+ weight += 100 + (100 - RD_MIN((idle / 3600), 100));
+ }
+
+ return weight;
+}
+
+
+/**
+ * @brief Returns a random broker (with refcnt increased) in state \p state.
+ *
+ * Uses Reservoir sampling.
+ *
+ * @param filter is optional, see rd_kafka_broker_random().
+ *
+ * @sa rd_kafka_broker_random
+ *
+ * @locks rd_kafka_*lock(rk) MUST be held.
+ * @locality any thread
+ */
+rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk,
+ int state,
+ int (*filter)(rd_kafka_broker_t *rkb,
+ void *opaque),
+ void *opaque,
+ const char *reason) {
+ rd_kafka_broker_t *rkb;
+
+ rkb = rd_kafka_broker_random(rk, state, filter, opaque);
+
+ if (!rkb && rk->rk_conf.sparse_connections) {
+ /* Sparse connections:
+ * If no eligible broker was found, schedule
+ * a random broker for connecting. */
+ rd_kafka_connect_any(rk, reason);
+ }
+
+ return rkb;
+}
+
+
+/**
+ * @brief Returns a random broker (with refcnt increased) which is up.
+ *
+ * @param filtered_cnt optional, see rd_kafka_broker_random0().
+ * @param filter is optional, see rd_kafka_broker_random0().
+ *
+ * @sa rd_kafka_broker_random
+ *
+ * @locks rd_kafka_*lock(rk) MUST be held.
+ * @locality any thread
+ */
+rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk,
+ int *filtered_cnt,
+ int (*filter)(rd_kafka_broker_t *rkb,
+ void *opaque),
+ void *opaque,
+ const char *reason) {
+ rd_kafka_broker_t *rkb;
+
+ rkb = rd_kafka_broker_random0(__FUNCTION__, __LINE__, rk,
+ rd_true /*is_up*/, -1, filtered_cnt,
+ filter, opaque);
+
+ if (!rkb && rk->rk_conf.sparse_connections) {
+ /* Sparse connections:
+ * If no eligible broker was found, schedule
+ * a random broker for connecting. */
+ rd_kafka_connect_any(rk, reason);
+ }
+
+ return rkb;
+}
+
+
+/**
+ * @brief Spend at most \p timeout_ms to acquire a usable (Up) broker.
+ *
+ * Prefers the most recently used broker, see rd_kafka_broker_weight_usable().
+ *
+ * @param features (optional) Required broker features.
+ *
+ * @returns A probably usable broker with increased refcount, or NULL on timeout
+ * @locks rd_kafka_*lock() if !do_lock
+ * @locality any
+ *
+ * @sa rd_kafka_broker_any_up()
+ */
+rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk,
+ int timeout_ms,
+ rd_dolock_t do_lock,
+ int features,
+ const char *reason) {
+ const rd_ts_t ts_end = rd_timeout_init(timeout_ms);
+
+ while (1) {
+ rd_kafka_broker_t *rkb;
+ int remains;
+ int version = rd_kafka_brokers_get_state_version(rk);
+
+ if (do_lock)
+ rd_kafka_rdlock(rk);
+
+ rkb = rd_kafka_broker_weighted(
+ rk, rd_kafka_broker_weight_usable, features);
+
+ if (!rkb && rk->rk_conf.sparse_connections) {
+ /* Sparse connections:
+ * If no eligible broker was found, schedule
+ * a random broker for connecting. */
+ rd_kafka_connect_any(rk, reason);
+ }
+
+ if (do_lock)
+ rd_kafka_rdunlock(rk);
+
+ if (rkb)
+ return rkb;
+
+ remains = rd_timeout_remains(ts_end);
+ if (rd_timeout_expired(remains))
+ return NULL;
+
+ rd_kafka_brokers_wait_state_change(rk, version, remains);
+ }
+
+ return NULL;
+}
+
+
+
+/**
+ * @returns the broker handle for \p broker_id using cached metadata
+ * information (if available) in state == \p state,
+ * with refcount increaesd.
+ *
+ * Otherwise enqueues the \p eonce on the wait-state-change queue
+ * which will be triggered on broker state changes.
+ * It may also be triggered erroneously, so the caller
+ * should call rd_kafka_broker_get_async() again when
+ * the eonce is triggered.
+ *
+ * @locks none
+ * @locality any thread
+ */
+rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk,
+ int32_t broker_id,
+ int state,
+ rd_kafka_enq_once_t *eonce) {
+ int version;
+ do {
+ rd_kafka_broker_t *rkb;
+
+ version = rd_kafka_brokers_get_state_version(rk);
+
+ rd_kafka_rdlock(rk);
+ rkb = rd_kafka_broker_find_by_nodeid0(rk, broker_id, state,
+ rd_true);
+ rd_kafka_rdunlock(rk);
+
+ if (rkb)
+ return rkb;
+
+ } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce));
+
+ return NULL; /* eonce added to wait list */
+}
+
+
+/**
+ * @brief Asynchronously look up current list of broker ids until available.
+ * Bootstrap and logical brokers are excluded from the list.
+ *
+ * To be called repeatedly with an valid eonce until a non-NULL
+ * list is returned.
+ *
+ * @param rk Client instance.
+ * @param eonce For triggering asynchronously on state change
+ * in case broker list isn't yet available.
+ * @return List of int32_t with broker nodeids when ready, NULL when the eonce
+ * was added to the wait list.
+ */
+rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk,
+ rd_kafka_enq_once_t *eonce) {
+ rd_list_t *nodeids = NULL;
+ int version, i, broker_cnt;
+
+ do {
+ rd_kafka_broker_t *rkb;
+ version = rd_kafka_brokers_get_state_version(rk);
+
+ rd_kafka_rdlock(rk);
+ broker_cnt = rd_atomic32_get(&rk->rk_broker_cnt);
+ if (nodeids) {
+ if (broker_cnt > rd_list_cnt(nodeids)) {
+ rd_list_destroy(nodeids);
+ /* Will be recreated just after */
+ nodeids = NULL;
+ } else {
+ rd_list_set_cnt(nodeids, 0);
+ }
+ }
+ if (!nodeids) {
+ nodeids = rd_list_new(0, NULL);
+ rd_list_init_int32(nodeids, broker_cnt);
+ }
+ i = 0;
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ rd_kafka_broker_lock(rkb);
+ if (rkb->rkb_nodeid != -1 &&
+ !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) {
+ rd_list_set_int32(nodeids, i++,
+ rkb->rkb_nodeid);
+ }
+ rd_kafka_broker_unlock(rkb);
+ }
+ rd_kafka_rdunlock(rk);
+
+ if (!rd_list_empty(nodeids))
+ return nodeids;
+ } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce));
+
+ if (nodeids) {
+ rd_list_destroy(nodeids);
+ }
+ return NULL; /* eonce added to wait list */
+}
+
+
+/**
+ * @returns the current controller using cached metadata information,
+ * and only if the broker's state == \p state.
+ * The reference count is increased for the returned broker.
+ *
+ * @locks none
+ * @locality any thread
+ */
+
+static rd_kafka_broker_t *rd_kafka_broker_controller_nowait(rd_kafka_t *rk,
+ int state) {
+ rd_kafka_broker_t *rkb;
+
+ rd_kafka_rdlock(rk);
+
+ if (rk->rk_controllerid == -1) {
+ rd_kafka_rdunlock(rk);
+ rd_kafka_metadata_refresh_brokers(rk, NULL,
+ "lookup controller");
+ return NULL;
+ }
+
+ rkb = rd_kafka_broker_find_by_nodeid0(rk, rk->rk_controllerid, state,
+ rd_true);
+
+ rd_kafka_rdunlock(rk);
+
+ return rkb;
+}
+
+
+/**
+ * @returns the current controller using cached metadata information if
+ * available in state == \p state, with refcount increaesd.
+ *
+ * Otherwise enqueues the \p eonce on the wait-controller queue
+ * which will be triggered on controller updates or broker state
+ * changes. It may also be triggered erroneously, so the caller
+ * should call rd_kafka_broker_controller_async() again when
+ * the eonce is triggered.
+ *
+ * @locks none
+ * @locality any thread
+ */
+rd_kafka_broker_t *
+rd_kafka_broker_controller_async(rd_kafka_t *rk,
+ int state,
+ rd_kafka_enq_once_t *eonce) {
+ int version;
+ do {
+ rd_kafka_broker_t *rkb;
+
+ version = rd_kafka_brokers_get_state_version(rk);
+
+ rkb = rd_kafka_broker_controller_nowait(rk, state);
+ if (rkb)
+ return rkb;
+
+ } while (!rd_kafka_brokers_wait_state_change_async(rk, version, eonce));
+
+ return NULL; /* eonce added to wait list */
+}
+
+
+/**
+ * @returns the current controller using cached metadata information,
+ * blocking up to \p abs_timeout for the controller to be known
+ * and to reach state == \p state. The reference count is increased
+ * for the returned broker.
+ *
+ * @locks none
+ * @locality any thread
+ */
+rd_kafka_broker_t *
+rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout) {
+
+ while (1) {
+ int version = rd_kafka_brokers_get_state_version(rk);
+ rd_kafka_broker_t *rkb;
+ int remains_ms;
+
+ rkb = rd_kafka_broker_controller_nowait(rk, state);
+ if (rkb)
+ return rkb;
+
+ remains_ms = rd_timeout_remains(abs_timeout);
+ if (rd_timeout_expired(remains_ms))
+ return NULL;
+
+ rd_kafka_brokers_wait_state_change(rk, version, remains_ms);
+ }
+}
+
+
+
+/**
+ * Find a waitresp (rkbuf awaiting response) by the correlation id.
+ */
+static rd_kafka_buf_t *rd_kafka_waitresp_find(rd_kafka_broker_t *rkb,
+ int32_t corrid) {
+ rd_kafka_buf_t *rkbuf;
+ rd_ts_t now = rd_clock();
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ TAILQ_FOREACH(rkbuf, &rkb->rkb_waitresps.rkbq_bufs, rkbuf_link)
+ if (rkbuf->rkbuf_corrid == corrid) {
+ /* Convert ts_sent to RTT */
+ rkbuf->rkbuf_ts_sent = now - rkbuf->rkbuf_ts_sent;
+ rd_avg_add(&rkb->rkb_avg_rtt, rkbuf->rkbuf_ts_sent);
+
+ if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
+ rd_atomic32_sub(&rkb->rkb_blocking_request_cnt, 1) == 1)
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+
+ rd_kafka_bufq_deq(&rkb->rkb_waitresps, rkbuf);
+ return rkbuf;
+ }
+ return NULL;
+}
+
+
+
+/**
+ * Map a response message to a request.
+ */
+static int rd_kafka_req_response(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_buf_t *req;
+ int log_decode_errors = LOG_ERR;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+
+ /* Find corresponding request message by correlation id */
+ if (unlikely(!(req = rd_kafka_waitresp_find(
+ rkb, rkbuf->rkbuf_reshdr.CorrId)))) {
+ /* unknown response. probably due to request timeout */
+ rd_atomic64_add(&rkb->rkb_c.rx_corrid_err, 1);
+ rd_rkb_dbg(rkb, BROKER, "RESPONSE",
+ "Response for unknown CorrId %" PRId32
+ " (timed out?)",
+ rkbuf->rkbuf_reshdr.CorrId);
+ rd_kafka_interceptors_on_response_received(
+ rkb->rkb_rk, -1, rd_kafka_broker_name(rkb), rkb->rkb_nodeid,
+ -1, -1, rkbuf->rkbuf_reshdr.CorrId, rkbuf->rkbuf_totlen, -1,
+ RD_KAFKA_RESP_ERR__NOENT);
+ rd_kafka_buf_destroy(rkbuf);
+ return -1;
+ }
+
+ rd_rkb_dbg(rkb, PROTOCOL, "RECV",
+ "Received %sResponse (v%hd, %" PRIusz
+ " bytes, CorrId %" PRId32 ", rtt %.2fms)",
+ rd_kafka_ApiKey2str(req->rkbuf_reqhdr.ApiKey),
+ req->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen,
+ rkbuf->rkbuf_reshdr.CorrId,
+ (float)req->rkbuf_ts_sent / 1000.0f);
+
+ /* Copy request's header and certain flags to response object's
+ * reqhdr for convenience. */
+ rkbuf->rkbuf_reqhdr = req->rkbuf_reqhdr;
+ rkbuf->rkbuf_flags |=
+ (req->rkbuf_flags & RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK);
+ rkbuf->rkbuf_ts_sent = req->rkbuf_ts_sent; /* copy rtt */
+
+ /* Set up response reader slice starting past the response header */
+ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf,
+ RD_KAFKAP_RESHDR_SIZE,
+ rd_buf_len(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE);
+
+ /* In case of flexibleVersion, skip the response header tags.
+ * The ApiVersion request/response is different since it needs
+ * be backwards compatible and thus has no header tags. */
+ if (req->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion)
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ if (!rkbuf->rkbuf_rkb) {
+ rkbuf->rkbuf_rkb = rkb;
+ rd_kafka_broker_keep(rkbuf->rkbuf_rkb);
+ } else
+ rd_assert(rkbuf->rkbuf_rkb == rkb);
+
+ /* Call callback. */
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, rkbuf, req);
+
+ return 0;
+
+err_parse:
+ rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, rkbuf->rkbuf_err, NULL, req);
+ rd_kafka_buf_destroy(rkbuf);
+ return -1;
+}
+
+
+
+int rd_kafka_recv(rd_kafka_broker_t *rkb) {
+ rd_kafka_buf_t *rkbuf;
+ ssize_t r;
+ /* errstr is not set by buf_read errors, so default it here. */
+ char errstr[512] = "Protocol parse failure";
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ const int log_decode_errors = LOG_ERR;
+
+
+ /* It is impossible to estimate the correct size of the response
+ * so we split the read up in two parts: first we read the protocol
+ * length and correlation id (i.e., the Response header), and then
+ * when we know the full length of the response we allocate a new
+ * buffer and call receive again.
+ * All this in an async fashion (e.g., partial reads).
+ */
+ if (!(rkbuf = rkb->rkb_recv_buf)) {
+ /* No receive in progress: create new buffer */
+
+ rkbuf = rd_kafka_buf_new(2, RD_KAFKAP_RESHDR_SIZE);
+
+ rkb->rkb_recv_buf = rkbuf;
+
+ /* Set up buffer reader for the response header. */
+ rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_RESHDR_SIZE,
+ RD_KAFKAP_RESHDR_SIZE);
+ }
+
+ rd_dassert(rd_buf_write_remains(&rkbuf->rkbuf_buf) > 0);
+
+ r = rd_kafka_transport_recv(rkb->rkb_transport, &rkbuf->rkbuf_buf,
+ errstr, sizeof(errstr));
+ if (unlikely(r <= 0)) {
+ if (r == 0)
+ return 0; /* EAGAIN */
+ err = RD_KAFKA_RESP_ERR__TRANSPORT;
+ rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
+ goto err;
+ }
+
+ rd_atomic64_set(&rkb->rkb_c.ts_recv, rd_clock());
+
+ if (rkbuf->rkbuf_totlen == 0) {
+ /* Packet length not known yet. */
+
+ if (unlikely(rd_buf_write_pos(&rkbuf->rkbuf_buf) <
+ RD_KAFKAP_RESHDR_SIZE)) {
+ /* Need response header for packet length and corrid.
+ * Wait for more data. */
+ return 0;
+ }
+
+ rd_assert(!rkbuf->rkbuf_rkb);
+ rkbuf->rkbuf_rkb = rkb; /* Protocol parsing code needs
+ * the rkb for logging, but we dont
+ * want to keep a reference to the
+ * broker this early since that extra
+ * refcount will mess with the broker's
+ * refcount-based termination code. */
+
+ /* Initialize reader */
+ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0,
+ RD_KAFKAP_RESHDR_SIZE);
+
+ /* Read protocol header */
+ rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.Size);
+ rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reshdr.CorrId);
+
+ rkbuf->rkbuf_rkb = NULL; /* Reset */
+
+ rkbuf->rkbuf_totlen = rkbuf->rkbuf_reshdr.Size;
+
+ /* Make sure message size is within tolerable limits. */
+ if (rkbuf->rkbuf_totlen < 4 /*CorrId*/ ||
+ rkbuf->rkbuf_totlen >
+ (size_t)rkb->rkb_rk->rk_conf.recv_max_msg_size) {
+ rd_snprintf(errstr, sizeof(errstr),
+ "Invalid response size %" PRId32
+ " (0..%i): "
+ "increase receive.message.max.bytes",
+ rkbuf->rkbuf_reshdr.Size,
+ rkb->rkb_rk->rk_conf.recv_max_msg_size);
+ err = RD_KAFKA_RESP_ERR__BAD_MSG;
+ rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
+ goto err;
+ }
+
+ rkbuf->rkbuf_totlen -= 4; /*CorrId*/
+
+ if (rkbuf->rkbuf_totlen > 0) {
+ /* Allocate another buffer that fits all data (short of
+ * the common response header). We want all
+ * data to be in contigious memory. */
+
+ rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf,
+ rkbuf->rkbuf_totlen);
+ }
+ }
+
+ if (rd_buf_write_pos(&rkbuf->rkbuf_buf) - RD_KAFKAP_RESHDR_SIZE ==
+ rkbuf->rkbuf_totlen) {
+ /* Message is complete, pass it on to the original requester. */
+ rkb->rkb_recv_buf = NULL;
+ rd_atomic64_add(&rkb->rkb_c.rx, 1);
+ rd_atomic64_add(&rkb->rkb_c.rx_bytes,
+ rd_buf_write_pos(&rkbuf->rkbuf_buf));
+ rd_kafka_req_response(rkb, rkbuf);
+ }
+
+ return 1;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+ if (!strcmp(errstr, "Disconnected"))
+ rd_kafka_broker_conn_closed(rkb, err, errstr);
+ else
+ rd_kafka_broker_fail(rkb, LOG_ERR, err, "Receive failed: %s",
+ errstr);
+ return -1;
+}
+
+
+/**
+ * Linux version of socket_cb providing racefree CLOEXEC.
+ */
+int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque) {
+#ifdef SOCK_CLOEXEC
+ return socket(domain, type | SOCK_CLOEXEC, protocol);
+#else
+ return rd_kafka_socket_cb_generic(domain, type, protocol, opaque);
+#endif
+}
+
+/**
+ * Fallback version of socket_cb NOT providing racefree CLOEXEC,
+ * but setting CLOEXEC after socket creation (if FD_CLOEXEC is defined).
+ */
+int rd_kafka_socket_cb_generic(int domain,
+ int type,
+ int protocol,
+ void *opaque) {
+ int s;
+ int on = 1;
+ s = (int)socket(domain, type, protocol);
+ if (s == -1)
+ return -1;
+#ifdef FD_CLOEXEC
+ if (fcntl(s, F_SETFD, FD_CLOEXEC, &on) == -1)
+ fprintf(stderr,
+ "WARNING: librdkafka: %s: "
+ "fcntl(FD_CLOEXEC) failed: %s: ignoring\n",
+ __FUNCTION__, rd_strerror(errno));
+#endif
+ return s;
+}
+
+
+
+/**
+ * @brief Update the reconnect backoff.
+ * Should be called when a connection is made, or all addresses
+ * a broker resolves to has been exhausted without successful connect.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+static void
+rd_kafka_broker_update_reconnect_backoff(rd_kafka_broker_t *rkb,
+ const rd_kafka_conf_t *conf,
+ rd_ts_t now) {
+ int backoff;
+
+ /* If last connection attempt was more than reconnect.backoff.max.ms
+ * ago, reset the reconnect backoff to the initial
+ * reconnect.backoff.ms value. */
+ if (rkb->rkb_ts_reconnect + (conf->reconnect_backoff_max_ms * 1000) <
+ now)
+ rkb->rkb_reconnect_backoff_ms = conf->reconnect_backoff_ms;
+
+ /* Apply -25%...+50% jitter to next backoff. */
+ backoff = rd_jitter((int)((float)rkb->rkb_reconnect_backoff_ms * 0.75),
+ (int)((float)rkb->rkb_reconnect_backoff_ms * 1.5));
+
+ /* Cap to reconnect.backoff.max.ms. */
+ backoff = RD_MIN(backoff, conf->reconnect_backoff_max_ms);
+
+ /* Set time of next reconnect */
+ rkb->rkb_ts_reconnect = now + (backoff * 1000);
+ rkb->rkb_reconnect_backoff_ms = RD_MIN(
+ rkb->rkb_reconnect_backoff_ms * 2, conf->reconnect_backoff_max_ms);
+}
+
+
+/**
+ * @brief Calculate time until next reconnect attempt.
+ *
+ * @returns the number of milliseconds to the next connection attempt, or 0
+ * if immediate.
+ * @locality broker thread
+ * @locks none
+ */
+
+static RD_INLINE int
+rd_kafka_broker_reconnect_backoff(const rd_kafka_broker_t *rkb, rd_ts_t now) {
+ rd_ts_t remains;
+
+ if (unlikely(rkb->rkb_ts_reconnect == 0))
+ return 0; /* immediate */
+
+ remains = rkb->rkb_ts_reconnect - now;
+ if (remains <= 0)
+ return 0; /* immediate */
+
+ return (int)(remains / 1000);
+}
+
+
+/**
+ * @brief Unittest for reconnect.backoff.ms
+ */
+static int rd_ut_reconnect_backoff(void) {
+ rd_kafka_broker_t rkb = RD_ZERO_INIT;
+ rd_kafka_conf_t conf = {.reconnect_backoff_ms = 10,
+ .reconnect_backoff_max_ms = 90};
+ rd_ts_t now = 1000000;
+ int backoff;
+
+ rkb.rkb_reconnect_backoff_ms = conf.reconnect_backoff_ms;
+
+ /* broker's backoff is the initial reconnect.backoff.ms=10 */
+ rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
+ backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
+ RD_UT_ASSERT_RANGE(backoff, 7, 15, "%d");
+
+ /* .. 20 */
+ rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
+ backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
+ RD_UT_ASSERT_RANGE(backoff, 15, 30, "%d");
+
+ /* .. 40 */
+ rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
+ backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
+ RD_UT_ASSERT_RANGE(backoff, 30, 60, "%d");
+
+ /* .. 80, the jitter is capped at reconnect.backoff.max.ms=90 */
+ rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
+ backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
+ RD_UT_ASSERT_RANGE(backoff, 60, conf.reconnect_backoff_max_ms, "%d");
+
+ /* .. 90, capped by reconnect.backoff.max.ms */
+ rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
+ backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
+ RD_UT_ASSERT_RANGE(backoff, 67, conf.reconnect_backoff_max_ms, "%d");
+
+ /* .. 90, should remain at capped value. */
+ rd_kafka_broker_update_reconnect_backoff(&rkb, &conf, now);
+ backoff = rd_kafka_broker_reconnect_backoff(&rkb, now);
+ RD_UT_ASSERT_RANGE(backoff, 67, conf.reconnect_backoff_max_ms, "%d");
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Initiate asynchronous connection attempt to the next address
+ * in the broker's address list.
+ * While the connect is asynchronous and its IO served in the
+ * CONNECT state, the initial name resolve is blocking.
+ *
+ * @returns -1 on error, 0 if broker does not have a hostname, or 1
+ * if the connection is now in progress.
+ */
+static int rd_kafka_broker_connect(rd_kafka_broker_t *rkb) {
+ const rd_sockaddr_inx_t *sinx;
+ char errstr[512];
+ char nodename[RD_KAFKA_NODENAME_SIZE];
+ rd_bool_t reset_cached_addr = rd_false;
+
+ rd_rkb_dbg(rkb, BROKER, "CONNECT", "broker in state %s connecting",
+ rd_kafka_broker_state_names[rkb->rkb_state]);
+
+ rd_atomic32_add(&rkb->rkb_c.connects, 1);
+
+ rd_kafka_broker_lock(rkb);
+ rd_strlcpy(nodename, rkb->rkb_nodename, sizeof(nodename));
+
+ /* If the nodename was changed since the last connect,
+ * reset the address cache. */
+ reset_cached_addr = (rkb->rkb_connect_epoch != rkb->rkb_nodename_epoch);
+ rkb->rkb_connect_epoch = rkb->rkb_nodename_epoch;
+ /* Logical brokers might not have a hostname set, in which case
+ * we should not try to connect. */
+ if (*nodename)
+ rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_CONNECT);
+ rd_kafka_broker_unlock(rkb);
+
+ if (!*nodename) {
+ rd_rkb_dbg(rkb, BROKER, "CONNECT",
+ "broker has no address yet: postponing connect");
+ return 0;
+ }
+
+ rd_kafka_broker_update_reconnect_backoff(rkb, &rkb->rkb_rk->rk_conf,
+ rd_clock());
+
+ if (rd_kafka_broker_resolve(rkb, nodename, reset_cached_addr) == -1)
+ return -1;
+
+ sinx = rd_sockaddr_list_next(rkb->rkb_rsal);
+
+ rd_kafka_assert(rkb->rkb_rk, !rkb->rkb_transport);
+
+ if (!(rkb->rkb_transport = rd_kafka_transport_connect(
+ rkb, sinx, errstr, sizeof(errstr)))) {
+ rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
+ "%s", errstr);
+ return -1;
+ }
+
+ rkb->rkb_ts_connect = rd_clock();
+
+ return 1;
+}
+
+
+/**
+ * @brief Call when connection is ready to transition to fully functional
+ * UP state.
+ *
+ * @locality Broker thread
+ */
+void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb) {
+
+ rkb->rkb_max_inflight = rkb->rkb_rk->rk_conf.max_inflight;
+
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(rkb, RD_KAFKA_BROKER_STATE_UP);
+ rd_kafka_broker_unlock(rkb);
+
+ /* Request metadata (async):
+ * try locally known topics first and if there are none try
+ * getting just the broker list. */
+ if (rd_kafka_metadata_refresh_known_topics(
+ NULL, rkb, rd_false /*dont force*/, "connected") ==
+ RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ rd_kafka_metadata_refresh_brokers(NULL, rkb, "connected");
+}
+
+
+
+static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb);
+
+
+/**
+ * @brief Parses and handles SaslMechanism response, transitions
+ * the broker state.
+ *
+ */
+static void rd_kafka_broker_handle_SaslHandshake(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ int32_t MechCnt;
+ int16_t ErrorCode;
+ int i = 0;
+ char *mechs = "(n/a)";
+ size_t msz, mof = 0;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ return;
+
+ if (err)
+ goto err;
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ rd_kafka_buf_read_i32(rkbuf, &MechCnt);
+
+ if (MechCnt < 0 || MechCnt > 100)
+ rd_kafka_buf_parse_fail(
+ rkbuf, "Invalid MechanismCount %" PRId32, MechCnt);
+
+ /* Build a CSV string of supported mechanisms. */
+ msz = RD_MIN(511, 1 + (MechCnt * 32));
+ mechs = rd_alloca(msz);
+ *mechs = '\0';
+
+ for (i = 0; i < MechCnt; i++) {
+ rd_kafkap_str_t mech;
+ rd_kafka_buf_read_str(rkbuf, &mech);
+
+ mof += rd_snprintf(mechs + mof, msz - mof, "%s%.*s",
+ i ? "," : "", RD_KAFKAP_STR_PR(&mech));
+
+ if (mof >= msz)
+ break;
+ }
+
+ rd_rkb_dbg(rkb, PROTOCOL | RD_KAFKA_DBG_SECURITY | RD_KAFKA_DBG_BROKER,
+ "SASLMECHS", "Broker supported SASL mechanisms: %s", mechs);
+
+ if (ErrorCode) {
+ err = ErrorCode;
+ goto err;
+ }
+
+ /* Circle back to connect_auth() to start proper AUTH state. */
+ rd_kafka_broker_connect_auth(rkb);
+ return;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+ rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION,
+ "SASL %s mechanism handshake failed: %s: "
+ "broker's supported mechanisms: %s",
+ rkb->rkb_rk->rk_conf.sasl.mechanisms,
+ rd_kafka_err2str(err), mechs);
+}
+
+
+/**
+ * @brief Transition state to:
+ * - AUTH_HANDSHAKE (if SASL is configured and handshakes supported)
+ * - AUTH (if SASL is configured but no handshake is required or
+ * not supported, or has already taken place.)
+ * - UP (if SASL is not configured)
+ *
+ * @locks_acquired rkb
+ */
+static void rd_kafka_broker_connect_auth(rd_kafka_broker_t *rkb) {
+
+ if ((rkb->rkb_proto == RD_KAFKA_PROTO_SASL_PLAINTEXT ||
+ rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL)) {
+
+ rd_rkb_dbg(rkb, SECURITY | RD_KAFKA_DBG_BROKER, "AUTH",
+ "Auth in state %s (handshake %ssupported)",
+ rd_kafka_broker_state_names[rkb->rkb_state],
+ (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)
+ ? ""
+ : "not ");
+
+ /* Broker >= 0.10.0: send request to select mechanism */
+ if (rkb->rkb_state != RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE &&
+ (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) {
+
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE);
+ rd_kafka_broker_unlock(rkb);
+
+ rd_kafka_SaslHandshakeRequest(
+ rkb, rkb->rkb_rk->rk_conf.sasl.mechanisms,
+ RD_KAFKA_NO_REPLYQ,
+ rd_kafka_broker_handle_SaslHandshake, NULL);
+ } else {
+ /* Either Handshake succeeded (protocol selected)
+ * or Handshakes were not supported.
+ * In both cases continue with authentication. */
+ char sasl_errstr[512];
+
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(
+ rkb,
+ (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ)
+ ? RD_KAFKA_BROKER_STATE_AUTH_REQ
+ : RD_KAFKA_BROKER_STATE_AUTH_LEGACY);
+ rd_kafka_broker_unlock(rkb);
+
+ if (rd_kafka_sasl_client_new(
+ rkb->rkb_transport, sasl_errstr,
+ sizeof(sasl_errstr)) == -1) {
+ rd_kafka_broker_fail(
+ rkb, LOG_ERR,
+ RD_KAFKA_RESP_ERR__AUTHENTICATION,
+ "Failed to initialize "
+ "SASL authentication: %s",
+ sasl_errstr);
+ return;
+ }
+ }
+
+ return;
+ }
+
+ /* No authentication required. */
+ rd_kafka_broker_connect_up(rkb);
+}
+
+
+/**
+ * @brief Specify API versions to use for this connection.
+ *
+ * @param apis is an allocated list of supported partitions.
+ * If NULL the default set will be used based on the
+ * \p broker.version.fallback property.
+ * @param api_cnt number of elements in \p apis
+ *
+ * @remark \p rkb takes ownership of \p apis.
+ *
+ * @locality Broker thread
+ * @locks_required rkb
+ */
+static void rd_kafka_broker_set_api_versions(rd_kafka_broker_t *rkb,
+ struct rd_kafka_ApiVersion *apis,
+ size_t api_cnt) {
+
+ if (rkb->rkb_ApiVersions)
+ rd_free(rkb->rkb_ApiVersions);
+
+
+ if (!apis) {
+ rd_rkb_dbg(
+ rkb, PROTOCOL | RD_KAFKA_DBG_BROKER, "APIVERSION",
+ "Using (configuration fallback) %s protocol features",
+ rkb->rkb_rk->rk_conf.broker_version_fallback);
+
+
+ rd_kafka_get_legacy_ApiVersions(
+ rkb->rkb_rk->rk_conf.broker_version_fallback, &apis,
+ &api_cnt, rkb->rkb_rk->rk_conf.broker_version_fallback);
+
+ /* Make a copy to store on broker. */
+ rd_kafka_ApiVersions_copy(apis, api_cnt, &apis, &api_cnt);
+ }
+
+ rkb->rkb_ApiVersions = apis;
+ rkb->rkb_ApiVersions_cnt = api_cnt;
+
+ /* Update feature set based on supported broker APIs. */
+ rd_kafka_broker_features_set(
+ rkb, rd_kafka_features_check(rkb, apis, api_cnt));
+}
+
+
+/**
+ * Handler for ApiVersion response.
+ */
+static void rd_kafka_broker_handle_ApiVersion(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ struct rd_kafka_ApiVersion *apis = NULL;
+ size_t api_cnt = 0;
+ int16_t retry_ApiVersion = -1;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ return;
+
+ err = rd_kafka_handle_ApiVersion(rk, rkb, err, rkbuf, request, &apis,
+ &api_cnt);
+
+ /* Broker does not support our ApiVersionRequest version,
+ * see if we can downgrade to an older version. */
+ if (err == RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION) {
+ size_t i;
+
+ /* Find the broker's highest supported version for
+ * ApiVersionRequest and use that to retry. */
+ for (i = 0; i < api_cnt; i++) {
+ if (apis[i].ApiKey == RD_KAFKAP_ApiVersion) {
+ retry_ApiVersion =
+ RD_MIN(request->rkbuf_reqhdr.ApiVersion - 1,
+ apis[i].MaxVer);
+ break;
+ }
+ }
+
+ /* Before v3 the broker would not return its supported
+ * ApiVersionRequests, so we go straight for version 0. */
+ if (i == api_cnt && request->rkbuf_reqhdr.ApiVersion > 0)
+ retry_ApiVersion = 0;
+
+ } else if (err == RD_KAFKA_RESP_ERR_INVALID_REQUEST) {
+ rd_rkb_log(rkb, LOG_ERR, "APIVERSION",
+ "ApiVersionRequest v%hd failed due to "
+ "invalid request: "
+ "check client.software.name (\"%s\") and "
+ "client.software.version (\"%s\") "
+ "for invalid characters: "
+ "falling back to older request version",
+ request->rkbuf_reqhdr.ApiVersion,
+ rk->rk_conf.sw_name, rk->rk_conf.sw_version);
+ retry_ApiVersion = 0;
+ }
+
+ if (err && apis)
+ rd_free(apis);
+
+ if (retry_ApiVersion != -1) {
+ /* Retry request with a lower version */
+ rd_rkb_dbg(
+ rkb, BROKER | RD_KAFKA_DBG_FEATURE | RD_KAFKA_DBG_PROTOCOL,
+ "APIVERSION",
+ "ApiVersionRequest v%hd failed due to %s: "
+ "retrying with v%hd",
+ request->rkbuf_reqhdr.ApiVersion, rd_kafka_err2name(err),
+ retry_ApiVersion);
+ rd_kafka_ApiVersionRequest(
+ rkb, retry_ApiVersion, RD_KAFKA_NO_REPLYQ,
+ rd_kafka_broker_handle_ApiVersion, NULL);
+ return;
+ }
+
+
+ if (err) {
+ if (rkb->rkb_transport)
+ rd_kafka_broker_fail(
+ rkb, LOG_WARNING, RD_KAFKA_RESP_ERR__TRANSPORT,
+ "ApiVersionRequest failed: %s: "
+ "probably due to broker version < 0.10 "
+ "(see api.version.request configuration)",
+ rd_kafka_err2str(err));
+ return;
+ }
+
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_api_versions(rkb, apis, api_cnt);
+ rd_kafka_broker_unlock(rkb);
+
+ rd_kafka_broker_connect_auth(rkb);
+}
+
+
+/**
+ * Call when asynchronous connection attempt completes, either succesfully
+ * (if errstr is NULL) or fails.
+ *
+ * @locks_acquired rkb
+ * @locality broker thread
+ */
+void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr) {
+
+ if (errstr) {
+ /* Connect failed */
+ rd_kafka_broker_fail(rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
+ "%s", errstr);
+ return;
+ }
+
+ /* Connect succeeded */
+ rkb->rkb_connid++;
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "CONNECTED",
+ "Connected (#%d)", rkb->rkb_connid);
+ rkb->rkb_max_inflight = 1; /* Hold back other requests until
+ * ApiVersion, SaslHandshake, etc
+ * are done. */
+
+ rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN);
+
+ rd_kafka_broker_lock(rkb);
+
+ if (rkb->rkb_rk->rk_conf.api_version_request &&
+ rd_interval_immediate(&rkb->rkb_ApiVersion_fail_intvl, 0, 0) > 0) {
+ /* Use ApiVersion to query broker for supported API versions. */
+ rd_kafka_broker_feature_enable(rkb,
+ RD_KAFKA_FEATURE_APIVERSION);
+ }
+
+ if (!(rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION)) {
+ /* Use configured broker.version.fallback to
+ * figure out API versions.
+ * In case broker.version.fallback indicates a version
+ * that supports ApiVersionRequest it will update
+ * rkb_features to have FEATURE_APIVERSION set which will
+ * trigger an ApiVersionRequest below. */
+ rd_kafka_broker_set_api_versions(rkb, NULL, 0);
+ }
+
+ if (rkb->rkb_features & RD_KAFKA_FEATURE_APIVERSION) {
+ /* Query broker for supported API versions.
+ * This may fail with a disconnect on non-supporting brokers
+ * so hold off any other requests until we get a response,
+ * and if the connection is torn down we disable this feature.
+ */
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_APIVERSION_QUERY);
+ rd_kafka_broker_unlock(rkb);
+
+ rd_kafka_ApiVersionRequest(
+ rkb, -1 /* Use highest version we support */,
+ RD_KAFKA_NO_REPLYQ, rd_kafka_broker_handle_ApiVersion,
+ NULL);
+ } else {
+ rd_kafka_broker_unlock(rkb);
+
+ /* Authenticate if necessary */
+ rd_kafka_broker_connect_auth(rkb);
+ }
+}
+
+
+
+/**
+ * @brief Checks if the given API request+version is supported by the broker.
+ * @returns 1 if supported, else 0.
+ * @locality broker thread
+ * @locks none
+ */
+static RD_INLINE int rd_kafka_broker_request_supported(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf) {
+ struct rd_kafka_ApiVersion skel = {.ApiKey =
+ rkbuf->rkbuf_reqhdr.ApiKey};
+ struct rd_kafka_ApiVersion *ret;
+
+ if (unlikely(rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_ApiVersion))
+ return 1; /* ApiVersion requests are used to detect
+ * the supported API versions, so should always
+ * be allowed through. */
+
+ /* First try feature flags, if any, which may cover a larger
+ * set of APIs. */
+ if (rkbuf->rkbuf_features)
+ return (rkb->rkb_features & rkbuf->rkbuf_features) ==
+ rkbuf->rkbuf_features;
+
+ /* Then try the ApiVersion map. */
+ ret =
+ bsearch(&skel, rkb->rkb_ApiVersions, rkb->rkb_ApiVersions_cnt,
+ sizeof(*rkb->rkb_ApiVersions), rd_kafka_ApiVersion_key_cmp);
+ if (!ret)
+ return 0;
+
+ return ret->MinVer <= rkbuf->rkbuf_reqhdr.ApiVersion &&
+ rkbuf->rkbuf_reqhdr.ApiVersion <= ret->MaxVer;
+}
+
+
+/**
+ * Send queued messages to broker
+ *
+ * Locality: io thread
+ */
+int rd_kafka_send(rd_kafka_broker_t *rkb) {
+ rd_kafka_buf_t *rkbuf;
+ unsigned int cnt = 0;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP &&
+ rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight &&
+ (rkbuf = TAILQ_FIRST(&rkb->rkb_outbufs.rkbq_bufs))) {
+ ssize_t r;
+ size_t pre_of = rd_slice_offset(&rkbuf->rkbuf_reader);
+ rd_ts_t now;
+
+ if (unlikely(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE)) {
+ /* Request has not been created/baked yet,
+ * call its make callback. */
+ rd_kafka_resp_err_t err;
+
+ err = rkbuf->rkbuf_make_req_cb(
+ rkb, rkbuf, rkbuf->rkbuf_make_opaque);
+
+ rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_NEED_MAKE;
+
+ /* Free the make_opaque */
+ if (rkbuf->rkbuf_free_make_opaque_cb &&
+ rkbuf->rkbuf_make_opaque) {
+ rkbuf->rkbuf_free_make_opaque_cb(
+ rkbuf->rkbuf_make_opaque);
+ rkbuf->rkbuf_make_opaque = NULL;
+ }
+
+ if (unlikely(err)) {
+ rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_PROTOCOL,
+ "MAKEREQ",
+ "Failed to make %sRequest: %s",
+ rd_kafka_ApiKey2str(
+ rkbuf->rkbuf_reqhdr.ApiKey),
+ rd_kafka_err2str(err));
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, err,
+ NULL, rkbuf);
+ continue;
+ }
+
+ rd_kafka_buf_finalize(rkb->rkb_rk, rkbuf);
+ }
+
+ /* Check for broker support */
+ if (unlikely(!rd_kafka_broker_request_supported(rkb, rkbuf))) {
+ rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
+ rd_rkb_dbg(
+ rkb, BROKER | RD_KAFKA_DBG_PROTOCOL, "UNSUPPORTED",
+ "Failing %sResponse "
+ "(v%hd, %" PRIusz " bytes, CorrId %" PRId32
+ "): "
+ "request not supported by broker "
+ "(missing api.version.request=false or "
+ "incorrect broker.version.fallback config?)",
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_totlen,
+ rkbuf->rkbuf_reshdr.CorrId);
+ rd_kafka_buf_callback(
+ rkb->rkb_rk, rkb,
+ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, NULL,
+ rkbuf);
+ continue;
+ }
+
+ /* Set CorrId header field, unless this is the latter part
+ * of a partial send in which case the corrid has already
+ * been set.
+ * Due to how SSL_write() will accept a buffer but still
+ * return 0 in some cases we can't rely on the buffer offset
+ * but need to use corrid to check this. SSL_write() expects
+ * us to send the same buffer again when 0 is returned.
+ */
+ if (rkbuf->rkbuf_corrid == 0 ||
+ rkbuf->rkbuf_connid != rkb->rkb_connid) {
+ rd_assert(rd_slice_offset(&rkbuf->rkbuf_reader) == 0);
+ rkbuf->rkbuf_corrid = ++rkb->rkb_corrid;
+ rd_kafka_buf_update_i32(rkbuf, 4 + 2 + 2,
+ rkbuf->rkbuf_corrid);
+ rkbuf->rkbuf_connid = rkb->rkb_connid;
+ } else if (pre_of > RD_KAFKAP_REQHDR_SIZE) {
+ rd_kafka_assert(NULL,
+ rkbuf->rkbuf_connid == rkb->rkb_connid);
+ }
+
+ if (0) {
+ rd_rkb_dbg(
+ rkb, PROTOCOL, "SEND",
+ "Send %s corrid %" PRId32
+ " at "
+ "offset %" PRIusz "/%" PRIusz,
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_corrid, pre_of,
+ rd_slice_size(&rkbuf->rkbuf_reader));
+ }
+
+ if ((r = rd_kafka_broker_send(rkb, &rkbuf->rkbuf_reader)) == -1)
+ return -1;
+
+ now = rd_clock();
+ rd_atomic64_set(&rkb->rkb_c.ts_send, now);
+
+ /* Partial send? Continue next time. */
+ if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0) {
+ rd_rkb_dbg(
+ rkb, PROTOCOL, "SEND",
+ "Sent partial %sRequest "
+ "(v%hd, "
+ "%" PRIdsz "+%" PRIdsz "/%" PRIusz
+ " bytes, "
+ "CorrId %" PRId32 ")",
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_reqhdr.ApiVersion, (ssize_t)pre_of, r,
+ rd_slice_size(&rkbuf->rkbuf_reader),
+ rkbuf->rkbuf_corrid);
+ return 0;
+ }
+
+ rd_rkb_dbg(rkb, PROTOCOL, "SEND",
+ "Sent %sRequest (v%hd, %" PRIusz " bytes @ %" PRIusz
+ ", "
+ "CorrId %" PRId32 ")",
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_reqhdr.ApiVersion,
+ rd_slice_size(&rkbuf->rkbuf_reader), pre_of,
+ rkbuf->rkbuf_corrid);
+
+ rd_atomic64_add(&rkb->rkb_c.reqtype[rkbuf->rkbuf_reqhdr.ApiKey],
+ 1);
+
+ /* Notify transport layer of full request sent */
+ if (likely(rkb->rkb_transport != NULL))
+ rd_kafka_transport_request_sent(rkb, rkbuf);
+
+ /* Entire buffer sent, unlink from outbuf */
+ rd_kafka_bufq_deq(&rkb->rkb_outbufs, rkbuf);
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_SENT;
+
+ /* Store time for RTT calculation */
+ rkbuf->rkbuf_ts_sent = now;
+
+ /* Add to outbuf_latency averager */
+ rd_avg_add(&rkb->rkb_avg_outbuf_latency,
+ rkbuf->rkbuf_ts_sent - rkbuf->rkbuf_ts_enq);
+
+ if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_BLOCKING &&
+ rd_atomic32_add(&rkb->rkb_blocking_request_cnt, 1) == 1)
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+
+ /* Put buffer on response wait list unless we are not
+ * expecting a response (required_acks=0). */
+ if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NO_RESPONSE))
+ rd_kafka_bufq_enq(&rkb->rkb_waitresps, rkbuf);
+ else { /* Call buffer callback for delivery report. */
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf);
+ }
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+
+/**
+ * Add 'rkbuf' to broker 'rkb's retry queue.
+ */
+void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
+
+ /* Restore original replyq since replyq.q will have been NULLed
+ * by buf_callback()/replyq_enq(). */
+ if (!rkbuf->rkbuf_replyq.q && rkbuf->rkbuf_orig_replyq.q) {
+ rkbuf->rkbuf_replyq = rkbuf->rkbuf_orig_replyq;
+ rd_kafka_replyq_clear(&rkbuf->rkbuf_orig_replyq);
+ }
+
+ /* If called from another thread than rkb's broker thread
+ * enqueue the buffer on the broker's op queue. */
+ if (!thrd_is_current(rkb->rkb_thread)) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_XMIT_RETRY);
+ rko->rko_u.xbuf.rkbuf = rkbuf;
+ rd_kafka_q_enq(rkb->rkb_ops, rko);
+ return;
+ }
+
+ rd_rkb_dbg(rkb, PROTOCOL, "RETRY",
+ "Retrying %sRequest (v%hd, %" PRIusz
+ " bytes, retry %d/%d, "
+ "prev CorrId %" PRId32 ") in %dms",
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_reqhdr.ApiVersion,
+ rd_slice_size(&rkbuf->rkbuf_reader), rkbuf->rkbuf_retries,
+ rkbuf->rkbuf_max_retries, rkbuf->rkbuf_corrid,
+ rkb->rkb_rk->rk_conf.retry_backoff_ms);
+
+ rd_atomic64_add(&rkb->rkb_c.tx_retries, 1);
+
+ rkbuf->rkbuf_ts_retry =
+ rd_clock() + (rkb->rkb_rk->rk_conf.retry_backoff_ms * 1000);
+ /* Precaution: time out the request if it hasn't moved from the
+ * retry queue within the retry interval (such as when the broker is
+ * down). */
+ // FIXME: implememt this properly.
+ rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_ts_retry + (5 * 1000 * 1000);
+
+ /* Reset send offset */
+ rd_slice_seek(&rkbuf->rkbuf_reader, 0);
+ rkbuf->rkbuf_corrid = 0;
+
+ rd_kafka_bufq_enq(&rkb->rkb_retrybufs, rkbuf);
+}
+
+
+/**
+ * Move buffers that have expired their retry backoff time from the
+ * retry queue to the outbuf.
+ */
+static void rd_kafka_broker_retry_bufs_move(rd_kafka_broker_t *rkb,
+ rd_ts_t *next_wakeup) {
+ rd_ts_t now = rd_clock();
+ rd_kafka_buf_t *rkbuf;
+ int cnt = 0;
+
+ while ((rkbuf = TAILQ_FIRST(&rkb->rkb_retrybufs.rkbq_bufs))) {
+ if (rkbuf->rkbuf_ts_retry > now) {
+ if (rkbuf->rkbuf_ts_retry < *next_wakeup)
+ *next_wakeup = rkbuf->rkbuf_ts_retry;
+ break;
+ }
+
+ rd_kafka_bufq_deq(&rkb->rkb_retrybufs, rkbuf);
+
+ rd_kafka_broker_buf_enq0(rkb, rkbuf);
+ cnt++;
+ }
+
+ if (cnt > 0)
+ rd_rkb_dbg(rkb, BROKER, "RETRY",
+ "Moved %d retry buffer(s) to output queue", cnt);
+}
+
+
+/**
+ * @brief Propagate delivery report for entire message queue.
+ *
+ * @param err The error which will be set on each message.
+ * @param status The status which will be set on each message.
+ *
+ * To avoid extra iterations, the \p err and \p status are set on
+ * the message as they are popped off the OP_DR msgq in rd_kafka_poll() et.al
+ */
+void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_t *rk = rkt->rkt_rk;
+
+ if (unlikely(rd_kafka_msgq_len(rkmq) == 0))
+ return;
+
+ if (err && rd_kafka_is_transactional(rk))
+ rd_atomic64_add(&rk->rk_eos.txn_dr_fails,
+ rd_kafka_msgq_len(rkmq));
+
+ /* Call on_acknowledgement() interceptors */
+ rd_kafka_interceptors_on_acknowledgement_queue(rk, rkmq, err);
+
+ if (rk->rk_drmode != RD_KAFKA_DR_MODE_NONE &&
+ (!rk->rk_conf.dr_err_only || err)) {
+ /* Pass all messages to application thread in one op. */
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_DR);
+ rko->rko_err = err;
+ rko->rko_u.dr.rkt = rd_kafka_topic_keep(rkt);
+ rd_kafka_msgq_init(&rko->rko_u.dr.msgq);
+
+ /* Move all messages to op's msgq */
+ rd_kafka_msgq_move(&rko->rko_u.dr.msgq, rkmq);
+
+ rd_kafka_q_enq(rk->rk_rep, rko);
+
+ } else {
+ /* No delivery report callback. */
+
+ /* Destroy the messages right away. */
+ rd_kafka_msgq_purge(rk, rkmq);
+ }
+}
+
+
+/**
+ * @brief Trigger delivery reports for implicitly acked messages.
+ *
+ * @locks none
+ * @locality broker thread - either last or current leader
+ */
+void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ uint64_t last_msgid) {
+ rd_kafka_msgq_t acked = RD_KAFKA_MSGQ_INITIALIZER(acked);
+ rd_kafka_msgq_t acked2 = RD_KAFKA_MSGQ_INITIALIZER(acked2);
+ rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+
+ if (rktp->rktp_rkt->rkt_conf.required_acks != 0)
+ status = RD_KAFKA_MSG_STATUS_PERSISTED;
+
+ rd_kafka_msgq_move_acked(&acked, &rktp->rktp_xmit_msgq, last_msgid,
+ status);
+ rd_kafka_msgq_move_acked(&acked2, &rktp->rktp_msgq, last_msgid, status);
+
+ /* Insert acked2 into acked in correct order */
+ rd_kafka_msgq_insert_msgq(&acked, &acked2,
+ rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+
+ if (!rd_kafka_msgq_len(&acked))
+ return;
+
+ rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "IMPLICITACK",
+ "%.*s [%" PRId32
+ "] %d message(s) implicitly acked "
+ "by subsequent batch success "
+ "(msgids %" PRIu64 "..%" PRIu64
+ ", "
+ "last acked %" PRIu64 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_msgq_len(&acked),
+ rd_kafka_msgq_first(&acked)->rkm_u.producer.msgid,
+ rd_kafka_msgq_last(&acked)->rkm_u.producer.msgid,
+ last_msgid);
+
+ /* Trigger delivery reports */
+ rd_kafka_dr_msgq(rktp->rktp_rkt, &acked, RD_KAFKA_RESP_ERR_NO_ERROR);
+}
+
+
+
+/**
+ * @brief Map existing partitions to this broker using the
+ * toppar's leader_id. Only undelegated partitions
+ * matching this broker are mapped.
+ *
+ * @locks none
+ * @locality any
+ */
+static void rd_kafka_broker_map_partitions(rd_kafka_broker_t *rkb) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_topic_t *rkt;
+ int cnt = 0;
+
+ if (rkb->rkb_nodeid == -1 || RD_KAFKA_BROKER_IS_LOGICAL(rkb))
+ return;
+
+ rd_kafka_rdlock(rk);
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ int i;
+
+ rd_kafka_topic_wrlock(rkt);
+ for (i = 0; i < rkt->rkt_partition_cnt; i++) {
+ rd_kafka_toppar_t *rktp = rkt->rkt_p[i];
+
+ /* Only map undelegated partitions matching this
+ * broker*/
+ rd_kafka_toppar_lock(rktp);
+ if (rktp->rktp_leader_id == rkb->rkb_nodeid &&
+ !(rktp->rktp_broker && rktp->rktp_next_broker)) {
+ rd_kafka_toppar_broker_update(
+ rktp, rktp->rktp_leader_id, rkb,
+ "broker node information updated");
+ cnt++;
+ }
+ rd_kafka_toppar_unlock(rktp);
+ }
+ rd_kafka_topic_wrunlock(rkt);
+ }
+ rd_kafka_rdunlock(rk);
+
+ rd_rkb_dbg(rkb, TOPIC | RD_KAFKA_DBG_BROKER, "LEADER",
+ "Mapped %d partition(s) to broker", cnt);
+}
+
+
+/**
+ * @brief Broker id comparator
+ */
+static int rd_kafka_broker_cmp_by_id(const void *_a, const void *_b) {
+ const rd_kafka_broker_t *a = _a, *b = _b;
+ return RD_CMP(a->rkb_nodeid, b->rkb_nodeid);
+}
+
+
+/**
+ * @brief Set the broker logname (used in logs) to a copy of \p logname.
+ *
+ * @locality any
+ * @locks none
+ */
+static void rd_kafka_broker_set_logname(rd_kafka_broker_t *rkb,
+ const char *logname) {
+ mtx_lock(&rkb->rkb_logname_lock);
+ if (rkb->rkb_logname)
+ rd_free(rkb->rkb_logname);
+ rkb->rkb_logname = rd_strdup(logname);
+ mtx_unlock(&rkb->rkb_logname_lock);
+}
+
+
+
+/**
+ * @brief Prepare destruction of the broker object.
+ *
+ * Since rd_kafka_broker_terminating() relies on the refcnt of the
+ * broker to reach 1, we need to loose any self-references
+ * to avoid a hang (waiting for refcnt decrease) on destruction.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+static void rd_kafka_broker_prepare_destroy(rd_kafka_broker_t *rkb) {
+ rd_kafka_broker_monitor_del(&rkb->rkb_coord_monitor);
+}
+
+
+/**
+ * @brief Serve a broker op (an op posted by another thread to be handled by
+ * this broker's thread).
+ *
+ * @returns true if calling op loop should break out, else false to continue.
+ * @locality broker thread
+ * @locks none
+ */
+static RD_WARN_UNUSED_RESULT rd_bool_t
+rd_kafka_broker_op_serve(rd_kafka_broker_t *rkb, rd_kafka_op_t *rko) {
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_resp_err_t topic_err;
+ rd_bool_t wakeup = rd_false;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ switch (rko->rko_type) {
+ case RD_KAFKA_OP_NODE_UPDATE: {
+ enum { _UPD_NAME = 0x1, _UPD_ID = 0x2 } updated = 0;
+ char brokername[RD_KAFKA_NODENAME_SIZE];
+
+ /* Need kafka_wrlock for updating rk_broker_by_id */
+ rd_kafka_wrlock(rkb->rkb_rk);
+ rd_kafka_broker_lock(rkb);
+
+ if (strcmp(rkb->rkb_nodename, rko->rko_u.node.nodename)) {
+ rd_rkb_dbg(rkb, BROKER, "UPDATE",
+ "Nodename changed from %s to %s",
+ rkb->rkb_nodename, rko->rko_u.node.nodename);
+ rd_strlcpy(rkb->rkb_nodename, rko->rko_u.node.nodename,
+ sizeof(rkb->rkb_nodename));
+ rkb->rkb_nodename_epoch++;
+ updated |= _UPD_NAME;
+ }
+
+ if (rko->rko_u.node.nodeid != -1 &&
+ !RD_KAFKA_BROKER_IS_LOGICAL(rkb) &&
+ rko->rko_u.node.nodeid != rkb->rkb_nodeid) {
+ int32_t old_nodeid = rkb->rkb_nodeid;
+ rd_rkb_dbg(rkb, BROKER, "UPDATE",
+ "NodeId changed from %" PRId32
+ " to %" PRId32,
+ rkb->rkb_nodeid, rko->rko_u.node.nodeid);
+
+ rkb->rkb_nodeid = rko->rko_u.node.nodeid;
+
+ /* Update system thread name */
+ rd_kafka_set_thread_sysname("rdk:broker%" PRId32,
+ rkb->rkb_nodeid);
+
+ /* Update broker_by_id sorted list */
+ if (old_nodeid == -1)
+ rd_list_add(&rkb->rkb_rk->rk_broker_by_id, rkb);
+ rd_list_sort(&rkb->rkb_rk->rk_broker_by_id,
+ rd_kafka_broker_cmp_by_id);
+
+ updated |= _UPD_ID;
+ }
+
+ rd_kafka_mk_brokername(brokername, sizeof(brokername),
+ rkb->rkb_proto, rkb->rkb_nodename,
+ rkb->rkb_nodeid, RD_KAFKA_LEARNED);
+ if (strcmp(rkb->rkb_name, brokername)) {
+ /* Udate the name copy used for logging. */
+ rd_kafka_broker_set_logname(rkb, brokername);
+
+ rd_rkb_dbg(rkb, BROKER, "UPDATE",
+ "Name changed from %s to %s", rkb->rkb_name,
+ brokername);
+ rd_strlcpy(rkb->rkb_name, brokername,
+ sizeof(rkb->rkb_name));
+ }
+ rd_kafka_broker_unlock(rkb);
+ rd_kafka_wrunlock(rkb->rkb_rk);
+
+ if (updated & _UPD_NAME)
+ rd_kafka_broker_fail(rkb, LOG_DEBUG,
+ RD_KAFKA_RESP_ERR__TRANSPORT,
+ "Broker hostname updated");
+ else if (updated & _UPD_ID) {
+ /* Map existing partitions to this broker. */
+ rd_kafka_broker_map_partitions(rkb);
+
+ /* If broker is currently in state up we need
+ * to trigger a state change so it exits its
+ * state&type based .._serve() loop. */
+ rd_kafka_broker_lock(rkb);
+ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP)
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_UPDATE);
+ rd_kafka_broker_unlock(rkb);
+ }
+
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+ break;
+ }
+
+ case RD_KAFKA_OP_XMIT_BUF:
+ rd_kafka_broker_buf_enq2(rkb, rko->rko_u.xbuf.rkbuf);
+ rko->rko_u.xbuf.rkbuf = NULL; /* buffer now owned by broker */
+ if (rko->rko_replyq.q) {
+ /* Op will be reused for forwarding response. */
+ rko = NULL;
+ }
+ break;
+
+ case RD_KAFKA_OP_XMIT_RETRY:
+ rd_kafka_broker_buf_retry(rkb, rko->rko_u.xbuf.rkbuf);
+ rko->rko_u.xbuf.rkbuf = NULL;
+ break;
+
+ case RD_KAFKA_OP_PARTITION_JOIN:
+ /*
+ * Add partition to broker toppars
+ */
+ rktp = rko->rko_rktp;
+ rd_kafka_toppar_lock(rktp);
+
+ /* Abort join if instance is terminating */
+ if (rd_kafka_terminating(rkb->rkb_rk) ||
+ (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE)) {
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
+ "Topic %s [%" PRId32
+ "]: not joining broker: "
+ "%s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_terminating(rkb->rkb_rk)
+ ? "instance is terminating"
+ : "partition removed");
+
+ rd_kafka_broker_destroy(rktp->rktp_next_broker);
+ rktp->rktp_next_broker = NULL;
+ rd_kafka_toppar_unlock(rktp);
+ break;
+ }
+
+ /* See if we are still the next broker */
+ if (rktp->rktp_next_broker != rkb) {
+ rd_rkb_dbg(
+ rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
+ "Topic %s [%" PRId32
+ "]: not joining broker "
+ "(next broker %s)",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rktp->rktp_next_broker
+ ? rd_kafka_broker_name(rktp->rktp_next_broker)
+ : "(none)");
+
+ /* Need temporary refcount so we can safely unlock
+ * after q_enq(). */
+ rd_kafka_toppar_keep(rktp);
+
+ /* No, forward this op to the new next broker. */
+ rd_kafka_q_enq(rktp->rktp_next_broker->rkb_ops, rko);
+ rko = NULL;
+
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_toppar_destroy(rktp);
+
+ break;
+ }
+
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
+ "Topic %s [%" PRId32
+ "]: joining broker "
+ "(rktp %p, %d message(s) queued)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rktp, rd_kafka_msgq_len(&rktp->rktp_msgq));
+
+ rd_kafka_assert(NULL,
+ !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB));
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_RKB;
+ rd_kafka_toppar_keep(rktp);
+ rd_kafka_broker_lock(rkb);
+ TAILQ_INSERT_TAIL(&rkb->rkb_toppars, rktp, rktp_rkblink);
+ rkb->rkb_toppar_cnt++;
+ rd_kafka_broker_unlock(rkb);
+ rktp->rktp_broker = rkb;
+ rd_assert(!rktp->rktp_msgq_wakeup_q);
+ rktp->rktp_msgq_wakeup_q = rd_kafka_q_keep(rkb->rkb_ops);
+ rd_kafka_broker_keep(rkb);
+
+ if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) {
+ rd_kafka_broker_active_toppar_add(rkb, rktp, "joining");
+
+ if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
+ /* Wait for all outstanding requests from
+ * the previous leader to finish before
+ * producing anything to this new leader. */
+ rd_kafka_idemp_drain_toppar(
+ rktp,
+ "wait for outstanding requests to "
+ "finish before producing to "
+ "new leader");
+ }
+ }
+
+ rd_kafka_broker_destroy(rktp->rktp_next_broker);
+ rktp->rktp_next_broker = NULL;
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+ break;
+
+ case RD_KAFKA_OP_PARTITION_LEAVE:
+ /*
+ * Remove partition from broker toppars
+ */
+ rktp = rko->rko_rktp;
+
+ /* If there is a topic-wide error, use it as error code
+ * when failing messages below. */
+ topic_err = rd_kafka_topic_get_error(rktp->rktp_rkt);
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Multiple PARTITION_LEAVEs are possible during partition
+ * migration, make sure we're supposed to handle this one. */
+ if (unlikely(rktp->rktp_broker != rkb)) {
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
+ "Topic %s [%" PRId32
+ "]: "
+ "ignoring PARTITION_LEAVE: "
+ "not delegated to broker (%s)",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rktp->rktp_broker
+ ? rd_kafka_broker_name(rktp->rktp_broker)
+ : "none");
+ rd_kafka_toppar_unlock(rktp);
+ break;
+ }
+ rd_kafka_toppar_unlock(rktp);
+
+ /* Remove from fetcher list */
+ rd_kafka_toppar_fetch_decide(rktp, rkb, 1 /*force remove*/);
+
+ if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER) {
+ /* Purge any ProduceRequests for this toppar
+ * in the output queue. */
+ rd_kafka_broker_bufq_purge_by_toppar(
+ rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp,
+ RD_KAFKA_RESP_ERR__RETRY);
+ }
+
+
+ rd_kafka_toppar_lock(rktp);
+
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
+ "Topic %s [%" PRId32
+ "]: leaving broker "
+ "(%d messages in xmitq, next broker %s, rktp %p)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_msgq_len(&rktp->rktp_xmit_msgq),
+ rktp->rktp_next_broker
+ ? rd_kafka_broker_name(rktp->rktp_next_broker)
+ : "(none)",
+ rktp);
+
+ /* Insert xmitq(broker-local) messages to the msgq(global)
+ * at their sorted position to maintain ordering. */
+ rd_kafka_msgq_insert_msgq(
+ &rktp->rktp_msgq, &rktp->rktp_xmit_msgq,
+ rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+
+ if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER)
+ rd_kafka_broker_active_toppar_del(rkb, rktp, "leaving");
+
+ rd_kafka_broker_lock(rkb);
+ TAILQ_REMOVE(&rkb->rkb_toppars, rktp, rktp_rkblink);
+ rkb->rkb_toppar_cnt--;
+ rd_kafka_broker_unlock(rkb);
+ rd_kafka_broker_destroy(rktp->rktp_broker);
+ if (rktp->rktp_msgq_wakeup_q) {
+ rd_kafka_q_destroy(rktp->rktp_msgq_wakeup_q);
+ rktp->rktp_msgq_wakeup_q = NULL;
+ }
+ rktp->rktp_broker = NULL;
+
+ rd_assert(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_RKB);
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_RKB;
+
+ if (rktp->rktp_next_broker) {
+ /* There is a next broker we need to migrate to. */
+ rko->rko_type = RD_KAFKA_OP_PARTITION_JOIN;
+ rd_kafka_q_enq(rktp->rktp_next_broker->rkb_ops, rko);
+ rko = NULL;
+ } else {
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_TOPIC, "TOPBRK",
+ "Topic %s [%" PRId32
+ "]: no next broker, "
+ "failing %d message(s) in partition queue",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&rktp->rktp_msgq));
+ rd_kafka_assert(NULL, rd_kafka_msgq_len(
+ &rktp->rktp_xmit_msgq) == 0);
+ rd_kafka_dr_msgq(
+ rktp->rktp_rkt, &rktp->rktp_msgq,
+ rd_kafka_terminating(rkb->rkb_rk)
+ ? RD_KAFKA_RESP_ERR__DESTROY
+ : (topic_err
+ ? topic_err
+ : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION));
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_toppar_destroy(rktp); /* from JOIN */
+
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+ break;
+
+ case RD_KAFKA_OP_TERMINATE:
+ /* nop: just a wake-up. */
+ rd_rkb_dbg(rkb, BROKER, "TERM",
+ "Received TERMINATE op in state %s: "
+ "%d refcnts, %d toppar(s), %d active toppar(s), "
+ "%d outbufs, %d waitresps, %d retrybufs",
+ rd_kafka_broker_state_names[rkb->rkb_state],
+ rd_refcnt_get(&rkb->rkb_refcnt), rkb->rkb_toppar_cnt,
+ rkb->rkb_active_toppar_cnt,
+ (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
+ (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps),
+ (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs));
+ /* Expedite termination by bringing down the broker
+ * and trigger a state change.
+ * This makes sure any eonce dependent on state changes
+ * are triggered. */
+ rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY,
+ "Client is terminating");
+
+ rd_kafka_broker_prepare_destroy(rkb);
+ wakeup = rd_true;
+ break;
+
+ case RD_KAFKA_OP_WAKEUP:
+ wakeup = rd_true;
+ break;
+
+ case RD_KAFKA_OP_PURGE:
+ rd_kafka_broker_handle_purge_queues(rkb, rko);
+ rko = NULL; /* the rko is reused for the reply */
+ break;
+
+ case RD_KAFKA_OP_CONNECT:
+ /* Sparse connections: connection requested, transition
+ * to TRY_CONNECT state to trigger new connection. */
+ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT) {
+ rd_rkb_dbg(rkb, BROKER, "CONNECT",
+ "Received CONNECT op");
+ rkb->rkb_persistconn.internal++;
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
+ rd_kafka_broker_unlock(rkb);
+
+ } else if (rkb->rkb_state >=
+ RD_KAFKA_BROKER_STATE_TRY_CONNECT) {
+ rd_bool_t do_disconnect = rd_false;
+
+ /* If the nodename was changed since the last connect,
+ * close the current connection. */
+
+ rd_kafka_broker_lock(rkb);
+ do_disconnect =
+ (rkb->rkb_connect_epoch != rkb->rkb_nodename_epoch);
+ rd_kafka_broker_unlock(rkb);
+
+ if (do_disconnect)
+ rd_kafka_broker_fail(
+ rkb, LOG_DEBUG,
+ RD_KAFKA_RESP_ERR__TRANSPORT,
+ "Closing connection due to "
+ "nodename change");
+ }
+
+ /* Expedite next reconnect */
+ rkb->rkb_ts_reconnect = 0;
+
+ wakeup = rd_true;
+ break;
+
+ default:
+ rd_kafka_assert(rkb->rkb_rk, !*"unhandled op type");
+ break;
+ }
+
+ if (rko)
+ rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
+
+ return wakeup;
+}
+
+
+
+/**
+ * @brief Serve broker ops.
+ * @returns the number of ops served
+ */
+static RD_WARN_UNUSED_RESULT int
+rd_kafka_broker_ops_serve(rd_kafka_broker_t *rkb, rd_ts_t timeout_us) {
+ rd_kafka_op_t *rko;
+ int cnt = 0;
+
+ while ((rko = rd_kafka_q_pop(rkb->rkb_ops, timeout_us, 0)) &&
+ (cnt++, !rd_kafka_broker_op_serve(rkb, rko)))
+ timeout_us = RD_POLL_NOWAIT;
+
+ return cnt;
+}
+
+/**
+ * @brief Serve broker ops and IOs.
+ *
+ * If a connection exists, poll IO first based on timeout.
+ * Use remaining timeout for ops queue poll.
+ *
+ * If no connection, poll ops queue using timeout.
+ *
+ * Sparse connections: if there's need for a connection, set
+ * timeout to NOWAIT.
+ *
+ * @param abs_timeout Maximum block time (absolute time).
+ *
+ * @returns true on wakeup (broker state machine needs to be served),
+ * else false.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+static RD_WARN_UNUSED_RESULT rd_bool_t
+rd_kafka_broker_ops_io_serve(rd_kafka_broker_t *rkb, rd_ts_t abs_timeout) {
+ rd_ts_t now;
+ rd_bool_t wakeup;
+
+ if (unlikely(rd_kafka_terminating(rkb->rkb_rk)))
+ abs_timeout = rd_clock() + 1000;
+ else if (unlikely(rd_kafka_broker_needs_connection(rkb)))
+ abs_timeout = RD_POLL_NOWAIT;
+ else if (unlikely(abs_timeout == RD_POLL_INFINITE))
+ abs_timeout =
+ rd_clock() + ((rd_ts_t)rd_kafka_max_block_ms * 1000);
+
+
+ if (likely(rkb->rkb_transport != NULL)) {
+ /* Poll and serve IO events and also poll the ops queue.
+ *
+ * The return value indicates if ops_serve() below should
+ * use a timeout or not.
+ *
+ * If there are ops enqueued cut the timeout short so
+ * that they're processed as soon as possible.
+ */
+ if (abs_timeout > 0 && rd_kafka_q_len(rkb->rkb_ops) > 0)
+ abs_timeout = RD_POLL_NOWAIT;
+
+ if (rd_kafka_transport_io_serve(
+ rkb->rkb_transport, rkb->rkb_ops,
+ rd_timeout_remains(abs_timeout)))
+ abs_timeout = RD_POLL_NOWAIT;
+ }
+
+
+ /* Serve broker ops */
+ wakeup =
+ rd_kafka_broker_ops_serve(rkb, rd_timeout_remains_us(abs_timeout));
+
+ rd_atomic64_add(&rkb->rkb_c.wakeups, 1);
+
+ /* An op might have triggered the need for a connection, if so
+ * transition to TRY_CONNECT state. */
+ if (unlikely(rd_kafka_broker_needs_connection(rkb) &&
+ rkb->rkb_state == RD_KAFKA_BROKER_STATE_INIT)) {
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(rkb,
+ RD_KAFKA_BROKER_STATE_TRY_CONNECT);
+ rd_kafka_broker_unlock(rkb);
+ wakeup = rd_true;
+ }
+
+ /* Scan queues for timeouts. */
+ now = rd_clock();
+ if (rd_interval(&rkb->rkb_timeout_scan_intvl, 1000000, now) > 0)
+ rd_kafka_broker_timeout_scan(rkb, now);
+
+ return wakeup;
+}
+
+
+/**
+ * @brief Consumer: Serve the toppars assigned to this broker.
+ *
+ * @returns the minimum Fetch backoff time (abs timestamp) for the
+ * partitions to fetch.
+ *
+ * @locality broker thread
+ */
+static rd_ts_t rd_kafka_broker_consumer_toppars_serve(rd_kafka_broker_t *rkb) {
+ rd_kafka_toppar_t *rktp, *rktp_tmp;
+ rd_ts_t min_backoff = RD_TS_MAX;
+
+ TAILQ_FOREACH_SAFE(rktp, &rkb->rkb_toppars, rktp_rkblink, rktp_tmp) {
+ rd_ts_t backoff;
+
+ /* Serve toppar to update desired rktp state */
+ backoff = rd_kafka_broker_consumer_toppar_serve(rkb, rktp);
+ if (backoff < min_backoff)
+ min_backoff = backoff;
+ }
+
+ return min_backoff;
+}
+
+
+/**
+ * @brief Scan toppar's xmit and producer queue for message timeouts and
+ * enqueue delivery reports for timed out messages.
+ *
+ * @param abs_next_timeout will be set to the next message timeout, or 0
+ * if no timeout.
+ *
+ * @returns the number of messages timed out.
+ *
+ * @locality toppar's broker handler thread
+ * @locks toppar_lock MUST be held
+ */
+static int rd_kafka_broker_toppar_msgq_scan(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ rd_ts_t now,
+ rd_ts_t *abs_next_timeout) {
+ rd_kafka_msgq_t xtimedout = RD_KAFKA_MSGQ_INITIALIZER(xtimedout);
+ rd_kafka_msgq_t qtimedout = RD_KAFKA_MSGQ_INITIALIZER(qtimedout);
+ int xcnt, qcnt, cnt;
+ uint64_t first, last;
+ rd_ts_t next;
+
+ *abs_next_timeout = 0;
+
+ xcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_xmit_msgq, &xtimedout,
+ now, &next);
+ if (next && next < *abs_next_timeout)
+ *abs_next_timeout = next;
+
+ qcnt = rd_kafka_msgq_age_scan(rktp, &rktp->rktp_msgq, &qtimedout, now,
+ &next);
+ if (next && (!*abs_next_timeout || next < *abs_next_timeout))
+ *abs_next_timeout = next;
+
+ cnt = xcnt + qcnt;
+ if (likely(cnt == 0))
+ return 0;
+
+ /* Insert queue-timedout into xmitqueue-timedout in a sorted fashion */
+ rd_kafka_msgq_insert_msgq(&xtimedout, &qtimedout,
+ rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+
+ first = rd_kafka_msgq_first(&xtimedout)->rkm_u.producer.msgid;
+ last = rd_kafka_msgq_last(&xtimedout)->rkm_u.producer.msgid;
+
+ rd_rkb_dbg(rkb, MSG, "TIMEOUT",
+ "%s [%" PRId32
+ "]: timed out %d+%d message(s) "
+ "(MsgId %" PRIu64 "..%" PRIu64
+ "): message.timeout.ms exceeded",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, xcnt,
+ qcnt, first, last);
+
+ /* Trigger delivery report for timed out messages */
+ rd_kafka_dr_msgq(rktp->rktp_rkt, &xtimedout,
+ RD_KAFKA_RESP_ERR__MSG_TIMED_OUT);
+
+ return cnt;
+}
+
+
+/**
+ * @brief Producer: Check this broker's toppars for message timeouts.
+ *
+ * This is only used by the internal broker to enforce message timeouts.
+ *
+ * @returns the next absolute scan time.
+ *
+ * @locality internal broker thread.
+ */
+static rd_ts_t rd_kafka_broker_toppars_timeout_scan(rd_kafka_broker_t *rkb,
+ rd_ts_t now) {
+ rd_kafka_toppar_t *rktp;
+ rd_ts_t next = now + (1000 * 1000);
+
+ TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
+ rd_ts_t this_next;
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (unlikely(rktp->rktp_broker != rkb)) {
+ /* Currently migrating away from this
+ * broker. */
+ rd_kafka_toppar_unlock(rktp);
+ continue;
+ }
+
+ /* Scan queues for msg timeouts */
+ rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &this_next);
+
+ rd_kafka_toppar_unlock(rktp);
+
+ if (this_next && this_next < next)
+ next = this_next;
+ }
+
+ return next;
+}
+
+
+/**
+ * @brief Idle function for the internal broker handle.
+ */
+static void rd_kafka_broker_internal_serve(rd_kafka_broker_t *rkb,
+ rd_ts_t abs_timeout) {
+ int initial_state = rkb->rkb_state;
+ rd_bool_t wakeup;
+
+ if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER) {
+ /* Consumer */
+ do {
+ rd_kafka_broker_consumer_toppars_serve(rkb);
+
+ wakeup = rd_kafka_broker_ops_io_serve(rkb, abs_timeout);
+
+ } while (!rd_kafka_broker_terminating(rkb) &&
+ (int)rkb->rkb_state == initial_state && !wakeup &&
+ !rd_timeout_expired(rd_timeout_remains(abs_timeout)));
+ } else {
+ /* Producer */
+ rd_ts_t next_timeout_scan = 0;
+
+ do {
+ rd_ts_t now = rd_clock();
+
+ if (now >= next_timeout_scan)
+ next_timeout_scan =
+ rd_kafka_broker_toppars_timeout_scan(rkb,
+ now);
+
+ wakeup = rd_kafka_broker_ops_io_serve(
+ rkb, RD_MIN(abs_timeout, next_timeout_scan));
+
+ } while (!rd_kafka_broker_terminating(rkb) &&
+ (int)rkb->rkb_state == initial_state && !wakeup &&
+ !rd_timeout_expired(rd_timeout_remains(abs_timeout)));
+ }
+}
+
+
+/**
+ * @returns the number of requests that may be enqueued before
+ * queue.backpressure.threshold is reached.
+ */
+
+static RD_INLINE unsigned int
+rd_kafka_broker_outbufs_space(rd_kafka_broker_t *rkb) {
+ int r = rkb->rkb_rk->rk_conf.queue_backpressure_thres -
+ rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt);
+ return r < 0 ? 0 : (unsigned int)r;
+}
+
+
+
+/**
+ * @brief Update \p *next_wakeup_ptr to \p maybe_next_wakeup if it is sooner.
+ *
+ * Both parameters are absolute timestamps.
+ * \p maybe_next_wakeup must not be 0.
+ */
+#define rd_kafka_set_next_wakeup(next_wakeup_ptr, maybe_next_wakeup) \
+ do { \
+ rd_ts_t *__n = (next_wakeup_ptr); \
+ rd_ts_t __m = (maybe_next_wakeup); \
+ rd_dassert(__m != 0); \
+ if (__m < *__n) \
+ *__n = __m; \
+ } while (0)
+
+
+/**
+ * @brief Serve a toppar for producing.
+ *
+ * @param next_wakeup will be updated to when the next wake-up/attempt is
+ * desired. Does not take the current value into
+ * consideration, even if it is lower.
+ * @param do_timeout_scan perform msg timeout scan
+ * @param may_send if set to false there is something on the global level
+ * that prohibits sending messages, such as a transactional
+ * state.
+ * @param flushing App is calling flush(): override linger.ms as immediate.
+ *
+ * @returns the number of messages produced.
+ *
+ * @locks none
+ * @locality broker thread
+ */
+static int rd_kafka_toppar_producer_serve(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const rd_kafka_pid_t pid,
+ rd_ts_t now,
+ rd_ts_t *next_wakeup,
+ rd_bool_t do_timeout_scan,
+ rd_bool_t may_send,
+ rd_bool_t flushing) {
+ int cnt = 0;
+ int r;
+ rd_kafka_msg_t *rkm;
+ int move_cnt = 0;
+ int max_requests;
+ int reqcnt;
+ int inflight = 0;
+ uint64_t epoch_base_msgid = 0;
+ rd_bool_t batch_ready = rd_false;
+
+ /* By limiting the number of not-yet-sent buffers (rkb_outbufs) we
+ * provide a backpressure mechanism to the producer loop
+ * which allows larger message batches to accumulate and thus
+ * increase throughput.
+ * This comes at no latency cost since there are already
+ * buffers enqueued waiting for transmission. */
+ max_requests = rd_kafka_broker_outbufs_space(rkb);
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (unlikely(rktp->rktp_broker != rkb)) {
+ /* Currently migrating away from this
+ * broker. */
+ rd_kafka_toppar_unlock(rktp);
+ return 0;
+ }
+
+ if (unlikely(do_timeout_scan)) {
+ int timeoutcnt;
+ rd_ts_t next;
+
+ /* Scan queues for msg timeouts */
+ timeoutcnt =
+ rd_kafka_broker_toppar_msgq_scan(rkb, rktp, now, &next);
+
+ if (next)
+ rd_kafka_set_next_wakeup(next_wakeup, next);
+
+ if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
+ if (!rd_kafka_pid_valid(pid)) {
+ /* If we don't have a PID, we can't transmit
+ * any messages. */
+ rd_kafka_toppar_unlock(rktp);
+ return 0;
+
+ } else if (timeoutcnt > 0) {
+ /* Message timeouts will lead to gaps the in
+ * the message sequence and thus trigger
+ * OutOfOrderSequence errors from the broker.
+ * Bump the epoch to reset the base msgid after
+ * draining all partitions. */
+
+ /* Must not hold toppar lock */
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_idemp_drain_epoch_bump(
+ rkb->rkb_rk, RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "%d message(s) timed out "
+ "on %s [%" PRId32 "]",
+ timeoutcnt, rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition);
+ return 0;
+ }
+ }
+ }
+
+ if (unlikely(!may_send)) {
+ /* Sends prohibited on the broker or instance level */
+ max_requests = 0;
+ } else if (unlikely(rd_kafka_fatal_error_code(rkb->rkb_rk))) {
+ /* Fatal error has been raised, don't produce. */
+ max_requests = 0;
+ } else if (unlikely(RD_KAFKA_TOPPAR_IS_PAUSED(rktp))) {
+ /* Partition is paused */
+ max_requests = 0;
+ } else if (unlikely(rd_kafka_is_transactional(rkb->rkb_rk) &&
+ !rd_kafka_txn_toppar_may_send_msg(rktp))) {
+ /* Partition not registered in transaction yet */
+ max_requests = 0;
+ } else if (max_requests > 0) {
+ /* Move messages from locked partition produce queue
+ * to broker-local xmit queue. */
+ if ((move_cnt = rktp->rktp_msgq.rkmq_msg_cnt) > 0) {
+
+ rd_kafka_msgq_insert_msgq(
+ &rktp->rktp_xmit_msgq, &rktp->rktp_msgq,
+ rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+ }
+
+ /* Calculate maximum wait-time to honour
+ * queue.buffering.max.ms contract.
+ * Unless flushing in which case immediate
+ * wakeups are allowed. */
+ batch_ready = rd_kafka_msgq_allow_wakeup_at(
+ &rktp->rktp_msgq, &rktp->rktp_xmit_msgq,
+ /* Only update the broker thread wakeup time
+ * if connection is up and messages can actually be
+ * sent, otherwise the wakeup can't do much. */
+ rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP ? next_wakeup
+ : NULL,
+ now, flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us,
+ /* Batch message count threshold */
+ rkb->rkb_rk->rk_conf.batch_num_messages,
+ /* Batch total size threshold */
+ rkb->rkb_rk->rk_conf.batch_size);
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+
+
+ if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
+ /* Update the partition's cached PID, and reset the
+ * base msg sequence if necessary */
+ rd_bool_t did_purge = rd_false;
+
+ if (unlikely(!rd_kafka_pid_eq(pid, rktp->rktp_eos.pid))) {
+ /* Flush any ProduceRequests for this partition in the
+ * output buffer queue to speed up recovery. */
+ rd_kafka_broker_bufq_purge_by_toppar(
+ rkb, &rkb->rkb_outbufs, RD_KAFKAP_Produce, rktp,
+ RD_KAFKA_RESP_ERR__RETRY);
+ did_purge = rd_true;
+
+ if (rd_kafka_pid_valid(rktp->rktp_eos.pid))
+ rd_rkb_dbg(
+ rkb, QUEUE, "TOPPAR",
+ "%.*s [%" PRId32
+ "] PID has changed: "
+ "must drain requests for all "
+ "partitions before resuming reset "
+ "of PID",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+ }
+
+ inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight);
+
+ if (unlikely(rktp->rktp_eos.wait_drain)) {
+ if (inflight) {
+ /* Waiting for in-flight requests to
+ * drain/finish before producing anything more.
+ * This is used to recover to a consistent
+ * state when the partition leader
+ * has changed, or timed out messages
+ * have been removed from the queue. */
+
+ rd_rkb_dbg(
+ rkb, QUEUE, "TOPPAR",
+ "%.*s [%" PRId32
+ "] waiting for "
+ "%d in-flight request(s) to drain "
+ "from queue before continuing "
+ "to produce",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, inflight);
+
+ /* Flush any ProduceRequests for this
+ * partition in the output buffer queue to
+ * speed up draining. */
+ if (!did_purge)
+ rd_kafka_broker_bufq_purge_by_toppar(
+ rkb, &rkb->rkb_outbufs,
+ RD_KAFKAP_Produce, rktp,
+ RD_KAFKA_RESP_ERR__RETRY);
+
+ return 0;
+ }
+
+ rd_rkb_dbg(rkb, QUEUE, "TOPPAR",
+ "%.*s [%" PRId32
+ "] all in-flight requests "
+ "drained from queue",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+
+ rktp->rktp_eos.wait_drain = rd_false;
+ }
+
+ /* Limit the number of in-flight requests (per partition)
+ * to the broker's sequence de-duplication window. */
+ max_requests = RD_MIN(max_requests,
+ RD_KAFKA_IDEMP_MAX_INFLIGHT - inflight);
+ }
+
+
+ /* Check if allowed to create and enqueue a ProduceRequest */
+ if (max_requests <= 0)
+ return 0;
+
+ r = rktp->rktp_xmit_msgq.rkmq_msg_cnt;
+ if (r == 0)
+ return 0;
+
+ rd_kafka_msgq_verify_order(rktp, &rktp->rktp_xmit_msgq, 0, rd_false);
+
+ rd_rkb_dbg(rkb, QUEUE, "TOPPAR",
+ "%.*s [%" PRId32
+ "] %d message(s) in "
+ "xmit queue (%d added from partition queue)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, r, move_cnt);
+
+ rkm = TAILQ_FIRST(&rktp->rktp_xmit_msgq.rkmq_msgs);
+ rd_dassert(rkm != NULL);
+
+ if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
+ /* Update the partition's cached PID, and reset the
+ * base msg sequence if necessary */
+ if (unlikely(!rd_kafka_pid_eq(pid, rktp->rktp_eos.pid))) {
+ /* Attempt to change the pid, it will fail if there
+ * are outstanding messages in-flight, in which case
+ * we eventually come back here to retry. */
+ if (!rd_kafka_toppar_pid_change(
+ rktp, pid, rkm->rkm_u.producer.msgid))
+ return 0;
+ }
+
+ rd_kafka_toppar_lock(rktp);
+ /* Idempotent producer epoch base msgid, this is passed to the
+ * ProduceRequest and msgset writer to adjust the protocol-level
+ * per-message sequence number. */
+ epoch_base_msgid = rktp->rktp_eos.epoch_base_msgid;
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ if (unlikely(rkb->rkb_state != RD_KAFKA_BROKER_STATE_UP)) {
+ /* There are messages to send but connection is not up. */
+ rd_rkb_dbg(rkb, BROKER, "TOPPAR",
+ "%.*s [%" PRId32
+ "] "
+ "%d message(s) queued but broker not up",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, r);
+ rkb->rkb_persistconn.internal++;
+ return 0;
+ }
+
+ /* Attempt to fill the batch size, but limit our waiting
+ * to queue.buffering.max.ms, batch.num.messages, and batch.size. */
+ if (!batch_ready) {
+ /* Wait for more messages or queue.buffering.max.ms
+ * to expire. */
+ return 0;
+ }
+
+ /* Send Produce requests for this toppar, honouring the
+ * queue backpressure threshold. */
+ for (reqcnt = 0; reqcnt < max_requests; reqcnt++) {
+ r = rd_kafka_ProduceRequest(rkb, rktp, pid, epoch_base_msgid);
+ if (likely(r > 0))
+ cnt += r;
+ else
+ break;
+ }
+
+ /* Update the allowed wake-up time based on remaining messages
+ * in the queue. */
+ if (cnt > 0) {
+ rd_kafka_toppar_lock(rktp);
+ batch_ready = rd_kafka_msgq_allow_wakeup_at(
+ &rktp->rktp_msgq, &rktp->rktp_xmit_msgq, next_wakeup, now,
+ flushing ? 1 : rkb->rkb_rk->rk_conf.buffering_max_us,
+ /* Batch message count threshold */
+ rkb->rkb_rk->rk_conf.batch_num_messages,
+ /* Batch total size threshold */
+ rkb->rkb_rk->rk_conf.batch_size);
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ return cnt;
+}
+
+
+
+/**
+ * @brief Produce from all toppars assigned to this broker.
+ *
+ * @param next_wakeup is updated if the next IO/ops timeout should be
+ * less than the input value (i.e., sooner).
+ *
+ * @returns the total number of messages produced.
+ */
+static int rd_kafka_broker_produce_toppars(rd_kafka_broker_t *rkb,
+ rd_ts_t now,
+ rd_ts_t *next_wakeup,
+ rd_bool_t do_timeout_scan) {
+ rd_kafka_toppar_t *rktp;
+ int cnt = 0;
+ rd_ts_t ret_next_wakeup = *next_wakeup;
+ rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER;
+ rd_bool_t may_send = rd_true;
+ rd_bool_t flushing = rd_false;
+
+ /* Round-robin serve each toppar. */
+ rktp = rkb->rkb_active_toppar_next;
+ if (unlikely(!rktp))
+ return 0;
+
+ if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
+ /* Idempotent producer: get a copy of the current pid. */
+ pid = rd_kafka_idemp_get_pid(rkb->rkb_rk);
+
+ /* If we don't have a valid pid, or the transaction state
+ * prohibits sending messages, return immedatiely,
+ * unless the per-partition timeout scan needs to run.
+ * The broker threads are woken up when a PID is acquired
+ * or the transaction state changes. */
+ if (!rd_kafka_pid_valid(pid))
+ may_send = rd_false;
+ else if (rd_kafka_is_transactional(rkb->rkb_rk) &&
+ !rd_kafka_txn_may_send_msg(rkb->rkb_rk))
+ may_send = rd_false;
+
+ if (!may_send && !do_timeout_scan)
+ return 0;
+ }
+
+ flushing = may_send && rd_atomic32_get(&rkb->rkb_rk->rk_flushing) > 0;
+
+ do {
+ rd_ts_t this_next_wakeup = ret_next_wakeup;
+
+ /* Try producing toppar */
+ cnt += rd_kafka_toppar_producer_serve(
+ rkb, rktp, pid, now, &this_next_wakeup, do_timeout_scan,
+ may_send, flushing);
+
+ rd_kafka_set_next_wakeup(&ret_next_wakeup, this_next_wakeup);
+
+ } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
+ rktp_activelink)) !=
+ rkb->rkb_active_toppar_next);
+
+ /* Update next starting toppar to produce in round-robin list. */
+ rd_kafka_broker_active_toppar_next(
+ rkb,
+ CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp, rktp_activelink));
+
+ *next_wakeup = ret_next_wakeup;
+
+ return cnt;
+}
+
+/**
+ * @brief Producer serving
+ */
+static void rd_kafka_broker_producer_serve(rd_kafka_broker_t *rkb,
+ rd_ts_t abs_timeout) {
+ rd_interval_t timeout_scan;
+ unsigned int initial_state = rkb->rkb_state;
+ rd_ts_t now;
+ int cnt = 0;
+
+ rd_interval_init(&timeout_scan);
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ rd_kafka_broker_lock(rkb);
+
+ while (!rd_kafka_broker_terminating(rkb) &&
+ rkb->rkb_state == initial_state &&
+ (abs_timeout > (now = rd_clock()))) {
+ rd_bool_t do_timeout_scan;
+ rd_ts_t next_wakeup = abs_timeout;
+ rd_bool_t overshot;
+
+ rd_kafka_broker_unlock(rkb);
+
+ /* Perform timeout scan on first iteration, thus
+ * on each state change, to make sure messages in
+ * partition rktp_xmit_msgq are timed out before
+ * being attempted to re-transmit. */
+ overshot = rd_interval(&timeout_scan, 1000 * 1000, now) >= 0;
+ do_timeout_scan = cnt++ == 0 || overshot;
+
+ rd_kafka_broker_produce_toppars(rkb, now, &next_wakeup,
+ do_timeout_scan);
+
+ /* Check and move retry buffers */
+ if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0))
+ rd_kafka_broker_retry_bufs_move(rkb, &next_wakeup);
+
+ if (rd_kafka_broker_ops_io_serve(rkb, next_wakeup))
+ return; /* Wakeup */
+
+ rd_kafka_broker_lock(rkb);
+ }
+
+ rd_kafka_broker_unlock(rkb);
+}
+
+
+
+/**
+ * Consumer serving
+ */
+static void rd_kafka_broker_consumer_serve(rd_kafka_broker_t *rkb,
+ rd_ts_t abs_timeout) {
+ unsigned int initial_state = rkb->rkb_state;
+ rd_ts_t now;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ rd_kafka_broker_lock(rkb);
+
+ while (!rd_kafka_broker_terminating(rkb) &&
+ rkb->rkb_state == initial_state &&
+ abs_timeout > (now = rd_clock())) {
+ rd_ts_t min_backoff;
+
+ rd_kafka_broker_unlock(rkb);
+
+ /* Serve toppars */
+ min_backoff = rd_kafka_broker_consumer_toppars_serve(rkb);
+ if (rkb->rkb_ts_fetch_backoff > now &&
+ rkb->rkb_ts_fetch_backoff < min_backoff)
+ min_backoff = rkb->rkb_ts_fetch_backoff;
+
+ if (min_backoff < RD_TS_MAX &&
+ rkb->rkb_state != RD_KAFKA_BROKER_STATE_UP) {
+ /* There are partitions to fetch but the
+ * connection is not up. */
+ rkb->rkb_persistconn.internal++;
+ }
+
+ /* Send Fetch request message for all underflowed toppars
+ * if the connection is up and there are no outstanding
+ * fetch requests for this connection. */
+ if (!rkb->rkb_fetching &&
+ rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP) {
+ if (min_backoff < now) {
+ rd_kafka_broker_fetch_toppars(rkb, now);
+ min_backoff = abs_timeout;
+ } else if (min_backoff < RD_TS_MAX)
+ rd_rkb_dbg(rkb, FETCH, "FETCH",
+ "Fetch backoff for %" PRId64 "ms",
+ (min_backoff - now) / 1000);
+ } else {
+ /* Nothing needs to be done, next wakeup
+ * is from ops, state change, IO, or this timeout */
+ min_backoff = abs_timeout;
+ }
+
+ /* Check and move retry buffers */
+ if (unlikely(rd_atomic32_get(&rkb->rkb_retrybufs.rkbq_cnt) > 0))
+ rd_kafka_broker_retry_bufs_move(rkb, &min_backoff);
+
+ if (min_backoff > abs_timeout)
+ min_backoff = abs_timeout;
+
+ if (rd_kafka_broker_ops_io_serve(rkb, min_backoff))
+ return; /* Wakeup */
+
+ rd_kafka_broker_lock(rkb);
+ }
+
+ rd_kafka_broker_unlock(rkb);
+}
+
+
+
+/**
+ * @brief Check if connections.max.idle.ms has been exceeded and if so
+ * close the connection.
+ *
+ * @remark Must only be called if connections.max.idle.ms > 0 and
+ * the current broker state is UP (or UPDATE).
+ *
+ * @locality broker thread
+ */
+static RD_INLINE void rd_kafka_broker_idle_check(rd_kafka_broker_t *rkb) {
+ rd_ts_t ts_send = rd_atomic64_get(&rkb->rkb_c.ts_send);
+ rd_ts_t ts_recv = rd_atomic64_get(&rkb->rkb_c.ts_recv);
+ rd_ts_t ts_last_activity = RD_MAX(ts_send, ts_recv);
+ int idle_ms;
+
+ /* If nothing has been sent yet, use the connection time as
+ * last activity. */
+ if (unlikely(!ts_last_activity))
+ ts_last_activity = rkb->rkb_ts_state;
+
+ idle_ms = (int)((rd_clock() - ts_last_activity) / 1000);
+
+ if (likely(idle_ms < rkb->rkb_rk->rk_conf.connections_max_idle_ms))
+ return;
+
+ rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__TRANSPORT,
+ "Connection max idle time exceeded "
+ "(%dms since last activity)",
+ idle_ms);
+}
+
+
+/**
+ * @brief Serve broker thread according to client type.
+ * May be called in any broker state.
+ *
+ * This function is to be called from the state machine in
+ * rd_kafka_broker_thread_main, and will return when
+ * there was a state change, or the handle is terminating.
+ *
+ * Broker threads are triggered by three things:
+ * - Ops from other parts of librdkafka / app.
+ * This is the rkb_ops queue which is served from
+ * rd_kafka_broker_ops_io_serve().
+ * - IO from broker socket.
+ * The ops queue is also IO-triggered to provide
+ * quick wakeup when thread is blocking on IO.
+ * Also serverd from rd_kafka_broker_ops_io_serve().
+ * When there is no broker socket only the ops
+ * queue is served.
+ * - Ops/IO timeout when there were no ops or
+ * IO events within a variable timeout.
+ *
+ * For each iteration of the loops in producer_serve(), consumer_serve(),
+ * etc, the Ops and IO are polled, and the client type specific
+ * logic is executed. For the consumer this logic checks which partitions
+ * to fetch or backoff, and sends Fetch requests.
+ * The producer checks for messages to batch and transmit.
+ * All types check for request timeouts, etc.
+ *
+ * Wakeups
+ * =======
+ * The logic returns a next wakeup time which controls how long the
+ * next Ops/IO poll may block before the logic wants to run again;
+ * this is typically controlled by `linger.ms` on the Producer
+ * and fetch backoffs on the consumer.
+ *
+ * Remote threads may also want to wake up the Ops/IO poll so that
+ * the logic is run more quickly. For example when a new message
+ * is enqueued by produce() it is important that it is batched
+ * and transmitted within the configured `linger.ms`.
+ *
+ * Any op enqueued on the broker ops queue (rkb_ops) will automatically
+ * trigger a wakeup of the broker thread (either by wakeup_fd IO event
+ * or by the conditional variable of rkb_ops being triggered - or both).
+ *
+ * Produced messages are not enqueued on the rkb_ops queue but on
+ * the partition's rktp_msgq message queue. To provide quick wakeups
+ * the partition has a reference to the partition's current leader broker
+ * thread's rkb_ops queue, rktp_msgq_wakeup_q.
+ * When enqueuing a message on the partition queue and the queue was
+ * previously empty, the rktp_msgq_wakeup_q (which is rkb_ops) is woken up
+ * by rd_kafka_q_yield(), which sets a YIELD flag and triggers the cond var
+ * to wake up the broker thread (without allocating and enqueuing an rko).
+ * This also triggers the wakeup_fd of rkb_ops, if necessary.
+ *
+ * When sparse connections is enabled the broker will linger in the
+ * INIT state until there's a need for a connection, in which case
+ * it will set its state to DOWN to trigger the connection.
+ * This is controlled both by the shared rkb_persistconn atomic counters
+ * that may be updated from other parts of the code, as well as the
+ * temporary per broker_serve() rkb_persistconn.internal counter which
+ * is used by the broker handler code to detect if a connection is needed,
+ * such as when a partition is being produced to.
+ *
+ *
+ * @param timeout_ms The maximum timeout for blocking Ops/IO.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+static void rd_kafka_broker_serve(rd_kafka_broker_t *rkb, int timeout_ms) {
+ rd_ts_t abs_timeout;
+
+ if (unlikely(rd_kafka_terminating(rkb->rkb_rk) ||
+ timeout_ms == RD_POLL_NOWAIT))
+ timeout_ms = 1;
+ else if (timeout_ms == RD_POLL_INFINITE)
+ timeout_ms = rd_kafka_max_block_ms;
+
+ abs_timeout = rd_timeout_init(timeout_ms);
+ /* Must be a valid absolute time from here on. */
+ rd_assert(abs_timeout > 0);
+
+ /* rkb_persistconn.internal is the per broker_serve()
+ * automatic counter that keeps track of anything
+ * in the producer/consumer logic needs this broker connection
+ * to be up.
+ * The value is reset here on each serve(). If there are queued
+ * requests we know right away that a connection is needed. */
+ rkb->rkb_persistconn.internal =
+ rd_atomic32_get(&rkb->rkb_outbufs.rkbq_cnt) > 0;
+
+ if (rkb->rkb_source == RD_KAFKA_INTERNAL) {
+ rd_kafka_broker_internal_serve(rkb, abs_timeout);
+ return;
+ }
+
+ if (rkb->rkb_rk->rk_type == RD_KAFKA_PRODUCER)
+ rd_kafka_broker_producer_serve(rkb, abs_timeout);
+ else if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER)
+ rd_kafka_broker_consumer_serve(rkb, abs_timeout);
+
+ if (rkb->rkb_rk->rk_conf.connections_max_idle_ms &&
+ rkb->rkb_state == RD_KAFKA_BROKER_STATE_UP)
+ rd_kafka_broker_idle_check(rkb);
+}
+
+
+/**
+ * @returns true if all broker addresses have been tried.
+ *
+ * @locality broker thread
+ * @locks_required none
+ * @locks_acquired none
+ */
+static rd_bool_t
+rd_kafka_broker_addresses_exhausted(const rd_kafka_broker_t *rkb) {
+ return !rkb->rkb_rsal || rkb->rkb_rsal->rsal_cnt == 0 ||
+ rkb->rkb_rsal->rsal_curr + 1 == rkb->rkb_rsal->rsal_cnt;
+}
+
+
+static int rd_kafka_broker_thread_main(void *arg) {
+ rd_kafka_broker_t *rkb = arg;
+ rd_kafka_t *rk = rkb->rkb_rk;
+
+ rd_kafka_set_thread_name("%s", rkb->rkb_name);
+ rd_kafka_set_thread_sysname("rdk:broker%" PRId32, rkb->rkb_nodeid);
+
+ rd_kafka_interceptors_on_thread_start(rk, RD_KAFKA_THREAD_BROKER);
+
+ (void)rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
+
+ /* Our own refcount was increased just prior to thread creation,
+ * when refcount drops to 1 it is just us left and the broker
+ * thread should terminate. */
+
+ /* Acquire lock (which was held by thread creator during creation)
+ * to synchronise state. */
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_unlock(rkb);
+
+ rd_rkb_dbg(rkb, BROKER, "BRKMAIN", "Enter main broker thread");
+
+ while (!rd_kafka_broker_terminating(rkb)) {
+ int backoff;
+ int r;
+ rd_kafka_broker_state_t orig_state;
+
+ redo:
+ orig_state = rkb->rkb_state;
+
+ switch (rkb->rkb_state) {
+ case RD_KAFKA_BROKER_STATE_INIT:
+ /* Check if there is demand for a connection
+ * to this broker, if so jump to TRY_CONNECT state. */
+ if (!rd_kafka_broker_needs_connection(rkb)) {
+ rd_kafka_broker_serve(rkb,
+ rd_kafka_max_block_ms);
+ break;
+ }
+
+ /* The INIT state also exists so that an initial
+ * connection failure triggers a state transition
+ * which might trigger a ALL_BROKERS_DOWN error. */
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
+ rd_kafka_broker_unlock(rkb);
+ goto redo; /* effectively a fallthru to TRY_CONNECT */
+
+ case RD_KAFKA_BROKER_STATE_DOWN:
+ rd_kafka_broker_lock(rkb);
+ if (rkb->rkb_rk->rk_conf.sparse_connections)
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_INIT);
+ else
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_TRY_CONNECT);
+ rd_kafka_broker_unlock(rkb);
+ goto redo; /* effectively a fallthru to TRY_CONNECT */
+
+ case RD_KAFKA_BROKER_STATE_TRY_CONNECT:
+ if (rkb->rkb_source == RD_KAFKA_INTERNAL) {
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_UP);
+ rd_kafka_broker_unlock(rkb);
+ break;
+ }
+
+ if (unlikely(rd_kafka_terminating(rkb->rkb_rk)))
+ rd_kafka_broker_serve(rkb, 1000);
+
+ if (!rd_kafka_sasl_ready(rkb->rkb_rk)) {
+ /* SASL provider not yet ready. */
+ rd_kafka_broker_serve(rkb,
+ rd_kafka_max_block_ms);
+ /* Continue while loop to try again (as long as
+ * we are not terminating). */
+ continue;
+ }
+
+ /* Throttle & jitter reconnects to avoid
+ * thundering horde of reconnecting clients after
+ * a broker / network outage. Issue #403 */
+ backoff =
+ rd_kafka_broker_reconnect_backoff(rkb, rd_clock());
+ if (backoff > 0) {
+ rd_rkb_dbg(rkb, BROKER, "RECONNECT",
+ "Delaying next reconnect by %dms",
+ backoff);
+ rd_kafka_broker_serve(rkb, (int)backoff);
+ continue;
+ }
+
+ /* Initiate asynchronous connection attempt.
+ * Only the host lookup is blocking here. */
+ r = rd_kafka_broker_connect(rkb);
+ if (r == -1) {
+ /* Immediate failure, most likely host
+ * resolving failed.
+ * Try the next resolve result until we've
+ * tried them all, in which case we sleep a
+ * short while to avoid busy looping. */
+ if (rd_kafka_broker_addresses_exhausted(rkb))
+ rd_kafka_broker_serve(
+ rkb, rd_kafka_max_block_ms);
+ } else if (r == 0) {
+ /* Broker has no hostname yet, wait
+ * for hostname to be set and connection
+ * triggered by received OP_CONNECT. */
+ rd_kafka_broker_serve(rkb,
+ rd_kafka_max_block_ms);
+ } else {
+ /* Connection in progress, state will
+ * have changed to STATE_CONNECT. */
+ }
+
+ break;
+
+ case RD_KAFKA_BROKER_STATE_CONNECT:
+ case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE:
+ case RD_KAFKA_BROKER_STATE_AUTH_LEGACY:
+ case RD_KAFKA_BROKER_STATE_AUTH_REQ:
+ case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE:
+ case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY:
+ /* Asynchronous connect in progress. */
+ rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms);
+
+ /* Connect failure.
+ * Try the next resolve result until we've
+ * tried them all, in which case we back off the next
+ * connection attempt to avoid busy looping. */
+ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN &&
+ rd_kafka_broker_addresses_exhausted(rkb))
+ rd_kafka_broker_update_reconnect_backoff(
+ rkb, &rkb->rkb_rk->rk_conf, rd_clock());
+ else if (
+ rkb->rkb_state == orig_state &&
+ rd_clock() >=
+ (rkb->rkb_ts_connect +
+ (rd_ts_t)rk->rk_conf
+ .socket_connection_setup_timeout_ms *
+ 1000))
+ rd_kafka_broker_fail(
+ rkb, LOG_WARNING,
+ RD_KAFKA_RESP_ERR__TRANSPORT,
+ "Connection setup timed out in state %s",
+ rd_kafka_broker_state_names
+ [rkb->rkb_state]);
+
+ break;
+
+ case RD_KAFKA_BROKER_STATE_UPDATE:
+ /* FALLTHRU */
+ case RD_KAFKA_BROKER_STATE_UP:
+ rd_kafka_broker_serve(rkb, rd_kafka_max_block_ms);
+
+ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_UPDATE) {
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(
+ rkb, RD_KAFKA_BROKER_STATE_UP);
+ rd_kafka_broker_unlock(rkb);
+ }
+ break;
+ }
+
+ if (rd_kafka_terminating(rkb->rkb_rk)) {
+ /* Handle is terminating: fail the send+retry queue
+ * to speed up termination, otherwise we'll
+ * need to wait for request timeouts. */
+ r = rd_kafka_broker_bufq_timeout_scan(
+ rkb, 0, &rkb->rkb_outbufs, NULL, -1,
+ RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0);
+ r += rd_kafka_broker_bufq_timeout_scan(
+ rkb, 0, &rkb->rkb_retrybufs, NULL, -1,
+ RD_KAFKA_RESP_ERR__DESTROY, 0, NULL, 0);
+ rd_rkb_dbg(
+ rkb, BROKER, "TERMINATE",
+ "Handle is terminating in state %s: "
+ "%d refcnts (%p), %d toppar(s), "
+ "%d active toppar(s), "
+ "%d outbufs, %d waitresps, %d retrybufs: "
+ "failed %d request(s) in retry+outbuf",
+ rd_kafka_broker_state_names[rkb->rkb_state],
+ rd_refcnt_get(&rkb->rkb_refcnt), &rkb->rkb_refcnt,
+ rkb->rkb_toppar_cnt, rkb->rkb_active_toppar_cnt,
+ (int)rd_kafka_bufq_cnt(&rkb->rkb_outbufs),
+ (int)rd_kafka_bufq_cnt(&rkb->rkb_waitresps),
+ (int)rd_kafka_bufq_cnt(&rkb->rkb_retrybufs), r);
+ }
+ }
+
+ if (rkb->rkb_source != RD_KAFKA_INTERNAL) {
+ rd_kafka_wrlock(rkb->rkb_rk);
+ TAILQ_REMOVE(&rkb->rkb_rk->rk_brokers, rkb, rkb_link);
+ if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb))
+ rd_list_remove(&rkb->rkb_rk->rk_broker_by_id, rkb);
+ (void)rd_atomic32_sub(&rkb->rkb_rk->rk_broker_cnt, 1);
+ rd_kafka_wrunlock(rkb->rkb_rk);
+ }
+
+ rd_kafka_broker_fail(rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__DESTROY,
+ "Broker handle is terminating");
+
+ /* Disable and drain ops queue.
+ * Simply purging the ops queue risks leaving dangling references
+ * for ops such as PARTITION_JOIN/PARTITION_LEAVE where the broker
+ * reference is not maintained in the rko (but in rktp_next_leader).
+ * #1596 */
+ rd_kafka_q_disable(rkb->rkb_ops);
+ while (rd_kafka_broker_ops_serve(rkb, RD_POLL_NOWAIT))
+ ;
+
+ rd_kafka_broker_destroy(rkb);
+
+#if WITH_SSL
+ /* Remove OpenSSL per-thread error state to avoid memory leaks */
+#if OPENSSL_VERSION_NUMBER >= 0x10100000L && !defined(LIBRESSL_VERSION_NUMBER)
+ /*(OpenSSL libraries handle thread init and deinit)
+ * https://github.com/openssl/openssl/pull/1048 */
+#elif OPENSSL_VERSION_NUMBER >= 0x10000000L
+ ERR_remove_thread_state(NULL);
+#endif
+#endif
+
+ rd_kafka_interceptors_on_thread_exit(rk, RD_KAFKA_THREAD_BROKER);
+
+ rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
+
+ return 0;
+}
+
+
+/**
+ * Final destructor. Refcnt must be 0.
+ */
+void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb) {
+
+ rd_assert(thrd_is_current(rkb->rkb_thread));
+ rd_assert(TAILQ_EMPTY(&rkb->rkb_monitors));
+ rd_assert(TAILQ_EMPTY(&rkb->rkb_outbufs.rkbq_bufs));
+ rd_assert(TAILQ_EMPTY(&rkb->rkb_waitresps.rkbq_bufs));
+ rd_assert(TAILQ_EMPTY(&rkb->rkb_retrybufs.rkbq_bufs));
+ rd_assert(TAILQ_EMPTY(&rkb->rkb_toppars));
+
+ if (rkb->rkb_source != RD_KAFKA_INTERNAL &&
+ (rkb->rkb_rk->rk_conf.security_protocol ==
+ RD_KAFKA_PROTO_SASL_PLAINTEXT ||
+ rkb->rkb_rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL))
+ rd_kafka_sasl_broker_term(rkb);
+
+ if (rkb->rkb_wakeup_fd[0] != -1)
+ rd_socket_close(rkb->rkb_wakeup_fd[0]);
+ if (rkb->rkb_wakeup_fd[1] != -1)
+ rd_socket_close(rkb->rkb_wakeup_fd[1]);
+
+ if (rkb->rkb_recv_buf)
+ rd_kafka_buf_destroy(rkb->rkb_recv_buf);
+
+ if (rkb->rkb_rsal)
+ rd_sockaddr_list_destroy(rkb->rkb_rsal);
+
+ if (rkb->rkb_ApiVersions)
+ rd_free(rkb->rkb_ApiVersions);
+ rd_free(rkb->rkb_origname);
+
+ rd_kafka_q_purge(rkb->rkb_ops);
+ rd_kafka_q_destroy_owner(rkb->rkb_ops);
+
+ rd_avg_destroy(&rkb->rkb_avg_int_latency);
+ rd_avg_destroy(&rkb->rkb_avg_outbuf_latency);
+ rd_avg_destroy(&rkb->rkb_avg_rtt);
+ rd_avg_destroy(&rkb->rkb_avg_throttle);
+
+ mtx_lock(&rkb->rkb_logname_lock);
+ rd_free(rkb->rkb_logname);
+ rkb->rkb_logname = NULL;
+ mtx_unlock(&rkb->rkb_logname_lock);
+ mtx_destroy(&rkb->rkb_logname_lock);
+
+ mtx_destroy(&rkb->rkb_lock);
+
+ rd_refcnt_destroy(&rkb->rkb_refcnt);
+
+ rd_free(rkb);
+}
+
+
+/**
+ * Returns the internal broker with refcnt increased.
+ */
+rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk) {
+ rd_kafka_broker_t *rkb;
+
+ mtx_lock(&rk->rk_internal_rkb_lock);
+ rkb = rk->rk_internal_rkb;
+ if (rkb)
+ rd_kafka_broker_keep(rkb);
+ mtx_unlock(&rk->rk_internal_rkb_lock);
+
+ return rkb;
+}
+
+
+/**
+ * Adds a broker with refcount set to 1.
+ * If 'source' is RD_KAFKA_INTERNAL an internal broker is added
+ * that does not actually represent or connect to a real broker, it is used
+ * for serving unassigned toppar's op queues.
+ *
+ * Locks: rd_kafka_wrlock(rk) must be held
+ */
+rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk,
+ rd_kafka_confsource_t source,
+ rd_kafka_secproto_t proto,
+ const char *name,
+ uint16_t port,
+ int32_t nodeid) {
+ rd_kafka_broker_t *rkb;
+#ifndef _WIN32
+ int r;
+ sigset_t newset, oldset;
+#endif
+
+ rkb = rd_calloc(1, sizeof(*rkb));
+
+ if (source != RD_KAFKA_LOGICAL) {
+ rd_kafka_mk_nodename(rkb->rkb_nodename,
+ sizeof(rkb->rkb_nodename), name, port);
+ rd_kafka_mk_brokername(rkb->rkb_name, sizeof(rkb->rkb_name),
+ proto, rkb->rkb_nodename, nodeid,
+ source);
+ } else {
+ /* Logical broker does not have a nodename (address) or port
+ * at initialization. */
+ rd_snprintf(rkb->rkb_name, sizeof(rkb->rkb_name), "%s", name);
+ }
+
+ rkb->rkb_source = source;
+ rkb->rkb_rk = rk;
+ rkb->rkb_ts_state = rd_clock();
+ rkb->rkb_nodeid = nodeid;
+ rkb->rkb_proto = proto;
+ rkb->rkb_port = port;
+ rkb->rkb_origname = rd_strdup(name);
+
+ mtx_init(&rkb->rkb_lock, mtx_plain);
+ mtx_init(&rkb->rkb_logname_lock, mtx_plain);
+ rkb->rkb_logname = rd_strdup(rkb->rkb_name);
+ TAILQ_INIT(&rkb->rkb_toppars);
+ CIRCLEQ_INIT(&rkb->rkb_active_toppars);
+ TAILQ_INIT(&rkb->rkb_monitors);
+ rd_kafka_bufq_init(&rkb->rkb_outbufs);
+ rd_kafka_bufq_init(&rkb->rkb_waitresps);
+ rd_kafka_bufq_init(&rkb->rkb_retrybufs);
+ rkb->rkb_ops = rd_kafka_q_new(rk);
+ rd_avg_init(&rkb->rkb_avg_int_latency, RD_AVG_GAUGE, 0, 100 * 1000, 2,
+ rk->rk_conf.stats_interval_ms ? 1 : 0);
+ rd_avg_init(&rkb->rkb_avg_outbuf_latency, RD_AVG_GAUGE, 0, 100 * 1000,
+ 2, rk->rk_conf.stats_interval_ms ? 1 : 0);
+ rd_avg_init(&rkb->rkb_avg_rtt, RD_AVG_GAUGE, 0, 500 * 1000, 2,
+ rk->rk_conf.stats_interval_ms ? 1 : 0);
+ rd_avg_init(&rkb->rkb_avg_throttle, RD_AVG_GAUGE, 0, 5000 * 1000, 2,
+ rk->rk_conf.stats_interval_ms ? 1 : 0);
+ rd_refcnt_init(&rkb->rkb_refcnt, 0);
+ rd_kafka_broker_keep(rkb); /* rk_broker's refcount */
+
+ rkb->rkb_reconnect_backoff_ms = rk->rk_conf.reconnect_backoff_ms;
+ rd_atomic32_init(&rkb->rkb_persistconn.coord, 0);
+
+ rd_atomic64_init(&rkb->rkb_c.ts_send, 0);
+ rd_atomic64_init(&rkb->rkb_c.ts_recv, 0);
+
+ /* ApiVersion fallback interval */
+ if (rkb->rkb_rk->rk_conf.api_version_request) {
+ rd_interval_init(&rkb->rkb_ApiVersion_fail_intvl);
+ rd_interval_fixed(
+ &rkb->rkb_ApiVersion_fail_intvl,
+ (rd_ts_t)rkb->rkb_rk->rk_conf.api_version_fallback_ms *
+ 1000);
+ }
+
+ rd_interval_init(&rkb->rkb_suppress.unsupported_compression);
+ rd_interval_init(&rkb->rkb_suppress.unsupported_kip62);
+ rd_interval_init(&rkb->rkb_suppress.fail_error);
+
+#ifndef _WIN32
+ /* Block all signals in newly created thread.
+ * To avoid race condition we block all signals in the calling
+ * thread, which the new thread will inherit its sigmask from,
+ * and then restore the original sigmask of the calling thread when
+ * we're done creating the thread.
+ * NOTE: term_sig remains unblocked since we use it on termination
+ * to quickly interrupt system calls. */
+ sigemptyset(&oldset);
+ sigfillset(&newset);
+ if (rkb->rkb_rk->rk_conf.term_sig)
+ sigdelset(&newset, rkb->rkb_rk->rk_conf.term_sig);
+ pthread_sigmask(SIG_SETMASK, &newset, &oldset);
+#endif
+
+ /*
+ * Fd-based queue wake-ups using a non-blocking pipe.
+ * Writes are best effort, if the socket queue is full
+ * the write fails (silently) but this has no effect on latency
+ * since the POLLIN flag will already have been raised for fd.
+ */
+ rkb->rkb_wakeup_fd[0] = -1;
+ rkb->rkb_wakeup_fd[1] = -1;
+
+#ifndef _WIN32
+ if ((r = rd_pipe_nonblocking(rkb->rkb_wakeup_fd)) == -1) {
+ rd_rkb_log(rkb, LOG_ERR, "WAKEUPFD",
+ "Failed to setup broker queue wake-up fds: "
+ "%s: disabling low-latency mode",
+ rd_strerror(r));
+
+ } else if (source == RD_KAFKA_INTERNAL) {
+ /* nop: internal broker has no IO transport. */
+
+ } else {
+ char onebyte = 1;
+
+ rd_rkb_dbg(rkb, QUEUE, "WAKEUPFD",
+ "Enabled low-latency ops queue wake-ups");
+ rd_kafka_q_io_event_enable(rkb->rkb_ops, rkb->rkb_wakeup_fd[1],
+ &onebyte, sizeof(onebyte));
+ }
+#endif
+
+ /* Lock broker's lock here to synchronise state, i.e., hold off
+ * the broker thread until we've finalized the rkb. */
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_keep(rkb); /* broker thread's refcnt */
+ if (thrd_create(&rkb->rkb_thread, rd_kafka_broker_thread_main, rkb) !=
+ thrd_success) {
+ rd_kafka_broker_unlock(rkb);
+
+ rd_kafka_log(rk, LOG_CRIT, "THREAD",
+ "Unable to create broker thread");
+
+ /* Send ERR op back to application for processing. */
+ rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
+ "Unable to create broker thread");
+
+ rd_free(rkb);
+
+#ifndef _WIN32
+ /* Restore sigmask of caller */
+ pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+#endif
+
+ return NULL;
+ }
+
+ if (rkb->rkb_source != RD_KAFKA_INTERNAL) {
+ if (rk->rk_conf.security_protocol ==
+ RD_KAFKA_PROTO_SASL_PLAINTEXT ||
+ rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL)
+ rd_kafka_sasl_broker_init(rkb);
+
+ /* Insert broker at head of list, idea is that
+ * newer brokers are more relevant than old ones,
+ * and in particular LEARNED brokers are more relevant
+ * than CONFIGURED (bootstrap) and LOGICAL brokers. */
+ TAILQ_INSERT_HEAD(&rkb->rkb_rk->rk_brokers, rkb, rkb_link);
+ (void)rd_atomic32_add(&rkb->rkb_rk->rk_broker_cnt, 1);
+
+ if (rkb->rkb_nodeid != -1 && !RD_KAFKA_BROKER_IS_LOGICAL(rkb)) {
+ rd_list_add(&rkb->rkb_rk->rk_broker_by_id, rkb);
+ rd_list_sort(&rkb->rkb_rk->rk_broker_by_id,
+ rd_kafka_broker_cmp_by_id);
+ }
+
+ rd_rkb_dbg(rkb, BROKER, "BROKER",
+ "Added new broker with NodeId %" PRId32,
+ rkb->rkb_nodeid);
+ }
+
+ /* Call on_broker_state_change interceptors */
+ rd_kafka_interceptors_on_broker_state_change(
+ rk, rkb->rkb_nodeid, rd_kafka_secproto_names[rkb->rkb_proto],
+ rkb->rkb_origname, rkb->rkb_port,
+ rd_kafka_broker_state_names[rkb->rkb_state]);
+
+ rd_kafka_broker_unlock(rkb);
+
+ /* Add broker state monitor for the coordinator request to use.
+ * This is needed by the transactions implementation and DeleteGroups.
+ */
+ rd_kafka_broker_monitor_add(&rkb->rkb_coord_monitor, rkb, rk->rk_ops,
+ rd_kafka_coord_rkb_monitor_cb);
+
+
+#ifndef _WIN32
+ /* Restore sigmask of caller */
+ pthread_sigmask(SIG_SETMASK, &oldset, NULL);
+#endif
+
+ return rkb;
+}
+
+
+/**
+ * @brief Adds a logical broker.
+ *
+ * Logical brokers act just like any broker handle, but will not have
+ * an initial address set. The address (or nodename is it is called
+ * internally) can be set from another broker handle
+ * by calling rd_kafka_broker_set_nodename().
+ *
+ * This allows maintaining a logical group coordinator broker
+ * handle that can ambulate between real broker addresses.
+ *
+ * Logical broker constraints:
+ * - will not have a broker-id set (-1).
+ * - will not have a port set (0).
+ * - the address for the broker may change.
+ * - the name of broker will not correspond to the address,
+ * but the \p name given here.
+ *
+ * @returns a new broker, holding a refcount for the caller.
+ *
+ * @locality any rdkafka thread
+ * @locks none
+ */
+rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk,
+ const char *name) {
+ rd_kafka_broker_t *rkb;
+
+ rd_kafka_wrlock(rk);
+ rkb = rd_kafka_broker_add(rk, RD_KAFKA_LOGICAL,
+ rk->rk_conf.security_protocol, name,
+ 0 /*port*/, -1 /*brokerid*/);
+ rd_assert(rkb && *"failed to create broker thread");
+ rd_kafka_wrunlock(rk);
+
+ rd_atomic32_add(&rk->rk_broker_addrless_cnt, 1);
+
+ rd_dassert(RD_KAFKA_BROKER_IS_LOGICAL(rkb));
+ rd_kafka_broker_keep(rkb);
+ return rkb;
+}
+
+
+/**
+ * @brief Update the nodename (address) of broker \p rkb
+ * with the nodename from broker \p from_rkb (may be NULL).
+ *
+ * If \p rkb is connected, the connection will be torn down.
+ * A new connection may be attempted to the new address
+ * if a persistent connection is needed (standard connection rules).
+ *
+ * The broker's logname is also updated to include \p from_rkb's
+ * broker id.
+ *
+ * @param from_rkb Use the nodename from this broker. If NULL, clear
+ * the \p rkb nodename.
+ *
+ * @remark Must only be called for logical brokers.
+ *
+ * @locks none
+ */
+void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb,
+ rd_kafka_broker_t *from_rkb) {
+ char nodename[RD_KAFKA_NODENAME_SIZE];
+ char brokername[RD_KAFKA_NODENAME_SIZE];
+ int32_t nodeid;
+ rd_bool_t changed = rd_false;
+
+ rd_assert(RD_KAFKA_BROKER_IS_LOGICAL(rkb));
+
+ rd_assert(rkb != from_rkb);
+
+ /* Get nodename from from_rkb */
+ if (from_rkb) {
+ rd_kafka_broker_lock(from_rkb);
+ rd_strlcpy(nodename, from_rkb->rkb_nodename, sizeof(nodename));
+ nodeid = from_rkb->rkb_nodeid;
+ rd_kafka_broker_unlock(from_rkb);
+ } else {
+ *nodename = '\0';
+ nodeid = -1;
+ }
+
+ /* Set nodename on rkb */
+ rd_kafka_broker_lock(rkb);
+ if (strcmp(rkb->rkb_nodename, nodename)) {
+ rd_rkb_dbg(rkb, BROKER, "NODENAME",
+ "Broker nodename changed from \"%s\" to \"%s\"",
+ rkb->rkb_nodename, nodename);
+ rd_strlcpy(rkb->rkb_nodename, nodename,
+ sizeof(rkb->rkb_nodename));
+ rkb->rkb_nodename_epoch++;
+ changed = rd_true;
+ }
+
+ if (rkb->rkb_nodeid != nodeid) {
+ rd_rkb_dbg(rkb, BROKER, "NODEID",
+ "Broker nodeid changed from %" PRId32 " to %" PRId32,
+ rkb->rkb_nodeid, nodeid);
+ rkb->rkb_nodeid = nodeid;
+ }
+
+ rd_kafka_broker_unlock(rkb);
+
+ /* Update the log name to include (or exclude) the nodeid.
+ * The nodeid is appended as "..logname../nodeid" */
+ rd_kafka_mk_brokername(brokername, sizeof(brokername), rkb->rkb_proto,
+ rkb->rkb_name, nodeid, rkb->rkb_source);
+
+ rd_kafka_broker_set_logname(rkb, brokername);
+
+ if (!changed)
+ return;
+
+ if (!rd_kafka_broker_is_addrless(rkb))
+ rd_atomic32_sub(&rkb->rkb_rk->rk_broker_addrless_cnt, 1);
+ else
+ rd_atomic32_add(&rkb->rkb_rk->rk_broker_addrless_cnt, 1);
+
+ /* Trigger a disconnect & reconnect */
+ rd_kafka_broker_schedule_connection(rkb);
+}
+
+
+/**
+ * @brief Find broker by nodeid (not -1) and
+ * possibly filtered by state (unless -1).
+ *
+ * @param do_connect If sparse connections are enabled and the broker is found
+ * but not up, a connection will be triggered.
+ *
+ * @locks: rd_kafka_*lock() MUST be held
+ * @remark caller must release rkb reference by rd_kafka_broker_destroy()
+ */
+rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ int32_t nodeid,
+ int state,
+ rd_bool_t do_connect) {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_broker_t skel = {.rkb_nodeid = nodeid};
+
+ if (rd_kafka_terminating(rk))
+ return NULL;
+
+ rkb = rd_list_find(&rk->rk_broker_by_id, &skel,
+ rd_kafka_broker_cmp_by_id);
+
+ if (!rkb)
+ return NULL;
+
+ if (state != -1) {
+ int broker_state;
+ rd_kafka_broker_lock(rkb);
+ broker_state = (int)rkb->rkb_state;
+ rd_kafka_broker_unlock(rkb);
+
+ if (broker_state != state) {
+ if (do_connect &&
+ broker_state == RD_KAFKA_BROKER_STATE_INIT &&
+ rk->rk_conf.sparse_connections)
+ rd_kafka_broker_schedule_connection(rkb);
+ return NULL;
+ }
+ }
+
+ rd_kafka_broker_keep_fl(func, line, rkb);
+ return rkb;
+}
+
+/**
+ * Locks: rd_kafka_rdlock(rk) must be held
+ * NOTE: caller must release rkb reference by rd_kafka_broker_destroy()
+ */
+static rd_kafka_broker_t *rd_kafka_broker_find(rd_kafka_t *rk,
+ rd_kafka_secproto_t proto,
+ const char *name,
+ uint16_t port) {
+ rd_kafka_broker_t *rkb;
+ char nodename[RD_KAFKA_NODENAME_SIZE];
+
+ rd_kafka_mk_nodename(nodename, sizeof(nodename), name, port);
+
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ if (RD_KAFKA_BROKER_IS_LOGICAL(rkb))
+ continue;
+
+ rd_kafka_broker_lock(rkb);
+ if (!rd_kafka_terminating(rk) && rkb->rkb_proto == proto &&
+ !strcmp(rkb->rkb_nodename, nodename)) {
+ rd_kafka_broker_keep(rkb);
+ rd_kafka_broker_unlock(rkb);
+ return rkb;
+ }
+ rd_kafka_broker_unlock(rkb);
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Parse a broker host name.
+ * The string 'name' is modified and null-terminated portions of it
+ * are returned in 'proto', 'host', and 'port'.
+ *
+ * Returns 0 on success or -1 on parse error.
+ */
+static int rd_kafka_broker_name_parse(rd_kafka_t *rk,
+ char **name,
+ rd_kafka_secproto_t *proto,
+ const char **host,
+ uint16_t *port) {
+ char *s = *name;
+ char *orig;
+ char *n, *t, *t2;
+
+ /* Save a temporary copy of the original name for logging purposes */
+ rd_strdupa(&orig, *name);
+
+ /* Find end of this name (either by delimiter or end of string */
+ if ((n = strchr(s, ',')))
+ *n = '\0';
+ else
+ n = s + strlen(s) - 1;
+
+
+ /* Check if this looks like an url. */
+ if ((t = strstr(s, "://"))) {
+ int i;
+ /* "proto://host[:port]" */
+
+ if (t == s) {
+ rd_kafka_log(rk, LOG_WARNING, "BROKER",
+ "Broker name \"%s\" parse error: "
+ "empty protocol name",
+ orig);
+ return -1;
+ }
+
+ /* Make protocol uppercase */
+ for (t2 = s; t2 < t; t2++)
+ *t2 = toupper(*t2);
+
+ *t = '\0';
+
+ /* Find matching protocol by name. */
+ for (i = 0; i < RD_KAFKA_PROTO_NUM; i++)
+ if (!rd_strcasecmp(s, rd_kafka_secproto_names[i]))
+ break;
+
+ /* Unsupported protocol */
+ if (i == RD_KAFKA_PROTO_NUM) {
+ rd_kafka_log(rk, LOG_WARNING, "BROKER",
+ "Broker name \"%s\" parse error: "
+ "unsupported protocol \"%s\"",
+ orig, s);
+
+ return -1;
+ }
+
+ *proto = i;
+
+ /* Enforce protocol */
+ if (rk->rk_conf.security_protocol != *proto) {
+ rd_kafka_log(
+ rk, LOG_WARNING, "BROKER",
+ "Broker name \"%s\" parse error: "
+ "protocol \"%s\" does not match "
+ "security.protocol setting \"%s\"",
+ orig, s,
+ rd_kafka_secproto_names[rk->rk_conf
+ .security_protocol]);
+ return -1;
+ }
+
+ /* Hostname starts here */
+ s = t + 3;
+
+ /* Ignore anything that looks like the path part of an URL */
+ if ((t = strchr(s, '/')))
+ *t = '\0';
+
+ } else
+ *proto = rk->rk_conf.security_protocol; /* Default protocol */
+
+
+ *port = RD_KAFKA_PORT;
+ /* Check if port has been specified, but try to identify IPv6
+ * addresses first:
+ * t = last ':' in string
+ * t2 = first ':' in string
+ * If t and t2 are equal then only one ":" exists in name
+ * and thus an IPv4 address with port specified.
+ * Else if not equal and t is prefixed with "]" then it's an
+ * IPv6 address with port specified.
+ * Else no port specified. */
+ if ((t = strrchr(s, ':')) &&
+ ((t2 = strchr(s, ':')) == t || *(t - 1) == ']')) {
+ *t = '\0';
+ *port = atoi(t + 1);
+ }
+
+ /* Empty host name -> localhost */
+ if (!*s)
+ s = "localhost";
+
+ *host = s;
+ *name = n + 1; /* past this name. e.g., next name/delimiter to parse */
+
+ return 0;
+}
+
+/**
+ * @brief Adds a (csv list of) broker(s).
+ * Returns the number of brokers succesfully added.
+ *
+ * @locality any thread
+ * @locks none
+ */
+int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist) {
+ char *s_copy = rd_strdup(brokerlist);
+ char *s = s_copy;
+ int cnt = 0;
+ rd_kafka_broker_t *rkb;
+ int pre_cnt = rd_atomic32_get(&rk->rk_broker_cnt);
+
+ /* Parse comma-separated list of brokers. */
+ while (*s) {
+ uint16_t port;
+ const char *host;
+ rd_kafka_secproto_t proto;
+
+ if (*s == ',' || *s == ' ') {
+ s++;
+ continue;
+ }
+
+ if (rd_kafka_broker_name_parse(rk, &s, &proto, &host, &port) ==
+ -1)
+ break;
+
+ rd_kafka_wrlock(rk);
+
+ if ((rkb = rd_kafka_broker_find(rk, proto, host, port)) &&
+ rkb->rkb_source == RD_KAFKA_CONFIGURED) {
+ cnt++;
+ } else if (rd_kafka_broker_add(rk, RD_KAFKA_CONFIGURED, proto,
+ host, port,
+ RD_KAFKA_NODEID_UA) != NULL)
+ cnt++;
+
+ /* If rd_kafka_broker_find returned a broker its
+ * reference needs to be released
+ * See issue #193 */
+ if (rkb)
+ rd_kafka_broker_destroy(rkb);
+
+ rd_kafka_wrunlock(rk);
+ }
+
+ rd_free(s_copy);
+
+ if (rk->rk_conf.sparse_connections && cnt > 0 && pre_cnt == 0) {
+ /* Sparse connections:
+ * If this was the first set of brokers added,
+ * select a random one to trigger the initial cluster
+ * connection. */
+ rd_kafka_rdlock(rk);
+ rd_kafka_connect_any(rk, "bootstrap servers added");
+ rd_kafka_rdunlock(rk);
+ }
+
+ return cnt;
+}
+
+
+int rd_kafka_brokers_add(rd_kafka_t *rk, const char *brokerlist) {
+ return rd_kafka_brokers_add0(rk, brokerlist);
+}
+
+
+/**
+ * @brief Adds a new broker or updates an existing one.
+ *
+ * @param rkbp if non-NULL, will be set to the broker object with
+ * refcount increased, or NULL on error.
+ *
+ * @locks none
+ * @locality any
+ */
+void rd_kafka_broker_update(rd_kafka_t *rk,
+ rd_kafka_secproto_t proto,
+ const struct rd_kafka_metadata_broker *mdb,
+ rd_kafka_broker_t **rkbp) {
+ rd_kafka_broker_t *rkb;
+ char nodename[RD_KAFKA_NODENAME_SIZE];
+ int needs_update = 0;
+
+ rd_kafka_mk_nodename(nodename, sizeof(nodename), mdb->host, mdb->port);
+
+ rd_kafka_wrlock(rk);
+ if (unlikely(rd_kafka_terminating(rk))) {
+ /* Dont update metadata while terminating, do this
+ * after acquiring lock for proper synchronisation */
+ rd_kafka_wrunlock(rk);
+ if (rkbp)
+ *rkbp = NULL;
+ return;
+ }
+
+ if ((rkb = rd_kafka_broker_find_by_nodeid(rk, mdb->id))) {
+ /* Broker matched by nodeid, see if we need to update
+ * the hostname. */
+ if (strcmp(rkb->rkb_nodename, nodename))
+ needs_update = 1;
+ } else if ((rkb = rd_kafka_broker_find(rk, proto, mdb->host,
+ mdb->port))) {
+ /* Broker matched by hostname (but not by nodeid),
+ * update the nodeid. */
+ needs_update = 1;
+
+ } else if ((rkb = rd_kafka_broker_add(rk, RD_KAFKA_LEARNED, proto,
+ mdb->host, mdb->port, mdb->id))) {
+ rd_kafka_broker_keep(rkb);
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ if (rkb) {
+ /* Existing broker */
+ if (needs_update) {
+ rd_kafka_op_t *rko;
+ rko = rd_kafka_op_new(RD_KAFKA_OP_NODE_UPDATE);
+ rd_strlcpy(rko->rko_u.node.nodename, nodename,
+ sizeof(rko->rko_u.node.nodename));
+ rko->rko_u.node.nodeid = mdb->id;
+ /* Perform a blocking op request so that all
+ * broker-related state, such as the rk broker list,
+ * is up to date by the time this call returns.
+ * Ignore&destroy the response. */
+ rd_kafka_op_err_destroy(
+ rd_kafka_op_req(rkb->rkb_ops, rko, -1));
+ }
+ }
+
+ if (rkbp)
+ *rkbp = rkb;
+ else if (rkb)
+ rd_kafka_broker_destroy(rkb);
+}
+
+
+/**
+ * @returns the broker id, or RD_KAFKA_NODEID_UA if \p rkb is NULL.
+ *
+ * @locality any
+ * @locks_required none
+ * @locks_acquired rkb_lock
+ */
+int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb) {
+ int32_t broker_id;
+
+ if (unlikely(!rkb))
+ return RD_KAFKA_NODEID_UA;
+
+ /* Avoid locking if already on the broker thread */
+ if (thrd_is_current(rkb->rkb_thread))
+ return rkb->rkb_nodeid;
+
+ rd_kafka_broker_lock(rkb);
+ broker_id = rkb->rkb_nodeid;
+ rd_kafka_broker_unlock(rkb);
+
+ return broker_id;
+}
+
+
+/**
+ * Returns a thread-safe temporary copy of the broker name.
+ * Must not be called more than 4 times from the same expression.
+ *
+ * Locks: none
+ * Locality: any thread
+ */
+const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb) {
+ static RD_TLS char ret[4][RD_KAFKA_NODENAME_SIZE];
+ static RD_TLS int reti = 0;
+
+ reti = (reti + 1) % 4;
+ mtx_lock(&rkb->rkb_logname_lock);
+ rd_snprintf(ret[reti], sizeof(ret[reti]), "%s", rkb->rkb_logname);
+ mtx_unlock(&rkb->rkb_logname_lock);
+
+ return ret[reti];
+}
+
+
+
+/**
+ * @brief Send dummy OP to broker thread to wake it up from IO sleep.
+ *
+ * @locality any
+ * @locks any
+ */
+void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_WAKEUP);
+ rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
+ rd_kafka_q_enq(rkb->rkb_ops, rko);
+ rd_rkb_dbg(rkb, QUEUE, "WAKEUP", "Wake-up: %s", reason);
+}
+
+/**
+ * @brief Wake up all broker threads that are in at least state \p min_state
+ *
+ * @locality any
+ * @locks none: rd_kafka_*lock() MUST NOT be held
+ *
+ * @returns the number of broker threads woken up
+ */
+int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk,
+ int min_state,
+ const char *reason) {
+ int cnt = 0;
+ rd_kafka_broker_t *rkb;
+
+ rd_kafka_rdlock(rk);
+ TAILQ_FOREACH(rkb, &rk->rk_brokers, rkb_link) {
+ int do_wakeup;
+
+ rd_kafka_broker_lock(rkb);
+ do_wakeup = (int)rkb->rkb_state >= min_state;
+ rd_kafka_broker_unlock(rkb);
+
+ if (do_wakeup) {
+ rd_kafka_broker_wakeup(rkb, reason);
+ cnt += 1;
+ }
+ }
+ rd_kafka_rdunlock(rk);
+
+ if (cnt > 0)
+ rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_QUEUE, "WAKEUP",
+ "Wake-up sent to %d broker thread%s in "
+ "state >= %s: %s",
+ cnt, cnt > 1 ? "s" : "",
+ rd_kafka_broker_state_names[min_state], reason);
+
+ return cnt;
+}
+
+/**
+ * @brief Filter out brokers that have at least one connection attempt.
+ */
+static int rd_kafka_broker_filter_never_connected(rd_kafka_broker_t *rkb,
+ void *opaque) {
+ return rd_atomic32_get(&rkb->rkb_c.connects);
+}
+
+
+/**
+ * @brief Sparse connections:
+ * Select a random broker to connect to if no brokers are up.
+ *
+ * This is a non-blocking call, the connection is
+ * performed by the selected broker thread.
+ *
+ * @locality any
+ * @locks rd_kafka_rdlock() MUST be held
+ */
+void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason) {
+ rd_kafka_broker_t *rkb;
+ rd_ts_t suppr;
+
+ /* Don't count connections to logical brokers since they serve
+ * a specific purpose (group coordinator) and their connections
+ * should not be reused for other purposes.
+ * rd_kafka_broker_random() will not return LOGICAL brokers. */
+ if (rd_atomic32_get(&rk->rk_broker_up_cnt) -
+ rd_atomic32_get(&rk->rk_logical_broker_up_cnt) >
+ 0 ||
+ rd_atomic32_get(&rk->rk_broker_cnt) -
+ rd_atomic32_get(&rk->rk_broker_addrless_cnt) ==
+ 0)
+ return;
+
+ mtx_lock(&rk->rk_suppress.sparse_connect_lock);
+ suppr = rd_interval(&rk->rk_suppress.sparse_connect_random,
+ rk->rk_conf.sparse_connect_intvl * 1000, 0);
+ mtx_unlock(&rk->rk_suppress.sparse_connect_lock);
+
+ if (suppr <= 0) {
+ rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT",
+ "Not selecting any broker for cluster connection: "
+ "still suppressed for %" PRId64 "ms: %s",
+ -suppr / 1000, reason);
+ return;
+ }
+
+ /* First pass: only match brokers never connected to,
+ * to try to exhaust the available brokers
+ * so that an ERR_ALL_BROKERS_DOWN error can be raised. */
+ rkb = rd_kafka_broker_random(rk, RD_KAFKA_BROKER_STATE_INIT,
+ rd_kafka_broker_filter_never_connected,
+ NULL);
+ /* Second pass: match any non-connected/non-connecting broker. */
+ if (!rkb)
+ rkb = rd_kafka_broker_random(rk, RD_KAFKA_BROKER_STATE_INIT,
+ NULL, NULL);
+
+ if (!rkb) {
+ /* No brokers matched:
+ * this happens if there are brokers in > INIT state,
+ * in which case they're already connecting. */
+
+ rd_kafka_dbg(rk, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT",
+ "Cluster connection already in progress: %s",
+ reason);
+ return;
+ }
+
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_GENERIC, "CONNECT",
+ "Selected for cluster connection: "
+ "%s (broker has %d connection attempt(s))",
+ reason, rd_atomic32_get(&rkb->rkb_c.connects));
+
+ rd_kafka_broker_schedule_connection(rkb);
+
+ rd_kafka_broker_destroy(rkb); /* refcnt from ..broker_random() */
+}
+
+
+
+/**
+ * @brief Send PURGE queue request to broker.
+ *
+ * @locality any
+ * @locks none
+ */
+void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb,
+ int purge_flags,
+ rd_kafka_replyq_t replyq) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_PURGE);
+ rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
+ rko->rko_replyq = replyq;
+ rko->rko_u.purge.flags = purge_flags;
+ rd_kafka_q_enq(rkb->rkb_ops, rko);
+}
+
+
+/**
+ * @brief Handle purge queues request
+ *
+ * @locality broker thread
+ * @locks none
+ */
+static void rd_kafka_broker_handle_purge_queues(rd_kafka_broker_t *rkb,
+ rd_kafka_op_t *rko) {
+ int purge_flags = rko->rko_u.purge.flags;
+ int inflight_cnt = 0, retry_cnt = 0, outq_cnt = 0, partial_cnt = 0;
+
+ rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGE",
+ "Purging queues with flags %s",
+ rd_kafka_purge_flags2str(purge_flags));
+
+
+ /**
+ * First purge any Produce requests to move the
+ * messages from the request's message queue to delivery reports.
+ */
+
+ /* Purge in-flight ProduceRequests */
+ if (purge_flags & RD_KAFKA_PURGE_F_INFLIGHT)
+ inflight_cnt = rd_kafka_broker_bufq_timeout_scan(
+ rkb, 1, &rkb->rkb_waitresps, NULL, RD_KAFKAP_Produce,
+ RD_KAFKA_RESP_ERR__PURGE_INFLIGHT, 0, NULL, 0);
+
+ if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) {
+ /* Requests in retry queue */
+ retry_cnt = rd_kafka_broker_bufq_timeout_scan(
+ rkb, 0, &rkb->rkb_retrybufs, NULL, RD_KAFKAP_Produce,
+ RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0);
+
+ /* Requests in transmit queue not completely sent yet.
+ * partial_cnt is included in outq_cnt and denotes a request
+ * that has been partially transmitted. */
+ outq_cnt = rd_kafka_broker_bufq_timeout_scan(
+ rkb, 0, &rkb->rkb_outbufs, &partial_cnt, RD_KAFKAP_Produce,
+ RD_KAFKA_RESP_ERR__PURGE_QUEUE, 0, NULL, 0);
+
+ /* Purging a partially transmitted request will mess up
+ * the protocol stream, so we need to disconnect from the broker
+ * to get a clean protocol socket. */
+ if (partial_cnt)
+ rd_kafka_broker_fail(
+ rkb, LOG_DEBUG, RD_KAFKA_RESP_ERR__PURGE_QUEUE,
+ "Purged %d partially sent request: "
+ "forcing disconnect",
+ partial_cnt);
+ }
+
+ rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ",
+ "Purged %i in-flight, %i retry-queued, "
+ "%i out-queue, %i partially-sent requests",
+ inflight_cnt, retry_cnt, outq_cnt, partial_cnt);
+
+ /* Purge partition queues */
+ if (purge_flags & RD_KAFKA_PURGE_F_QUEUE) {
+ rd_kafka_toppar_t *rktp;
+ int msg_cnt = 0;
+ int part_cnt = 0;
+
+ TAILQ_FOREACH(rktp, &rkb->rkb_toppars, rktp_rkblink) {
+ int r;
+
+ r = rd_kafka_toppar_purge_queues(
+ rktp, purge_flags, rd_true /*include xmit msgq*/);
+ if (r > 0) {
+ msg_cnt += r;
+ part_cnt++;
+ }
+ }
+
+ rd_rkb_dbg(rkb, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ",
+ "Purged %i message(s) from %d partition(s)", msg_cnt,
+ part_cnt);
+ }
+
+ rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
+}
+
+
+/**
+ * @brief Add toppar to broker's active list list.
+ *
+ * For consumer this means the fetch list.
+ * For producers this is all partitions assigned to this broker.
+ *
+ * @locality broker thread
+ * @locks rktp_lock MUST be held
+ */
+void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const char *reason) {
+ int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER;
+
+ if (is_consumer && rktp->rktp_fetch)
+ return; /* Already added */
+
+ CIRCLEQ_INSERT_TAIL(&rkb->rkb_active_toppars, rktp, rktp_activelink);
+ rkb->rkb_active_toppar_cnt++;
+
+ if (is_consumer)
+ rktp->rktp_fetch = 1;
+
+ if (unlikely(rkb->rkb_active_toppar_cnt == 1))
+ rd_kafka_broker_active_toppar_next(rkb, rktp);
+
+ rd_rkb_dbg(rkb, TOPIC, "FETCHADD",
+ "Added %.*s [%" PRId32
+ "] to %s list (%d entries, opv %d, "
+ "%d messages queued): %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, is_consumer ? "fetch" : "active",
+ rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version,
+ rd_kafka_msgq_len(&rktp->rktp_msgq), reason);
+}
+
+
+/**
+ * @brief Remove toppar from active list.
+ *
+ * Locality: broker thread
+ * Locks: none
+ */
+void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const char *reason) {
+ int is_consumer = rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER;
+
+ if (is_consumer && !rktp->rktp_fetch)
+ return; /* Not added */
+
+ CIRCLEQ_REMOVE(&rkb->rkb_active_toppars, rktp, rktp_activelink);
+ rd_kafka_assert(NULL, rkb->rkb_active_toppar_cnt > 0);
+ rkb->rkb_active_toppar_cnt--;
+
+ if (is_consumer)
+ rktp->rktp_fetch = 0;
+
+ if (rkb->rkb_active_toppar_next == rktp) {
+ /* Update next pointer */
+ rd_kafka_broker_active_toppar_next(
+ rkb, CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
+ rktp_activelink));
+ }
+
+ rd_rkb_dbg(rkb, TOPIC, "FETCHADD",
+ "Removed %.*s [%" PRId32
+ "] from %s list "
+ "(%d entries, opv %d): %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, is_consumer ? "fetch" : "active",
+ rkb->rkb_active_toppar_cnt, rktp->rktp_fetch_version,
+ reason);
+}
+
+
+/**
+ * @brief Schedule connection for \p rkb.
+ * Will trigger disconnection for logical brokers whose nodename
+ * was changed.
+ *
+ * @locality any
+ * @locks none
+ */
+void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_CONNECT);
+ rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
+ rd_kafka_q_enq(rkb->rkb_ops, rko);
+}
+
+
+/**
+ * @brief Add need for persistent connection to \p rkb
+ * with rkb_persistconn atomic counter \p acntp
+ *
+ * @locality any
+ * @locks none
+ */
+void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb,
+ rd_atomic32_t *acntp) {
+
+ if (rd_atomic32_add(acntp, 1) == 1) {
+ /* First one, trigger event. */
+ rd_kafka_broker_schedule_connection(rkb);
+ }
+}
+
+
+/**
+ * @brief Remove need for persistent connection to \p rkb
+ * with rkb_persistconn atomic counter \p acntp
+ *
+ * @locality any
+ * @locks none
+ */
+void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb,
+ rd_atomic32_t *acntp) {
+ int32_t r = rd_atomic32_sub(acntp, 1);
+ rd_assert(r >= 0);
+}
+
+
+
+/**
+ * @brief OP_BROKER_MONITOR callback trampoline which
+ * calls the rkbmon's callback.
+ *
+ * @locality monitoree's op handler thread
+ * @locks none
+ */
+static rd_kafka_op_res_t rd_kafka_broker_monitor_op_cb(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY)
+ rko->rko_u.broker_monitor.cb(rko->rko_u.broker_monitor.rkb);
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+/**
+ * @brief Trigger ops for registered monitors when the broker
+ * state goes from or to UP.
+ *
+ * @locality broker thread
+ * @locks rkb_lock MUST be held
+ */
+static void rd_kafka_broker_trigger_monitors(rd_kafka_broker_t *rkb) {
+ rd_kafka_broker_monitor_t *rkbmon;
+
+ TAILQ_FOREACH(rkbmon, &rkb->rkb_monitors, rkbmon_link) {
+ rd_kafka_op_t *rko =
+ rd_kafka_op_new_cb(rkb->rkb_rk, RD_KAFKA_OP_BROKER_MONITOR,
+ rd_kafka_broker_monitor_op_cb);
+ rd_kafka_broker_keep(rkb);
+ rko->rko_u.broker_monitor.rkb = rkb;
+ rko->rko_u.broker_monitor.cb = rkbmon->rkbmon_cb;
+ rd_kafka_q_enq(rkbmon->rkbmon_q, rko);
+ }
+}
+
+
+/**
+ * @brief Adds a monitor for when the broker goes up or down.
+ *
+ * The callback will be triggered on the caller's op queue handler thread.
+ *
+ * Use rd_kafka_broker_is_up() in your callback to get the current
+ * state of the broker, since it might have changed since the event
+ * was enqueued.
+ *
+ * @param rkbmon monitoree's monitor.
+ * @param rkb broker to monitor.
+ * @param rkq queue for event op.
+ * @param callback callback to be triggered from \p rkq's op handler.
+ * @opaque opaque passed to callback.
+ *
+ * @locks none
+ * @locality any
+ */
+void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_q_t *rkq,
+ void (*callback)(rd_kafka_broker_t *rkb)) {
+ rd_assert(!rkbmon->rkbmon_rkb);
+ rkbmon->rkbmon_rkb = rkb;
+ rkbmon->rkbmon_q = rkq;
+ rd_kafka_q_keep(rkbmon->rkbmon_q);
+ rkbmon->rkbmon_cb = callback;
+
+ rd_kafka_broker_keep(rkb);
+
+ rd_kafka_broker_lock(rkb);
+ TAILQ_INSERT_TAIL(&rkb->rkb_monitors, rkbmon, rkbmon_link);
+ rd_kafka_broker_unlock(rkb);
+}
+
+
+/**
+ * @brief Removes a monitor previously added with
+ * rd_kafka_broker_monitor_add().
+ *
+ * @warning The rkbmon's callback may still be called after
+ * _del() has been called due to the buffering nature
+ * of op queues.
+ *
+ * @locks none
+ * @locality any
+ */
+void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon) {
+ rd_kafka_broker_t *rkb = rkbmon->rkbmon_rkb;
+
+ if (!rkb)
+ return;
+
+ rd_kafka_broker_lock(rkb);
+ rkbmon->rkbmon_rkb = NULL;
+ rd_kafka_q_destroy(rkbmon->rkbmon_q);
+ TAILQ_REMOVE(&rkb->rkb_monitors, rkbmon, rkbmon_link);
+ rd_kafka_broker_unlock(rkb);
+
+ rd_kafka_broker_destroy(rkb);
+}
+
+/**
+ * @name Unit tests
+ * @{
+ *
+ */
+int unittest_broker(void) {
+ int fails = 0;
+
+ fails += rd_ut_reconnect_backoff();
+
+ return fails;
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h
new file mode 100644
index 000000000..1e454d4d7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_broker.h
@@ -0,0 +1,607 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_BROKER_H_
+#define _RDKAFKA_BROKER_H_
+
+#include "rdkafka_feature.h"
+
+
+extern const char *rd_kafka_broker_state_names[];
+extern const char *rd_kafka_secproto_names[];
+
+
+/**
+ * @enum Broker states
+ */
+typedef enum {
+ RD_KAFKA_BROKER_STATE_INIT,
+ RD_KAFKA_BROKER_STATE_DOWN,
+ RD_KAFKA_BROKER_STATE_TRY_CONNECT,
+ RD_KAFKA_BROKER_STATE_CONNECT,
+ RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE,
+ RD_KAFKA_BROKER_STATE_AUTH_LEGACY,
+
+ /* Any state >= STATE_UP means the Kafka protocol layer
+ * is operational (to some degree). */
+ RD_KAFKA_BROKER_STATE_UP,
+ RD_KAFKA_BROKER_STATE_UPDATE,
+ RD_KAFKA_BROKER_STATE_APIVERSION_QUERY,
+ RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE,
+ RD_KAFKA_BROKER_STATE_AUTH_REQ,
+} rd_kafka_broker_state_t;
+
+/**
+ * @struct Broker state monitor.
+ *
+ * @warning The monitor object lifetime should be the same as
+ * the rd_kafka_t object, not shorter.
+ */
+typedef struct rd_kafka_broker_monitor_s {
+ TAILQ_ENTRY(rd_kafka_broker_monitor_s) rkbmon_link; /**< rkb_monitors*/
+ struct rd_kafka_broker_s *rkbmon_rkb; /**< Broker being monitored. */
+ rd_kafka_q_t *rkbmon_q; /**< Queue to enqueue op on. */
+
+ /**< Callback triggered on the monitoree's op handler thread.
+ * Do note that the callback might be triggered even after
+ * it has been deleted due to the queueing nature of op queues. */
+ void (*rkbmon_cb)(rd_kafka_broker_t *rkb);
+} rd_kafka_broker_monitor_t;
+
+
+/**
+ * @struct Broker instance
+ */
+struct rd_kafka_broker_s { /* rd_kafka_broker_t */
+ TAILQ_ENTRY(rd_kafka_broker_s) rkb_link;
+
+ int32_t rkb_nodeid; /**< Broker Node Id.
+ * @locks rkb_lock */
+#define RD_KAFKA_NODEID_UA -1
+
+ rd_sockaddr_list_t *rkb_rsal;
+ rd_ts_t rkb_ts_rsal_last;
+ const rd_sockaddr_inx_t *rkb_addr_last; /* Last used connect address */
+
+ rd_kafka_transport_t *rkb_transport;
+
+ uint32_t rkb_corrid;
+ int rkb_connid; /* Connection id, increased by
+ * one for each connection by
+ * this broker. Used as a safe-guard
+ * to help troubleshooting buffer
+ * problems across disconnects. */
+
+ rd_kafka_q_t *rkb_ops;
+
+ mtx_t rkb_lock;
+
+ int rkb_blocking_max_ms; /* Maximum IO poll blocking
+ * time. */
+
+ /* Toppars handled by this broker */
+ TAILQ_HEAD(, rd_kafka_toppar_s) rkb_toppars;
+ int rkb_toppar_cnt;
+
+ /* Active toppars that are eligible for:
+ * - (consumer) fetching due to underflow
+ * - (producer) producing
+ *
+ * The circleq provides round-robin scheduling for both cases.
+ */
+ CIRCLEQ_HEAD(, rd_kafka_toppar_s) rkb_active_toppars;
+ int rkb_active_toppar_cnt;
+ rd_kafka_toppar_t *rkb_active_toppar_next; /* Next 'first' toppar
+ * in fetch list.
+ * This is used for
+ * round-robin. */
+
+
+ rd_kafka_cgrp_t *rkb_cgrp;
+
+ rd_ts_t rkb_ts_fetch_backoff;
+ int rkb_fetching;
+
+ rd_kafka_broker_state_t rkb_state; /**< Current broker state */
+
+ rd_ts_t rkb_ts_state; /* Timestamp of last
+ * state change */
+ rd_interval_t rkb_timeout_scan_intvl; /* Waitresp timeout scan
+ * interval. */
+
+ rd_atomic32_t rkb_blocking_request_cnt; /* The number of
+ * in-flight blocking
+ * requests.
+ * A blocking request is
+ * one that is known to
+ * possibly block on the
+ * broker for longer than
+ * the typical processing
+ * time, e.g.:
+ * JoinGroup, SyncGroup */
+
+ int rkb_features; /* Protocol features supported
+ * by this broker.
+ * See RD_KAFKA_FEATURE_* in
+ * rdkafka_proto.h */
+
+ struct rd_kafka_ApiVersion *rkb_ApiVersions; /* Broker's supported APIs
+ * (MUST be sorted) */
+ size_t rkb_ApiVersions_cnt;
+ rd_interval_t rkb_ApiVersion_fail_intvl; /* Controls how long
+ * the fallback proto
+ * will be used after
+ * ApiVersionRequest
+ * failure. */
+
+ rd_kafka_confsource_t rkb_source;
+ struct {
+ rd_atomic64_t tx_bytes;
+ rd_atomic64_t tx; /**< Kafka requests */
+ rd_atomic64_t tx_err;
+ rd_atomic64_t tx_retries;
+ rd_atomic64_t req_timeouts; /* Accumulated value */
+
+ rd_atomic64_t rx_bytes;
+ rd_atomic64_t rx; /**< Kafka responses */
+ rd_atomic64_t rx_err;
+ rd_atomic64_t rx_corrid_err; /* CorrId misses */
+ rd_atomic64_t rx_partial; /* Partial messages received
+ * and dropped. */
+ rd_atomic64_t zbuf_grow; /* Compression/decompression buffer
+ grows needed */
+ rd_atomic64_t buf_grow; /* rkbuf grows needed */
+ rd_atomic64_t wakeups; /* Poll wakeups */
+
+ rd_atomic32_t connects; /**< Connection attempts,
+ * successful or not. */
+
+ rd_atomic32_t disconnects; /**< Disconnects.
+ * Always peer-triggered. */
+
+ rd_atomic64_t reqtype[RD_KAFKAP__NUM]; /**< Per request-type
+ * counter */
+
+ rd_atomic64_t ts_send; /**< Timestamp of last send */
+ rd_atomic64_t ts_recv; /**< Timestamp of last receive */
+ } rkb_c;
+
+ int rkb_req_timeouts; /* Current value */
+
+ thrd_t rkb_thread;
+
+ rd_refcnt_t rkb_refcnt;
+
+ rd_kafka_t *rkb_rk;
+
+ rd_kafka_buf_t *rkb_recv_buf;
+
+ int rkb_max_inflight; /* Maximum number of in-flight
+ * requests to broker.
+ * Compared to rkb_waitresps length.*/
+ rd_kafka_bufq_t rkb_outbufs;
+ rd_kafka_bufq_t rkb_waitresps;
+ rd_kafka_bufq_t rkb_retrybufs;
+
+ rd_avg_t rkb_avg_int_latency; /* Current internal latency period*/
+ rd_avg_t rkb_avg_outbuf_latency; /**< Current latency
+ * between buf_enq0
+ * and writing to socket
+ */
+ rd_avg_t rkb_avg_rtt; /* Current RTT period */
+ rd_avg_t rkb_avg_throttle; /* Current throttle period */
+
+ /* These are all protected by rkb_lock */
+ char rkb_name[RD_KAFKA_NODENAME_SIZE]; /* Displ name */
+ char rkb_nodename[RD_KAFKA_NODENAME_SIZE]; /* host:port*/
+ uint16_t rkb_port; /* TCP port */
+ char *rkb_origname; /* Original
+ * host name */
+ int rkb_nodename_epoch; /**< Bumped each time
+ * the nodename is changed.
+ * Compared to
+ * rkb_connect_epoch
+ * to trigger a reconnect
+ * for logical broker
+ * when the nodename is
+ * updated. */
+ int rkb_connect_epoch; /**< The value of
+ * rkb_nodename_epoch at the
+ * last connection attempt.
+ */
+
+ /* Logging name is a copy of rkb_name, protected by its own mutex */
+ char *rkb_logname;
+ mtx_t rkb_logname_lock;
+
+ rd_socket_t rkb_wakeup_fd[2]; /* Wake-up fds (r/w) to wake
+ * up from IO-wait when
+ * queues have content. */
+
+ /**< Current, exponentially increased, reconnect backoff. */
+ int rkb_reconnect_backoff_ms;
+
+ /**< Absolute timestamp of next allowed reconnect. */
+ rd_ts_t rkb_ts_reconnect;
+
+ /** Absolute time of last connection attempt. */
+ rd_ts_t rkb_ts_connect;
+
+ /**< Persistent connection demand is tracked by
+ * a counter for each type of demand.
+ * The broker thread will maintain a persistent connection
+ * if any of the counters are non-zero, and revert to
+ * on-demand mode when they all reach zero.
+ * After incrementing any of the counters a broker wakeup
+ * should be signalled to expedite handling. */
+ struct {
+ /**< Producer: partitions are being produced to.
+ * Consumer: partitions are being fetched from.
+ *
+ * Counter is maintained by the broker handler thread
+ * itself, no need for atomic/locking.
+ * Is reset to 0 on each producer|consumer_serve() loop
+ * and updated according to current need, which
+ * will trigger a state transition to
+ * TRY_CONNECT if a connection is needed. */
+ int internal;
+
+ /**< Consumer: Broker is the group coordinator.
+ * Counter is maintained by cgrp logic in
+ * rdkafka main thread.
+ *
+ * Producer: Broker is the transaction coordinator.
+ * Counter is maintained by rdkafka_idempotence.c.
+ *
+ * All: A coord_req_t is waiting for this broker to come up.
+ */
+
+ rd_atomic32_t coord;
+ } rkb_persistconn;
+
+ /**< Currently registered state monitors.
+ * @locks rkb_lock */
+ TAILQ_HEAD(, rd_kafka_broker_monitor_s) rkb_monitors;
+
+ /**< Coordinator request's broker monitor.
+ * Will trigger the coord_req fsm on broker state change. */
+ rd_kafka_broker_monitor_t rkb_coord_monitor;
+
+ rd_kafka_secproto_t rkb_proto;
+
+ int rkb_down_reported; /* Down event reported */
+#if WITH_SASL_CYRUS
+ rd_kafka_timer_t rkb_sasl_kinit_refresh_tmr;
+#endif
+
+
+ /*
+ * Log suppression
+ */
+ struct {
+ /**< Log: compression type not supported by broker. */
+ rd_interval_t unsupported_compression;
+
+ /**< Log: KIP-62 not supported by broker. */
+ rd_interval_t unsupported_kip62;
+
+ /**< Log: KIP-345 not supported by broker. */
+ rd_interval_t unsupported_kip345;
+
+ /**< Log & Error: identical broker_fail() errors. */
+ rd_interval_t fail_error;
+ } rkb_suppress;
+
+ /** Last error. This is used to suppress repeated logs. */
+ struct {
+ char errstr[512]; /**< Last error string */
+ rd_kafka_resp_err_t err; /**< Last error code */
+ int cnt; /**< Number of identical errors */
+ } rkb_last_err;
+};
+
+#define rd_kafka_broker_keep(rkb) rd_refcnt_add(&(rkb)->rkb_refcnt)
+#define rd_kafka_broker_keep_fl(FUNC, LINE, RKB) \
+ rd_refcnt_add_fl(FUNC, LINE, &(RKB)->rkb_refcnt)
+#define rd_kafka_broker_lock(rkb) mtx_lock(&(rkb)->rkb_lock)
+#define rd_kafka_broker_unlock(rkb) mtx_unlock(&(rkb)->rkb_lock)
+
+
+/**
+ * @brief Locks broker, acquires the states, unlocks, and returns
+ * the state.
+ * @locks broker_lock MUST NOT be held.
+ * @locality any
+ */
+static RD_INLINE RD_UNUSED rd_kafka_broker_state_t
+rd_kafka_broker_get_state(rd_kafka_broker_t *rkb) {
+ rd_kafka_broker_state_t state;
+ rd_kafka_broker_lock(rkb);
+ state = rkb->rkb_state;
+ rd_kafka_broker_unlock(rkb);
+ return state;
+}
+
+
+
+/**
+ * @returns true if the broker state is UP or UPDATE
+ */
+#define rd_kafka_broker_state_is_up(state) \
+ ((state) == RD_KAFKA_BROKER_STATE_UP || \
+ (state) == RD_KAFKA_BROKER_STATE_UPDATE)
+
+
+/**
+ * @returns true if the broker connection is up, else false.
+ * @locks broker_lock MUST NOT be held.
+ * @locality any
+ */
+static RD_UNUSED RD_INLINE rd_bool_t
+rd_kafka_broker_is_up(rd_kafka_broker_t *rkb) {
+ rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb);
+ return rd_kafka_broker_state_is_up(state);
+}
+
+
+/**
+ * @brief Broker comparator
+ */
+static RD_UNUSED RD_INLINE int rd_kafka_broker_cmp(const void *_a,
+ const void *_b) {
+ const rd_kafka_broker_t *a = _a, *b = _b;
+ return RD_CMP(a, b);
+}
+
+
+/**
+ * @returns true if broker supports \p features, else false.
+ */
+static RD_UNUSED int rd_kafka_broker_supports(rd_kafka_broker_t *rkb,
+ int features) {
+ const rd_bool_t do_lock = !thrd_is_current(rkb->rkb_thread);
+ int r;
+
+ if (do_lock)
+ rd_kafka_broker_lock(rkb);
+
+ r = (rkb->rkb_features & features) == features;
+
+ if (do_lock)
+ rd_kafka_broker_unlock(rkb);
+ return r;
+}
+
+int16_t rd_kafka_broker_ApiVersion_supported(rd_kafka_broker_t *rkb,
+ int16_t ApiKey,
+ int16_t minver,
+ int16_t maxver,
+ int *featuresp);
+
+rd_kafka_broker_t *rd_kafka_broker_find_by_nodeid0_fl(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ int32_t nodeid,
+ int state,
+ rd_bool_t do_connect);
+
+#define rd_kafka_broker_find_by_nodeid0(rk, nodeid, state, do_connect) \
+ rd_kafka_broker_find_by_nodeid0_fl(__FUNCTION__, __LINE__, rk, nodeid, \
+ state, do_connect)
+#define rd_kafka_broker_find_by_nodeid(rk, nodeid) \
+ rd_kafka_broker_find_by_nodeid0(rk, nodeid, -1, rd_false)
+
+
+/**
+ * Filter out brokers that don't support Idempotent Producer.
+ */
+static RD_INLINE RD_UNUSED int
+rd_kafka_broker_filter_non_idempotent(rd_kafka_broker_t *rkb, void *opaque) {
+ return !(rkb->rkb_features & RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER);
+}
+
+
+rd_kafka_broker_t *rd_kafka_broker_any(rd_kafka_t *rk,
+ int state,
+ int (*filter)(rd_kafka_broker_t *rkb,
+ void *opaque),
+ void *opaque,
+ const char *reason);
+rd_kafka_broker_t *rd_kafka_broker_any_up(rd_kafka_t *rk,
+ int *filtered_cnt,
+ int (*filter)(rd_kafka_broker_t *rkb,
+ void *opaque),
+ void *opaque,
+ const char *reason);
+rd_kafka_broker_t *rd_kafka_broker_any_usable(rd_kafka_t *rk,
+ int timeout_ms,
+ rd_dolock_t do_lock,
+ int features,
+ const char *reason);
+
+rd_kafka_broker_t *
+rd_kafka_broker_prefer(rd_kafka_t *rk, int32_t broker_id, int state);
+
+rd_kafka_broker_t *rd_kafka_broker_get_async(rd_kafka_t *rk,
+ int32_t broker_id,
+ int state,
+ rd_kafka_enq_once_t *eonce);
+
+rd_list_t *rd_kafka_brokers_get_nodeids_async(rd_kafka_t *rk,
+ rd_kafka_enq_once_t *eonce);
+
+rd_kafka_broker_t *
+rd_kafka_broker_controller(rd_kafka_t *rk, int state, rd_ts_t abs_timeout);
+rd_kafka_broker_t *rd_kafka_broker_controller_async(rd_kafka_t *rk,
+ int state,
+ rd_kafka_enq_once_t *eonce);
+
+int rd_kafka_brokers_add0(rd_kafka_t *rk, const char *brokerlist);
+void rd_kafka_broker_set_state(rd_kafka_broker_t *rkb, int state);
+
+void rd_kafka_broker_fail(rd_kafka_broker_t *rkb,
+ int level,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 4, 5);
+
+void rd_kafka_broker_conn_closed(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ const char *errstr);
+
+void rd_kafka_broker_destroy_final(rd_kafka_broker_t *rkb);
+
+#define rd_kafka_broker_destroy(rkb) \
+ rd_refcnt_destroywrapper(&(rkb)->rkb_refcnt, \
+ rd_kafka_broker_destroy_final(rkb))
+
+
+void rd_kafka_broker_update(rd_kafka_t *rk,
+ rd_kafka_secproto_t proto,
+ const struct rd_kafka_metadata_broker *mdb,
+ rd_kafka_broker_t **rkbp);
+rd_kafka_broker_t *rd_kafka_broker_add(rd_kafka_t *rk,
+ rd_kafka_confsource_t source,
+ rd_kafka_secproto_t proto,
+ const char *name,
+ uint16_t port,
+ int32_t nodeid);
+
+rd_kafka_broker_t *rd_kafka_broker_add_logical(rd_kafka_t *rk,
+ const char *name);
+
+/** @define returns true if broker is logical. No locking is needed. */
+#define RD_KAFKA_BROKER_IS_LOGICAL(rkb) ((rkb)->rkb_source == RD_KAFKA_LOGICAL)
+
+void rd_kafka_broker_set_nodename(rd_kafka_broker_t *rkb,
+ rd_kafka_broker_t *from_rkb);
+
+void rd_kafka_broker_connect_up(rd_kafka_broker_t *rkb);
+void rd_kafka_broker_connect_done(rd_kafka_broker_t *rkb, const char *errstr);
+
+int rd_kafka_send(rd_kafka_broker_t *rkb);
+int rd_kafka_recv(rd_kafka_broker_t *rkb);
+
+void rd_kafka_dr_msgq(rd_kafka_topic_t *rkt,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_resp_err_t err);
+
+void rd_kafka_dr_implicit_ack(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ uint64_t last_msgid);
+
+void rd_kafka_broker_buf_enq1(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+void rd_kafka_broker_buf_enq_replyq(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+void rd_kafka_broker_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);
+
+
+rd_kafka_broker_t *rd_kafka_broker_internal(rd_kafka_t *rk);
+
+void msghdr_print(rd_kafka_t *rk,
+ const char *what,
+ const struct msghdr *msg,
+ int hexdump);
+
+int32_t rd_kafka_broker_id(rd_kafka_broker_t *rkb);
+const char *rd_kafka_broker_name(rd_kafka_broker_t *rkb);
+void rd_kafka_broker_wakeup(rd_kafka_broker_t *rkb, const char *reason);
+int rd_kafka_all_brokers_wakeup(rd_kafka_t *rk,
+ int min_state,
+ const char *reason);
+
+void rd_kafka_connect_any(rd_kafka_t *rk, const char *reason);
+
+void rd_kafka_broker_purge_queues(rd_kafka_broker_t *rkb,
+ int purge_flags,
+ rd_kafka_replyq_t replyq);
+
+int rd_kafka_brokers_get_state_version(rd_kafka_t *rk);
+int rd_kafka_brokers_wait_state_change(rd_kafka_t *rk,
+ int stored_version,
+ int timeout_ms);
+int rd_kafka_brokers_wait_state_change_async(rd_kafka_t *rk,
+ int stored_version,
+ rd_kafka_enq_once_t *eonce);
+void rd_kafka_brokers_broadcast_state_change(rd_kafka_t *rk);
+
+
+
+/**
+ * Updates the current toppar active round-robin next pointer.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_broker_active_toppar_next(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *sugg_next) {
+ if (CIRCLEQ_EMPTY(&rkb->rkb_active_toppars) ||
+ (void *)sugg_next == CIRCLEQ_ENDC(&rkb->rkb_active_toppars))
+ rkb->rkb_active_toppar_next = NULL;
+ else if (sugg_next)
+ rkb->rkb_active_toppar_next = sugg_next;
+ else
+ rkb->rkb_active_toppar_next =
+ CIRCLEQ_FIRST(&rkb->rkb_active_toppars);
+}
+
+
+void rd_kafka_broker_active_toppar_add(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const char *reason);
+
+void rd_kafka_broker_active_toppar_del(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const char *reason);
+
+
+void rd_kafka_broker_schedule_connection(rd_kafka_broker_t *rkb);
+
+void rd_kafka_broker_persistent_connection_add(rd_kafka_broker_t *rkb,
+ rd_atomic32_t *acntp);
+
+void rd_kafka_broker_persistent_connection_del(rd_kafka_broker_t *rkb,
+ rd_atomic32_t *acntp);
+
+
+void rd_kafka_broker_monitor_add(rd_kafka_broker_monitor_t *rkbmon,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_q_t *rkq,
+ void (*callback)(rd_kafka_broker_t *rkb));
+
+void rd_kafka_broker_monitor_del(rd_kafka_broker_monitor_t *rkbmon);
+
+int unittest_broker(void);
+
+#endif /* _RDKAFKA_BROKER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c
new file mode 100644
index 000000000..5a0e131e8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.c
@@ -0,0 +1,530 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_buf.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_interceptor.h"
+
+void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf) {
+
+ switch (rkbuf->rkbuf_reqhdr.ApiKey) {
+ case RD_KAFKAP_Metadata:
+ if (rkbuf->rkbuf_u.Metadata.topics)
+ rd_list_destroy(rkbuf->rkbuf_u.Metadata.topics);
+ if (rkbuf->rkbuf_u.Metadata.reason)
+ rd_free(rkbuf->rkbuf_u.Metadata.reason);
+ if (rkbuf->rkbuf_u.Metadata.rko)
+ rd_kafka_op_reply(rkbuf->rkbuf_u.Metadata.rko,
+ RD_KAFKA_RESP_ERR__DESTROY);
+ if (rkbuf->rkbuf_u.Metadata.decr) {
+ /* Decrease metadata cache's full_.._sent state. */
+ mtx_lock(rkbuf->rkbuf_u.Metadata.decr_lock);
+ rd_kafka_assert(NULL,
+ (*rkbuf->rkbuf_u.Metadata.decr) > 0);
+ (*rkbuf->rkbuf_u.Metadata.decr)--;
+ mtx_unlock(rkbuf->rkbuf_u.Metadata.decr_lock);
+ }
+ break;
+
+ case RD_KAFKAP_Produce:
+ rd_kafka_msgbatch_destroy(&rkbuf->rkbuf_batch);
+ break;
+ }
+
+ if (rkbuf->rkbuf_response)
+ rd_kafka_buf_destroy(rkbuf->rkbuf_response);
+
+ if (rkbuf->rkbuf_make_opaque && rkbuf->rkbuf_free_make_opaque_cb)
+ rkbuf->rkbuf_free_make_opaque_cb(rkbuf->rkbuf_make_opaque);
+
+ rd_kafka_replyq_destroy(&rkbuf->rkbuf_replyq);
+ rd_kafka_replyq_destroy(&rkbuf->rkbuf_orig_replyq);
+
+ rd_buf_destroy(&rkbuf->rkbuf_buf);
+
+ if (rkbuf->rkbuf_rktp_vers)
+ rd_list_destroy(rkbuf->rkbuf_rktp_vers);
+
+ if (rkbuf->rkbuf_rkb)
+ rd_kafka_broker_destroy(rkbuf->rkbuf_rkb);
+
+ rd_refcnt_destroy(&rkbuf->rkbuf_refcnt);
+
+ rd_free(rkbuf);
+}
+
+
+
+/**
+ * @brief Pushes \p buf of size \p len as a new segment on the buffer.
+ *
+ * \p buf will NOT be freed by the buffer.
+ */
+void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf,
+ const void *buf,
+ size_t len,
+ int allow_crc_calc,
+ void (*free_cb)(void *)) {
+ rd_buf_push(&rkbuf->rkbuf_buf, buf, len, free_cb);
+
+ if (allow_crc_calc && (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC))
+ rkbuf->rkbuf_crc = rd_crc32_update(rkbuf->rkbuf_crc, buf, len);
+}
+
+
+
+/**
+ * @brief Create a new buffer with \p segcmt initial segments and \p size bytes
+ * of initial backing memory.
+ * The underlying buffer will grow as needed.
+ *
+ * If \p rk is non-NULL (typical case):
+ * Additional space for the Kafka protocol headers is inserted automatically.
+ */
+rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags) {
+ rd_kafka_buf_t *rkbuf;
+
+ rkbuf = rd_calloc(1, sizeof(*rkbuf));
+
+ rkbuf->rkbuf_flags = flags;
+
+ rd_buf_init(&rkbuf->rkbuf_buf, segcnt, size);
+ rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1);
+
+ return rkbuf;
+}
+
+
+/**
+ * @brief Create new request buffer with the request-header written (will
+ * need to be updated with Length, etc, later)
+ */
+rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
+ int16_t ApiKey,
+ int segcnt,
+ size_t size,
+ rd_bool_t is_flexver) {
+ rd_kafka_buf_t *rkbuf;
+
+ /* Make room for common protocol request headers */
+ size += RD_KAFKAP_REQHDR_SIZE +
+ RD_KAFKAP_STR_SIZE(rkb->rkb_rk->rk_client_id) +
+ /* Flexible version adds a tag list to the headers
+ * and to the end of the payload, both of which we send
+ * as empty (1 byte each). */
+ (is_flexver ? 1 + 1 : 0);
+ segcnt += 1; /* headers */
+
+ rkbuf = rd_kafka_buf_new0(segcnt, size, 0);
+
+ rkbuf->rkbuf_rkb = rkb;
+ rd_kafka_broker_keep(rkb);
+
+ rkbuf->rkbuf_rel_timeout = rkb->rkb_rk->rk_conf.socket_timeout_ms;
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_DEFAULT_RETRIES;
+
+ rkbuf->rkbuf_reqhdr.ApiKey = ApiKey;
+
+ /* Write request header, will be updated later. */
+ /* Length: updated later */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+ /* ApiKey */
+ rd_kafka_buf_write_i16(rkbuf, rkbuf->rkbuf_reqhdr.ApiKey);
+ /* ApiVersion: updated later */
+ rd_kafka_buf_write_i16(rkbuf, 0);
+ /* CorrId: updated later */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* ClientId */
+ rd_kafka_buf_write_kstr(rkbuf, rkb->rkb_rk->rk_client_id);
+
+ if (is_flexver) {
+ /* Must set flexver after writing the client id since
+ * it is still a standard non-compact string. */
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;
+
+ /* Empty request header tags */
+ rd_kafka_buf_write_i8(rkbuf, 0);
+ }
+
+ return rkbuf;
+}
+
+
+
+/**
+ * @brief Create new read-only rkbuf shadowing a memory region.
+ *
+ * @remark \p free_cb (possibly NULL) will be used to free \p ptr when
+ * buffer refcount reaches 0.
+ * @remark the buffer may only be read from, not written to.
+ *
+ * @warning If the caller has log_decode_errors > 0 then it must set up
+ * \c rkbuf->rkbuf_rkb to a refcnt-increased broker object.
+ */
+rd_kafka_buf_t *
+rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *)) {
+ rd_kafka_buf_t *rkbuf;
+
+ rkbuf = rd_calloc(1, sizeof(*rkbuf));
+
+ rkbuf->rkbuf_reqhdr.ApiKey = RD_KAFKAP_None;
+
+ rd_buf_init(&rkbuf->rkbuf_buf, 1, 0);
+ rd_buf_push(&rkbuf->rkbuf_buf, ptr, size, free_cb);
+
+ rkbuf->rkbuf_totlen = size;
+
+ /* Initialize reader slice */
+ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
+
+ rd_refcnt_init(&rkbuf->rkbuf_refcnt, 1);
+
+ return rkbuf;
+}
+
+
+
+void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
+ TAILQ_INSERT_TAIL(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
+ rd_atomic32_add(&rkbufq->rkbq_cnt, 1);
+ if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
+ rd_atomic32_add(&rkbufq->rkbq_msg_cnt,
+ rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
+}
+
+void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf) {
+ TAILQ_REMOVE(&rkbufq->rkbq_bufs, rkbuf, rkbuf_link);
+ rd_kafka_assert(NULL, rd_atomic32_get(&rkbufq->rkbq_cnt) > 0);
+ rd_atomic32_sub(&rkbufq->rkbq_cnt, 1);
+ if (rkbuf->rkbuf_reqhdr.ApiKey == RD_KAFKAP_Produce)
+ rd_atomic32_sub(&rkbufq->rkbq_msg_cnt,
+ rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq));
+}
+
+void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq) {
+ TAILQ_INIT(&rkbufq->rkbq_bufs);
+ rd_atomic32_init(&rkbufq->rkbq_cnt, 0);
+ rd_atomic32_init(&rkbufq->rkbq_msg_cnt, 0);
+}
+
+/**
+ * Concat all buffers from 'src' to tail of 'dst'
+ */
+void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src) {
+ TAILQ_CONCAT(&dst->rkbq_bufs, &src->rkbq_bufs, rkbuf_link);
+ (void)rd_atomic32_add(&dst->rkbq_cnt, rd_atomic32_get(&src->rkbq_cnt));
+ (void)rd_atomic32_add(&dst->rkbq_msg_cnt,
+ rd_atomic32_get(&src->rkbq_msg_cnt));
+ rd_kafka_bufq_init(src);
+}
+
+/**
+ * Purge the wait-response queue.
+ * NOTE: 'rkbufq' must be a temporary queue and not one of rkb_waitresps
+ * or rkb_outbufs since buffers may be re-enqueued on those queues.
+ * 'rkbufq' needs to be bufq_init():ed before reuse after this call.
+ */
+void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb,
+ rd_kafka_bufq_t *rkbufq,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_buf_t *rkbuf, *tmp;
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ rd_rkb_dbg(rkb, QUEUE, "BUFQ", "Purging bufq with %i buffers",
+ rd_atomic32_get(&rkbufq->rkbq_cnt));
+
+ TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, err, NULL, rkbuf);
+ }
+}
+
+
+/**
+ * @brief Update bufq for connection reset:
+ *
+ * - Purge connection-setup API requests from the queue.
+ * - Reset any partially sent buffer's offset. (issue #756)
+ *
+ * Request types purged:
+ * ApiVersion
+ * SaslHandshake
+ */
+void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb,
+ rd_kafka_bufq_t *rkbufq) {
+ rd_kafka_buf_t *rkbuf, *tmp;
+ rd_ts_t now = rd_clock();
+
+ rd_kafka_assert(rkb->rkb_rk, thrd_is_current(rkb->rkb_thread));
+
+ rd_rkb_dbg(rkb, QUEUE, "BUFQ",
+ "Updating %d buffers on connection reset",
+ rd_atomic32_get(&rkbufq->rkbq_cnt));
+
+ TAILQ_FOREACH_SAFE(rkbuf, &rkbufq->rkbq_bufs, rkbuf_link, tmp) {
+ switch (rkbuf->rkbuf_reqhdr.ApiKey) {
+ case RD_KAFKAP_ApiVersion:
+ case RD_KAFKAP_SaslHandshake:
+ rd_kafka_bufq_deq(rkbufq, rkbuf);
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb,
+ RD_KAFKA_RESP_ERR__DESTROY, NULL,
+ rkbuf);
+ break;
+ default:
+ /* Reset buffer send position and corrid */
+ rd_slice_seek(&rkbuf->rkbuf_reader, 0);
+ rkbuf->rkbuf_corrid = 0;
+ /* Reset timeout */
+ rd_kafka_buf_calc_timeout(rkb->rkb_rk, rkbuf, now);
+ break;
+ }
+ }
+}
+
+
+void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb,
+ const char *fac,
+ rd_kafka_bufq_t *rkbq) {
+ rd_kafka_buf_t *rkbuf;
+ int cnt = rd_kafka_bufq_cnt(rkbq);
+ rd_ts_t now;
+
+ if (!cnt)
+ return;
+
+ now = rd_clock();
+
+ rd_rkb_dbg(rkb, BROKER, fac, "bufq with %d buffer(s):", cnt);
+
+ TAILQ_FOREACH(rkbuf, &rkbq->rkbq_bufs, rkbuf_link) {
+ rd_rkb_dbg(rkb, BROKER, fac,
+ " Buffer %s (%" PRIusz " bytes, corrid %" PRId32
+ ", "
+ "connid %d, prio %d, retry %d in %lldms, "
+ "timeout in %lldms)",
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_totlen, rkbuf->rkbuf_corrid,
+ rkbuf->rkbuf_connid, rkbuf->rkbuf_prio,
+ rkbuf->rkbuf_retries,
+ rkbuf->rkbuf_ts_retry
+ ? (rkbuf->rkbuf_ts_retry - now) / 1000LL
+ : 0,
+ rkbuf->rkbuf_ts_timeout
+ ? (rkbuf->rkbuf_ts_timeout - now) / 1000LL
+ : 0);
+ }
+}
+
+
+
+/**
+ * @brief Calculate the effective timeout for a request attempt
+ */
+void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk,
+ rd_kafka_buf_t *rkbuf,
+ rd_ts_t now) {
+ if (likely(rkbuf->rkbuf_rel_timeout)) {
+ /* Default:
+ * Relative timeout, set request timeout to
+ * to now + rel timeout. */
+ rkbuf->rkbuf_ts_timeout = now + rkbuf->rkbuf_rel_timeout * 1000;
+ } else if (!rkbuf->rkbuf_force_timeout) {
+ /* Use absolute timeout, limited by socket.timeout.ms */
+ rd_ts_t sock_timeout =
+ now + rk->rk_conf.socket_timeout_ms * 1000;
+
+ rkbuf->rkbuf_ts_timeout =
+ RD_MIN(sock_timeout, rkbuf->rkbuf_abs_timeout);
+ } else {
+ /* Use absolue timeout without limit. */
+ rkbuf->rkbuf_ts_timeout = rkbuf->rkbuf_abs_timeout;
+ }
+}
+
+/**
+ * Retry failed request, if permitted.
+ * @remark \p rkb may be NULL
+ * @remark the retry count is only increased for actually transmitted buffers,
+ * if there is a failure while the buffers lingers in the output queue
+ * (rkb_outbufs) then the retry counter is not increased.
+ * Returns 1 if the request was scheduled for retry, else 0.
+ */
+int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf) {
+ int incr_retry = rd_kafka_buf_was_sent(rkbuf) ? 1 : 0;
+
+ /* Don't allow retries of dummy/empty buffers */
+ rd_assert(rd_buf_len(&rkbuf->rkbuf_buf) > 0);
+
+ if (unlikely(!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL ||
+ rd_kafka_terminating(rkb->rkb_rk) ||
+ rkbuf->rkbuf_retries + incr_retry >
+ rkbuf->rkbuf_max_retries))
+ return 0;
+
+ /* Absolute timeout, check for expiry. */
+ if (rkbuf->rkbuf_abs_timeout && rkbuf->rkbuf_abs_timeout < rd_clock())
+ return 0; /* Expired */
+
+ /* Try again */
+ rkbuf->rkbuf_ts_sent = 0;
+ rkbuf->rkbuf_ts_timeout = 0; /* Will be updated in calc_timeout() */
+ rkbuf->rkbuf_retries += incr_retry;
+ rd_kafka_buf_keep(rkbuf);
+ rd_kafka_broker_buf_retry(rkb, rkbuf);
+ return 1;
+}
+
+
+/**
+ * @brief Handle RD_KAFKA_OP_RECV_BUF.
+ */
+void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
+ rd_kafka_buf_t *request, *response;
+ rd_kafka_t *rk;
+
+ request = rko->rko_u.xbuf.rkbuf;
+ rko->rko_u.xbuf.rkbuf = NULL;
+
+ /* NULL on op_destroy() */
+ if (request->rkbuf_replyq.q) {
+ int32_t version = request->rkbuf_replyq.version;
+ /* Current queue usage is done, but retain original replyq for
+ * future retries, stealing
+ * the current reference. */
+ request->rkbuf_orig_replyq = request->rkbuf_replyq;
+ rd_kafka_replyq_clear(&request->rkbuf_replyq);
+ /* Callback might need to version check so we retain the
+ * version across the clear() call which clears it. */
+ request->rkbuf_replyq.version = version;
+ }
+
+ if (!request->rkbuf_cb) {
+ rd_kafka_buf_destroy(request);
+ return;
+ }
+
+ /* Let buf_callback() do destroy()s */
+ response = request->rkbuf_response; /* May be NULL */
+ request->rkbuf_response = NULL;
+
+ if (!(rk = rko->rko_rk)) {
+ rd_assert(request->rkbuf_rkb != NULL);
+ rk = request->rkbuf_rkb->rkb_rk;
+ }
+
+ rd_kafka_buf_callback(rk, request->rkbuf_rkb, err, response, request);
+}
+
+
+
+/**
+ * Call request.rkbuf_cb(), but:
+ * - if the rkbuf has a rkbuf_replyq the buffer is enqueued on that queue
+ * with op type RD_KAFKA_OP_RECV_BUF.
+ * - else call rkbuf_cb().
+ *
+ * \p response may be NULL.
+ *
+ * Will decrease refcount for both response and request, eventually.
+ *
+ * The decision to retry, and the call to buf_retry(), is delegated
+ * to the buffer's response callback.
+ */
+void rd_kafka_buf_callback(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *response,
+ rd_kafka_buf_t *request) {
+
+ rd_kafka_interceptors_on_response_received(
+ rk, -1, rkb ? rd_kafka_broker_name(rkb) : "",
+ rkb ? rd_kafka_broker_id(rkb) : -1, request->rkbuf_reqhdr.ApiKey,
+ request->rkbuf_reqhdr.ApiVersion, request->rkbuf_reshdr.CorrId,
+ response ? response->rkbuf_totlen : 0,
+ response ? response->rkbuf_ts_sent : -1, err);
+
+ if (err != RD_KAFKA_RESP_ERR__DESTROY && request->rkbuf_replyq.q) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);
+
+ rd_kafka_assert(NULL, !request->rkbuf_response);
+ request->rkbuf_response = response;
+
+ /* Increment refcnt since rko_rkbuf will be decref:ed
+ * if replyq_enq() fails and we dont want the rkbuf gone in that
+ * case. */
+ rd_kafka_buf_keep(request);
+ rko->rko_u.xbuf.rkbuf = request;
+
+ rko->rko_err = err;
+
+ /* Copy original replyq for future retries, with its own
+ * queue reference. */
+ rd_kafka_replyq_copy(&request->rkbuf_orig_replyq,
+ &request->rkbuf_replyq);
+
+ rd_kafka_replyq_enq(&request->rkbuf_replyq, rko, 0);
+
+ rd_kafka_buf_destroy(request); /* from keep above */
+ return;
+ }
+
+ if (request->rkbuf_cb)
+ request->rkbuf_cb(rk, rkb, err, response, request,
+ request->rkbuf_opaque);
+
+ rd_kafka_buf_destroy(request);
+ if (response)
+ rd_kafka_buf_destroy(response);
+}
+
+
+
+/**
+ * @brief Set the maker callback, which will be called just prior to sending
+ * to construct the buffer contents.
+ *
+ * Use this when the usable ApiVersion must be known but the broker may
+ * currently be down.
+ *
+ * See rd_kafka_make_req_cb_t documentation for more info.
+ */
+void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf,
+ rd_kafka_make_req_cb_t *make_cb,
+ void *make_opaque,
+ void (*free_make_opaque_cb)(void *make_opaque)) {
+ rd_assert(!rkbuf->rkbuf_make_req_cb &&
+ !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_NEED_MAKE));
+
+ rkbuf->rkbuf_make_req_cb = make_cb;
+ rkbuf->rkbuf_make_opaque = make_opaque;
+ rkbuf->rkbuf_free_make_opaque_cb = free_make_opaque_cb;
+
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NEED_MAKE;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h
new file mode 100644
index 000000000..b4f606317
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_buf.h
@@ -0,0 +1,1407 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_BUF_H_
+#define _RDKAFKA_BUF_H_
+
+#include "rdkafka_int.h"
+#include "rdcrc32.h"
+#include "rdlist.h"
+#include "rdbuf.h"
+#include "rdkafka_msgbatch.h"
+
+typedef struct rd_kafka_broker_s rd_kafka_broker_t;
+
+#define RD_KAFKA_HEADERS_IOV_CNT 2
+
+
+/**
+ * Temporary buffer with memory aligned writes to accommodate
+ * effective and platform safe struct writes.
+ */
+typedef struct rd_tmpabuf_s {
+ size_t size;
+ size_t of;
+ char *buf;
+ int failed;
+ int assert_on_fail;
+} rd_tmpabuf_t;
+
+/**
+ * @brief Allocate new tmpabuf with \p size bytes pre-allocated.
+ */
+static RD_UNUSED void
+rd_tmpabuf_new(rd_tmpabuf_t *tab, size_t size, int assert_on_fail) {
+ tab->buf = rd_malloc(size);
+ tab->size = size;
+ tab->of = 0;
+ tab->failed = 0;
+ tab->assert_on_fail = assert_on_fail;
+}
+
+/**
+ * @brief Free memory allocated by tmpabuf
+ */
+static RD_UNUSED void rd_tmpabuf_destroy(rd_tmpabuf_t *tab) {
+ rd_free(tab->buf);
+}
+
+/**
+ * @returns 1 if a previous operation failed.
+ */
+static RD_UNUSED RD_INLINE int rd_tmpabuf_failed(rd_tmpabuf_t *tab) {
+ return tab->failed;
+}
+
+/**
+ * @brief Allocate \p size bytes for writing, returning an aligned pointer
+ * to the memory.
+ * @returns the allocated pointer (within the tmpabuf) on success or
+ * NULL if the requested number of bytes + alignment is not available
+ * in the tmpabuf.
+ */
+static RD_UNUSED void *
+rd_tmpabuf_alloc0(const char *func, int line, rd_tmpabuf_t *tab, size_t size) {
+ void *ptr;
+
+ if (unlikely(tab->failed))
+ return NULL;
+
+ if (unlikely(tab->of + size > tab->size)) {
+ if (tab->assert_on_fail) {
+ fprintf(stderr,
+ "%s: %s:%d: requested size %" PRIusz
+ " + %" PRIusz " > %" PRIusz "\n",
+ __FUNCTION__, func, line, tab->of, size,
+ tab->size);
+ assert(!*"rd_tmpabuf_alloc: not enough size in buffer");
+ }
+ return NULL;
+ }
+
+ ptr = (void *)(tab->buf + tab->of);
+ tab->of += RD_ROUNDUP(size, 8);
+
+ return ptr;
+}
+
+#define rd_tmpabuf_alloc(tab, size) \
+ rd_tmpabuf_alloc0(__FUNCTION__, __LINE__, tab, size)
+
+/**
+ * @brief Write \p buf of \p size bytes to tmpabuf memory in an aligned fashion.
+ *
+ * @returns the allocated and written-to pointer (within the tmpabuf) on success
+ * or NULL if the requested number of bytes + alignment is not
+ * available in the tmpabuf.
+ */
+static RD_UNUSED void *rd_tmpabuf_write0(const char *func,
+ int line,
+ rd_tmpabuf_t *tab,
+ const void *buf,
+ size_t size) {
+ void *ptr = rd_tmpabuf_alloc0(func, line, tab, size);
+
+ if (likely(ptr && size))
+ memcpy(ptr, buf, size);
+
+ return ptr;
+}
+#define rd_tmpabuf_write(tab, buf, size) \
+ rd_tmpabuf_write0(__FUNCTION__, __LINE__, tab, buf, size)
+
+
+/**
+ * @brief Wrapper for rd_tmpabuf_write() that takes a nul-terminated string.
+ */
+static RD_UNUSED char *rd_tmpabuf_write_str0(const char *func,
+ int line,
+ rd_tmpabuf_t *tab,
+ const char *str) {
+ return rd_tmpabuf_write0(func, line, tab, str, strlen(str) + 1);
+}
+#define rd_tmpabuf_write_str(tab, str) \
+ rd_tmpabuf_write_str0(__FUNCTION__, __LINE__, tab, str)
+
+
+
+/**
+ * Response handling callback.
+ *
+ * NOTE: Callbacks must check for 'err == RD_KAFKA_RESP_ERR__DESTROY'
+ * which indicates that some entity is terminating (rd_kafka_t, broker,
+ * toppar, queue, etc) and the callback may not be called in the
+ * correct thread. In this case the callback must perform just
+ * the most minimal cleanup and dont trigger any other operations.
+ *
+ * NOTE: rkb, reply and request may be NULL, depending on error situation.
+ */
+typedef void(rd_kafka_resp_cb_t)(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
+
+/**
+ * @brief Sender callback. This callback is used to construct and send (enq)
+ * a rkbuf on a particular broker.
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_send_req_cb_t)(rd_kafka_broker_t *rkb,
+ rd_kafka_op_t *rko,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *reply_opaque);
+
+
+/**
+ * @brief Request maker. A callback that constructs the actual contents
+ * of a request.
+ *
+ * When constructing a request the ApiVersion typically needs to be selected
+ * which requires the broker's supported ApiVersions to be known, which in
+ * turn requires the broker connection to be UP.
+ *
+ * As a buffer constructor you have two choices:
+ * a. acquire the broker handle, wait for it to come up, and then construct
+ * the request buffer, or
+ * b. acquire the broker handle, enqueue an uncrafted/unmaked
+ * request on the broker request queue, and when the broker is up
+ * the make_req_cb will be called for you to construct the request.
+ *
+ * From a code complexity standpoint, the latter option is usually the least
+ * complex and voids the caller to care about any of the broker state.
+ * Any information that is required to construct the request is passed through
+ * the make_opaque, which can be automatically freed by the buffer code
+ * when it has been used, or handled by the caller (in which case it must
+ * outlive the lifetime of the buffer).
+ *
+ * Usage:
+ *
+ * 1. Construct an rkbuf with the appropriate ApiKey.
+ * 2. Make a copy or reference of any data that is needed to construct the
+ * request, e.g., through rd_kafka_topic_partition_list_copy(). This
+ * data is passed by the make_opaque.
+ * 3. Set the make callback by calling rd_kafka_buf_set_maker() and pass
+ * the make_opaque data and a free function, if needed.
+ * 4. The callback will eventually be called from the broker thread.
+ * 5. In the make callback construct the request on the passed rkbuf.
+ * 6. The request is sent to the broker and the make_opaque is freed.
+ *
+ * See rd_kafka_ListOffsetsRequest() in rdkafka_request.c for an example.
+ *
+ */
+typedef rd_kafka_resp_err_t(rd_kafka_make_req_cb_t)(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ void *make_opaque);
+
+/**
+ * @struct Request and response buffer
+ *
+ */
+struct rd_kafka_buf_s { /* rd_kafka_buf_t */
+ TAILQ_ENTRY(rd_kafka_buf_s) rkbuf_link;
+
+ int32_t rkbuf_corrid;
+
+ rd_ts_t rkbuf_ts_retry; /* Absolute send retry time */
+
+ int rkbuf_flags; /* RD_KAFKA_OP_F */
+
+ /** What convenience flags to copy from request to response along
+ * with the reqhdr. */
+#define RD_KAFKA_BUF_FLAGS_RESP_COPY_MASK (RD_KAFKA_OP_F_FLEXVER)
+
+ rd_kafka_prio_t rkbuf_prio; /**< Request priority */
+
+ rd_buf_t rkbuf_buf; /**< Send/Recv byte buffer */
+ rd_slice_t rkbuf_reader; /**< Buffer slice reader for rkbuf_buf */
+
+ int rkbuf_connid; /* broker connection id (used when buffer
+ * was partially sent). */
+ size_t rkbuf_totlen; /* recv: total expected length,
+ * send: not used */
+
+ rd_crc32_t rkbuf_crc; /* Current CRC calculation */
+
+ struct rd_kafkap_reqhdr rkbuf_reqhdr; /* Request header.
+ * These fields are encoded
+ * and written to output buffer
+ * on buffer finalization.
+ * Note:
+ * The request's
+ * reqhdr is copied to the
+ * response's reqhdr as a
+ * convenience. */
+ struct rd_kafkap_reshdr rkbuf_reshdr; /* Response header.
+ * Decoded fields are copied
+ * here from the buffer
+ * to provide an ease-of-use
+ * interface to the header */
+
+ int32_t rkbuf_expected_size; /* expected size of message */
+
+ rd_kafka_replyq_t rkbuf_replyq; /* Enqueue response on replyq */
+ rd_kafka_replyq_t rkbuf_orig_replyq; /* Original replyq to be used
+ * for retries from inside
+ * the rkbuf_cb() callback
+ * since rkbuf_replyq will
+ * have been reset. */
+ rd_kafka_resp_cb_t *rkbuf_cb; /* Response callback */
+ struct rd_kafka_buf_s *rkbuf_response; /* Response buffer */
+
+ rd_kafka_make_req_cb_t *rkbuf_make_req_cb; /**< Callback to construct
+ * the request itself.
+ * Will be used if
+ * RD_KAFKA_OP_F_NEED_MAKE
+ * is set. */
+ void *rkbuf_make_opaque; /**< Opaque passed to rkbuf_make_req_cb.
+ * Will be freed automatically after use
+ * by the rkbuf code. */
+ void (*rkbuf_free_make_opaque_cb)(void *); /**< Free function for
+ * rkbuf_make_opaque. */
+
+ struct rd_kafka_broker_s *rkbuf_rkb; /**< Optional broker object
+ * with refcnt increased used
+ * for logging decode errors
+ * if log_decode_errors is > 0 */
+
+ rd_refcnt_t rkbuf_refcnt;
+ void *rkbuf_opaque;
+
+ int rkbuf_max_retries; /**< Maximum retries to attempt. */
+ int rkbuf_retries; /**< Retries so far. */
+
+
+ int rkbuf_features; /* Required feature(s) that must be
+ * supported by broker. */
+
+ rd_ts_t rkbuf_ts_enq;
+ rd_ts_t rkbuf_ts_sent; /* Initially: Absolute time of transmission,
+ * after response: RTT. */
+
+ /* Request timeouts:
+ * rkbuf_ts_timeout is the effective absolute request timeout used
+ * by the timeout scanner to see if a request has timed out.
+ * It is set when a request is enqueued on the broker transmit
+ * queue based on the relative or absolute timeout:
+ *
+ * rkbuf_rel_timeout is the per-request-transmit relative timeout,
+ * this value is reused for each sub-sequent retry of a request.
+ *
+ * rkbuf_abs_timeout is the absolute request timeout, spanning
+ * all retries.
+ * This value is effectively limited by socket.timeout.ms for
+ * each transmission, but the absolute timeout for a request's
+ * lifetime is the absolute value.
+ *
+ * Use rd_kafka_buf_set_timeout() to set a relative timeout
+ * that will be reused on retry,
+ * or rd_kafka_buf_set_abs_timeout() to set a fixed absolute timeout
+ * for the case where the caller knows the request will be
+ * semantically outdated when that absolute time expires, such as for
+ * session.timeout.ms-based requests.
+ *
+ * The decision to retry a request is delegated to the rkbuf_cb
+ * response callback, which should use rd_kafka_err_action()
+ * and check the return actions for RD_KAFKA_ERR_ACTION_RETRY to be set
+ * and then call rd_kafka_buf_retry().
+ * rd_kafka_buf_retry() will enqueue the request on the rkb_retrybufs
+ * queue with a backoff time of retry.backoff.ms.
+ * The rkb_retrybufs queue is served by the broker thread's timeout
+ * scanner.
+ * @warning rkb_retrybufs is NOT purged on broker down.
+ */
+ rd_ts_t rkbuf_ts_timeout; /* Request timeout (absolute time). */
+ rd_ts_t
+ rkbuf_abs_timeout; /* Absolute timeout for request, including
+ * retries.
+ * Mutually exclusive with rkbuf_rel_timeout*/
+ int rkbuf_rel_timeout; /* Relative timeout (ms), used for retries.
+ * Defaults to socket.timeout.ms.
+ * Mutually exclusive with rkbuf_abs_timeout*/
+ rd_bool_t rkbuf_force_timeout; /**< Force request timeout to be
+ * remaining abs_timeout regardless
+ * of socket.timeout.ms. */
+
+
+ int64_t rkbuf_offset; /* Used by OffsetCommit */
+
+ rd_list_t *rkbuf_rktp_vers; /* Toppar + Op Version map.
+ * Used by FetchRequest. */
+
+ rd_kafka_resp_err_t rkbuf_err; /* Buffer parsing error code */
+
+ union {
+ struct {
+ rd_list_t *topics; /* Requested topics (char *) */
+ char *reason; /* Textual reason */
+ rd_kafka_op_t *rko; /* Originating rko with replyq
+ * (if any) */
+ rd_bool_t all_topics; /**< Full/All topics requested */
+ rd_bool_t cgrp_update; /**< Update cgrp with topic
+ * status from response. */
+
+ int *decr; /* Decrement this integer by one
+ * when request is complete:
+ * typically points to metadata
+ * cache's full_.._sent.
+ * Will be performed with
+ * decr_lock held. */
+ mtx_t *decr_lock;
+
+ } Metadata;
+ struct {
+ rd_kafka_msgbatch_t batch; /**< MessageSet/batch */
+ } Produce;
+ struct {
+ rd_bool_t commit; /**< true = txn commit,
+ * false = txn abort */
+ } EndTxn;
+ } rkbuf_u;
+
+#define rkbuf_batch rkbuf_u.Produce.batch
+
+ const char *rkbuf_uflow_mitigation; /**< Buffer read underflow
+ * human readable mitigation
+ * string (const memory).
+ * This is used to hint the
+ * user why the underflow
+ * might have occurred, which
+ * depends on request type. */
+};
+
+
+
+/**
+ * @name Read buffer interface
+ *
+ * Memory reading helper macros to be used when parsing network responses.
+ *
+ * Assumptions:
+ * - an 'err_parse:' goto-label must be available for error bailouts,
+ * the error code will be set in rkbuf->rkbuf_err
+ * - local `int log_decode_errors` variable set to the logging level
+ * to log parse errors (or 0 to turn off logging).
+ */
+
+#define rd_kafka_buf_parse_fail(rkbuf, ...) \
+ do { \
+ if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \
+ rd_rkb_log( \
+ rkbuf->rkbuf_rkb, log_decode_errors, "PROTOERR", \
+ "Protocol parse failure for %s v%hd%s " \
+ "at %" PRIusz "/%" PRIusz \
+ " (%s:%i) " \
+ "(incorrect broker.version.fallback?)", \
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \
+ rkbuf->rkbuf_reqhdr.ApiVersion, \
+ (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER \
+ ? "(flex)" \
+ : ""), \
+ rd_slice_offset(&rkbuf->rkbuf_reader), \
+ rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \
+ __LINE__); \
+ rd_rkb_log(rkbuf->rkbuf_rkb, log_decode_errors, \
+ "PROTOERR", __VA_ARGS__); \
+ } \
+ (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__BAD_MSG; \
+ goto err_parse; \
+ } while (0)
+
+/**
+ * @name Fail buffer reading due to buffer underflow.
+ */
+#define rd_kafka_buf_underflow_fail(rkbuf, wantedlen, ...) \
+ do { \
+ if (log_decode_errors > 0 && rkbuf->rkbuf_rkb) { \
+ char __tmpstr[256]; \
+ rd_snprintf(__tmpstr, sizeof(__tmpstr), \
+ ": " __VA_ARGS__); \
+ if (strlen(__tmpstr) == 2) \
+ __tmpstr[0] = '\0'; \
+ rd_rkb_log( \
+ rkbuf->rkbuf_rkb, log_decode_errors, "PROTOUFLOW", \
+ "Protocol read buffer underflow " \
+ "for %s v%hd " \
+ "at %" PRIusz "/%" PRIusz \
+ " (%s:%i): " \
+ "expected %" PRIusz \
+ " bytes > " \
+ "%" PRIusz " remaining bytes (%s)%s", \
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey), \
+ rkbuf->rkbuf_reqhdr.ApiVersion, \
+ rd_slice_offset(&rkbuf->rkbuf_reader), \
+ rd_slice_size(&rkbuf->rkbuf_reader), __FUNCTION__, \
+ __LINE__, wantedlen, \
+ rd_slice_remains(&rkbuf->rkbuf_reader), \
+ rkbuf->rkbuf_uflow_mitigation \
+ ? rkbuf->rkbuf_uflow_mitigation \
+ : "incorrect broker.version.fallback?", \
+ __tmpstr); \
+ } \
+ (rkbuf)->rkbuf_err = RD_KAFKA_RESP_ERR__UNDERFLOW; \
+ goto err_parse; \
+ } while (0)
+
+
+/**
+ * Returns the number of remaining bytes available to read.
+ */
+#define rd_kafka_buf_read_remain(rkbuf) rd_slice_remains(&(rkbuf)->rkbuf_reader)
+
+/**
+ * Checks that at least 'len' bytes remain to be read in buffer, else fails.
+ */
+#define rd_kafka_buf_check_len(rkbuf, len) \
+ do { \
+ size_t __len0 = (size_t)(len); \
+ if (unlikely(__len0 > rd_kafka_buf_read_remain(rkbuf))) { \
+ rd_kafka_buf_underflow_fail(rkbuf, __len0); \
+ } \
+ } while (0)
+
+/**
+ * Skip (as in read and ignore) the next 'len' bytes.
+ */
+#define rd_kafka_buf_skip(rkbuf, len) \
+ do { \
+ size_t __len1 = (size_t)(len); \
+ if (__len1 && \
+ !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \
+ rd_kafka_buf_check_len(rkbuf, __len1); \
+ } while (0)
+
+/**
+ * Skip (as in read and ignore) up to fixed position \p pos.
+ */
+#define rd_kafka_buf_skip_to(rkbuf, pos) \
+ do { \
+ size_t __len1 = \
+ (size_t)(pos)-rd_slice_offset(&(rkbuf)->rkbuf_reader); \
+ if (__len1 && \
+ !rd_slice_read(&(rkbuf)->rkbuf_reader, NULL, __len1)) \
+ rd_kafka_buf_check_len(rkbuf, __len1); \
+ } while (0)
+
+
+
+/**
+ * Read 'len' bytes and copy to 'dstptr'
+ */
+#define rd_kafka_buf_read(rkbuf, dstptr, len) \
+ do { \
+ size_t __len2 = (size_t)(len); \
+ if (!rd_slice_read(&(rkbuf)->rkbuf_reader, dstptr, __len2)) \
+ rd_kafka_buf_check_len(rkbuf, __len2); \
+ } while (0)
+
+
+/**
+ * @brief Read \p len bytes at slice offset \p offset and copy to \p dstptr
+ * without affecting the current reader position.
+ */
+#define rd_kafka_buf_peek(rkbuf, offset, dstptr, len) \
+ do { \
+ size_t __len2 = (size_t)(len); \
+ if (!rd_slice_peek(&(rkbuf)->rkbuf_reader, offset, dstptr, \
+ __len2)) \
+ rd_kafka_buf_check_len(rkbuf, (offset) + (__len2)); \
+ } while (0)
+
+
+/**
+ * Read a 16,32,64-bit integer and store it in 'dstptr'
+ */
+#define rd_kafka_buf_read_i64(rkbuf, dstptr) \
+ do { \
+ int64_t _v; \
+ int64_t *_vp = dstptr; \
+ rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
+ *_vp = be64toh(_v); \
+ } while (0)
+
+#define rd_kafka_buf_peek_i64(rkbuf, of, dstptr) \
+ do { \
+ int64_t _v; \
+ int64_t *_vp = dstptr; \
+ rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
+ *_vp = be64toh(_v); \
+ } while (0)
+
+#define rd_kafka_buf_read_i32(rkbuf, dstptr) \
+ do { \
+ int32_t _v; \
+ int32_t *_vp = dstptr; \
+ rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
+ *_vp = be32toh(_v); \
+ } while (0)
+
+#define rd_kafka_buf_peek_i32(rkbuf, of, dstptr) \
+ do { \
+ int32_t _v; \
+ int32_t *_vp = dstptr; \
+ rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
+ *_vp = be32toh(_v); \
+ } while (0)
+
+
+/* Same as .._read_i32 but does a direct assignment.
+ * dst is assumed to be a scalar, not pointer. */
+#define rd_kafka_buf_read_i32a(rkbuf, dst) \
+ do { \
+ int32_t _v; \
+ rd_kafka_buf_read(rkbuf, &_v, 4); \
+ dst = (int32_t)be32toh(_v); \
+ } while (0)
+
+#define rd_kafka_buf_read_i16(rkbuf, dstptr) \
+ do { \
+ int16_t _v; \
+ int16_t *_vp = dstptr; \
+ rd_kafka_buf_read(rkbuf, &_v, sizeof(_v)); \
+ *_vp = (int16_t)be16toh(_v); \
+ } while (0)
+
+#define rd_kafka_buf_peek_i16(rkbuf, of, dstptr) \
+ do { \
+ int16_t _v; \
+ int16_t *_vp = dstptr; \
+ rd_kafka_buf_peek(rkbuf, of, &_v, sizeof(_v)); \
+ *_vp = be16toh(_v); \
+ } while (0)
+
+#define rd_kafka_buf_read_i16a(rkbuf, dst) \
+ do { \
+ int16_t _v; \
+ rd_kafka_buf_read(rkbuf, &_v, 2); \
+ dst = (int16_t)be16toh(_v); \
+ } while (0)
+
+#define rd_kafka_buf_read_i8(rkbuf, dst) rd_kafka_buf_read(rkbuf, dst, 1)
+
+#define rd_kafka_buf_peek_i8(rkbuf, of, dst) \
+ rd_kafka_buf_peek(rkbuf, of, dst, 1)
+
+#define rd_kafka_buf_read_bool(rkbuf, dstptr) \
+ do { \
+ int8_t _v; \
+ rd_bool_t *_dst = dstptr; \
+ rd_kafka_buf_read(rkbuf, &_v, 1); \
+ *_dst = (rd_bool_t)_v; \
+ } while (0)
+
+
+/**
+ * @brief Read varint and store in int64_t \p dst
+ */
+#define rd_kafka_buf_read_varint(rkbuf, dstptr) \
+ do { \
+ int64_t _v; \
+ int64_t *_vp = dstptr; \
+ size_t _r = rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_v); \
+ if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
+ rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
+ "varint parsing failed"); \
+ *_vp = _v; \
+ } while (0)
+
+
+/**
+ * @brief Read unsigned varint and store in uint64_t \p dst
+ */
+#define rd_kafka_buf_read_uvarint(rkbuf, dstptr) \
+ do { \
+ uint64_t _v; \
+ uint64_t *_vp = dstptr; \
+ size_t _r = \
+ rd_slice_read_uvarint(&(rkbuf)->rkbuf_reader, &_v); \
+ if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
+ rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
+ "uvarint parsing failed"); \
+ *_vp = _v; \
+ } while (0)
+
+
+/**
+ * @brief Read Kafka COMPACT_STRING (VARINT+N) or
+ * standard String representation (2+N).
+ *
+ * The kstr data will be updated to point to the rkbuf. */
+#define rd_kafka_buf_read_str(rkbuf, kstr) \
+ do { \
+ int _klen; \
+ if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \
+ uint64_t _uva; \
+ rd_kafka_buf_read_uvarint(rkbuf, &_uva); \
+ (kstr)->len = ((int32_t)_uva) - 1; \
+ _klen = (kstr)->len; \
+ } else { \
+ rd_kafka_buf_read_i16a(rkbuf, (kstr)->len); \
+ _klen = RD_KAFKAP_STR_LEN(kstr); \
+ } \
+ if (RD_KAFKAP_STR_IS_NULL(kstr)) \
+ (kstr)->str = NULL; \
+ else if (RD_KAFKAP_STR_LEN(kstr) == 0) \
+ (kstr)->str = ""; \
+ else if (!((kstr)->str = rd_slice_ensure_contig( \
+ &rkbuf->rkbuf_reader, _klen))) \
+ rd_kafka_buf_check_len(rkbuf, _klen); \
+ } while (0)
+
+/* Read Kafka String representation (2+N) and write it to the \p tmpabuf
+ * with a trailing nul byte. */
+#define rd_kafka_buf_read_str_tmpabuf(rkbuf, tmpabuf, dst) \
+ do { \
+ rd_kafkap_str_t _kstr; \
+ size_t _slen; \
+ char *_dst; \
+ rd_kafka_buf_read_str(rkbuf, &_kstr); \
+ _slen = RD_KAFKAP_STR_LEN(&_kstr); \
+ if (!(_dst = rd_tmpabuf_write(tmpabuf, _kstr.str, _slen + 1))) \
+ rd_kafka_buf_parse_fail( \
+ rkbuf, \
+ "Not enough room in tmpabuf: " \
+ "%" PRIusz "+%" PRIusz " > %" PRIusz, \
+ (tmpabuf)->of, _slen + 1, (tmpabuf)->size); \
+ _dst[_slen] = '\0'; \
+ dst = (void *)_dst; \
+ } while (0)
+
+/**
+ * Skip a string.
+ */
+#define rd_kafka_buf_skip_str(rkbuf) \
+ do { \
+ int16_t _slen; \
+ rd_kafka_buf_read_i16(rkbuf, &_slen); \
+ rd_kafka_buf_skip(rkbuf, RD_KAFKAP_STR_LEN0(_slen)); \
+ } while (0)
+
+/* Read Kafka Bytes representation (4+N).
+ * The 'kbytes' will be updated to point to rkbuf data */
+#define rd_kafka_buf_read_bytes(rkbuf, kbytes) \
+ do { \
+ int _klen; \
+ rd_kafka_buf_read_i32a(rkbuf, _klen); \
+ (kbytes)->len = _klen; \
+ if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \
+ (kbytes)->data = NULL; \
+ (kbytes)->len = 0; \
+ } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \
+ (kbytes)->data = ""; \
+ else if (!((kbytes)->data = rd_slice_ensure_contig( \
+ &(rkbuf)->rkbuf_reader, _klen))) \
+ rd_kafka_buf_check_len(rkbuf, _klen); \
+ } while (0)
+
+
+/**
+ * @brief Read \p size bytes from buffer, setting \p *ptr to the start
+ * of the memory region.
+ */
+#define rd_kafka_buf_read_ptr(rkbuf, ptr, size) \
+ do { \
+ size_t _klen = size; \
+ if (!(*(ptr) = (void *)rd_slice_ensure_contig( \
+ &(rkbuf)->rkbuf_reader, _klen))) \
+ rd_kafka_buf_check_len(rkbuf, _klen); \
+ } while (0)
+
+
+/**
+ * @brief Read varint-lengted Kafka Bytes representation
+ */
+#define rd_kafka_buf_read_bytes_varint(rkbuf, kbytes) \
+ do { \
+ int64_t _len2; \
+ size_t _r = \
+ rd_slice_read_varint(&(rkbuf)->rkbuf_reader, &_len2); \
+ if (unlikely(RD_UVARINT_UNDERFLOW(_r))) \
+ rd_kafka_buf_underflow_fail(rkbuf, (size_t)0, \
+ "varint parsing failed"); \
+ (kbytes)->len = (int32_t)_len2; \
+ if (RD_KAFKAP_BYTES_IS_NULL(kbytes)) { \
+ (kbytes)->data = NULL; \
+ (kbytes)->len = 0; \
+ } else if (RD_KAFKAP_BYTES_LEN(kbytes) == 0) \
+ (kbytes)->data = ""; \
+ else if (!((kbytes)->data = rd_slice_ensure_contig( \
+ &(rkbuf)->rkbuf_reader, (size_t)_len2))) \
+ rd_kafka_buf_check_len(rkbuf, _len2); \
+ } while (0)
+
+
+/**
+ * @brief Read throttle_time_ms (i32) from response and pass the value
+ * to the throttle handling code.
+ */
+#define rd_kafka_buf_read_throttle_time(rkbuf) \
+ do { \
+ int32_t _throttle_time_ms; \
+ rd_kafka_buf_read_i32(rkbuf, &_throttle_time_ms); \
+ rd_kafka_op_throttle_time((rkbuf)->rkbuf_rkb, \
+ (rkbuf)->rkbuf_rkb->rkb_rk->rk_rep, \
+ _throttle_time_ms); \
+ } while (0)
+
+
+/**
+ * @brief Discard all KIP-482 Tags at the current position in the buffer.
+ */
+#define rd_kafka_buf_skip_tags(rkbuf) \
+ do { \
+ uint64_t _tagcnt; \
+ if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \
+ break; \
+ rd_kafka_buf_read_uvarint(rkbuf, &_tagcnt); \
+ while (_tagcnt-- > 0) { \
+ uint64_t _tagtype, _taglen; \
+ rd_kafka_buf_read_uvarint(rkbuf, &_tagtype); \
+ rd_kafka_buf_read_uvarint(rkbuf, &_taglen); \
+ if (_taglen > 1) \
+ rd_kafka_buf_skip(rkbuf, \
+ (size_t)(_taglen - 1)); \
+ } \
+ } while (0)
+
+/**
+ * @brief Write tags at the current position in the buffer.
+ * @remark Currently always writes empty tags.
+ * @remark Change to ..write_uvarint() when actual tags are supported.
+ */
+#define rd_kafka_buf_write_tags(rkbuf) \
+ do { \
+ if (!((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) \
+ break; \
+ rd_kafka_buf_write_i8(rkbuf, 0); \
+ } while (0)
+
+
+/**
+ * @brief Reads an ARRAY or COMPACT_ARRAY count depending on buffer type.
+ */
+#define rd_kafka_buf_read_arraycnt(rkbuf, arrcnt, maxval) \
+ do { \
+ if ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) { \
+ uint64_t _uva; \
+ rd_kafka_buf_read_uvarint(rkbuf, &_uva); \
+ *(arrcnt) = (int32_t)_uva - 1; \
+ } else { \
+ rd_kafka_buf_read_i32(rkbuf, arrcnt); \
+ } \
+ if (*(arrcnt) < -1 || \
+ ((maxval) != -1 && *(arrcnt) > (maxval))) \
+ rd_kafka_buf_parse_fail( \
+ rkbuf, "ApiArrayCnt %" PRId32 " out of range", \
+ *(arrcnt)); \
+ } while (0)
+
+
+
+/**
+ * @returns true if buffer has been sent on wire, else 0.
+ */
+#define rd_kafka_buf_was_sent(rkbuf) ((rkbuf)->rkbuf_flags & RD_KAFKA_OP_F_SENT)
+
+typedef struct rd_kafka_bufq_s {
+ TAILQ_HEAD(, rd_kafka_buf_s) rkbq_bufs;
+ rd_atomic32_t rkbq_cnt;
+ rd_atomic32_t rkbq_msg_cnt;
+} rd_kafka_bufq_t;
+
+#define rd_kafka_bufq_cnt(rkbq) rd_atomic32_get(&(rkbq)->rkbq_cnt)
+
+/**
+ * @brief Set buffer's request timeout to relative \p timeout_ms measured
+ * from the time the buffer is sent on the underlying socket.
+ *
+ * @param now Reuse current time from existing rd_clock() var, else 0.
+ *
+ * The relative timeout value is reused upon request retry.
+ */
+static RD_INLINE void
+rd_kafka_buf_set_timeout(rd_kafka_buf_t *rkbuf, int timeout_ms, rd_ts_t now) {
+ if (!now)
+ now = rd_clock();
+ rkbuf->rkbuf_rel_timeout = timeout_ms;
+ rkbuf->rkbuf_abs_timeout = 0;
+}
+
+
+/**
+ * @brief Calculate the effective timeout for a request attempt
+ */
+void rd_kafka_buf_calc_timeout(const rd_kafka_t *rk,
+ rd_kafka_buf_t *rkbuf,
+ rd_ts_t now);
+
+
+/**
+ * @brief Set buffer's request timeout to relative \p timeout_ms measured
+ * from \p now.
+ *
+ * @param now Reuse current time from existing rd_clock() var, else 0.
+ * @param force If true: force request timeout to be same as remaining
+ * abs timeout, regardless of socket.timeout.ms.
+ * If false: cap each request timeout to socket.timeout.ms.
+ *
+ * The remaining time is used as timeout for request retries.
+ */
+static RD_INLINE void rd_kafka_buf_set_abs_timeout0(rd_kafka_buf_t *rkbuf,
+ int timeout_ms,
+ rd_ts_t now,
+ rd_bool_t force) {
+ if (!now)
+ now = rd_clock();
+ rkbuf->rkbuf_rel_timeout = 0;
+ rkbuf->rkbuf_abs_timeout = now + ((rd_ts_t)timeout_ms * 1000);
+ rkbuf->rkbuf_force_timeout = force;
+}
+
+#define rd_kafka_buf_set_abs_timeout(rkbuf, timeout_ms, now) \
+ rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_false)
+
+
+#define rd_kafka_buf_set_abs_timeout_force(rkbuf, timeout_ms, now) \
+ rd_kafka_buf_set_abs_timeout0(rkbuf, timeout_ms, now, rd_true)
+
+
+#define rd_kafka_buf_keep(rkbuf) rd_refcnt_add(&(rkbuf)->rkbuf_refcnt)
+#define rd_kafka_buf_destroy(rkbuf) \
+ rd_refcnt_destroywrapper(&(rkbuf)->rkbuf_refcnt, \
+ rd_kafka_buf_destroy_final(rkbuf))
+
+void rd_kafka_buf_destroy_final(rd_kafka_buf_t *rkbuf);
+void rd_kafka_buf_push0(rd_kafka_buf_t *rkbuf,
+ const void *buf,
+ size_t len,
+ int allow_crc_calc,
+ void (*free_cb)(void *));
+#define rd_kafka_buf_push(rkbuf, buf, len, free_cb) \
+ rd_kafka_buf_push0(rkbuf, buf, len, 1 /*allow_crc*/, free_cb)
+rd_kafka_buf_t *rd_kafka_buf_new0(int segcnt, size_t size, int flags);
+#define rd_kafka_buf_new(segcnt, size) rd_kafka_buf_new0(segcnt, size, 0)
+rd_kafka_buf_t *rd_kafka_buf_new_request0(rd_kafka_broker_t *rkb,
+ int16_t ApiKey,
+ int segcnt,
+ size_t size,
+ rd_bool_t is_flexver);
+#define rd_kafka_buf_new_request(rkb, ApiKey, segcnt, size) \
+ rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, rd_false)
+
+#define rd_kafka_buf_new_flexver_request(rkb, ApiKey, segcnt, size, \
+ is_flexver) \
+ rd_kafka_buf_new_request0(rkb, ApiKey, segcnt, size, is_flexver)
+
+rd_kafka_buf_t *
+rd_kafka_buf_new_shadow(const void *ptr, size_t size, void (*free_cb)(void *));
+void rd_kafka_bufq_enq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf);
+void rd_kafka_bufq_deq(rd_kafka_bufq_t *rkbufq, rd_kafka_buf_t *rkbuf);
+void rd_kafka_bufq_init(rd_kafka_bufq_t *rkbufq);
+void rd_kafka_bufq_concat(rd_kafka_bufq_t *dst, rd_kafka_bufq_t *src);
+void rd_kafka_bufq_purge(rd_kafka_broker_t *rkb,
+ rd_kafka_bufq_t *rkbufq,
+ rd_kafka_resp_err_t err);
+void rd_kafka_bufq_connection_reset(rd_kafka_broker_t *rkb,
+ rd_kafka_bufq_t *rkbufq);
+void rd_kafka_bufq_dump(rd_kafka_broker_t *rkb,
+ const char *fac,
+ rd_kafka_bufq_t *rkbq);
+
+int rd_kafka_buf_retry(rd_kafka_broker_t *rkb, rd_kafka_buf_t *rkbuf);
+
+void rd_kafka_buf_handle_op(rd_kafka_op_t *rko, rd_kafka_resp_err_t err);
+void rd_kafka_buf_callback(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *response,
+ rd_kafka_buf_t *request);
+
+
+
+/**
+ *
+ * Write buffer interface
+ *
+ */
+
+/**
+ * Set request API type version
+ */
+static RD_UNUSED RD_INLINE void
+rd_kafka_buf_ApiVersion_set(rd_kafka_buf_t *rkbuf,
+ int16_t version,
+ int features) {
+ rkbuf->rkbuf_reqhdr.ApiVersion = version;
+ rkbuf->rkbuf_features = features;
+}
+
+
+/**
+ * @returns the ApiVersion for a request
+ */
+#define rd_kafka_buf_ApiVersion(rkbuf) ((rkbuf)->rkbuf_reqhdr.ApiVersion)
+
+
+
+/**
+ * Write (copy) data to buffer at current write-buffer position.
+ * There must be enough space allocated in the rkbuf.
+ * Returns offset to written destination buffer.
+ */
+static RD_INLINE size_t rd_kafka_buf_write(rd_kafka_buf_t *rkbuf,
+ const void *data,
+ size_t len) {
+ size_t r;
+
+ r = rd_buf_write(&rkbuf->rkbuf_buf, data, len);
+
+ if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC)
+ rkbuf->rkbuf_crc = rd_crc32_update(rkbuf->rkbuf_crc, data, len);
+
+ return r;
+}
+
+
+
+/**
+ * Write (copy) 'data' to buffer at 'ptr'.
+ * There must be enough space to fit 'len'.
+ * This will overwrite the buffer at given location and length.
+ *
+ * NOTE: rd_kafka_buf_update() MUST NOT be called when a CRC calculation
+ * is in progress (between rd_kafka_buf_crc_init() & .._crc_finalize())
+ */
+static RD_INLINE void rd_kafka_buf_update(rd_kafka_buf_t *rkbuf,
+ size_t of,
+ const void *data,
+ size_t len) {
+ rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC));
+ rd_buf_write_update(&rkbuf->rkbuf_buf, of, data, len);
+}
+
+/**
+ * Write int8_t to buffer.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_i8(rd_kafka_buf_t *rkbuf, int8_t v) {
+ return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
+}
+
+/**
+ * Update int8_t in buffer at offset 'of'.
+ * 'of' should have been previously returned by `.._buf_write_i8()`.
+ */
+static RD_INLINE void
+rd_kafka_buf_update_i8(rd_kafka_buf_t *rkbuf, size_t of, int8_t v) {
+ rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
+}
+
+/**
+ * Write int16_t to buffer.
+ * The value will be endian-swapped before write.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_i16(rd_kafka_buf_t *rkbuf,
+ int16_t v) {
+ v = htobe16(v);
+ return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
+}
+
+/**
+ * Update int16_t in buffer at offset 'of'.
+ * 'of' should have been previously returned by `.._buf_write_i16()`.
+ */
+static RD_INLINE void
+rd_kafka_buf_update_i16(rd_kafka_buf_t *rkbuf, size_t of, int16_t v) {
+ v = htobe16(v);
+ rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
+}
+
+/**
+ * Write int32_t to buffer.
+ * The value will be endian-swapped before write.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_i32(rd_kafka_buf_t *rkbuf,
+ int32_t v) {
+ v = (int32_t)htobe32(v);
+ return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
+}
+
+/**
+ * Update int32_t in buffer at offset 'of'.
+ * 'of' should have been previously returned by `.._buf_write_i32()`.
+ */
+static RD_INLINE void
+rd_kafka_buf_update_i32(rd_kafka_buf_t *rkbuf, size_t of, int32_t v) {
+ v = htobe32(v);
+ rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
+}
+
+/**
+ * Update int32_t in buffer at offset 'of'.
+ * 'of' should have been previously returned by `.._buf_write_i32()`.
+ */
+static RD_INLINE void
+rd_kafka_buf_update_u32(rd_kafka_buf_t *rkbuf, size_t of, uint32_t v) {
+ v = htobe32(v);
+ rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
+}
+
+
+/**
+ * @brief Write varint-encoded signed value to buffer.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_varint(rd_kafka_buf_t *rkbuf,
+ int64_t v) {
+ char varint[RD_UVARINT_ENC_SIZEOF(v)];
+ size_t sz;
+
+ sz = rd_uvarint_enc_i64(varint, sizeof(varint), v);
+
+ return rd_kafka_buf_write(rkbuf, varint, sz);
+}
+
+/**
+ * @brief Write varint-encoded unsigned value to buffer.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_uvarint(rd_kafka_buf_t *rkbuf,
+ uint64_t v) {
+ char varint[RD_UVARINT_ENC_SIZEOF(v)];
+ size_t sz;
+
+ sz = rd_uvarint_enc_u64(varint, sizeof(varint), v);
+
+ return rd_kafka_buf_write(rkbuf, varint, sz);
+}
+
+
+
+/**
+ * @brief Write standard or flexver arround count field to buffer.
+ * Use this when the array count is known beforehand, else use
+ * rd_kafka_buf_write_arraycnt_pos().
+ */
+static RD_INLINE RD_UNUSED size_t
+rd_kafka_buf_write_arraycnt(rd_kafka_buf_t *rkbuf, size_t cnt) {
+
+ /* Count must fit in 31-bits minus the per-byte carry-bit */
+ rd_assert(cnt + 1 < (size_t)(INT_MAX >> 4));
+
+ if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER))
+ return rd_kafka_buf_write_i32(rkbuf, (int32_t)cnt);
+
+ /* CompactArray has a base of 1, 0 is for Null arrays */
+ cnt += 1;
+ return rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)cnt);
+}
+
+
+/**
+ * @brief Write array count field to buffer (i32) for later update with
+ * rd_kafka_buf_finalize_arraycnt().
+ */
+#define rd_kafka_buf_write_arraycnt_pos(rkbuf) rd_kafka_buf_write_i32(rkbuf, 0)
+
+
+/**
+ * @brief Write the final array count to the position returned from
+ * rd_kafka_buf_write_arraycnt_pos().
+ *
+ * Update int32_t in buffer at offset 'of' but serialize it as
+ * compact uvarint (that must not exceed 4 bytes storage)
+ * if the \p rkbuf is marked as FLEXVER, else just update it as
+ * as a standard update_i32().
+ *
+ * @remark For flexibleVersions this will shrink the buffer and move data
+ * and may thus be costly.
+ */
+static RD_INLINE void
+rd_kafka_buf_finalize_arraycnt(rd_kafka_buf_t *rkbuf, size_t of, size_t cnt) {
+ char buf[sizeof(int32_t)];
+ size_t sz, r;
+
+ rd_assert(cnt < (size_t)INT_MAX);
+
+ if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
+ rd_kafka_buf_update_i32(rkbuf, of, (int32_t)cnt);
+ return;
+ }
+
+ /* CompactArray has a base of 1, 0 is for Null arrays */
+ cnt += 1;
+
+ sz = rd_uvarint_enc_u64(buf, sizeof(buf), (uint64_t)cnt);
+ rd_assert(!RD_UVARINT_OVERFLOW(sz));
+ if (cnt < 127)
+ rd_assert(sz == 1);
+ rd_buf_write_update(&rkbuf->rkbuf_buf, of, buf, sz);
+
+ if (sz < sizeof(int32_t)) {
+ /* Varint occupies less space than the allotted 4 bytes, erase
+ * the remaining bytes. */
+ r = rd_buf_erase(&rkbuf->rkbuf_buf, of + sz,
+ sizeof(int32_t) - sz);
+ rd_assert(r == sizeof(int32_t) - sz);
+ }
+}
+
+
+/**
+ * Write int64_t to buffer.
+ * The value will be endian-swapped before write.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_i64(rd_kafka_buf_t *rkbuf,
+ int64_t v) {
+ v = htobe64(v);
+ return rd_kafka_buf_write(rkbuf, &v, sizeof(v));
+}
+
+/**
+ * Update int64_t in buffer at address 'ptr'.
+ * 'of' should have been previously returned by `.._buf_write_i64()`.
+ */
+static RD_INLINE void
+rd_kafka_buf_update_i64(rd_kafka_buf_t *rkbuf, size_t of, int64_t v) {
+ v = htobe64(v);
+ rd_kafka_buf_update(rkbuf, of, &v, sizeof(v));
+}
+
+
+/**
+ * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer.
+ *
+ * @remark Copies the string.
+ *
+ * @returns the offset in \p rkbuf where the string was written.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_kstr(rd_kafka_buf_t *rkbuf,
+ const rd_kafkap_str_t *kstr) {
+ size_t len, r;
+
+ if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
+ /* Standard string */
+ if (!kstr || RD_KAFKAP_STR_IS_NULL(kstr))
+ return rd_kafka_buf_write_i16(rkbuf, -1);
+
+ if (RD_KAFKAP_STR_IS_SERIALIZED(kstr))
+ return rd_kafka_buf_write(rkbuf,
+ RD_KAFKAP_STR_SER(kstr),
+ RD_KAFKAP_STR_SIZE(kstr));
+
+ len = RD_KAFKAP_STR_LEN(kstr);
+ r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len);
+ rd_kafka_buf_write(rkbuf, kstr->str, len);
+
+ return r;
+ }
+
+ /* COMPACT_STRING lengths are:
+ * 0 = NULL,
+ * 1 = empty
+ * N.. = length + 1
+ */
+ if (!kstr || RD_KAFKAP_STR_IS_NULL(kstr))
+ len = 0;
+ else
+ len = RD_KAFKAP_STR_LEN(kstr) + 1;
+
+ r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len);
+ if (len > 1)
+ rd_kafka_buf_write(rkbuf, kstr->str, len - 1);
+ return r;
+}
+
+
+
+/**
+ * @brief Write standard (2-byte header) or KIP-482 COMPACT_STRING to buffer.
+ *
+ * @remark Copies the string.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_str(rd_kafka_buf_t *rkbuf,
+ const char *str,
+ size_t len) {
+ size_t r;
+
+ if (!(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)) {
+ /* Standard string */
+ if (!str)
+ len = RD_KAFKAP_STR_LEN_NULL;
+ else if (len == (size_t)-1)
+ len = strlen(str);
+ r = rd_kafka_buf_write_i16(rkbuf, (int16_t)len);
+ if (str)
+ rd_kafka_buf_write(rkbuf, str, len);
+ return r;
+ }
+
+ /* COMPACT_STRING lengths are:
+ * 0 = NULL,
+ * 1 = empty
+ * N.. = length + 1
+ */
+ if (!str)
+ len = 0;
+ else if (len == (size_t)-1)
+ len = strlen(str) + 1;
+ else
+ len++;
+
+ r = rd_kafka_buf_write_uvarint(rkbuf, (uint64_t)len);
+ if (len > 1)
+ rd_kafka_buf_write(rkbuf, str, len - 1);
+ return r;
+}
+
+
+
+/**
+ * Push (i.e., no copy) Kafka string to buffer iovec
+ */
+static RD_INLINE void rd_kafka_buf_push_kstr(rd_kafka_buf_t *rkbuf,
+ const rd_kafkap_str_t *kstr) {
+ rd_kafka_buf_push(rkbuf, RD_KAFKAP_STR_SER(kstr),
+ RD_KAFKAP_STR_SIZE(kstr), NULL);
+}
+
+
+
+/**
+ * Write (copy) Kafka bytes to buffer.
+ */
+static RD_INLINE size_t
+rd_kafka_buf_write_kbytes(rd_kafka_buf_t *rkbuf,
+ const rd_kafkap_bytes_t *kbytes) {
+ size_t len;
+
+ if (!kbytes || RD_KAFKAP_BYTES_IS_NULL(kbytes))
+ return rd_kafka_buf_write_i32(rkbuf, -1);
+
+ if (RD_KAFKAP_BYTES_IS_SERIALIZED(kbytes))
+ return rd_kafka_buf_write(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
+ RD_KAFKAP_BYTES_SIZE(kbytes));
+
+ len = RD_KAFKAP_BYTES_LEN(kbytes);
+ rd_kafka_buf_write_i32(rkbuf, (int32_t)len);
+ rd_kafka_buf_write(rkbuf, kbytes->data, len);
+
+ return 4 + len;
+}
+
+/**
+ * Push (i.e., no copy) Kafka bytes to buffer iovec
+ */
+static RD_INLINE void
+rd_kafka_buf_push_kbytes(rd_kafka_buf_t *rkbuf,
+ const rd_kafkap_bytes_t *kbytes) {
+ rd_kafka_buf_push(rkbuf, RD_KAFKAP_BYTES_SER(kbytes),
+ RD_KAFKAP_BYTES_SIZE(kbytes), NULL);
+}
+
+/**
+ * Write (copy) binary bytes to buffer as Kafka bytes encapsulate data.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_bytes(rd_kafka_buf_t *rkbuf,
+ const void *payload,
+ size_t size) {
+ size_t r;
+ if (!payload)
+ size = RD_KAFKAP_BYTES_LEN_NULL;
+ r = rd_kafka_buf_write_i32(rkbuf, (int32_t)size);
+ if (payload)
+ rd_kafka_buf_write(rkbuf, payload, size);
+ return r;
+}
+
+
+/**
+ * @brief Write bool to buffer.
+ */
+static RD_INLINE size_t rd_kafka_buf_write_bool(rd_kafka_buf_t *rkbuf,
+ rd_bool_t v) {
+ return rd_kafka_buf_write_i8(rkbuf, (int8_t)v);
+}
+
+
+/**
+ * Write Kafka Message to buffer
+ * The number of bytes written is returned in '*outlenp'.
+ *
+ * Returns the buffer offset of the first byte.
+ */
+size_t rd_kafka_buf_write_Message(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ int64_t Offset,
+ int8_t MagicByte,
+ int8_t Attributes,
+ int64_t Timestamp,
+ const void *key,
+ int32_t key_len,
+ const void *payload,
+ int32_t len,
+ int *outlenp);
+
+/**
+ * Start calculating CRC from now and track it in '*crcp'.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_buf_crc_init(rd_kafka_buf_t *rkbuf) {
+ rd_kafka_assert(NULL, !(rkbuf->rkbuf_flags & RD_KAFKA_OP_F_CRC));
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_CRC;
+ rkbuf->rkbuf_crc = rd_crc32_init();
+}
+
+/**
+ * Finalizes CRC calculation and returns the calculated checksum.
+ */
+static RD_INLINE RD_UNUSED rd_crc32_t
+rd_kafka_buf_crc_finalize(rd_kafka_buf_t *rkbuf) {
+ rkbuf->rkbuf_flags &= ~RD_KAFKA_OP_F_CRC;
+ return rd_crc32_finalize(rkbuf->rkbuf_crc);
+}
+
+
+
+/**
+ * @brief Check if buffer's replyq.version is outdated.
+ * @param rkbuf: may be NULL, for convenience.
+ *
+ * @returns 1 if this is an outdated buffer, else 0.
+ */
+static RD_UNUSED RD_INLINE int
+rd_kafka_buf_version_outdated(const rd_kafka_buf_t *rkbuf, int version) {
+ return rkbuf && rkbuf->rkbuf_replyq.version &&
+ rkbuf->rkbuf_replyq.version < version;
+}
+
+
+void rd_kafka_buf_set_maker(rd_kafka_buf_t *rkbuf,
+ rd_kafka_make_req_cb_t *make_cb,
+ void *make_opaque,
+ void (*free_make_opaque_cb)(void *make_opaque));
+
+#endif /* _RDKAFKA_BUF_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c
new file mode 100644
index 000000000..2a19e4549
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.c
@@ -0,0 +1,552 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name SSL certificates
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_transport_int.h"
+
+
+#if WITH_SSL
+#include "rdkafka_ssl.h"
+
+#include <openssl/x509.h>
+#include <openssl/evp.h>
+
+/**
+ * @brief OpenSSL password query callback using a conf struct.
+ *
+ * @locality application thread
+ */
+static int
+rd_kafka_conf_ssl_passwd_cb(char *buf, int size, int rwflag, void *userdata) {
+ const rd_kafka_conf_t *conf = userdata;
+ int pwlen;
+
+ if (!conf->ssl.key_password)
+ return -1;
+
+ pwlen = (int)strlen(conf->ssl.key_password);
+ memcpy(buf, conf->ssl.key_password, RD_MIN(pwlen, size));
+
+ return pwlen;
+}
+
+
+
+static const char *rd_kafka_cert_type_names[] = {"public-key", "private-key",
+ "CA"};
+
+static const char *rd_kafka_cert_enc_names[] = {"PKCS#12", "DER", "PEM"};
+
+
+/**
+ * @brief Destroy a certificate
+ */
+static void rd_kafka_cert_destroy(rd_kafka_cert_t *cert) {
+ if (rd_refcnt_sub(&cert->refcnt) > 0)
+ return;
+
+ if (cert->x509)
+ X509_free(cert->x509);
+ if (cert->pkey)
+ EVP_PKEY_free(cert->pkey);
+ if (cert->store)
+ X509_STORE_free(cert->store);
+
+ rd_free(cert);
+}
+
+
+/**
+ * @brief Create a copy of a cert
+ */
+static rd_kafka_cert_t *rd_kafka_cert_dup(rd_kafka_cert_t *src) {
+ rd_refcnt_add(&src->refcnt);
+ return src;
+}
+
+
+#if OPENSSL_VERSION_NUMBER < 0x30000000
+/**
+ * @brief Print the OpenSSL error stack to stdout, for development use.
+ */
+static RD_UNUSED void rd_kafka_print_ssl_errors(void) {
+ unsigned long l;
+ const char *file, *data;
+ int line, flags;
+
+ while ((l = ERR_get_error_line_data(&file, &line, &data, &flags)) !=
+ 0) {
+ char buf[256];
+
+ ERR_error_string_n(l, buf, sizeof(buf));
+
+ printf("ERR: %s:%d: %s: %s:\n", file, line, buf,
+ (flags & ERR_TXT_STRING) ? data : "");
+ printf(" %lu:%s : %s : %s : %d : %s (%p, %d, fl 0x%x)\n", l,
+ ERR_lib_error_string(l), ERR_func_error_string(l), file,
+ line,
+ (flags & ERR_TXT_STRING) && data && *data
+ ? data
+ : ERR_reason_error_string(l),
+ data, data ? (int)strlen(data) : -1,
+ flags & ERR_TXT_STRING);
+ }
+}
+#endif
+
+
+/**
+ * @returns a cert structure with a copy of the memory in \p buffer on success,
+ * or NULL on failure in which case errstr will have a human-readable
+ * error string written to it.
+ */
+static rd_kafka_cert_t *rd_kafka_cert_new(const rd_kafka_conf_t *conf,
+ rd_kafka_cert_type_t type,
+ rd_kafka_cert_enc_t encoding,
+ const void *buffer,
+ size_t size,
+ char *errstr,
+ size_t errstr_size) {
+ static const rd_bool_t
+ valid[RD_KAFKA_CERT__CNT][RD_KAFKA_CERT_ENC__CNT] = {
+ /* Valid encodings per certificate type */
+ [RD_KAFKA_CERT_PUBLIC_KEY] = {[RD_KAFKA_CERT_ENC_PKCS12] =
+ rd_true,
+ [RD_KAFKA_CERT_ENC_DER] = rd_true,
+ [RD_KAFKA_CERT_ENC_PEM] =
+ rd_true},
+ [RD_KAFKA_CERT_PRIVATE_KEY] =
+ {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
+ [RD_KAFKA_CERT_ENC_DER] = rd_true,
+ [RD_KAFKA_CERT_ENC_PEM] = rd_true},
+ [RD_KAFKA_CERT_CA] = {[RD_KAFKA_CERT_ENC_PKCS12] = rd_true,
+ [RD_KAFKA_CERT_ENC_DER] = rd_true,
+ [RD_KAFKA_CERT_ENC_PEM] = rd_true},
+ };
+ const char *action = "", *ssl_errstr = NULL, *extra = "";
+ BIO *bio;
+ rd_kafka_cert_t *cert = NULL;
+ PKCS12 *p12 = NULL;
+
+ if ((int)type < 0 || type >= RD_KAFKA_CERT__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid certificate type %d",
+ (int)type);
+ return NULL;
+ }
+
+ if ((int)encoding < 0 || encoding >= RD_KAFKA_CERT_ENC__CNT) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid certificate encoding %d", (int)encoding);
+ return NULL;
+ }
+
+ if (!valid[type][encoding]) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid encoding %s for certificate type %s",
+ rd_kafka_cert_enc_names[encoding],
+ rd_kafka_cert_type_names[type]);
+ return NULL;
+ }
+
+ action = "read memory";
+ bio = BIO_new_mem_buf((void *)buffer, (long)size);
+ if (!bio)
+ goto fail;
+
+ if (encoding == RD_KAFKA_CERT_ENC_PKCS12) {
+ action = "read PKCS#12";
+ p12 = d2i_PKCS12_bio(bio, NULL);
+ if (!p12)
+ goto fail;
+ }
+
+ cert = rd_calloc(1, sizeof(*cert));
+ cert->type = type;
+ cert->encoding = encoding;
+
+ rd_refcnt_init(&cert->refcnt, 1);
+
+ switch (type) {
+ case RD_KAFKA_CERT_CA:
+ cert->store = X509_STORE_new();
+
+ switch (encoding) {
+ case RD_KAFKA_CERT_ENC_PKCS12: {
+ EVP_PKEY *ign_pkey;
+ X509 *ign_cert;
+ STACK_OF(X509) *cas = NULL;
+ int i;
+
+ action = "parse PKCS#12";
+ if (!PKCS12_parse(p12, conf->ssl.key_password,
+ &ign_pkey, &ign_cert, &cas))
+ goto fail;
+
+ EVP_PKEY_free(ign_pkey);
+ X509_free(ign_cert);
+
+ if (!cas || sk_X509_num(cas) < 1) {
+ action =
+ "retrieve at least one CA "
+ "cert from PKCS#12";
+ if (cas)
+ sk_X509_pop_free(cas, X509_free);
+ goto fail;
+ }
+
+ for (i = 0; i < sk_X509_num(cas); i++) {
+ if (!X509_STORE_add_cert(
+ cert->store, sk_X509_value(cas, i))) {
+ action =
+ "add certificate to "
+ "X.509 store";
+ sk_X509_pop_free(cas, X509_free);
+ goto fail;
+ }
+ }
+
+ sk_X509_pop_free(cas, X509_free);
+ } break;
+
+ case RD_KAFKA_CERT_ENC_DER: {
+ X509 *x509;
+
+ action = "read DER / X.509 ASN.1";
+ if (!(x509 = d2i_X509_bio(bio, NULL)))
+ goto fail;
+
+ if (!X509_STORE_add_cert(cert->store, x509)) {
+ action =
+ "add certificate to "
+ "X.509 store";
+ X509_free(x509);
+ goto fail;
+ }
+
+ X509_free(x509);
+ } break;
+
+ case RD_KAFKA_CERT_ENC_PEM: {
+ X509 *x509;
+ int cnt = 0;
+
+ action = "read PEM";
+
+ /* This will read one certificate per call
+ * until an error occurs or the end of the
+ * buffer is reached (which is an error
+ * we'll need to clear). */
+ while ((x509 = PEM_read_bio_X509(
+ bio, NULL, rd_kafka_conf_ssl_passwd_cb,
+ (void *)conf))) {
+
+ if (!X509_STORE_add_cert(cert->store, x509)) {
+ action =
+ "add certificate to "
+ "X.509 store";
+ X509_free(x509);
+ goto fail;
+ }
+
+ X509_free(x509);
+ cnt++;
+ }
+
+ if (!BIO_eof(bio)) {
+ /* Encountered parse error before
+ * reaching end, propagate error and
+ * fail. */
+ goto fail;
+ }
+
+ if (!cnt) {
+ action =
+ "retrieve at least one "
+ "CA cert from PEM";
+
+ goto fail;
+ }
+
+ /* Reached end, which is raised as an error,
+ * so clear it since it is not. */
+ ERR_clear_error();
+ } break;
+
+ default:
+ RD_NOTREACHED();
+ break;
+ }
+ break;
+
+
+ case RD_KAFKA_CERT_PUBLIC_KEY:
+ switch (encoding) {
+ case RD_KAFKA_CERT_ENC_PKCS12: {
+ EVP_PKEY *ign_pkey;
+
+ action = "parse PKCS#12";
+ if (!PKCS12_parse(p12, conf->ssl.key_password,
+ &ign_pkey, &cert->x509, NULL))
+ goto fail;
+
+ EVP_PKEY_free(ign_pkey);
+
+ action = "retrieve public key";
+ if (!cert->x509)
+ goto fail;
+ } break;
+
+ case RD_KAFKA_CERT_ENC_DER:
+ action = "read DER / X.509 ASN.1";
+ cert->x509 = d2i_X509_bio(bio, NULL);
+ if (!cert->x509)
+ goto fail;
+ break;
+
+ case RD_KAFKA_CERT_ENC_PEM:
+ action = "read PEM";
+ cert->x509 = PEM_read_bio_X509(
+ bio, NULL, rd_kafka_conf_ssl_passwd_cb,
+ (void *)conf);
+ if (!cert->x509)
+ goto fail;
+ break;
+
+ default:
+ RD_NOTREACHED();
+ break;
+ }
+ break;
+
+
+ case RD_KAFKA_CERT_PRIVATE_KEY:
+ switch (encoding) {
+ case RD_KAFKA_CERT_ENC_PKCS12: {
+ X509 *x509;
+
+ action = "parse PKCS#12";
+ if (!PKCS12_parse(p12, conf->ssl.key_password,
+ &cert->pkey, &x509, NULL))
+ goto fail;
+
+ X509_free(x509);
+
+ action = "retrieve private key";
+ if (!cert->pkey)
+ goto fail;
+ } break;
+
+ case RD_KAFKA_CERT_ENC_DER:
+ action =
+ "read DER / X.509 ASN.1 and "
+ "convert to EVP_PKEY";
+ cert->pkey = d2i_PrivateKey_bio(bio, NULL);
+ if (!cert->pkey)
+ goto fail;
+ break;
+
+ case RD_KAFKA_CERT_ENC_PEM:
+ action = "read PEM";
+ cert->pkey = PEM_read_bio_PrivateKey(
+ bio, NULL, rd_kafka_conf_ssl_passwd_cb,
+ (void *)conf);
+ if (!cert->pkey)
+ goto fail;
+ break;
+
+ default:
+ RD_NOTREACHED();
+ break;
+ }
+ break;
+
+ default:
+ RD_NOTREACHED();
+ break;
+ }
+
+ if (bio)
+ BIO_free(bio);
+ if (p12)
+ PKCS12_free(p12);
+
+ return cert;
+
+fail:
+ ssl_errstr = rd_kafka_ssl_last_error_str();
+
+ /* OpenSSL 3.x does not provide obsolete ciphers out of the box, so
+ * let's try to identify such an error message and guide the user
+ * to what to do (set up a provider config file and point to it
+ * through the OPENSSL_CONF environment variable).
+ * We could call OSSL_PROVIDER_load("legacy") here, but that would be
+ * a non-obvious side-effect of calling this set function. */
+ if (strstr(action, "parse") && strstr(ssl_errstr, "Algorithm"))
+ extra =
+ ": legacy ciphers may require loading OpenSSL's \"legacy\" "
+ "provider through an OPENSSL_CONF configuration file";
+
+ rd_snprintf(errstr, errstr_size, "Failed to %s %s (encoding %s): %s%s",
+ action, rd_kafka_cert_type_names[type],
+ rd_kafka_cert_enc_names[encoding], ssl_errstr, extra);
+
+ if (cert)
+ rd_kafka_cert_destroy(cert);
+ if (bio)
+ BIO_free(bio);
+ if (p12)
+ PKCS12_free(p12);
+
+ return NULL;
+}
+#endif /* WITH_SSL */
+
+
+/**
+ * @name Public API
+ * @brief These public methods must be available regardless if
+ * librdkafka was built with OpenSSL or not.
+ * @{
+ */
+
+rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert(rd_kafka_conf_t *conf,
+ rd_kafka_cert_type_t cert_type,
+ rd_kafka_cert_enc_t cert_enc,
+ const void *buffer,
+ size_t size,
+ char *errstr,
+ size_t errstr_size) {
+#if !WITH_SSL
+ rd_snprintf(errstr, errstr_size,
+ "librdkafka not built with OpenSSL support");
+ return RD_KAFKA_CONF_INVALID;
+#else
+ rd_kafka_cert_t *cert;
+ rd_kafka_cert_t **cert_map[RD_KAFKA_CERT__CNT] = {
+ [RD_KAFKA_CERT_PUBLIC_KEY] = &conf->ssl.cert,
+ [RD_KAFKA_CERT_PRIVATE_KEY] = &conf->ssl.key,
+ [RD_KAFKA_CERT_CA] = &conf->ssl.ca};
+ rd_kafka_cert_t **certp;
+
+ if ((int)cert_type < 0 || cert_type >= RD_KAFKA_CERT__CNT) {
+ rd_snprintf(errstr, errstr_size, "Invalid certificate type %d",
+ (int)cert_type);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ /* Make sure OpenSSL is loaded */
+ rd_kafka_global_init();
+
+ certp = cert_map[cert_type];
+
+ if (!buffer) {
+ /* Clear current value */
+ if (*certp) {
+ rd_kafka_cert_destroy(*certp);
+ *certp = NULL;
+ }
+ return RD_KAFKA_CONF_OK;
+ }
+
+ cert = rd_kafka_cert_new(conf, cert_type, cert_enc, buffer, size,
+ errstr, errstr_size);
+ if (!cert)
+ return RD_KAFKA_CONF_INVALID;
+
+ if (*certp)
+ rd_kafka_cert_destroy(*certp);
+
+ *certp = cert;
+
+ return RD_KAFKA_CONF_OK;
+#endif
+}
+
+
+
+/**
+ * @brief Destructor called when configuration object is destroyed.
+ */
+void rd_kafka_conf_cert_dtor(int scope, void *pconf) {
+#if WITH_SSL
+ rd_kafka_conf_t *conf = pconf;
+ assert(scope == _RK_GLOBAL);
+ if (conf->ssl.key) {
+ rd_kafka_cert_destroy(conf->ssl.key);
+ conf->ssl.key = NULL;
+ }
+ if (conf->ssl.cert) {
+ rd_kafka_cert_destroy(conf->ssl.cert);
+ conf->ssl.cert = NULL;
+ }
+ if (conf->ssl.ca) {
+ rd_kafka_cert_destroy(conf->ssl.ca);
+ conf->ssl.ca = NULL;
+ }
+#endif
+}
+
+/**
+ * @brief Copy-constructor called when configuration object \p psrcp is
+ * duplicated to \p dstp.
+ */
+void rd_kafka_conf_cert_copy(int scope,
+ void *pdst,
+ const void *psrc,
+ void *dstptr,
+ const void *srcptr,
+ size_t filter_cnt,
+ const char **filter) {
+#if WITH_SSL
+ rd_kafka_conf_t *dconf = pdst;
+ const rd_kafka_conf_t *sconf = psrc;
+
+ assert(scope == _RK_GLOBAL);
+
+ /* Free and reset any exist certs on the destination conf */
+ rd_kafka_conf_cert_dtor(scope, pdst);
+
+ if (sconf->ssl.key)
+ dconf->ssl.key = rd_kafka_cert_dup(sconf->ssl.key);
+
+ if (sconf->ssl.cert)
+ dconf->ssl.cert = rd_kafka_cert_dup(sconf->ssl.cert);
+
+ if (sconf->ssl.ca)
+ dconf->ssl.ca = rd_kafka_cert_dup(sconf->ssl.ca);
+#endif
+}
+
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h
new file mode 100644
index 000000000..b53f46c01
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cert.h
@@ -0,0 +1,61 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDKAFKA_CERT_H_
+#define _RDKAFKA_CERT_H_
+
+
+/**
+ * @struct rd_kafka_cert
+ *
+ * @brief Internal representation of a cert_type,cert_enc,memory tuple.
+ *
+ * @remark Certificates are read-only after construction.
+ */
+typedef struct rd_kafka_cert_s {
+ rd_kafka_cert_type_t type;
+ rd_kafka_cert_enc_t encoding;
+ rd_refcnt_t refcnt;
+#if WITH_SSL
+ X509 *x509; /**< Certificate (public key) */
+ EVP_PKEY *pkey; /**< Private key */
+ X509_STORE *store; /**< CA certificate chain store */
+#endif
+} rd_kafka_cert_t;
+
+void rd_kafka_conf_cert_dtor(int scope, void *pconf);
+void rd_kafka_conf_cert_copy(int scope,
+ void *pdst,
+ const void *psrc,
+ void *dstptr,
+ const void *srcptr,
+ size_t filter_cnt,
+ const char **filter);
+
+#endif /* _RDKAFKA_CERT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c
new file mode 100644
index 000000000..026e93321
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.c
@@ -0,0 +1,5969 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_request.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_assignor.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_metadata.h"
+#include "rdkafka_cgrp.h"
+#include "rdkafka_interceptor.h"
+#include "rdmap.h"
+
+#include "rdunittest.h"
+
+#include <ctype.h>
+#include <stdarg.h>
+
+static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg);
+static rd_kafka_error_t *
+rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *assignment);
+static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg);
+static rd_kafka_error_t *
+rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *partitions);
+static rd_kafka_error_t *
+rd_kafka_cgrp_incremental_unassign(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *partitions);
+
+static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque);
+
+static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg,
+ const char *reason);
+
+static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg);
+
+static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg,
+ rd_bool_t assignment_lost,
+ rd_bool_t initiating,
+ const char *reason);
+static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg,
+ rd_bool_t assignment_lost,
+ rd_bool_t initiating,
+ const char *reason);
+
+static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg);
+
+static void
+rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg);
+static rd_kafka_resp_err_t
+rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *rktparlist);
+
+static void rd_kafka_cgrp_group_assignment_set(
+ rd_kafka_cgrp_t *rkcg,
+ const rd_kafka_topic_partition_list_t *partitions);
+static void rd_kafka_cgrp_group_assignment_modify(
+ rd_kafka_cgrp_t *rkcg,
+ rd_bool_t add,
+ const rd_kafka_topic_partition_list_t *partitions);
+
+static void
+rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *assignment);
+
+
+/**
+ * @returns true if the current assignment is lost.
+ */
+rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg) {
+ return rd_atomic32_get(&rkcg->rkcg_assignment_lost) != 0;
+}
+
+
+/**
+ * @brief Call when the current assignment has been lost, with a
+ * human-readable reason.
+ */
+static void rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg,
+ char *fmt,
+ ...) RD_FORMAT(printf, 2, 3);
+static void
+rd_kafka_cgrp_assignment_set_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) {
+ va_list ap;
+ char reason[256];
+
+ if (!rkcg->rkcg_group_assignment)
+ return;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(reason, sizeof(reason), fmt, ap);
+ va_end(ap);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST",
+ "Group \"%s\": "
+ "current assignment of %d partition(s) lost: %s",
+ rkcg->rkcg_group_id->str, rkcg->rkcg_group_assignment->cnt,
+ reason);
+
+ rd_atomic32_set(&rkcg->rkcg_assignment_lost, rd_true);
+}
+
+
+/**
+ * @brief Call when the current assignment is no longer considered lost, with a
+ * human-readable reason.
+ */
+static void
+rd_kafka_cgrp_assignment_clear_lost(rd_kafka_cgrp_t *rkcg, char *fmt, ...) {
+ va_list ap;
+ char reason[256];
+
+ if (!rd_atomic32_get(&rkcg->rkcg_assignment_lost))
+ return;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(reason, sizeof(reason), fmt, ap);
+ va_end(ap);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "LOST",
+ "Group \"%s\": "
+ "current assignment no longer considered lost: %s",
+ rkcg->rkcg_group_id->str, reason);
+
+ rd_atomic32_set(&rkcg->rkcg_assignment_lost, rd_false);
+}
+
+
+/**
+ * @brief The rebalance protocol currently in use. This will be
+ * RD_KAFKA_REBALANCE_PROTOCOL_NONE if the consumer has not
+ * (yet) joined a group, else it will match the rebalance
+ * protocol of the configured assignor(s).
+ *
+ * @locality main thread
+ */
+rd_kafka_rebalance_protocol_t
+rd_kafka_cgrp_rebalance_protocol(rd_kafka_cgrp_t *rkcg) {
+ if (!rkcg->rkcg_assignor)
+ return RD_KAFKA_REBALANCE_PROTOCOL_NONE;
+ return rkcg->rkcg_assignor->rkas_protocol;
+}
+
+
+
+/**
+ * @returns true if the cgrp is awaiting a protocol response. This prohibits
+ * the join-state machine to proceed before the current state
+ * is done.
+ */
+static rd_bool_t rd_kafka_cgrp_awaiting_response(rd_kafka_cgrp_t *rkcg) {
+ return rkcg->rkcg_wait_resp != -1;
+}
+
+
+/**
+ * @brief Set flag indicating we are waiting for a coordinator response
+ * for the given request.
+ *
+ * This is used for specific requests to postpone rejoining the group if
+ * there are outstanding JoinGroup or SyncGroup requests.
+ *
+ * @locality main thread
+ */
+static void rd_kafka_cgrp_set_wait_resp(rd_kafka_cgrp_t *rkcg, int16_t ApiKey) {
+ rd_assert(rkcg->rkcg_wait_resp == -1);
+ rkcg->rkcg_wait_resp = ApiKey;
+}
+
+/**
+ * @brief Clear the flag that says we're waiting for a coordinator response
+ * for the given \p request.
+ *
+ * @param request Original request, possibly NULL (for errors).
+ *
+ * @locality main thread
+ */
+static void rd_kafka_cgrp_clear_wait_resp(rd_kafka_cgrp_t *rkcg,
+ int16_t ApiKey) {
+ rd_assert(rkcg->rkcg_wait_resp == ApiKey);
+ rkcg->rkcg_wait_resp = -1;
+}
+
+
+
+/**
+ * @struct Auxillary glue type used for COOPERATIVE rebalance set operations.
+ */
+typedef struct PartitionMemberInfo_s {
+ const rd_kafka_group_member_t *member;
+ rd_bool_t members_match;
+} PartitionMemberInfo_t;
+
+static PartitionMemberInfo_t *
+PartitionMemberInfo_new(const rd_kafka_group_member_t *member,
+ rd_bool_t members_match) {
+ PartitionMemberInfo_t *pmi;
+
+ pmi = rd_calloc(1, sizeof(*pmi));
+ pmi->member = member;
+ pmi->members_match = members_match;
+
+ return pmi;
+}
+
+static void PartitionMemberInfo_free(void *p) {
+ PartitionMemberInfo_t *pmi = p;
+ rd_free(pmi);
+}
+
+typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
+ PartitionMemberInfo_t *) map_toppar_member_info_t;
+
+
+/**
+ * @returns true if consumer has joined the group and thus requires a leave.
+ */
+#define RD_KAFKA_CGRP_HAS_JOINED(rkcg) \
+ (rkcg->rkcg_member_id != NULL && \
+ RD_KAFKAP_STR_LEN((rkcg)->rkcg_member_id) > 0)
+
+
+/**
+ * @returns true if cgrp is waiting for a rebalance_cb to be handled by
+ * the application.
+ */
+#define RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) \
+ ((rkcg)->rkcg_join_state == \
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \
+ (rkcg)->rkcg_join_state == \
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL)
+
+/**
+ * @returns true if a rebalance is in progress.
+ *
+ * 1. In WAIT_JOIN or WAIT_METADATA state with a member-id set,
+ * this happens on rejoin.
+ * 2. In WAIT_SYNC waiting for the group to rebalance on the broker.
+ * 3. in *_WAIT_UNASSIGN_TO_COMPLETE waiting for unassigned partitions to
+ * stop fetching, et.al.
+ * 4. In _WAIT_*ASSIGN_CALL waiting for the application to handle the
+ * assignment changes in its rebalance callback and then call *assign().
+ * 5. An incremental rebalancing is in progress.
+ * 6. A rebalance-induced rejoin is in progress.
+ */
+#define RD_KAFKA_CGRP_REBALANCING(rkcg) \
+ ((RD_KAFKA_CGRP_HAS_JOINED(rkcg) && \
+ ((rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN || \
+ (rkcg)->rkcg_join_state == \
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)) || \
+ (rkcg)->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC || \
+ (rkcg)->rkcg_join_state == \
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE || \
+ (rkcg)->rkcg_join_state == \
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE || \
+ (rkcg)->rkcg_join_state == \
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL || \
+ (rkcg)->rkcg_join_state == \
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL || \
+ (rkcg)->rkcg_rebalance_incr_assignment != NULL || \
+ (rkcg)->rkcg_rebalance_rejoin)
+
+
+
+const char *rd_kafka_cgrp_state_names[] = {
+ "init", "term", "query-coord",
+ "wait-coord", "wait-broker", "wait-broker-transport",
+ "up"};
+
+const char *rd_kafka_cgrp_join_state_names[] = {
+ "init",
+ "wait-join",
+ "wait-metadata",
+ "wait-sync",
+ "wait-assign-call",
+ "wait-unassign-call",
+ "wait-unassign-to-complete",
+ "wait-incr-unassign-to-complete",
+ "steady",
+};
+
+
+/**
+ * @brief Change the cgrp state.
+ *
+ * @returns 1 if the state was changed, else 0.
+ */
+static int rd_kafka_cgrp_set_state(rd_kafka_cgrp_t *rkcg, int state) {
+ if ((int)rkcg->rkcg_state == state)
+ return 0;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPSTATE",
+ "Group \"%.*s\" changed state %s -> %s "
+ "(join-state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_state_names[state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+ rkcg->rkcg_state = state;
+ rkcg->rkcg_ts_statechange = rd_clock();
+
+ rd_kafka_brokers_broadcast_state_change(rkcg->rkcg_rk);
+
+ return 1;
+}
+
+
+void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state) {
+ if ((int)rkcg->rkcg_join_state == join_state)
+ return;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPJOINSTATE",
+ "Group \"%.*s\" changed join state %s -> %s "
+ "(state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ rd_kafka_cgrp_join_state_names[join_state],
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
+ rkcg->rkcg_join_state = join_state;
+}
+
+
+void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg) {
+ rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_subscription);
+ rd_kafka_assert(rkcg->rkcg_rk, !rkcg->rkcg_group_leader.members);
+ rd_kafka_cgrp_set_member_id(rkcg, NULL);
+ if (rkcg->rkcg_group_instance_id)
+ rd_kafkap_str_destroy(rkcg->rkcg_group_instance_id);
+
+ rd_kafka_q_destroy_owner(rkcg->rkcg_q);
+ rd_kafka_q_destroy_owner(rkcg->rkcg_ops);
+ rd_kafka_q_destroy_owner(rkcg->rkcg_wait_coord_q);
+ rd_kafka_assert(rkcg->rkcg_rk, TAILQ_EMPTY(&rkcg->rkcg_topics));
+ rd_kafka_assert(rkcg->rkcg_rk, rd_list_empty(&rkcg->rkcg_toppars));
+ rd_list_destroy(&rkcg->rkcg_toppars);
+ rd_list_destroy(rkcg->rkcg_subscribed_topics);
+ rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics);
+ if (rkcg->rkcg_assignor && rkcg->rkcg_assignor->rkas_destroy_state_cb)
+ rkcg->rkcg_assignor->rkas_destroy_state_cb(
+ rkcg->rkcg_assignor_state);
+ rd_free(rkcg);
+}
+
+
+
+/**
+ * @brief Update the absolute session timeout following a successfull
+ * response from the coordinator.
+ * This timeout is used to enforce the session timeout in the
+ * consumer itself.
+ *
+ * @param reset if true the timeout is updated even if the session has expired.
+ */
+static RD_INLINE void
+rd_kafka_cgrp_update_session_timeout(rd_kafka_cgrp_t *rkcg, rd_bool_t reset) {
+ if (reset || rkcg->rkcg_ts_session_timeout != 0)
+ rkcg->rkcg_ts_session_timeout =
+ rd_clock() +
+ (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
+}
+
+
+
+rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk,
+ const rd_kafkap_str_t *group_id,
+ const rd_kafkap_str_t *client_id) {
+ rd_kafka_cgrp_t *rkcg;
+
+ rkcg = rd_calloc(1, sizeof(*rkcg));
+
+ rkcg->rkcg_rk = rk;
+ rkcg->rkcg_group_id = group_id;
+ rkcg->rkcg_client_id = client_id;
+ rkcg->rkcg_coord_id = -1;
+ rkcg->rkcg_generation_id = -1;
+ rkcg->rkcg_wait_resp = -1;
+
+ rkcg->rkcg_ops = rd_kafka_q_new(rk);
+ rkcg->rkcg_ops->rkq_serve = rd_kafka_cgrp_op_serve;
+ rkcg->rkcg_ops->rkq_opaque = rkcg;
+ rkcg->rkcg_wait_coord_q = rd_kafka_q_new(rk);
+ rkcg->rkcg_wait_coord_q->rkq_serve = rkcg->rkcg_ops->rkq_serve;
+ rkcg->rkcg_wait_coord_q->rkq_opaque = rkcg->rkcg_ops->rkq_opaque;
+ rkcg->rkcg_q = rd_kafka_q_new(rk);
+ rkcg->rkcg_group_instance_id =
+ rd_kafkap_str_new(rk->rk_conf.group_instance_id, -1);
+
+ TAILQ_INIT(&rkcg->rkcg_topics);
+ rd_list_init(&rkcg->rkcg_toppars, 32, NULL);
+ rd_kafka_cgrp_set_member_id(rkcg, "");
+ rkcg->rkcg_subscribed_topics =
+ rd_list_new(0, (void *)rd_kafka_topic_info_destroy);
+ rd_interval_init(&rkcg->rkcg_coord_query_intvl);
+ rd_interval_init(&rkcg->rkcg_heartbeat_intvl);
+ rd_interval_init(&rkcg->rkcg_join_intvl);
+ rd_interval_init(&rkcg->rkcg_timeout_scan_intvl);
+ rd_atomic32_init(&rkcg->rkcg_assignment_lost, rd_false);
+ rd_atomic32_init(&rkcg->rkcg_terminated, rd_false);
+
+ rkcg->rkcg_errored_topics = rd_kafka_topic_partition_list_new(0);
+
+ /* Create a logical group coordinator broker to provide
+ * a dedicated connection for group coordination.
+ * This is needed since JoinGroup may block for up to
+ * max.poll.interval.ms, effectively blocking and timing out
+ * any other protocol requests (such as Metadata).
+ * The address for this broker will be updated when
+ * the group coordinator is assigned. */
+ rkcg->rkcg_coord = rd_kafka_broker_add_logical(rk, "GroupCoordinator");
+
+ if (rk->rk_conf.enable_auto_commit &&
+ rk->rk_conf.auto_commit_interval_ms > 0)
+ rd_kafka_timer_start(
+ &rk->rk_timers, &rkcg->rkcg_offset_commit_tmr,
+ rk->rk_conf.auto_commit_interval_ms * 1000ll,
+ rd_kafka_cgrp_offset_commit_tmr_cb, rkcg);
+
+ return rkcg;
+}
+
+
+/**
+ * @brief Set the group coordinator broker.
+ */
+static void rd_kafka_cgrp_coord_set_broker(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_broker_t *rkb) {
+
+ rd_assert(rkcg->rkcg_curr_coord == NULL);
+
+ rd_assert(RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb));
+
+ rkcg->rkcg_curr_coord = rkb;
+ rd_kafka_broker_keep(rkb);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORDSET",
+ "Group \"%.*s\" coordinator set to broker %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_broker_name(rkb));
+
+ /* Reset query interval to trigger an immediate
+ * coord query if required */
+ if (!rd_interval_disabled(&rkcg->rkcg_coord_query_intvl))
+ rd_interval_reset(&rkcg->rkcg_coord_query_intvl);
+
+ rd_kafka_cgrp_set_state(rkcg,
+ RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT);
+
+ rd_kafka_broker_persistent_connection_add(
+ rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord);
+
+ /* Set the logical coordinator's nodename to the
+ * proper broker's nodename, this will trigger a (re)connect
+ * to the new address. */
+ rd_kafka_broker_set_nodename(rkcg->rkcg_coord, rkb);
+}
+
+
+/**
+ * @brief Reset/clear the group coordinator broker.
+ */
+static void rd_kafka_cgrp_coord_clear_broker(rd_kafka_cgrp_t *rkcg) {
+ rd_kafka_broker_t *rkb = rkcg->rkcg_curr_coord;
+
+ rd_assert(rkcg->rkcg_curr_coord);
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORDCLEAR",
+ "Group \"%.*s\" broker %s is no longer coordinator",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_broker_name(rkb));
+
+ rd_assert(rkcg->rkcg_coord);
+
+ rd_kafka_broker_persistent_connection_del(
+ rkcg->rkcg_coord, &rkcg->rkcg_coord->rkb_persistconn.coord);
+
+ /* Clear the ephemeral broker's nodename.
+ * This will also trigger a disconnect. */
+ rd_kafka_broker_set_nodename(rkcg->rkcg_coord, NULL);
+
+ rkcg->rkcg_curr_coord = NULL;
+ rd_kafka_broker_destroy(rkb); /* from set_coord_broker() */
+}
+
+
+/**
+ * @brief Update/set the group coordinator.
+ *
+ * Will do nothing if there's been no change.
+ *
+ * @returns 1 if the coordinator, or state, was updated, else 0.
+ */
+static int rd_kafka_cgrp_coord_update(rd_kafka_cgrp_t *rkcg, int32_t coord_id) {
+
+ /* Don't do anything while terminating */
+ if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM)
+ return 0;
+
+ /* Check if coordinator changed */
+ if (rkcg->rkcg_coord_id != coord_id) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPCOORD",
+ "Group \"%.*s\" changing coordinator %" PRId32
+ " -> %" PRId32,
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rkcg->rkcg_coord_id, coord_id);
+
+ /* Update coord id */
+ rkcg->rkcg_coord_id = coord_id;
+
+ /* Clear previous broker handle, if any */
+ if (rkcg->rkcg_curr_coord)
+ rd_kafka_cgrp_coord_clear_broker(rkcg);
+ }
+
+
+ if (rkcg->rkcg_curr_coord) {
+ /* There is already a known coordinator and a
+ * corresponding broker handle. */
+ if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP)
+ return rd_kafka_cgrp_set_state(
+ rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT);
+
+ } else if (rkcg->rkcg_coord_id != -1) {
+ rd_kafka_broker_t *rkb;
+
+ /* Try to find the coordinator broker handle */
+ rd_kafka_rdlock(rkcg->rkcg_rk);
+ rkb = rd_kafka_broker_find_by_nodeid(rkcg->rkcg_rk, coord_id);
+ rd_kafka_rdunlock(rkcg->rkcg_rk);
+
+ /* It is possible, due to stale metadata, that the
+ * coordinator id points to a broker we still don't know
+ * about. In this case the client will continue
+ * querying metadata and querying for the coordinator
+ * until a match is found. */
+
+ if (rkb) {
+ /* Coordinator is known and broker handle exists */
+ rd_kafka_cgrp_coord_set_broker(rkcg, rkb);
+ rd_kafka_broker_destroy(rkb); /*from find_by_nodeid()*/
+
+ return 1;
+ } else {
+ /* Coordinator is known but no corresponding
+ * broker handle. */
+ return rd_kafka_cgrp_set_state(
+ rkcg, RD_KAFKA_CGRP_STATE_WAIT_BROKER);
+ }
+
+ } else {
+ /* Coordinator still not known, re-query */
+ if (rkcg->rkcg_state >= RD_KAFKA_CGRP_STATE_WAIT_COORD)
+ return rd_kafka_cgrp_set_state(
+ rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
+ }
+
+ return 0; /* no change */
+}
+
+
+
+/**
+ * Handle FindCoordinator response
+ */
+static void rd_kafka_cgrp_handle_FindCoordinator(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode = 0;
+ int32_t CoordId;
+ rd_kafkap_str_t CoordHost = RD_ZERO_INIT;
+ int32_t CoordPort;
+ rd_kafka_cgrp_t *rkcg = opaque;
+ struct rd_kafka_metadata_broker mdb = RD_ZERO_INIT;
+ char *errstr = NULL;
+ int actions;
+
+ if (likely(!(ErrorCode = err))) {
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ rd_kafkap_str_t ErrorMsg;
+
+ rd_kafka_buf_read_str(rkbuf, &ErrorMsg);
+
+ if (!RD_KAFKAP_STR_IS_NULL(&ErrorMsg))
+ RD_KAFKAP_STR_DUPA(&errstr, &ErrorMsg);
+ }
+
+ rd_kafka_buf_read_i32(rkbuf, &CoordId);
+ rd_kafka_buf_read_str(rkbuf, &CoordHost);
+ rd_kafka_buf_read_i32(rkbuf, &CoordPort);
+ }
+
+ if (ErrorCode)
+ goto err;
+
+
+ mdb.id = CoordId;
+ RD_KAFKAP_STR_DUPA(&mdb.host, &CoordHost);
+ mdb.port = CoordPort;
+
+ rd_rkb_dbg(rkb, CGRP, "CGRPCOORD",
+ "Group \"%.*s\" coordinator is %s:%i id %" PRId32,
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), mdb.host, mdb.port,
+ mdb.id);
+ rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto, &mdb, NULL);
+
+ rd_kafka_cgrp_coord_update(rkcg, CoordId);
+ rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */
+ return;
+
+err_parse: /* Parse error */
+ ErrorCode = rkbuf->rkbuf_err;
+ /* FALLTHRU */
+
+err:
+ if (!errstr)
+ errstr = (char *)rd_kafka_err2str(ErrorCode);
+
+ rd_rkb_dbg(rkb, CGRP, "CGRPCOORD",
+ "Group \"%.*s\" FindCoordinator response error: %s: %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_err2name(ErrorCode), errstr);
+
+ if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
+ return;
+
+ actions = rd_kafka_err_action(
+ rkb, ErrorCode, request,
+
+ RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE,
+
+ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT,
+
+ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT,
+
+ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE,
+
+ RD_KAFKA_ERR_ACTION_END);
+
+
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ rd_kafka_cgrp_coord_update(rkcg, -1);
+ } else {
+ if (!(actions & RD_KAFKA_ERR_ACTION_RETRY) &&
+ rkcg->rkcg_last_err != ErrorCode) {
+ /* Propagate non-retriable errors to the application */
+ rd_kafka_consumer_err(
+ rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0,
+ NULL, NULL, RD_KAFKA_OFFSET_INVALID,
+ "FindCoordinator response error: %s", errstr);
+
+ /* Suppress repeated errors */
+ rkcg->rkcg_last_err = ErrorCode;
+ }
+
+ /* Retries are performed by the timer-intervalled
+ * coord queries, continue querying */
+ rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
+ }
+
+ rd_kafka_cgrp_serve(rkcg); /* Serve updated state, if possible */
+}
+
+
+/**
+ * Query for coordinator.
+ * Ask any broker in state UP
+ *
+ * Locality: main thread
+ */
+void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason) {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_resp_err_t err;
+
+ rkb = rd_kafka_broker_any_usable(
+ rkcg->rkcg_rk, RD_POLL_NOWAIT, RD_DO_LOCK,
+ RD_KAFKA_FEATURE_BROKER_GROUP_COORD, "coordinator query");
+
+ if (!rkb) {
+ /* Reset the interval because there were no brokers. When a
+ * broker becomes available, we want to query it immediately. */
+ rd_interval_reset(&rkcg->rkcg_coord_query_intvl);
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPQUERY",
+ "Group \"%.*s\": "
+ "no broker available for coordinator query: %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
+ return;
+ }
+
+ rd_rkb_dbg(rkb, CGRP, "CGRPQUERY",
+ "Group \"%.*s\": querying for coordinator: %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
+
+ err = rd_kafka_FindCoordinatorRequest(
+ rkb, RD_KAFKA_COORD_GROUP, rkcg->rkcg_group_id->str,
+ RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
+ rd_kafka_cgrp_handle_FindCoordinator, rkcg);
+
+ if (err) {
+ rd_rkb_dbg(rkb, CGRP, "CGRPQUERY",
+ "Group \"%.*s\": "
+ "unable to send coordinator query: %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_err2str(err));
+ rd_kafka_broker_destroy(rkb);
+ return;
+ }
+
+ if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_QUERY_COORD)
+ rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_WAIT_COORD);
+
+ rd_kafka_broker_destroy(rkb);
+
+ /* Back off the next intervalled query since we just sent one. */
+ rd_interval_reset_to_now(&rkcg->rkcg_coord_query_intvl, 0);
+}
+
+/**
+ * @brief Mark the current coordinator as dead.
+ *
+ * @locality main thread
+ */
+void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_resp_err_t err,
+ const char *reason) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COORD",
+ "Group \"%.*s\": "
+ "marking the coordinator (%" PRId32 ") dead: %s: %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), rkcg->rkcg_coord_id,
+ rd_kafka_err2str(err), reason);
+
+ rd_kafka_cgrp_coord_update(rkcg, -1);
+
+ /* Re-query for coordinator */
+ rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
+ rd_kafka_cgrp_coord_query(rkcg, reason);
+}
+
+
+/**
+ * @returns a new reference to the current coordinator, if available, else NULL.
+ *
+ * @locality rdkafka main thread
+ * @locks_required none
+ * @locks_acquired none
+ */
+rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg) {
+ if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP || !rkcg->rkcg_coord)
+ return NULL;
+
+ rd_kafka_broker_keep(rkcg->rkcg_coord);
+
+ return rkcg->rkcg_coord;
+}
+
+
+/**
+ * @brief cgrp handling of LeaveGroup responses
+ * @param opaque must be the cgrp handle.
+ * @locality rdkafka main thread (unless err==ERR__DESTROY)
+ */
+static void rd_kafka_cgrp_handle_LeaveGroup(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_cgrp_t *rkcg = opaque;
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode = 0;
+
+ if (err) {
+ ErrorCode = err;
+ goto err;
+ }
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+err:
+ if (ErrorCode)
+ rd_kafka_dbg(rkb->rkb_rk, CGRP, "LEAVEGROUP",
+ "LeaveGroup response error in state %s: %s",
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_err2str(ErrorCode));
+ else
+ rd_kafka_dbg(rkb->rkb_rk, CGRP, "LEAVEGROUP",
+ "LeaveGroup response received in state %s",
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
+
+ if (ErrorCode != RD_KAFKA_RESP_ERR__DESTROY) {
+ rd_assert(thrd_is_current(rk->rk_thread));
+ rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE;
+ rd_kafka_cgrp_try_terminate(rkcg);
+ }
+
+
+
+ return;
+
+err_parse:
+ ErrorCode = rkbuf->rkbuf_err;
+ goto err;
+}
+
+
+static void rd_kafka_cgrp_leave(rd_kafka_cgrp_t *rkcg) {
+ char *member_id;
+
+ RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id);
+
+ /* Leaving the group invalidates the member id, reset it
+ * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */
+ rd_kafka_cgrp_set_member_id(rkcg, "");
+
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE",
+ "Group \"%.*s\": leave (in state %s): "
+ "LeaveGroupRequest already in-transit",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
+ return;
+ }
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "LEAVE",
+ "Group \"%.*s\": leave (in state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
+
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WAIT_LEAVE;
+
+ if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP) {
+ rd_rkb_dbg(rkcg->rkcg_curr_coord, CONSUMER, "LEAVE",
+ "Leaving group");
+ rd_kafka_LeaveGroupRequest(
+ rkcg->rkcg_coord, rkcg->rkcg_group_id->str, member_id,
+ RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
+ rd_kafka_cgrp_handle_LeaveGroup, rkcg);
+ } else
+ rd_kafka_cgrp_handle_LeaveGroup(rkcg->rkcg_rk, rkcg->rkcg_coord,
+ RD_KAFKA_RESP_ERR__WAIT_COORD,
+ NULL, NULL, rkcg);
+}
+
+
+/**
+ * @brief Leave group, if desired.
+ *
+ * @returns true if a LeaveGroup was issued, else false.
+ */
+static rd_bool_t rd_kafka_cgrp_leave_maybe(rd_kafka_cgrp_t *rkcg) {
+
+ /* We were not instructed to leave in the first place. */
+ if (!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE))
+ return rd_false;
+
+ rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE;
+
+ /* Don't send Leave when termating with NO_CONSUMER_CLOSE flag */
+ if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk))
+ return rd_false;
+
+ /* KIP-345: Static group members must not send a LeaveGroupRequest
+ * on termination. */
+ if (RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) &&
+ rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
+ return rd_false;
+
+ rd_kafka_cgrp_leave(rkcg);
+
+ return rd_true;
+}
+
+
+/**
+ * @brief Enqueues a rebalance op, delegating responsibility of calling
+ * incremental_assign / incremental_unassign to the application.
+ * If there is no rebalance handler configured, or the action
+ * should not be delegated to the application for some other
+ * reason, incremental_assign / incremental_unassign will be called
+ * automatically, immediately.
+ *
+ * @param rejoin whether or not to rejoin the group following completion
+ * of the incremental assign / unassign.
+ *
+ * @remarks does not take ownership of \p partitions.
+ */
+void rd_kafka_rebalance_op_incr(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ rd_bool_t rejoin,
+ const char *reason) {
+ rd_kafka_error_t *error;
+
+ /* Flag to rejoin after completion of the incr_assign or incr_unassign,
+ if required. */
+ rkcg->rkcg_rebalance_rejoin = rejoin;
+
+ rd_kafka_wrlock(rkcg->rkcg_rk);
+ rkcg->rkcg_c.ts_rebalance = rd_clock();
+ rkcg->rkcg_c.rebalance_cnt++;
+ rd_kafka_wrunlock(rkcg->rkcg_rk);
+
+ if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk) ||
+ rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
+ /* Total unconditional unassign in these cases */
+ rd_kafka_cgrp_unassign(rkcg);
+
+ /* Now serve the assignment to make updates */
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+ goto done;
+ }
+
+ rd_kafka_cgrp_set_join_state(
+ rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+ ? RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL
+ : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL);
+
+ /* Schedule application rebalance callback/event if enabled */
+ if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) {
+ rd_kafka_op_t *rko;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
+ "Group \"%s\": delegating incremental %s of %d "
+ "partition(s) to application on queue %s: %s",
+ rkcg->rkcg_group_id->str,
+ err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+ ? "revoke"
+ : "assign",
+ partitions->cnt,
+ rd_kafka_q_dest_name(rkcg->rkcg_q), reason);
+
+ /* Pause currently assigned partitions while waiting for
+ * rebalance callback to get called to make sure the
+ * application will not receive any more messages that
+ * might block it from serving the rebalance callback
+ * and to not process messages for partitions it
+ * might have lost in the rebalance. */
+ rd_kafka_assignment_pause(rkcg->rkcg_rk,
+ "incremental rebalance");
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE);
+ rko->rko_err = err;
+ rko->rko_u.rebalance.partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+
+ if (rd_kafka_q_enq(rkcg->rkcg_q, rko))
+ goto done; /* Rebalance op successfully enqueued */
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
+ "Group \"%s\": ops queue is disabled, not "
+ "delegating partition %s to application",
+ rkcg->rkcg_group_id->str,
+ err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+ ? "unassign"
+ : "assign");
+ /* FALLTHRU */
+ }
+
+ /* No application rebalance callback/event handler, or it is not
+ * available, do the assign/unassign ourselves.
+ * We need to be careful here not to trigger assignment_serve()
+ * since it may call into the cgrp code again, in which case we
+ * can't really track what the outcome state will be. */
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+ error = rd_kafka_cgrp_incremental_assign(rkcg, partitions);
+ else
+ error = rd_kafka_cgrp_incremental_unassign(rkcg, partitions);
+
+ if (error) {
+ rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE",
+ "Group \"%s\": internal incremental %s "
+ "of %d partition(s) failed: %s: "
+ "unassigning all partitions and rejoining",
+ rkcg->rkcg_group_id->str,
+ err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+ ? "unassign"
+ : "assign",
+ partitions->cnt, rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_cgrp_set_join_state(rkcg,
+ /* This is a clean state for
+ * assignment_done() to rejoin
+ * from. */
+ RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+ rd_kafka_assignment_clear(rkcg->rkcg_rk);
+ }
+
+ /* Now serve the assignment to make updates */
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+
+done:
+ /* Update the current group assignment based on the
+ * added/removed partitions. */
+ rd_kafka_cgrp_group_assignment_modify(
+ rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, partitions);
+}
+
+
+/**
+ * @brief Enqueues a rebalance op, delegating responsibility of calling
+ * assign / unassign to the application. If there is no rebalance
+ * handler configured, or the action should not be delegated to the
+ * application for some other reason, assign / unassign will be
+ * called automatically.
+ *
+ * @remarks \p partitions is copied.
+ */
+static void rd_kafka_rebalance_op(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *assignment,
+ const char *reason) {
+ rd_kafka_error_t *error;
+
+ rd_kafka_wrlock(rkcg->rkcg_rk);
+ rkcg->rkcg_c.ts_rebalance = rd_clock();
+ rkcg->rkcg_c.rebalance_cnt++;
+ rd_kafka_wrunlock(rkcg->rkcg_rk);
+
+ if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk) ||
+ rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
+ /* Unassign */
+ rd_kafka_cgrp_unassign(rkcg);
+
+ /* Now serve the assignment to make updates */
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+ goto done;
+ }
+
+ rd_assert(assignment != NULL);
+
+ rd_kafka_cgrp_set_join_state(
+ rkcg, err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS
+ ? RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL
+ : RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL);
+
+ /* Schedule application rebalance callback/event if enabled */
+ if (rkcg->rkcg_rk->rk_conf.enabled_events & RD_KAFKA_EVENT_REBALANCE) {
+ rd_kafka_op_t *rko;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGN",
+ "Group \"%s\": delegating %s of %d partition(s) "
+ "to application on queue %s: %s",
+ rkcg->rkcg_group_id->str,
+ err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+ ? "revoke"
+ : "assign",
+ assignment->cnt,
+ rd_kafka_q_dest_name(rkcg->rkcg_q), reason);
+
+ /* Pause currently assigned partitions while waiting for
+ * rebalance callback to get called to make sure the
+ * application will not receive any more messages that
+ * might block it from serving the rebalance callback
+ * and to not process messages for partitions it
+ * might have lost in the rebalance. */
+ rd_kafka_assignment_pause(rkcg->rkcg_rk, "rebalance");
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_REBALANCE);
+ rko->rko_err = err;
+ rko->rko_u.rebalance.partitions =
+ rd_kafka_topic_partition_list_copy(assignment);
+
+ if (rd_kafka_q_enq(rkcg->rkcg_q, rko))
+ goto done; /* Rebalance op successfully enqueued */
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
+ "Group \"%s\": ops queue is disabled, not "
+ "delegating partition %s to application",
+ rkcg->rkcg_group_id->str,
+ err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+ ? "unassign"
+ : "assign");
+
+ /* FALLTHRU */
+ }
+
+ /* No application rebalance callback/event handler, or it is not
+ * available, do the assign/unassign ourselves.
+ * We need to be careful here not to trigger assignment_serve()
+ * since it may call into the cgrp code again, in which case we
+ * can't really track what the outcome state will be. */
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+ error = rd_kafka_cgrp_assign(rkcg, assignment);
+ else
+ error = rd_kafka_cgrp_unassign(rkcg);
+
+ if (error) {
+ rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE",
+ "Group \"%s\": internal %s "
+ "of %d partition(s) failed: %s: "
+ "unassigning all partitions and rejoining",
+ rkcg->rkcg_group_id->str,
+ err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS
+ ? "unassign"
+ : "assign",
+ rkcg->rkcg_group_assignment->cnt,
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_cgrp_set_join_state(rkcg,
+ /* This is a clean state for
+ * assignment_done() to rejoin
+ * from. */
+ RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+ rd_kafka_assignment_clear(rkcg->rkcg_rk);
+ }
+
+ /* Now serve the assignment to make updates */
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+
+done:
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+ rd_kafka_cgrp_group_assignment_set(rkcg, assignment);
+ else
+ rd_kafka_cgrp_group_assignment_set(rkcg, NULL);
+}
+
+
+/**
+ * @brief Rejoin the group.
+ *
+ * @remark This function must not have any side-effects but setting the
+ * join state.
+ */
+static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...)
+ RD_FORMAT(printf, 2, 3);
+
+static void rd_kafka_cgrp_rejoin(rd_kafka_cgrp_t *rkcg, const char *fmt, ...) {
+ char reason[512];
+ va_list ap;
+ char astr[128];
+
+ va_start(ap, fmt);
+ rd_vsnprintf(reason, sizeof(reason), fmt, ap);
+ va_end(ap);
+
+ if (rkcg->rkcg_group_assignment)
+ rd_snprintf(astr, sizeof(astr), " with %d owned partition(s)",
+ rkcg->rkcg_group_assignment->cnt);
+ else
+ rd_snprintf(astr, sizeof(astr), " without an assignment");
+
+ if (rkcg->rkcg_subscription || rkcg->rkcg_next_subscription) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REJOIN",
+ "Group \"%s\": %s group%s: %s", rkcg->rkcg_group_id->str,
+ rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT
+ ? "Joining"
+ : "Rejoining",
+ astr, reason);
+ } else {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "NOREJOIN",
+ "Group \"%s\": Not %s group%s: %s: "
+ "no subscribed topics",
+ rkcg->rkcg_group_id->str,
+ rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT
+ ? "joining"
+ : "rejoining",
+ astr, reason);
+
+ rd_kafka_cgrp_leave_maybe(rkcg);
+ }
+
+ rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_INIT);
+}
+
+
+/**
+ * @brief Collect all assigned or owned partitions from group members.
+ * The member field of each result element is set to the associated
+ * group member. The members_match field is set to rd_false.
+ *
+ * @param members Array of group members.
+ * @param member_cnt Number of elements in members.
+ * @param par_cnt The total number of partitions expected to be collected.
+ * @param collect_owned If rd_true, rkgm_owned partitions will be collected,
+ * else rkgm_assignment partitions will be collected.
+ */
+static map_toppar_member_info_t *
+rd_kafka_collect_partitions(const rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ size_t par_cnt,
+ rd_bool_t collect_owned) {
+ size_t i;
+ map_toppar_member_info_t *collected = rd_calloc(1, sizeof(*collected));
+
+ RD_MAP_INIT(collected, par_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free,
+ PartitionMemberInfo_free);
+
+ for (i = 0; i < member_cnt; i++) {
+ size_t j;
+ const rd_kafka_group_member_t *rkgm = &members[i];
+ const rd_kafka_topic_partition_list_t *toppars =
+ collect_owned ? rkgm->rkgm_owned : rkgm->rkgm_assignment;
+
+ for (j = 0; j < (size_t)toppars->cnt; j++) {
+ rd_kafka_topic_partition_t *rktpar =
+ rd_kafka_topic_partition_copy(&toppars->elems[j]);
+ PartitionMemberInfo_t *pmi =
+ PartitionMemberInfo_new(rkgm, rd_false);
+ RD_MAP_SET(collected, rktpar, pmi);
+ }
+ }
+
+ return collected;
+}
+
+
+/**
+ * @brief Set intersection. Returns a set of all elements of \p a that
+ * are also elements of \p b. Additionally, compares the members
+ * field of matching elements from \p a and \p b and if not NULL
+ * and equal, sets the members_match field in the result element
+ * to rd_true and the member field to equal that of the elements,
+ * else sets the members_match field to rd_false and member field
+ * to NULL.
+ */
+static map_toppar_member_info_t *
+rd_kafka_member_partitions_intersect(map_toppar_member_info_t *a,
+ map_toppar_member_info_t *b) {
+ const rd_kafka_topic_partition_t *key;
+ const PartitionMemberInfo_t *a_v;
+ map_toppar_member_info_t *intersection =
+ rd_calloc(1, sizeof(*intersection));
+
+ RD_MAP_INIT(
+ intersection, RD_MIN(a ? RD_MAP_CNT(a) : 1, b ? RD_MAP_CNT(b) : 1),
+ rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
+
+ if (!a || !b)
+ return intersection;
+
+ RD_MAP_FOREACH(key, a_v, a) {
+ rd_bool_t members_match;
+ const PartitionMemberInfo_t *b_v = RD_MAP_GET(b, key);
+
+ if (b_v == NULL)
+ continue;
+
+ members_match =
+ a_v->member && b_v->member &&
+ rd_kafka_group_member_cmp(a_v->member, b_v->member) == 0;
+
+ RD_MAP_SET(intersection, rd_kafka_topic_partition_copy(key),
+ PartitionMemberInfo_new(b_v->member, members_match));
+ }
+
+ return intersection;
+}
+
+
+/**
+ * @brief Set subtraction. Returns a set of all elements of \p a
+ * that are not elements of \p b. Sets the member field in
+ * elements in the returned set to equal that of the
+ * corresponding element in \p a
+ */
+static map_toppar_member_info_t *
+rd_kafka_member_partitions_subtract(map_toppar_member_info_t *a,
+ map_toppar_member_info_t *b) {
+ const rd_kafka_topic_partition_t *key;
+ const PartitionMemberInfo_t *a_v;
+ map_toppar_member_info_t *difference =
+ rd_calloc(1, sizeof(*difference));
+
+ RD_MAP_INIT(difference, a ? RD_MAP_CNT(a) : 1,
+ rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free,
+ PartitionMemberInfo_free);
+
+ if (!a)
+ return difference;
+
+ RD_MAP_FOREACH(key, a_v, a) {
+ const PartitionMemberInfo_t *b_v =
+ b ? RD_MAP_GET(b, key) : NULL;
+
+ if (!b_v)
+ RD_MAP_SET(
+ difference, rd_kafka_topic_partition_copy(key),
+ PartitionMemberInfo_new(a_v->member, rd_false));
+ }
+
+ return difference;
+}
+
+
+/**
+ * @brief Adjust the partition assignment as provided by the assignor
+ * according to the COOPERATIVE protocol.
+ */
+static void rd_kafka_cooperative_protocol_adjust_assignment(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_group_member_t *members,
+ int member_cnt) {
+
+ /* https://cwiki.apache.org/confluence/display/KAFKA/KIP-429%3A+Kafk\
+ a+Consumer+Incremental+Rebalance+Protocol */
+
+ int i;
+ int expected_max_assignment_size;
+ int total_assigned = 0;
+ int not_revoking = 0;
+ size_t par_cnt = 0;
+ const rd_kafka_topic_partition_t *toppar;
+ const PartitionMemberInfo_t *pmi;
+ map_toppar_member_info_t *assigned;
+ map_toppar_member_info_t *owned;
+ map_toppar_member_info_t *maybe_revoking;
+ map_toppar_member_info_t *ready_to_migrate;
+ map_toppar_member_info_t *unknown_but_owned;
+
+ for (i = 0; i < member_cnt; i++)
+ par_cnt += members[i].rkgm_owned->cnt;
+
+ assigned = rd_kafka_collect_partitions(members, member_cnt, par_cnt,
+ rd_false /*assigned*/);
+
+ owned = rd_kafka_collect_partitions(members, member_cnt, par_cnt,
+ rd_true /*owned*/);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
+ "Group \"%s\": Partitions owned by members: %d, "
+ "partitions assigned by assignor: %d",
+ rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(owned),
+ (int)RD_MAP_CNT(assigned));
+
+ /* Still owned by some members */
+ maybe_revoking = rd_kafka_member_partitions_intersect(assigned, owned);
+
+ /* Not previously owned by anyone */
+ ready_to_migrate = rd_kafka_member_partitions_subtract(assigned, owned);
+
+ /* Don't exist in assigned partitions */
+ unknown_but_owned =
+ rd_kafka_member_partitions_subtract(owned, assigned);
+
+ /* Rough guess at a size that is a bit higher than
+ * the maximum number of partitions likely to be
+ * assigned to any partition. */
+ expected_max_assignment_size =
+ (int)(RD_MAP_CNT(assigned) / member_cnt) + 4;
+
+ for (i = 0; i < member_cnt; i++) {
+ rd_kafka_group_member_t *rkgm = &members[i];
+ rd_kafka_topic_partition_list_destroy(rkgm->rkgm_assignment);
+
+ rkgm->rkgm_assignment = rd_kafka_topic_partition_list_new(
+ expected_max_assignment_size);
+ }
+
+ /* For maybe-revoking-partitions, check if the owner has
+ * changed. If yes, exclude them from the assigned-partitions
+ * list to the new owner. The old owner will realize it does
+ * not own it any more, revoke it and then trigger another
+ * rebalance for these partitions to finally be reassigned.
+ */
+ RD_MAP_FOREACH(toppar, pmi, maybe_revoking) {
+ if (!pmi->members_match)
+ /* Owner has changed. */
+ continue;
+
+ /* Owner hasn't changed. */
+ rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment,
+ toppar->topic,
+ toppar->partition);
+
+ total_assigned++;
+ not_revoking++;
+ }
+
+ /* For ready-to-migrate-partitions, it is safe to move them
+ * to the new member immediately since we know no one owns
+ * it before, and hence we can encode the owner from the
+ * newly-assigned-partitions directly.
+ */
+ RD_MAP_FOREACH(toppar, pmi, ready_to_migrate) {
+ rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment,
+ toppar->topic,
+ toppar->partition);
+ total_assigned++;
+ }
+
+ /* For unknown-but-owned-partitions, it is also safe to just
+ * give them back to whoever claimed to be their owners by
+ * encoding them directly as well. If this is due to a topic
+ * metadata update, then a later rebalance will be triggered
+ * anyway.
+ */
+ RD_MAP_FOREACH(toppar, pmi, unknown_but_owned) {
+ rd_kafka_topic_partition_list_add(pmi->member->rkgm_assignment,
+ toppar->topic,
+ toppar->partition);
+ total_assigned++;
+ }
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
+ "Group \"%s\": COOPERATIVE protocol collection sizes: "
+ "maybe revoking: %d, ready to migrate: %d, unknown but "
+ "owned: %d",
+ rkcg->rkcg_group_id->str, (int)RD_MAP_CNT(maybe_revoking),
+ (int)RD_MAP_CNT(ready_to_migrate),
+ (int)RD_MAP_CNT(unknown_but_owned));
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRP",
+ "Group \"%s\": %d partitions assigned to consumers",
+ rkcg->rkcg_group_id->str, total_assigned);
+
+ RD_MAP_DESTROY_AND_FREE(maybe_revoking);
+ RD_MAP_DESTROY_AND_FREE(ready_to_migrate);
+ RD_MAP_DESTROY_AND_FREE(unknown_but_owned);
+ RD_MAP_DESTROY_AND_FREE(assigned);
+ RD_MAP_DESTROY_AND_FREE(owned);
+}
+
+
+/**
+ * @brief Parses and handles the MemberState from a SyncGroupResponse.
+ */
+static void rd_kafka_cgrp_handle_SyncGroup_memberstate(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ const rd_kafkap_bytes_t *member_state) {
+ rd_kafka_buf_t *rkbuf = NULL;
+ rd_kafka_topic_partition_list_t *assignment = NULL;
+ const int log_decode_errors = LOG_ERR;
+ int16_t Version;
+ rd_kafkap_bytes_t UserData;
+
+ /* Dont handle new assignments when terminating */
+ if (!err && rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
+ err = RD_KAFKA_RESP_ERR__DESTROY;
+
+ if (err)
+ goto err;
+
+ if (RD_KAFKAP_BYTES_LEN(member_state) == 0) {
+ /* Empty assignment. */
+ assignment = rd_kafka_topic_partition_list_new(0);
+ memset(&UserData, 0, sizeof(UserData));
+ goto done;
+ }
+
+ /* Parse assignment from MemberState */
+ rkbuf = rd_kafka_buf_new_shadow(
+ member_state->data, RD_KAFKAP_BYTES_LEN(member_state), NULL);
+ /* Protocol parser needs a broker handle to log errors on. */
+ if (rkb) {
+ rkbuf->rkbuf_rkb = rkb;
+ rd_kafka_broker_keep(rkb);
+ } else
+ rkbuf->rkbuf_rkb = rd_kafka_broker_internal(rkcg->rkcg_rk);
+
+ rd_kafka_buf_read_i16(rkbuf, &Version);
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ if (!(assignment =
+ rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields)))
+ goto err_parse;
+ rd_kafka_buf_read_bytes(rkbuf, &UserData);
+
+done:
+ rd_kafka_cgrp_update_session_timeout(rkcg, rd_true /*reset timeout*/);
+
+ rd_assert(rkcg->rkcg_assignor);
+ if (rkcg->rkcg_assignor->rkas_on_assignment_cb) {
+ char *member_id;
+ RD_KAFKAP_STR_DUPA(&member_id, rkcg->rkcg_member_id);
+ rd_kafka_consumer_group_metadata_t *cgmd =
+ rd_kafka_consumer_group_metadata_new_with_genid(
+ rkcg->rkcg_rk->rk_conf.group_id_str,
+ rkcg->rkcg_generation_id, member_id,
+ rkcg->rkcg_rk->rk_conf.group_instance_id);
+ rkcg->rkcg_assignor->rkas_on_assignment_cb(
+ rkcg->rkcg_assignor, &(rkcg->rkcg_assignor_state),
+ assignment, &UserData, cgmd);
+ rd_kafka_consumer_group_metadata_destroy(cgmd);
+ }
+
+ // FIXME: Remove when we're done debugging.
+ rd_kafka_topic_partition_list_log(rkcg->rkcg_rk, "ASSIGNMENT",
+ RD_KAFKA_DBG_CGRP, assignment);
+
+ /* Set the new assignment */
+ rd_kafka_cgrp_handle_assignment(rkcg, assignment);
+
+ rd_kafka_topic_partition_list_destroy(assignment);
+
+ if (rkbuf)
+ rd_kafka_buf_destroy(rkbuf);
+
+ return;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+
+err:
+ if (rkbuf)
+ rd_kafka_buf_destroy(rkbuf);
+
+ if (assignment)
+ rd_kafka_topic_partition_list_destroy(assignment);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPSYNC",
+ "Group \"%s\": synchronization failed: %s: rejoining",
+ rkcg->rkcg_group_id->str, rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID)
+ rd_kafka_set_fatal_error(rkcg->rkcg_rk, err,
+ "Fatal consumer error: %s",
+ rd_kafka_err2str(err));
+ else if (err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
+ rkcg->rkcg_generation_id = -1;
+ else if (err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
+ rd_kafka_cgrp_set_member_id(rkcg, "");
+
+ if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
+ (err == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION ||
+ err == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID))
+ rd_kafka_cgrp_revoke_all_rejoin(
+ rkcg, rd_true /*assignment is lost*/,
+ rd_true /*this consumer is initiating*/, "SyncGroup error");
+ else
+ rd_kafka_cgrp_rejoin(rkcg, "SyncGroup error: %s",
+ rd_kafka_err2str(err));
+}
+
+
+
+/**
+ * @brief Cgrp handler for SyncGroup responses. opaque must be the cgrp handle.
+ */
+static void rd_kafka_cgrp_handle_SyncGroup(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_cgrp_t *rkcg = opaque;
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode = 0;
+ rd_kafkap_bytes_t MemberState = RD_ZERO_INIT;
+ int actions;
+
+ if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) {
+ rd_kafka_dbg(
+ rkb->rkb_rk, CGRP, "SYNCGROUP",
+ "SyncGroup response: discarding outdated request "
+ "(now in join-state %s)",
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+ rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
+ return;
+ }
+
+ if (err) {
+ ErrorCode = err;
+ goto err;
+ }
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ rd_kafka_buf_read_bytes(rkbuf, &MemberState);
+
+err:
+ actions = rd_kafka_err_action(rkb, ErrorCode, request,
+ RD_KAFKA_ERR_ACTION_END);
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ /* Re-query for coordinator */
+ rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
+ RD_KAFKA_OP_COORD_QUERY, ErrorCode);
+ /* FALLTHRU */
+ }
+
+ if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+ if (rd_kafka_buf_retry(rkb, request))
+ return;
+ /* FALLTHRU */
+ }
+
+ rd_kafka_dbg(rkb->rkb_rk, CGRP, "SYNCGROUP",
+ "SyncGroup response: %s (%d bytes of MemberState data)",
+ rd_kafka_err2str(ErrorCode),
+ RD_KAFKAP_BYTES_LEN(&MemberState));
+
+ rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
+
+ if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
+ return; /* Termination */
+
+ rd_kafka_cgrp_handle_SyncGroup_memberstate(rkcg, rkb, ErrorCode,
+ &MemberState);
+
+ return;
+
+err_parse:
+ ErrorCode = rkbuf->rkbuf_err;
+ goto err;
+}
+
+
+/**
+ * @brief Run group assignment.
+ */
+static void rd_kafka_cgrp_assignor_run(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_assignor_t *rkas,
+ rd_kafka_resp_err_t err,
+ rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ int member_cnt) {
+ char errstr[512];
+
+ if (err) {
+ rd_snprintf(errstr, sizeof(errstr),
+ "Failed to get cluster metadata: %s",
+ rd_kafka_err2str(err));
+ goto err;
+ }
+
+ *errstr = '\0';
+
+ /* Run assignor */
+ err = rd_kafka_assignor_run(rkcg, rkas, metadata, members, member_cnt,
+ errstr, sizeof(errstr));
+
+ if (err) {
+ if (!*errstr)
+ rd_snprintf(errstr, sizeof(errstr), "%s",
+ rd_kafka_err2str(err));
+ goto err;
+ }
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGNOR",
+ "Group \"%s\": \"%s\" assignor run for %d member(s)",
+ rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
+ member_cnt);
+
+ if (rkas->rkas_protocol == RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE)
+ rd_kafka_cooperative_protocol_adjust_assignment(rkcg, members,
+ member_cnt);
+
+ rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC);
+
+ rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
+
+ /* Respond to broker with assignment set or error */
+ rd_kafka_SyncGroupRequest(
+ rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id,
+ rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, members,
+ err ? 0 : member_cnt, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
+ rd_kafka_cgrp_handle_SyncGroup, rkcg);
+ return;
+
+err:
+ rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "ASSIGNOR",
+ "Group \"%s\": failed to run assignor \"%s\" for "
+ "%d member(s): %s",
+ rkcg->rkcg_group_id->str, rkas->rkas_protocol_name->str,
+ member_cnt, errstr);
+
+ rd_kafka_cgrp_rejoin(rkcg, "%s assignor failed: %s",
+ rkas->rkas_protocol_name->str, errstr);
+}
+
+
+
+/**
+ * @brief Op callback from handle_JoinGroup
+ */
+static rd_kafka_op_res_t
+rd_kafka_cgrp_assignor_handle_Metadata_op(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED; /* Terminating */
+
+ if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA)
+ return RD_KAFKA_OP_RES_HANDLED; /* From outdated state */
+
+ if (!rkcg->rkcg_group_leader.members) {
+ rd_kafka_dbg(rk, CGRP, "GRPLEADER",
+ "Group \"%.*s\": no longer leader: "
+ "not running assignor",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ rd_kafka_cgrp_assignor_run(rkcg, rkcg->rkcg_assignor, rko->rko_err,
+ rko->rko_u.metadata.md,
+ rkcg->rkcg_group_leader.members,
+ rkcg->rkcg_group_leader.member_cnt);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * Parse single JoinGroup.Members.MemberMetadata for "consumer" ProtocolType
+ *
+ * Protocol definition:
+ * https://cwiki.apache.org/confluence/display/KAFKA/Kafka+Client-side+Assignment+Proposal
+ *
+ * Returns 0 on success or -1 on error.
+ */
+static int rd_kafka_group_MemberMetadata_consumer_read(
+ rd_kafka_broker_t *rkb,
+ rd_kafka_group_member_t *rkgm,
+ const rd_kafkap_bytes_t *MemberMetadata) {
+
+ rd_kafka_buf_t *rkbuf;
+ int16_t Version;
+ int32_t subscription_cnt;
+ rd_kafkap_bytes_t UserData;
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG;
+
+ /* Create a shadow-buffer pointing to the metadata to ease parsing. */
+ rkbuf = rd_kafka_buf_new_shadow(
+ MemberMetadata->data, RD_KAFKAP_BYTES_LEN(MemberMetadata), NULL);
+
+ /* Protocol parser needs a broker handle to log errors on. */
+ rkbuf->rkbuf_rkb = rkb;
+ rd_kafka_broker_keep(rkb);
+
+ rd_kafka_buf_read_i16(rkbuf, &Version);
+ rd_kafka_buf_read_i32(rkbuf, &subscription_cnt);
+
+ if (subscription_cnt > 10000 || subscription_cnt <= 0)
+ goto err;
+
+ rkgm->rkgm_subscription =
+ rd_kafka_topic_partition_list_new(subscription_cnt);
+
+ while (subscription_cnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ char *topic_name;
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ RD_KAFKAP_STR_DUPA(&topic_name, &Topic);
+ rd_kafka_topic_partition_list_add(
+ rkgm->rkgm_subscription, topic_name, RD_KAFKA_PARTITION_UA);
+ }
+
+ rd_kafka_buf_read_bytes(rkbuf, &UserData);
+ rkgm->rkgm_userdata = rd_kafkap_bytes_copy(&UserData);
+
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ if (Version >= 1 &&
+ !(rkgm->rkgm_owned =
+ rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields)))
+ goto err;
+
+ rd_kafka_buf_destroy(rkbuf);
+
+ return 0;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+
+err:
+ rd_rkb_dbg(rkb, CGRP, "MEMBERMETA",
+ "Failed to parse MemberMetadata for \"%.*s\": %s",
+ RD_KAFKAP_STR_PR(rkgm->rkgm_member_id),
+ rd_kafka_err2str(err));
+ if (rkgm->rkgm_subscription) {
+ rd_kafka_topic_partition_list_destroy(rkgm->rkgm_subscription);
+ rkgm->rkgm_subscription = NULL;
+ }
+
+ rd_kafka_buf_destroy(rkbuf);
+ return -1;
+}
+
+
+/**
+ * @brief cgrp handler for JoinGroup responses
+ * opaque must be the cgrp handle.
+ *
+ * @locality rdkafka main thread (unless ERR__DESTROY: arbitrary thread)
+ */
+static void rd_kafka_cgrp_handle_JoinGroup(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_cgrp_t *rkcg = opaque;
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode = 0;
+ int32_t GenerationId;
+ rd_kafkap_str_t Protocol, LeaderId;
+ rd_kafkap_str_t MyMemberId = RD_KAFKAP_STR_INITIALIZER;
+ int32_t member_cnt;
+ int actions;
+ int i_am_leader = 0;
+ rd_kafka_assignor_t *rkas = NULL;
+
+ rd_kafka_cgrp_clear_wait_resp(rkcg, RD_KAFKAP_JoinGroup);
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY ||
+ rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
+ return; /* Terminating */
+
+ if (rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN) {
+ rd_kafka_dbg(
+ rkb->rkb_rk, CGRP, "JOINGROUP",
+ "JoinGroup response: discarding outdated request "
+ "(now in join-state %s)",
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+ return;
+ }
+
+ if (err) {
+ ErrorCode = err;
+ goto err;
+ }
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 2)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ rd_kafka_buf_read_i32(rkbuf, &GenerationId);
+ rd_kafka_buf_read_str(rkbuf, &Protocol);
+ rd_kafka_buf_read_str(rkbuf, &LeaderId);
+ rd_kafka_buf_read_str(rkbuf, &MyMemberId);
+ rd_kafka_buf_read_i32(rkbuf, &member_cnt);
+
+ if (!ErrorCode && RD_KAFKAP_STR_IS_NULL(&Protocol)) {
+ /* Protocol not set, we will not be able to find
+ * a matching assignor so error out early. */
+ ErrorCode = RD_KAFKA_RESP_ERR__BAD_MSG;
+ } else if (!ErrorCode) {
+ char *protocol_name;
+ RD_KAFKAP_STR_DUPA(&protocol_name, &Protocol);
+ if (!(rkas = rd_kafka_assignor_find(rkcg->rkcg_rk,
+ protocol_name)) ||
+ !rkas->rkas_enabled) {
+ rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
+ "Unsupported assignment strategy \"%s\"",
+ protocol_name);
+ if (rkcg->rkcg_assignor) {
+ if (rkcg->rkcg_assignor->rkas_destroy_state_cb)
+ rkcg->rkcg_assignor
+ ->rkas_destroy_state_cb(
+ rkcg->rkcg_assignor_state);
+ rkcg->rkcg_assignor_state = NULL;
+ rkcg->rkcg_assignor = NULL;
+ }
+ ErrorCode = RD_KAFKA_RESP_ERR__UNKNOWN_PROTOCOL;
+ }
+ }
+
+ rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
+ "JoinGroup response: GenerationId %" PRId32
+ ", "
+ "Protocol %.*s, LeaderId %.*s%s, my MemberId %.*s, "
+ "member metadata count "
+ "%" PRId32 ": %s",
+ GenerationId, RD_KAFKAP_STR_PR(&Protocol),
+ RD_KAFKAP_STR_PR(&LeaderId),
+ RD_KAFKAP_STR_LEN(&MyMemberId) &&
+ !rd_kafkap_str_cmp(&LeaderId, &MyMemberId)
+ ? " (me)"
+ : "",
+ RD_KAFKAP_STR_PR(&MyMemberId), member_cnt,
+ ErrorCode ? rd_kafka_err2str(ErrorCode) : "(no error)");
+
+ if (!ErrorCode) {
+ char *my_member_id;
+ RD_KAFKAP_STR_DUPA(&my_member_id, &MyMemberId);
+ rd_kafka_cgrp_set_member_id(rkcg, my_member_id);
+ rkcg->rkcg_generation_id = GenerationId;
+ i_am_leader = !rd_kafkap_str_cmp(&LeaderId, &MyMemberId);
+ } else {
+ rd_interval_backoff(&rkcg->rkcg_join_intvl, 1000 * 1000);
+ goto err;
+ }
+
+ if (rkcg->rkcg_assignor && rkcg->rkcg_assignor != rkas) {
+ if (rkcg->rkcg_assignor->rkas_destroy_state_cb)
+ rkcg->rkcg_assignor->rkas_destroy_state_cb(
+ rkcg->rkcg_assignor_state);
+ rkcg->rkcg_assignor_state = NULL;
+ }
+ rkcg->rkcg_assignor = rkas;
+
+ if (i_am_leader) {
+ rd_kafka_group_member_t *members;
+ int i;
+ int sub_cnt = 0;
+ rd_list_t topics;
+ rd_kafka_op_t *rko;
+ rd_kafka_dbg(rkb->rkb_rk, CGRP, "JOINGROUP",
+ "I am elected leader for group \"%s\" "
+ "with %" PRId32 " member(s)",
+ rkcg->rkcg_group_id->str, member_cnt);
+
+ if (member_cnt > 100000) {
+ err = RD_KAFKA_RESP_ERR__BAD_MSG;
+ goto err;
+ }
+
+ rd_list_init(&topics, member_cnt, rd_free);
+
+ members = rd_calloc(member_cnt, sizeof(*members));
+
+ for (i = 0; i < member_cnt; i++) {
+ rd_kafkap_str_t MemberId;
+ rd_kafkap_bytes_t MemberMetadata;
+ rd_kafka_group_member_t *rkgm;
+ rd_kafkap_str_t GroupInstanceId =
+ RD_KAFKAP_STR_INITIALIZER;
+
+ rd_kafka_buf_read_str(rkbuf, &MemberId);
+ if (request->rkbuf_reqhdr.ApiVersion >= 5)
+ rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
+ rd_kafka_buf_read_bytes(rkbuf, &MemberMetadata);
+
+ rkgm = &members[sub_cnt];
+ rkgm->rkgm_member_id = rd_kafkap_str_copy(&MemberId);
+ rkgm->rkgm_group_instance_id =
+ rd_kafkap_str_copy(&GroupInstanceId);
+ rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
+ rkgm->rkgm_generation = -1;
+
+ if (rd_kafka_group_MemberMetadata_consumer_read(
+ rkb, rkgm, &MemberMetadata)) {
+ /* Failed to parse this member's metadata,
+ * ignore it. */
+ } else {
+ sub_cnt++;
+ rkgm->rkgm_assignment =
+ rd_kafka_topic_partition_list_new(
+ rkgm->rkgm_subscription->cnt);
+ rd_kafka_topic_partition_list_get_topic_names(
+ rkgm->rkgm_subscription, &topics,
+ 0 /*dont include regex*/);
+ }
+ }
+
+ /* FIXME: What to do if parsing failed for some/all members?
+ * It is a sign of incompatibility. */
+
+
+ rd_kafka_cgrp_group_leader_reset(rkcg,
+ "JoinGroup response clean-up");
+
+ rd_kafka_assert(NULL, rkcg->rkcg_group_leader.members == NULL);
+ rkcg->rkcg_group_leader.members = members;
+ rkcg->rkcg_group_leader.member_cnt = sub_cnt;
+
+ rd_kafka_cgrp_set_join_state(
+ rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
+
+ /* The assignor will need metadata so fetch it asynchronously
+ * and run the assignor when we get a reply.
+ * Create a callback op that the generic metadata code
+ * will trigger when metadata has been parsed. */
+ rko = rd_kafka_op_new_cb(
+ rkcg->rkcg_rk, RD_KAFKA_OP_METADATA,
+ rd_kafka_cgrp_assignor_handle_Metadata_op);
+ rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, NULL);
+
+ rd_kafka_MetadataRequest(
+ rkb, &topics, "partition assignor",
+ rd_false /*!allow_auto_create*/,
+ /* cgrp_update=false:
+ * Since the subscription list may not be identical
+ * across all members of the group and thus the
+ * Metadata response may not be identical to this
+ * consumer's subscription list, we want to
+ * avoid triggering a rejoin or error propagation
+ * on receiving the response since some topics
+ * may be missing. */
+ rd_false, rko);
+ rd_list_destroy(&topics);
+
+ } else {
+ rd_kafka_cgrp_set_join_state(
+ rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC);
+
+ rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_SyncGroup);
+
+ rd_kafka_SyncGroupRequest(
+ rkb, rkcg->rkcg_group_id, rkcg->rkcg_generation_id,
+ rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id, NULL, 0,
+ RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
+ rd_kafka_cgrp_handle_SyncGroup, rkcg);
+ }
+
+err:
+ actions = rd_kafka_err_action(
+ rkb, ErrorCode, request, RD_KAFKA_ERR_ACTION_IGNORE,
+ RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
+
+ RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED,
+
+ RD_KAFKA_ERR_ACTION_IGNORE, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
+
+ RD_KAFKA_ERR_ACTION_END);
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ /* Re-query for coordinator */
+ rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
+ RD_KAFKA_OP_COORD_QUERY, ErrorCode);
+ }
+
+ /* No need for retries here since the join is intervalled,
+ * see rkcg_join_intvl */
+
+ if (ErrorCode) {
+ if (ErrorCode == RD_KAFKA_RESP_ERR__DESTROY)
+ return; /* Termination */
+
+ if (ErrorCode == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID) {
+ rd_kafka_set_fatal_error(rkcg->rkcg_rk, ErrorCode,
+ "Fatal consumer error: %s",
+ rd_kafka_err2str(ErrorCode));
+ ErrorCode = RD_KAFKA_RESP_ERR__FATAL;
+
+ } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
+ rd_kafka_consumer_err(
+ rkcg->rkcg_q, rd_kafka_broker_id(rkb), ErrorCode, 0,
+ NULL, NULL, RD_KAFKA_OFFSET_INVALID,
+ "JoinGroup failed: %s",
+ rd_kafka_err2str(ErrorCode));
+
+ if (ErrorCode == RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID)
+ rd_kafka_cgrp_set_member_id(rkcg, "");
+ else if (ErrorCode == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION)
+ rkcg->rkcg_generation_id = -1;
+ else if (ErrorCode == RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED) {
+ /* KIP-394 requires member.id on initial join
+ * group request */
+ char *my_member_id;
+ RD_KAFKAP_STR_DUPA(&my_member_id, &MyMemberId);
+ rd_kafka_cgrp_set_member_id(rkcg, my_member_id);
+ /* Skip the join backoff */
+ rd_interval_reset(&rkcg->rkcg_join_intvl);
+ }
+
+ if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
+ (ErrorCode == RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION ||
+ ErrorCode == RD_KAFKA_RESP_ERR_MEMBER_ID_REQUIRED))
+ rd_kafka_cgrp_revoke_all_rejoin(
+ rkcg, rd_true /*assignment is lost*/,
+ rd_true /*this consumer is initiating*/,
+ "JoinGroup error");
+ else
+ rd_kafka_cgrp_rejoin(rkcg, "JoinGroup error: %s",
+ rd_kafka_err2str(ErrorCode));
+ }
+
+ return;
+
+err_parse:
+ ErrorCode = rkbuf->rkbuf_err;
+ goto err;
+}
+
+
+/**
+ * @brief Check subscription against requested Metadata.
+ */
+static rd_kafka_op_res_t rd_kafka_cgrp_handle_Metadata_op(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED; /* Terminating */
+
+ rd_kafka_cgrp_metadata_update_check(rkcg, rd_false /*dont rejoin*/);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief (Async) Refresh metadata (for cgrp's needs)
+ *
+ * @returns 1 if metadata refresh was requested, or 0 if metadata is
+ * up to date, or -1 if no broker is available for metadata requests.
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static int rd_kafka_cgrp_metadata_refresh(rd_kafka_cgrp_t *rkcg,
+ int *metadata_agep,
+ const char *reason) {
+ rd_kafka_t *rk = rkcg->rkcg_rk;
+ rd_kafka_op_t *rko;
+ rd_list_t topics;
+ rd_kafka_resp_err_t err;
+
+ rd_list_init(&topics, 8, rd_free);
+
+ /* Insert all non-wildcard topics in cache. */
+ rd_kafka_metadata_cache_hint_rktparlist(
+ rkcg->rkcg_rk, rkcg->rkcg_subscription, NULL, 0 /*dont replace*/);
+
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) {
+ /* For wildcard subscriptions make sure the
+ * cached full metadata isn't too old. */
+ int metadata_age = -1;
+
+ if (rk->rk_ts_full_metadata)
+ metadata_age =
+ (int)(rd_clock() - rk->rk_ts_full_metadata) / 1000;
+
+ *metadata_agep = metadata_age;
+
+ if (metadata_age != -1 &&
+ metadata_age <= rk->rk_conf.metadata_max_age_ms) {
+ rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA,
+ "CGRPMETADATA",
+ "%s: metadata for wildcard subscription "
+ "is up to date (%dms old)",
+ reason, *metadata_agep);
+ rd_list_destroy(&topics);
+ return 0; /* Up-to-date */
+ }
+
+ } else {
+ /* Check that all subscribed topics are in the cache. */
+ int r;
+
+ rd_kafka_topic_partition_list_get_topic_names(
+ rkcg->rkcg_subscription, &topics, 0 /*no regexps*/);
+
+ rd_kafka_rdlock(rk);
+ r = rd_kafka_metadata_cache_topics_count_exists(rk, &topics,
+ metadata_agep);
+ rd_kafka_rdunlock(rk);
+
+ if (r == rd_list_cnt(&topics)) {
+ rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA,
+ "CGRPMETADATA",
+ "%s: metadata for subscription "
+ "is up to date (%dms old)",
+ reason, *metadata_agep);
+ rd_list_destroy(&topics);
+ return 0; /* Up-to-date and all topics exist. */
+ }
+
+ rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA",
+ "%s: metadata for subscription "
+ "only available for %d/%d topics (%dms old)",
+ reason, r, rd_list_cnt(&topics), *metadata_agep);
+ }
+
+ /* Async request, result will be triggered from
+ * rd_kafka_parse_metadata(). */
+ rko = rd_kafka_op_new_cb(rkcg->rkcg_rk, RD_KAFKA_OP_METADATA,
+ rd_kafka_cgrp_handle_Metadata_op);
+ rd_kafka_op_set_replyq(rko, rkcg->rkcg_ops, 0);
+
+ err = rd_kafka_metadata_request(rkcg->rkcg_rk, NULL, &topics,
+ rd_false /*!allow auto create */,
+ rd_true /*cgrp_update*/, reason, rko);
+ if (err) {
+ rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_METADATA, "CGRPMETADATA",
+ "%s: need to refresh metadata (%dms old) "
+ "but no usable brokers available: %s",
+ reason, *metadata_agep, rd_kafka_err2str(err));
+ rd_kafka_op_destroy(rko);
+ }
+
+ rd_list_destroy(&topics);
+
+ return err ? -1 : 1;
+}
+
+
+
+static void rd_kafka_cgrp_join(rd_kafka_cgrp_t *rkcg) {
+ int metadata_age;
+
+ if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP ||
+ rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_INIT ||
+ rd_kafka_cgrp_awaiting_response(rkcg))
+ return;
+
+ /* On max.poll.interval.ms failure, do not rejoin group until the
+ * application has called poll. */
+ if ((rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED) &&
+ rd_kafka_max_poll_exceeded(rkcg->rkcg_rk))
+ return;
+
+ rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "JOIN",
+ "Group \"%.*s\": join with %d subscribed topic(s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_list_cnt(rkcg->rkcg_subscribed_topics));
+
+
+ /* See if we need to query metadata to continue:
+ * - if subscription contains wildcards:
+ * * query all topics in cluster
+ *
+ * - if subscription does not contain wildcards but
+ * some topics are missing from the local metadata cache:
+ * * query subscribed topics (all cached ones)
+ *
+ * - otherwise:
+ * * rely on topic metadata cache
+ */
+ /* We need up-to-date full metadata to continue,
+ * refresh metadata if necessary. */
+ if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age,
+ "consumer join") == 1) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER,
+ "JOIN",
+ "Group \"%.*s\": "
+ "postponing join until up-to-date "
+ "metadata is available",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
+
+ rd_assert(
+ rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT ||
+ /* Possible via rd_kafka_cgrp_modify_subscription */
+ rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+
+ rd_kafka_cgrp_set_join_state(
+ rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
+
+ return; /* ^ async call */
+ }
+
+ if (rd_list_empty(rkcg->rkcg_subscribed_topics))
+ rd_kafka_cgrp_metadata_update_check(rkcg,
+ rd_false /*dont join*/);
+
+ if (rd_list_empty(rkcg->rkcg_subscribed_topics)) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "JOIN",
+ "Group \"%.*s\": "
+ "no matching topics based on %dms old metadata: "
+ "next metadata refresh in %dms",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), metadata_age,
+ rkcg->rkcg_rk->rk_conf.metadata_refresh_interval_ms -
+ metadata_age);
+ return;
+ }
+
+ rd_rkb_dbg(
+ rkcg->rkcg_curr_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "JOIN",
+ "Joining group \"%.*s\" with %d subscribed topic(s) and "
+ "member id \"%.*s\"",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_list_cnt(rkcg->rkcg_subscribed_topics),
+ rkcg->rkcg_member_id ? RD_KAFKAP_STR_LEN(rkcg->rkcg_member_id) : 0,
+ rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : "");
+
+
+ rd_kafka_cgrp_set_join_state(rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN);
+
+ rd_kafka_cgrp_set_wait_resp(rkcg, RD_KAFKAP_JoinGroup);
+
+ rd_kafka_JoinGroupRequest(
+ rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_member_id,
+ rkcg->rkcg_group_instance_id,
+ rkcg->rkcg_rk->rk_conf.group_protocol_type,
+ rkcg->rkcg_subscribed_topics, RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
+ rd_kafka_cgrp_handle_JoinGroup, rkcg);
+}
+
+/**
+ * Rejoin group on update to effective subscribed topics list
+ */
+static void rd_kafka_cgrp_revoke_rejoin(rd_kafka_cgrp_t *rkcg,
+ const char *reason) {
+ /*
+ * Clean-up group leader duties, if any.
+ */
+ rd_kafka_cgrp_group_leader_reset(rkcg, "group (re)join");
+
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP, "REJOIN",
+ "Group \"%.*s\" (re)joining in join-state %s "
+ "with %d assigned partition(s): %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0,
+ reason);
+
+ rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/,
+ rd_true /*initiating*/, reason);
+}
+
+/**
+ * @brief Update the effective list of subscribed topics.
+ *
+ * Set \p tinfos to NULL to clear the list.
+ *
+ * @param tinfos rd_list_t(rd_kafka_topic_info_t *): new effective topic list
+ *
+ * @returns true on change, else false.
+ *
+ * @remark Takes ownership of \p tinfos
+ */
+static rd_bool_t rd_kafka_cgrp_update_subscribed_topics(rd_kafka_cgrp_t *rkcg,
+ rd_list_t *tinfos) {
+ rd_kafka_topic_info_t *tinfo;
+ int i;
+
+ if (!tinfos) {
+ if (!rd_list_empty(rkcg->rkcg_subscribed_topics))
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION",
+ "Group \"%.*s\": "
+ "clearing subscribed topics list (%d)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_list_cnt(rkcg->rkcg_subscribed_topics));
+ tinfos = rd_list_new(0, (void *)rd_kafka_topic_info_destroy);
+
+ } else {
+ if (rd_list_cnt(tinfos) == 0)
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIPTION",
+ "Group \"%.*s\": "
+ "no topics in metadata matched "
+ "subscription",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
+ }
+
+ /* Sort for comparison */
+ rd_list_sort(tinfos, rd_kafka_topic_info_cmp);
+
+ /* Compare to existing to see if anything changed. */
+ if (!rd_list_cmp(rkcg->rkcg_subscribed_topics, tinfos,
+ rd_kafka_topic_info_cmp)) {
+ /* No change */
+ rd_list_destroy(tinfos);
+ return rd_false;
+ }
+
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA, "SUBSCRIPTION",
+ "Group \"%.*s\": effective subscription list changed "
+ "from %d to %d topic(s):",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_list_cnt(rkcg->rkcg_subscribed_topics), rd_list_cnt(tinfos));
+
+ RD_LIST_FOREACH(tinfo, tinfos, i)
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_METADATA,
+ "SUBSCRIPTION", " Topic %s with %d partition(s)",
+ tinfo->topic, tinfo->partition_cnt);
+
+ rd_list_destroy(rkcg->rkcg_subscribed_topics);
+
+ rkcg->rkcg_subscribed_topics = tinfos;
+
+ return rd_true;
+}
+
+
+/**
+ * @brief Handle Heartbeat response.
+ */
+void rd_kafka_cgrp_handle_Heartbeat(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode = 0;
+ int actions = 0;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ return;
+
+ rd_dassert(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT);
+ rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
+
+ rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (err)
+ goto err;
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ if (ErrorCode) {
+ err = ErrorCode;
+ goto err;
+ }
+
+ rd_kafka_cgrp_update_session_timeout(
+ rkcg, rd_false /*don't update if session has expired*/);
+
+ return;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+ rkcg->rkcg_last_heartbeat_err = err;
+
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP, "HEARTBEAT",
+ "Group \"%s\" heartbeat error response in "
+ "state %s (join-state %s, %d partition(s) assigned): %s",
+ rkcg->rkcg_group_id->str,
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0,
+ rd_kafka_err2str(err));
+
+ if (rkcg->rkcg_join_state <= RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP, "HEARTBEAT",
+ "Heartbeat response: discarding outdated "
+ "request (now in join-state %s)",
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+ return;
+ }
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__DESTROY:
+ /* quick cleanup */
+ return;
+
+ case RD_KAFKA_RESP_ERR_NOT_COORDINATOR_FOR_GROUP:
+ case RD_KAFKA_RESP_ERR_GROUP_COORDINATOR_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER, "HEARTBEAT",
+ "Heartbeat failed due to coordinator (%s) "
+ "no longer available: %s: "
+ "re-querying for coordinator",
+ rkcg->rkcg_curr_coord
+ ? rd_kafka_broker_name(rkcg->rkcg_curr_coord)
+ : "none",
+ rd_kafka_err2str(err));
+ /* Remain in joined state and keep querying for coordinator */
+ actions = RD_KAFKA_ERR_ACTION_REFRESH;
+ break;
+
+ case RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS:
+ rd_kafka_cgrp_update_session_timeout(
+ rkcg, rd_false /*don't update if session has expired*/);
+ /* No further action if already rebalancing */
+ if (RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg))
+ return;
+ rd_kafka_cgrp_group_is_rebalancing(rkcg);
+ return;
+
+ case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
+ rd_kafka_cgrp_set_member_id(rkcg, "");
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
+ rd_true /*initiating*/,
+ "resetting member-id");
+ return;
+
+ case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
+ rkcg->rkcg_generation_id = -1;
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
+ rd_true /*initiating*/,
+ "illegal generation");
+ return;
+
+ case RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID:
+ rd_kafka_set_fatal_error(rkcg->rkcg_rk, err,
+ "Fatal consumer error: %s",
+ rd_kafka_err2str(err));
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(
+ rkcg, rd_true, /*assignment lost*/
+ rd_true, /*initiating*/
+ "consumer fenced by "
+ "newer instance");
+ return;
+
+ default:
+ actions = rd_kafka_err_action(rkb, err, request,
+ RD_KAFKA_ERR_ACTION_END);
+ break;
+ }
+
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ /* Re-query for coordinator */
+ rd_kafka_cgrp_coord_query(rkcg, rd_kafka_err2str(err));
+ }
+
+ if (actions & RD_KAFKA_ERR_ACTION_RETRY &&
+ rd_kafka_buf_retry(rkb, request)) {
+ /* Retry */
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
+ return;
+ }
+}
+
+
+
+/**
+ * @brief Send Heartbeat
+ */
+static void rd_kafka_cgrp_heartbeat(rd_kafka_cgrp_t *rkcg) {
+ /* Don't send heartbeats if max.poll.interval.ms was exceeded */
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED)
+ return;
+
+ /* Skip heartbeat if we have one in transit */
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT)
+ return;
+
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT;
+ rd_kafka_HeartbeatRequest(
+ rkcg->rkcg_coord, rkcg->rkcg_group_id, rkcg->rkcg_generation_id,
+ rkcg->rkcg_member_id, rkcg->rkcg_group_instance_id,
+ RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0), rd_kafka_cgrp_handle_Heartbeat,
+ NULL);
+}
+
+/**
+ * Cgrp is now terminated: decommission it and signal back to application.
+ */
+static void rd_kafka_cgrp_terminated(rd_kafka_cgrp_t *rkcg) {
+ if (rd_atomic32_get(&rkcg->rkcg_terminated))
+ return; /* terminated() may be called multiple times,
+ * make sure to only terminate once. */
+
+ rd_kafka_cgrp_group_assignment_set(rkcg, NULL);
+
+ rd_kafka_assert(NULL, !rd_kafka_assignment_in_progress(rkcg->rkcg_rk));
+ rd_kafka_assert(NULL, !rkcg->rkcg_group_assignment);
+ rd_kafka_assert(NULL, rkcg->rkcg_rk->rk_consumer.wait_commit_cnt == 0);
+ rd_kafka_assert(NULL, rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM);
+
+ rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers,
+ &rkcg->rkcg_offset_commit_tmr, 1 /*lock*/);
+
+ rd_kafka_q_purge(rkcg->rkcg_wait_coord_q);
+
+ /* Disable and empty ops queue since there will be no
+ * (broker) thread serving it anymore after the unassign_broker
+ * below.
+ * This prevents hang on destroy where responses are enqueued on
+ * rkcg_ops without anything serving the queue. */
+ rd_kafka_q_disable(rkcg->rkcg_ops);
+ rd_kafka_q_purge(rkcg->rkcg_ops);
+
+ if (rkcg->rkcg_curr_coord)
+ rd_kafka_cgrp_coord_clear_broker(rkcg);
+
+ if (rkcg->rkcg_coord) {
+ rd_kafka_broker_destroy(rkcg->rkcg_coord);
+ rkcg->rkcg_coord = NULL;
+ }
+
+ rd_atomic32_set(&rkcg->rkcg_terminated, rd_true);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
+ "Consumer group sub-system terminated%s",
+ rkcg->rkcg_reply_rko ? " (will enqueue reply)" : "");
+
+ if (rkcg->rkcg_reply_rko) {
+ /* Signal back to application. */
+ rd_kafka_replyq_enq(&rkcg->rkcg_reply_rko->rko_replyq,
+ rkcg->rkcg_reply_rko, 0);
+ rkcg->rkcg_reply_rko = NULL;
+ }
+
+ /* Remove cgrp application queue forwarding, if any. */
+ rd_kafka_q_fwd_set(rkcg->rkcg_q, NULL);
+}
+
+
+/**
+ * If a cgrp is terminating and all outstanding ops are now finished
+ * then progress to final termination and return 1.
+ * Else returns 0.
+ */
+static RD_INLINE int rd_kafka_cgrp_try_terminate(rd_kafka_cgrp_t *rkcg) {
+
+ if (rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM)
+ return 1;
+
+ if (likely(!(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)))
+ return 0;
+
+ /* Check if wait-coord queue has timed out. */
+ if (rd_kafka_q_len(rkcg->rkcg_wait_coord_q) > 0 &&
+ rkcg->rkcg_ts_terminate +
+ (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000) <
+ rd_clock()) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
+ "Group \"%s\": timing out %d op(s) in "
+ "wait-for-coordinator queue",
+ rkcg->rkcg_group_id->str,
+ rd_kafka_q_len(rkcg->rkcg_wait_coord_q));
+ rd_kafka_q_disable(rkcg->rkcg_wait_coord_q);
+ if (rd_kafka_q_concat(rkcg->rkcg_ops,
+ rkcg->rkcg_wait_coord_q) == -1) {
+ /* ops queue shut down, purge coord queue */
+ rd_kafka_q_purge(rkcg->rkcg_wait_coord_q);
+ }
+ }
+
+ if (!RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) &&
+ rd_list_empty(&rkcg->rkcg_toppars) &&
+ !rd_kafka_assignment_in_progress(rkcg->rkcg_rk) &&
+ rkcg->rkcg_rk->rk_consumer.wait_commit_cnt == 0 &&
+ !(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE)) {
+ /* Since we might be deep down in a 'rko' handler
+ * called from cgrp_op_serve() we cant call terminated()
+ * directly since it will decommission the rkcg_ops queue
+ * that might be locked by intermediate functions.
+ * Instead set the TERM state and let the cgrp terminate
+ * at its own discretion. */
+ rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_TERM);
+
+ return 1;
+ } else {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP, "CGRPTERM",
+ "Group \"%s\": "
+ "waiting for %s%d toppar(s), "
+ "%s"
+ "%d commit(s)%s%s%s (state %s, join-state %s) "
+ "before terminating",
+ rkcg->rkcg_group_id->str,
+ RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) ? "assign call, " : "",
+ rd_list_cnt(&rkcg->rkcg_toppars),
+ rd_kafka_assignment_in_progress(rkcg->rkcg_rk)
+ ? "assignment in progress, "
+ : "",
+ rkcg->rkcg_rk->rk_consumer.wait_commit_cnt,
+ (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WAIT_LEAVE)
+ ? ", wait-leave,"
+ : "",
+ rkcg->rkcg_rebalance_rejoin ? ", rebalance_rejoin," : "",
+ (rkcg->rkcg_rebalance_incr_assignment != NULL)
+ ? ", rebalance_incr_assignment,"
+ : "",
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+ return 0;
+ }
+}
+
+
+/**
+ * @brief Add partition to this cgrp management
+ *
+ * @locks none
+ */
+static void rd_kafka_cgrp_partition_add(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_toppar_t *rktp) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTADD",
+ "Group \"%s\": add %s [%" PRId32 "]",
+ rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition);
+
+ rd_kafka_toppar_lock(rktp);
+ rd_assert(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_CGRP));
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_CGRP;
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_keep(rktp);
+ rd_list_add(&rkcg->rkcg_toppars, rktp);
+}
+
+/**
+ * @brief Remove partition from this cgrp management
+ *
+ * @locks none
+ */
+static void rd_kafka_cgrp_partition_del(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_toppar_t *rktp) {
+ int cnt = 0, barrier_cnt = 0, message_cnt = 0, other_cnt = 0;
+ rd_kafka_op_t *rko;
+ rd_kafka_q_t *rkq;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL",
+ "Group \"%s\": delete %s [%" PRId32 "]",
+ rkcg->rkcg_group_id->str, rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition);
+
+ rd_kafka_toppar_lock(rktp);
+ rd_assert(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_CGRP);
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_CGRP;
+
+ if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) {
+ /* Partition is being removed from the cluster and it's stopped,
+ * so rktp->rktp_fetchq->rkq_fwdq is NULL.
+ * Purge remaining operations in rktp->rktp_fetchq->rkq_q,
+ * while holding lock, to avoid circular references */
+ rkq = rktp->rktp_fetchq;
+ mtx_lock(&rkq->rkq_lock);
+ rd_assert(!rkq->rkq_fwdq);
+
+ rko = TAILQ_FIRST(&rkq->rkq_q);
+ while (rko) {
+ if (rko->rko_type != RD_KAFKA_OP_BARRIER &&
+ rko->rko_type != RD_KAFKA_OP_FETCH) {
+ rd_kafka_log(
+ rkcg->rkcg_rk, LOG_WARNING, "PARTDEL",
+ "Purging toppar fetch queue buffer op"
+ "with unexpected type: %s",
+ rd_kafka_op2str(rko->rko_type));
+ }
+
+ if (rko->rko_type == RD_KAFKA_OP_BARRIER)
+ barrier_cnt++;
+ else if (rko->rko_type == RD_KAFKA_OP_FETCH)
+ message_cnt++;
+ else
+ other_cnt++;
+
+ rko = TAILQ_NEXT(rko, rko_link);
+ cnt++;
+ }
+
+ mtx_unlock(&rkq->rkq_lock);
+
+ if (cnt) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL",
+ "Purge toppar fetch queue buffer "
+ "containing %d op(s) "
+ "(%d barrier(s), %d message(s), %d other)"
+ " to avoid "
+ "circular references",
+ cnt, barrier_cnt, message_cnt, other_cnt);
+ rd_kafka_q_purge(rktp->rktp_fetchq);
+ } else {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "PARTDEL",
+ "Not purging toppar fetch queue buffer."
+ " No ops present in the buffer.");
+ }
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_list_remove(&rkcg->rkcg_toppars, rktp);
+
+ rd_kafka_toppar_destroy(rktp); /* refcnt from _add above */
+
+ rd_kafka_cgrp_try_terminate(rkcg);
+}
+
+
+
+/**
+ * @brief Defer offset commit (rko) until coordinator is available.
+ *
+ * @returns 1 if the rko was deferred or 0 if the defer queue is disabled
+ * or rko already deferred.
+ */
+static int rd_kafka_cgrp_defer_offset_commit(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_op_t *rko,
+ const char *reason) {
+
+ /* wait_coord_q is disabled session.timeout.ms after
+ * group close() has been initated. */
+ if (rko->rko_u.offset_commit.ts_timeout != 0 ||
+ !rd_kafka_q_ready(rkcg->rkcg_wait_coord_q))
+ return 0;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COMMIT",
+ "Group \"%s\": "
+ "unable to OffsetCommit in state %s: %s: "
+ "coordinator (%s) is unavailable: "
+ "retrying later",
+ rkcg->rkcg_group_id->str,
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state], reason,
+ rkcg->rkcg_curr_coord
+ ? rd_kafka_broker_name(rkcg->rkcg_curr_coord)
+ : "none");
+
+ rko->rko_flags |= RD_KAFKA_OP_F_REPROCESS;
+ rko->rko_u.offset_commit.ts_timeout =
+ rd_clock() +
+ (rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000);
+ rd_kafka_q_enq(rkcg->rkcg_wait_coord_q, rko);
+
+ return 1;
+}
+
+
+/**
+ * @brief Update the committed offsets for the partitions in \p offsets,
+ *
+ * @remark \p offsets may be NULL if \p err is set
+ * @returns the number of partitions with errors encountered
+ */
+static int rd_kafka_cgrp_update_committed_offsets(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets) {
+ int i;
+ int errcnt = 0;
+
+ /* Update toppars' committed offset or global error */
+ for (i = 0; offsets && i < offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
+ rd_kafka_toppar_t *rktp;
+
+ /* Ignore logical offsets since they were never
+ * sent to the broker. */
+ if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset))
+ continue;
+
+ /* Propagate global error to all partitions that don't have
+ * explicit error set. */
+ if (err && !rktpar->err)
+ rktpar->err = err;
+
+ if (rktpar->err) {
+ rd_kafka_dbg(rkcg->rkcg_rk, TOPIC, "OFFSET",
+ "OffsetCommit failed for "
+ "%s [%" PRId32
+ "] at offset "
+ "%" PRId64 " in join-state %s: %s",
+ rktpar->topic, rktpar->partition,
+ rktpar->offset,
+ rd_kafka_cgrp_join_state_names
+ [rkcg->rkcg_join_state],
+ rd_kafka_err2str(rktpar->err));
+
+ errcnt++;
+ continue;
+ }
+
+ rktp = rd_kafka_topic_partition_get_toppar(rkcg->rkcg_rk,
+ rktpar, rd_false);
+ if (!rktp)
+ continue;
+
+ rd_kafka_toppar_lock(rktp);
+ rktp->rktp_committed_pos =
+ rd_kafka_topic_partition_get_fetch_pos(rktpar);
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp); /* from get_toppar() */
+ }
+
+ return errcnt;
+}
+
+
+/**
+ * @brief Propagate OffsetCommit results.
+ *
+ * @param rko_orig The original rko that triggered the commit, this is used
+ * to propagate the result.
+ * @param err Is the aggregated request-level error, or ERR_NO_ERROR.
+ * @param errcnt Are the number of partitions in \p offsets that failed
+ * offset commit.
+ */
+static void rd_kafka_cgrp_propagate_commit_result(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_op_t *rko_orig,
+ rd_kafka_resp_err_t err,
+ int errcnt,
+ rd_kafka_topic_partition_list_t *offsets) {
+
+ const rd_kafka_t *rk = rkcg->rkcg_rk;
+ int offset_commit_cb_served = 0;
+
+ /* If no special callback is set but a offset_commit_cb has
+ * been set in conf then post an event for the latter. */
+ if (!rko_orig->rko_u.offset_commit.cb && rk->rk_conf.offset_commit_cb) {
+ rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
+
+ rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
+
+ if (offsets)
+ rko_reply->rko_u.offset_commit.partitions =
+ rd_kafka_topic_partition_list_copy(offsets);
+
+ rko_reply->rko_u.offset_commit.cb =
+ rk->rk_conf.offset_commit_cb;
+ rko_reply->rko_u.offset_commit.opaque = rk->rk_conf.opaque;
+
+ rd_kafka_q_enq(rk->rk_rep, rko_reply);
+ offset_commit_cb_served++;
+ }
+
+
+ /* Enqueue reply to requester's queue, if any. */
+ if (rko_orig->rko_replyq.q) {
+ rd_kafka_op_t *rko_reply = rd_kafka_op_new_reply(rko_orig, err);
+
+ rd_kafka_op_set_prio(rko_reply, RD_KAFKA_PRIO_HIGH);
+
+ /* Copy offset & partitions & callbacks to reply op */
+ rko_reply->rko_u.offset_commit = rko_orig->rko_u.offset_commit;
+ if (offsets)
+ rko_reply->rko_u.offset_commit.partitions =
+ rd_kafka_topic_partition_list_copy(offsets);
+ if (rko_reply->rko_u.offset_commit.reason)
+ rko_reply->rko_u.offset_commit.reason =
+ rd_strdup(rko_reply->rko_u.offset_commit.reason);
+
+ rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko_reply, 0);
+ offset_commit_cb_served++;
+ }
+
+ if (!offset_commit_cb_served && offsets &&
+ (errcnt > 0 || (err != RD_KAFKA_RESP_ERR_NO_ERROR &&
+ err != RD_KAFKA_RESP_ERR__NO_OFFSET))) {
+ /* If there is no callback or handler for this (auto)
+ * commit then log an error (#1043) */
+ char tmp[512];
+
+ rd_kafka_topic_partition_list_str(
+ offsets, tmp, sizeof(tmp),
+ /* Print per-partition errors unless there was a
+ * request-level error. */
+ RD_KAFKA_FMT_F_OFFSET |
+ (errcnt ? RD_KAFKA_FMT_F_ONLY_ERR : 0));
+
+ rd_kafka_log(
+ rkcg->rkcg_rk, LOG_WARNING, "COMMITFAIL",
+ "Offset commit (%s) failed "
+ "for %d/%d partition(s) in join-state %s: "
+ "%s%s%s",
+ rko_orig->rko_u.offset_commit.reason,
+ errcnt ? errcnt : offsets->cnt, offsets->cnt,
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ errcnt ? rd_kafka_err2str(err) : "", errcnt ? ": " : "",
+ tmp);
+ }
+}
+
+
+
+/**
+ * @brief Handle OffsetCommitResponse
+ * Takes the original 'rko' as opaque argument.
+ * @remark \p rkb, rkbuf, and request may be NULL in a number of
+ * error cases (e.g., _NO_OFFSET, _WAIT_COORD)
+ */
+static void rd_kafka_cgrp_op_handle_OffsetCommit(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_cgrp_t *rkcg = rk->rk_cgrp;
+ rd_kafka_op_t *rko_orig = opaque;
+ rd_kafka_topic_partition_list_t *offsets =
+ rko_orig->rko_u.offset_commit.partitions; /* maybe NULL */
+ int errcnt;
+
+ RD_KAFKA_OP_TYPE_ASSERT(rko_orig, RD_KAFKA_OP_OFFSET_COMMIT);
+
+ err = rd_kafka_handle_OffsetCommit(rk, rkb, err, rkbuf, request,
+ offsets, rd_false);
+
+ /* Suppress empty commit debug logs if allowed */
+ if (err != RD_KAFKA_RESP_ERR__NO_OFFSET ||
+ !rko_orig->rko_u.offset_commit.silent_empty) {
+ if (rkb)
+ rd_rkb_dbg(rkb, CGRP, "COMMIT",
+ "OffsetCommit for %d partition(s) in "
+ "join-state %s: "
+ "%s: returned: %s",
+ offsets ? offsets->cnt : -1,
+ rd_kafka_cgrp_join_state_names
+ [rkcg->rkcg_join_state],
+ rko_orig->rko_u.offset_commit.reason,
+ rd_kafka_err2str(err));
+ else
+ rd_kafka_dbg(rk, CGRP, "COMMIT",
+ "OffsetCommit for %d partition(s) in "
+ "join-state "
+ "%s: %s: "
+ "returned: %s",
+ offsets ? offsets->cnt : -1,
+ rd_kafka_cgrp_join_state_names
+ [rkcg->rkcg_join_state],
+ rko_orig->rko_u.offset_commit.reason,
+ rd_kafka_err2str(err));
+ }
+
+
+ /*
+ * Error handling
+ */
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
+ /* Revoke assignment and rebalance on unknown member */
+ rd_kafka_cgrp_set_member_id(rk->rk_cgrp, "");
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(
+ rkcg, rd_true /*assignment is lost*/,
+ rd_true /*this consumer is initiating*/,
+ "OffsetCommit error: Unknown member");
+ break;
+
+ case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
+ /* Revoke assignment and rebalance on illegal generation */
+ rk->rk_cgrp->rkcg_generation_id = -1;
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(
+ rkcg, rd_true /*assignment is lost*/,
+ rd_true /*this consumer is initiating*/,
+ "OffsetCommit error: Illegal generation");
+ break;
+
+ case RD_KAFKA_RESP_ERR__IN_PROGRESS:
+ return; /* Retrying */
+
+ case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+ case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ /* The coordinator is not available, defer the offset commit
+ * to when the coordinator is back up again. */
+
+ /* Future-proofing, see timeout_scan(). */
+ rd_kafka_assert(NULL, err != RD_KAFKA_RESP_ERR__WAIT_COORD);
+
+ if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko_orig,
+ rd_kafka_err2str(err)))
+ return;
+ break;
+
+ default:
+ break;
+ }
+
+ /* Call on_commit interceptors */
+ if (err != RD_KAFKA_RESP_ERR__NO_OFFSET &&
+ err != RD_KAFKA_RESP_ERR__DESTROY && offsets && offsets->cnt > 0)
+ rd_kafka_interceptors_on_commit(rk, offsets, err);
+
+ /* Keep track of outstanding commits */
+ rd_kafka_assert(NULL, rk->rk_consumer.wait_commit_cnt > 0);
+ rk->rk_consumer.wait_commit_cnt--;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ rd_kafka_op_destroy(rko_orig);
+ return; /* Handle is terminating, this op may be handled
+ * by the op enq()ing thread rather than the
+ * rdkafka main thread, it is not safe to
+ * continue here. */
+ }
+
+ /* Update the committed offsets for each partition's rktp. */
+ errcnt = rd_kafka_cgrp_update_committed_offsets(rkcg, err, offsets);
+
+ if (err != RD_KAFKA_RESP_ERR__DESTROY &&
+ !(err == RD_KAFKA_RESP_ERR__NO_OFFSET &&
+ rko_orig->rko_u.offset_commit.silent_empty)) {
+ /* Propagate commit results (success or permanent error)
+ * unless we're shutting down or commit was empty. */
+ rd_kafka_cgrp_propagate_commit_result(rkcg, rko_orig, err,
+ errcnt, offsets);
+ }
+
+ rd_kafka_op_destroy(rko_orig);
+
+ /* If the current state was waiting for commits to finish we'll try to
+ * transition to the next state. */
+ if (rk->rk_consumer.wait_commit_cnt == 0)
+ rd_kafka_assignment_serve(rk);
+}
+
+
+static size_t rd_kafka_topic_partition_has_absolute_offset(
+ const rd_kafka_topic_partition_t *rktpar,
+ void *opaque) {
+ return rktpar->offset >= 0 ? 1 : 0;
+}
+
+
+/**
+ * Commit a list of offsets.
+ * Reuse the orignating 'rko' for the async reply.
+ * 'rko->rko_payload' should either by NULL (to commit current assignment) or
+ * a proper topic_partition_list_t with offsets to commit.
+ * The offset list will be altered.
+ *
+ * \p rko...silent_empty: if there are no offsets to commit bail out
+ * silently without posting an op on the reply queue.
+ * \p set_offsets: set offsets and epochs in
+ * rko->rko_u.offset_commit.partitions from the rktp's
+ * stored offset.
+ *
+ * Locality: cgrp thread
+ */
+static void rd_kafka_cgrp_offsets_commit(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_op_t *rko,
+ rd_bool_t set_offsets,
+ const char *reason) {
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_resp_err_t err;
+ int valid_offsets = 0;
+ int r;
+ rd_kafka_buf_t *rkbuf;
+ rd_kafka_op_t *reply;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ if (!(rko->rko_flags & RD_KAFKA_OP_F_REPROCESS)) {
+ /* wait_commit_cnt has already been increased for
+ * reprocessed ops. */
+ rkcg->rkcg_rk->rk_consumer.wait_commit_cnt++;
+ }
+
+ /* If offsets is NULL we shall use the current assignment
+ * (not the group assignment). */
+ if (!rko->rko_u.offset_commit.partitions &&
+ rkcg->rkcg_rk->rk_consumer.assignment.all->cnt > 0) {
+ if (rd_kafka_cgrp_assignment_is_lost(rkcg)) {
+ /* Not committing assigned offsets: assignment lost */
+ err = RD_KAFKA_RESP_ERR__ASSIGNMENT_LOST;
+ goto err;
+ }
+
+ rko->rko_u.offset_commit.partitions =
+ rd_kafka_topic_partition_list_copy(
+ rkcg->rkcg_rk->rk_consumer.assignment.all);
+ }
+
+ offsets = rko->rko_u.offset_commit.partitions;
+
+ if (offsets) {
+ /* Set offsets to commits */
+ if (set_offsets)
+ rd_kafka_topic_partition_list_set_offsets(
+ rkcg->rkcg_rk, rko->rko_u.offset_commit.partitions,
+ 1, RD_KAFKA_OFFSET_INVALID /* def */,
+ 1 /* is commit */);
+
+ /* Check the number of valid offsets to commit. */
+ valid_offsets = (int)rd_kafka_topic_partition_list_sum(
+ offsets, rd_kafka_topic_partition_has_absolute_offset,
+ NULL);
+ }
+
+ if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
+ /* Commits are not allowed when a fatal error has been raised */
+ err = RD_KAFKA_RESP_ERR__FATAL;
+ goto err;
+ }
+
+ if (!valid_offsets) {
+ /* No valid offsets */
+ err = RD_KAFKA_RESP_ERR__NO_OFFSET;
+ goto err;
+ }
+
+ if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
+ "COMMIT",
+ "Deferring \"%s\" offset commit "
+ "for %d partition(s) in state %s: "
+ "no coordinator available",
+ reason, valid_offsets,
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state]);
+
+ if (rd_kafka_cgrp_defer_offset_commit(rkcg, rko, reason))
+ return;
+
+ err = RD_KAFKA_RESP_ERR__WAIT_COORD;
+ goto err;
+ }
+
+
+ rd_rkb_dbg(rkcg->rkcg_coord, CONSUMER | RD_KAFKA_DBG_CGRP, "COMMIT",
+ "Committing offsets for %d partition(s) with "
+ "generation-id %" PRId32 " in join-state %s: %s",
+ valid_offsets, rkcg->rkcg_generation_id,
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ reason);
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid(
+ rkcg->rkcg_rk->rk_conf.group_id_str, rkcg->rkcg_generation_id,
+ rkcg->rkcg_member_id->str,
+ rkcg->rkcg_rk->rk_conf.group_instance_id);
+
+ /* Send OffsetCommit */
+ r = rd_kafka_OffsetCommitRequest(rkcg->rkcg_coord, cgmetadata, offsets,
+ RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
+ rd_kafka_cgrp_op_handle_OffsetCommit,
+ rko, reason);
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+
+ /* Must have valid offsets to commit if we get here */
+ rd_kafka_assert(NULL, r != 0);
+
+ return;
+
+err:
+ if (err != RD_KAFKA_RESP_ERR__NO_OFFSET)
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
+ "COMMIT", "OffsetCommit internal error: %s",
+ rd_kafka_err2str(err));
+
+ /* Propagate error through dummy buffer object that will
+ * call the response handler from the main loop, avoiding
+ * any recursive calls from op_handle_OffsetCommit ->
+ * assignment_serve() and then back to cgrp_assigned_offsets_commit() */
+
+ reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);
+ reply->rko_rk = rkcg->rkcg_rk; /* Set rk since the rkbuf will not
+ * have a rkb to reach it. */
+ reply->rko_err = err;
+
+ rkbuf = rd_kafka_buf_new(0, 0);
+ rkbuf->rkbuf_cb = rd_kafka_cgrp_op_handle_OffsetCommit;
+ rkbuf->rkbuf_opaque = rko;
+ reply->rko_u.xbuf.rkbuf = rkbuf;
+
+ rd_kafka_q_enq(rkcg->rkcg_ops, reply);
+}
+
+
+/**
+ * @brief Commit offsets assigned partitions.
+ *
+ * If \p offsets is NULL all partitions in the current assignment will be used.
+ * If \p set_offsets is true the offsets to commit will be read from the
+ * rktp's stored offset rather than the .offset fields in \p offsets.
+ *
+ * rkcg_wait_commit_cnt will be increased accordingly.
+ */
+void rd_kafka_cgrp_assigned_offsets_commit(
+ rd_kafka_cgrp_t *rkcg,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_bool_t set_offsets,
+ const char *reason) {
+ rd_kafka_op_t *rko;
+
+ if (rd_kafka_cgrp_assignment_is_lost(rkcg)) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "AUTOCOMMIT",
+ "Group \"%s\": not committing assigned offsets: "
+ "assignment lost",
+ rkcg->rkcg_group_id->str);
+ return;
+ }
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
+ rko->rko_u.offset_commit.reason = rd_strdup(reason);
+ if (rkcg->rkcg_rk->rk_conf.enabled_events &
+ RD_KAFKA_EVENT_OFFSET_COMMIT) {
+ /* Send results to application */
+ rd_kafka_op_set_replyq(rko, rkcg->rkcg_rk->rk_rep, 0);
+ rko->rko_u.offset_commit.cb =
+ rkcg->rkcg_rk->rk_conf.offset_commit_cb; /*maybe NULL*/
+ rko->rko_u.offset_commit.opaque = rkcg->rkcg_rk->rk_conf.opaque;
+ }
+ /* NULL partitions means current assignment */
+ if (offsets)
+ rko->rko_u.offset_commit.partitions =
+ rd_kafka_topic_partition_list_copy(offsets);
+ rko->rko_u.offset_commit.silent_empty = 1;
+ rd_kafka_cgrp_offsets_commit(rkcg, rko, set_offsets, reason);
+}
+
+
+/**
+ * auto.commit.interval.ms commit timer callback.
+ *
+ * Trigger a group offset commit.
+ *
+ * Locality: rdkafka main thread
+ */
+static void rd_kafka_cgrp_offset_commit_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_cgrp_t *rkcg = arg;
+
+ /* Don't attempt auto commit when rebalancing or initializing since
+ * the rkcg_generation_id is most likely in flux. */
+ if (rkcg->rkcg_subscription &&
+ rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_STEADY)
+ return;
+
+ rd_kafka_cgrp_assigned_offsets_commit(
+ rkcg, NULL, rd_true /*set offsets*/, "cgrp auto commit timer");
+}
+
+
+/**
+ * @brief If rkcg_next_subscription or rkcg_next_unsubscribe are
+ * set, trigger a state change so that they are applied from the
+ * main dispatcher.
+ *
+ * @returns rd_true if a subscribe was scheduled, else false.
+ */
+static rd_bool_t
+rd_kafka_trigger_waiting_subscribe_maybe(rd_kafka_cgrp_t *rkcg) {
+
+ if (rkcg->rkcg_next_subscription || rkcg->rkcg_next_unsubscribe) {
+ /* Skip the join backoff */
+ rd_interval_reset(&rkcg->rkcg_join_intvl);
+ rd_kafka_cgrp_rejoin(rkcg, "Applying next subscription");
+ return rd_true;
+ }
+
+ return rd_false;
+}
+
+
+/**
+ * @brief Incrementally add to an existing partition assignment
+ * May update \p partitions but will not hold on to it.
+ *
+ * @returns an error object or NULL on success.
+ */
+static rd_kafka_error_t *
+rd_kafka_cgrp_incremental_assign(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *partitions) {
+ rd_kafka_error_t *error;
+
+ error = rd_kafka_assignment_add(rkcg->rkcg_rk, partitions);
+ if (error)
+ return error;
+
+ if (rkcg->rkcg_join_state ==
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) {
+ rd_kafka_assignment_resume(rkcg->rkcg_rk,
+ "incremental assign called");
+ rd_kafka_cgrp_set_join_state(rkcg,
+ RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+
+ if (rkcg->rkcg_subscription) {
+ /* If using subscribe(), start a timer to enforce
+ * `max.poll.interval.ms`.
+ * Instead of restarting the timer on each ...poll()
+ * call, which would be costly (once per message),
+ * set up an intervalled timer that checks a timestamp
+ * (that is updated on ..poll()).
+ * The timer interval is 2 hz. */
+ rd_kafka_timer_start(
+ &rkcg->rkcg_rk->rk_timers,
+ &rkcg->rkcg_max_poll_interval_tmr,
+ 500 * 1000ll /* 500ms */,
+ rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg);
+ }
+ }
+
+ rd_kafka_cgrp_assignment_clear_lost(rkcg,
+ "incremental_assign() called");
+
+ return NULL;
+}
+
+
+/**
+ * @brief Incrementally remove partitions from an existing partition
+ * assignment. May update \p partitions but will not hold on
+ * to it.
+ *
+ * @remark This method does not unmark the current assignment as lost
+ * (if lost). That happens following _incr_unassign_done and
+ * a group-rejoin initiated.
+ *
+ * @returns An error object or NULL on success.
+ */
+static rd_kafka_error_t *rd_kafka_cgrp_incremental_unassign(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *partitions) {
+ rd_kafka_error_t *error;
+
+ error = rd_kafka_assignment_subtract(rkcg->rkcg_rk, partitions);
+ if (error)
+ return error;
+
+ if (rkcg->rkcg_join_state ==
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) {
+ rd_kafka_assignment_resume(rkcg->rkcg_rk,
+ "incremental unassign called");
+ rd_kafka_cgrp_set_join_state(
+ rkcg,
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE);
+ }
+
+ rd_kafka_cgrp_assignment_clear_lost(rkcg,
+ "incremental_unassign() called");
+
+ return NULL;
+}
+
+
+/**
+ * @brief Call when all incremental unassign operations are done to transition
+ * to the next state.
+ */
+static void rd_kafka_cgrp_incr_unassign_done(rd_kafka_cgrp_t *rkcg) {
+
+ /* If this action was underway when a terminate was initiated, it will
+ * be left to complete. Now that's done, unassign all partitions */
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN",
+ "Group \"%s\" is terminating, initiating full "
+ "unassign",
+ rkcg->rkcg_group_id->str);
+ rd_kafka_cgrp_unassign(rkcg);
+ return;
+ }
+
+ if (rkcg->rkcg_rebalance_incr_assignment) {
+
+ /* This incremental unassign was part of a normal rebalance
+ * (in which the revoke set was not empty). Immediately
+ * trigger the assign that follows this revoke. The protocol
+ * dictates this should occur even if the new assignment
+ * set is empty.
+ *
+ * Also, since this rebalance had some revoked partitions,
+ * a re-join should occur following the assign.
+ */
+
+ rd_kafka_rebalance_op_incr(rkcg,
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ rkcg->rkcg_rebalance_incr_assignment,
+ rd_true /*rejoin following assign*/,
+ "cooperative assign after revoke");
+
+ rd_kafka_topic_partition_list_destroy(
+ rkcg->rkcg_rebalance_incr_assignment);
+ rkcg->rkcg_rebalance_incr_assignment = NULL;
+
+ /* Note: rkcg_rebalance_rejoin is actioned / reset in
+ * rd_kafka_cgrp_incremental_assign call */
+
+ } else if (rkcg->rkcg_rebalance_rejoin) {
+ rkcg->rkcg_rebalance_rejoin = rd_false;
+
+ /* There are some cases (lost partitions), where a rejoin
+ * should occur immediately following the unassign (this
+ * is not the case under normal conditions), in which case
+ * the rejoin flag will be set. */
+
+ /* Skip the join backoff */
+ rd_interval_reset(&rkcg->rkcg_join_intvl);
+
+ rd_kafka_cgrp_rejoin(rkcg, "Incremental unassignment done");
+
+ } else if (!rd_kafka_trigger_waiting_subscribe_maybe(rkcg)) {
+ /* After this incremental unassignment we're now back in
+ * a steady state. */
+ rd_kafka_cgrp_set_join_state(rkcg,
+ RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+ }
+}
+
+
+/**
+ * @brief Call when all absolute (non-incremental) unassign operations are done
+ * to transition to the next state.
+ */
+static void rd_kafka_cgrp_unassign_done(rd_kafka_cgrp_t *rkcg) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNASSIGN",
+ "Group \"%s\": unassign done in state %s "
+ "(join-state %s)",
+ rkcg->rkcg_group_id->str,
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ /* Leave group, if desired. */
+ rd_kafka_cgrp_leave_maybe(rkcg);
+
+ if (rkcg->rkcg_join_state !=
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE)
+ return;
+
+ /* All partitions are unassigned. Rejoin the group. */
+
+ /* Skip the join backoff */
+ rd_interval_reset(&rkcg->rkcg_join_intvl);
+
+ rd_kafka_cgrp_rejoin(rkcg, "Unassignment done");
+}
+
+
+
+/**
+ * @brief Called from assignment code when all in progress
+ * assignment/unassignment operations are done, allowing the cgrp to
+ * transition to other states if needed.
+ *
+ * @remark This may be called spontaneously without any need for a state
+ * change in the rkcg.
+ */
+void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNDONE",
+ "Group \"%s\": "
+ "assignment operations done in join-state %s "
+ "(rebalance rejoin=%s)",
+ rkcg->rkcg_group_id->str,
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ RD_STR_ToF(rkcg->rkcg_rebalance_rejoin));
+
+ switch (rkcg->rkcg_join_state) {
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE:
+ rd_kafka_cgrp_unassign_done(rkcg);
+ break;
+
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE:
+ rd_kafka_cgrp_incr_unassign_done(rkcg);
+ break;
+
+ case RD_KAFKA_CGRP_JOIN_STATE_STEADY:
+ /* If an updated/next subscription is available, schedule it. */
+ if (rd_kafka_trigger_waiting_subscribe_maybe(rkcg))
+ break;
+
+ if (rkcg->rkcg_rebalance_rejoin) {
+ rkcg->rkcg_rebalance_rejoin = rd_false;
+
+ /* Skip the join backoff */
+ rd_interval_reset(&rkcg->rkcg_join_intvl);
+
+ rd_kafka_cgrp_rejoin(
+ rkcg,
+ "rejoining group to redistribute "
+ "previously owned partitions to other "
+ "group members");
+ break;
+ }
+
+ /* FALLTHRU */
+
+ case RD_KAFKA_CGRP_JOIN_STATE_INIT:
+ /* Check if cgrp is trying to terminate, which is safe to do
+ * in these two states. Otherwise we'll need to wait for
+ * the current state to decommission. */
+ rd_kafka_cgrp_try_terminate(rkcg);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+
+/**
+ * @brief Remove existing assignment.
+ */
+static rd_kafka_error_t *rd_kafka_cgrp_unassign(rd_kafka_cgrp_t *rkcg) {
+
+ rd_kafka_assignment_clear(rkcg->rkcg_rk);
+
+ if (rkcg->rkcg_join_state ==
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL) {
+ rd_kafka_assignment_resume(rkcg->rkcg_rk, "unassign called");
+ rd_kafka_cgrp_set_join_state(
+ rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE);
+ }
+
+ rd_kafka_cgrp_assignment_clear_lost(rkcg, "unassign() called");
+
+ return NULL;
+}
+
+
+/**
+ * @brief Set new atomic partition assignment
+ * May update \p assignment but will not hold on to it.
+ *
+ * @returns NULL on success or an error if a fatal error has been raised.
+ */
+static rd_kafka_error_t *
+rd_kafka_cgrp_assign(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *assignment) {
+ rd_kafka_error_t *error;
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "ASSIGN",
+ "Group \"%s\": new assignment of %d partition(s) "
+ "in join-state %s",
+ rkcg->rkcg_group_id->str, assignment ? assignment->cnt : 0,
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ /* Clear existing assignment, if any, and serve its removals. */
+ if (rd_kafka_assignment_clear(rkcg->rkcg_rk))
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+
+ error = rd_kafka_assignment_add(rkcg->rkcg_rk, assignment);
+ if (error)
+ return error;
+
+ rd_kafka_cgrp_assignment_clear_lost(rkcg, "assign() called");
+
+ if (rkcg->rkcg_join_state ==
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL) {
+ rd_kafka_assignment_resume(rkcg->rkcg_rk, "assign called");
+ rd_kafka_cgrp_set_join_state(rkcg,
+ RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+
+ if (rkcg->rkcg_subscription) {
+ /* If using subscribe(), start a timer to enforce
+ * `max.poll.interval.ms`.
+ * Instead of restarting the timer on each ...poll()
+ * call, which would be costly (once per message),
+ * set up an intervalled timer that checks a timestamp
+ * (that is updated on ..poll()).
+ * The timer interval is 2 hz. */
+ rd_kafka_timer_start(
+ &rkcg->rkcg_rk->rk_timers,
+ &rkcg->rkcg_max_poll_interval_tmr,
+ 500 * 1000ll /* 500ms */,
+ rd_kafka_cgrp_max_poll_interval_check_tmr_cb, rkcg);
+ }
+ }
+
+ return NULL;
+}
+
+
+
+/**
+ * @brief Construct a typed map from list \p rktparlist with key corresponding
+ * to each element in the list and value NULL.
+ *
+ * @remark \p rktparlist may be NULL.
+ */
+static map_toppar_member_info_t *rd_kafka_toppar_list_to_toppar_member_info_map(
+ rd_kafka_topic_partition_list_t *rktparlist) {
+ map_toppar_member_info_t *map = rd_calloc(1, sizeof(*map));
+ const rd_kafka_topic_partition_t *rktpar;
+
+ RD_MAP_INIT(map, rktparlist ? rktparlist->cnt : 0,
+ rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free,
+ PartitionMemberInfo_free);
+
+ if (!rktparlist)
+ return map;
+
+ RD_KAFKA_TPLIST_FOREACH(rktpar, rktparlist)
+ RD_MAP_SET(map, rd_kafka_topic_partition_copy(rktpar),
+ PartitionMemberInfo_new(NULL, rd_false));
+
+ return map;
+}
+
+
+/**
+ * @brief Construct a toppar list from map \p map with elements corresponding
+ * to the keys of \p map.
+ */
+static rd_kafka_topic_partition_list_t *
+rd_kafka_toppar_member_info_map_to_list(map_toppar_member_info_t *map) {
+ const rd_kafka_topic_partition_t *k;
+ rd_kafka_topic_partition_list_t *list =
+ rd_kafka_topic_partition_list_new((int)RD_MAP_CNT(map));
+
+ RD_MAP_FOREACH_KEY(k, map) {
+ rd_kafka_topic_partition_list_add(list, k->topic, k->partition);
+ }
+
+ return list;
+}
+
+
+/**
+ * @brief Handle a rebalance-triggered partition assignment
+ * (COOPERATIVE case).
+ */
+static void rd_kafka_cgrp_handle_assignment_cooperative(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *assignment) {
+ map_toppar_member_info_t *new_assignment_set;
+ map_toppar_member_info_t *old_assignment_set;
+ map_toppar_member_info_t *newly_added_set;
+ map_toppar_member_info_t *revoked_set;
+ rd_kafka_topic_partition_list_t *newly_added;
+ rd_kafka_topic_partition_list_t *revoked;
+
+ new_assignment_set =
+ rd_kafka_toppar_list_to_toppar_member_info_map(assignment);
+
+ old_assignment_set = rd_kafka_toppar_list_to_toppar_member_info_map(
+ rkcg->rkcg_group_assignment);
+
+ newly_added_set = rd_kafka_member_partitions_subtract(
+ new_assignment_set, old_assignment_set);
+ revoked_set = rd_kafka_member_partitions_subtract(old_assignment_set,
+ new_assignment_set);
+
+ newly_added = rd_kafka_toppar_member_info_map_to_list(newly_added_set);
+ revoked = rd_kafka_toppar_member_info_map_to_list(revoked_set);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "COOPASSIGN",
+ "Group \"%s\": incremental assignment: %d newly added, "
+ "%d revoked partitions based on assignment of %d "
+ "partitions",
+ rkcg->rkcg_group_id->str, newly_added->cnt, revoked->cnt,
+ assignment->cnt);
+
+ if (revoked->cnt > 0) {
+ /* Setting rkcg_incr_assignment causes a follow on incremental
+ * assign rebalance op after completion of this incremental
+ * unassign op. */
+
+ rkcg->rkcg_rebalance_incr_assignment = newly_added;
+ newly_added = NULL;
+
+ rd_kafka_rebalance_op_incr(rkcg,
+ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ revoked, rd_false /*no rejoin
+ following unassign*/
+ ,
+ "sync group revoke");
+
+ } else {
+ /* There are no revoked partitions - trigger the assign
+ * rebalance op, and flag that the group does not need
+ * to be re-joined */
+
+ rd_kafka_rebalance_op_incr(
+ rkcg, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, newly_added,
+ rd_false /*no rejoin following assign*/,
+ "sync group assign");
+ }
+
+ if (newly_added)
+ rd_kafka_topic_partition_list_destroy(newly_added);
+ rd_kafka_topic_partition_list_destroy(revoked);
+ RD_MAP_DESTROY_AND_FREE(revoked_set);
+ RD_MAP_DESTROY_AND_FREE(newly_added_set);
+ RD_MAP_DESTROY_AND_FREE(old_assignment_set);
+ RD_MAP_DESTROY_AND_FREE(new_assignment_set);
+}
+
+
+/**
+ * @brief Sets or clears the group's partition assignment for our consumer.
+ *
+ * Will replace the current group assignment, if any.
+ */
+static void rd_kafka_cgrp_group_assignment_set(
+ rd_kafka_cgrp_t *rkcg,
+ const rd_kafka_topic_partition_list_t *partitions) {
+
+ if (rkcg->rkcg_group_assignment)
+ rd_kafka_topic_partition_list_destroy(
+ rkcg->rkcg_group_assignment);
+
+ if (partitions) {
+ rkcg->rkcg_group_assignment =
+ rd_kafka_topic_partition_list_copy(partitions);
+ rd_kafka_topic_partition_list_sort_by_topic(
+ rkcg->rkcg_group_assignment);
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT",
+ "Group \"%s\": setting group assignment to %d "
+ "partition(s)",
+ rkcg->rkcg_group_id->str,
+ rkcg->rkcg_group_assignment->cnt);
+
+ } else {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "ASSIGNMENT",
+ "Group \"%s\": clearing group assignment",
+ rkcg->rkcg_group_id->str);
+ rkcg->rkcg_group_assignment = NULL;
+ }
+
+ rd_kafka_wrlock(rkcg->rkcg_rk);
+ rkcg->rkcg_c.assignment_size =
+ rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0;
+ rd_kafka_wrunlock(rkcg->rkcg_rk);
+
+ if (rkcg->rkcg_group_assignment)
+ rd_kafka_topic_partition_list_log(
+ rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP,
+ rkcg->rkcg_group_assignment);
+}
+
+
+/**
+ * @brief Adds or removes \p partitions from the current group assignment.
+ *
+ * @param add Whether to add or remove the partitions.
+ *
+ * @remark The added partitions must not already be on the group assignment,
+ * and the removed partitions must be on the group assignment.
+ *
+ * To be used with incremental rebalancing.
+ *
+ */
+static void rd_kafka_cgrp_group_assignment_modify(
+ rd_kafka_cgrp_t *rkcg,
+ rd_bool_t add,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ const rd_kafka_topic_partition_t *rktpar;
+ int precnt;
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP, "ASSIGNMENT",
+ "Group \"%s\": %d partition(s) being %s group assignment "
+ "of %d partition(s)",
+ rkcg->rkcg_group_id->str, partitions->cnt,
+ add ? "added to" : "removed from",
+ rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0);
+
+ if (partitions == rkcg->rkcg_group_assignment) {
+ /* \p partitions is the actual assignment, which
+ * must mean it is all to be removed.
+ * Short-cut directly to set(NULL). */
+ rd_assert(!add);
+ rd_kafka_cgrp_group_assignment_set(rkcg, NULL);
+ return;
+ }
+
+ if (add && (!rkcg->rkcg_group_assignment ||
+ rkcg->rkcg_group_assignment->cnt == 0)) {
+ /* Adding to an empty assignment is a set operation. */
+ rd_kafka_cgrp_group_assignment_set(rkcg, partitions);
+ return;
+ }
+
+ if (!add) {
+ /* Removing from an empty assignment is illegal. */
+ rd_assert(rkcg->rkcg_group_assignment != NULL &&
+ rkcg->rkcg_group_assignment->cnt > 0);
+ }
+
+
+ precnt = rkcg->rkcg_group_assignment->cnt;
+ RD_KAFKA_TPLIST_FOREACH(rktpar, partitions) {
+ int idx;
+
+ idx = rd_kafka_topic_partition_list_find_idx(
+ rkcg->rkcg_group_assignment, rktpar->topic,
+ rktpar->partition);
+
+ if (add) {
+ rd_assert(idx == -1);
+
+ rd_kafka_topic_partition_list_add_copy(
+ rkcg->rkcg_group_assignment, rktpar);
+
+ } else {
+ rd_assert(idx != -1);
+
+ rd_kafka_topic_partition_list_del_by_idx(
+ rkcg->rkcg_group_assignment, idx);
+ }
+ }
+
+ if (add)
+ rd_assert(precnt + partitions->cnt ==
+ rkcg->rkcg_group_assignment->cnt);
+ else
+ rd_assert(precnt - partitions->cnt ==
+ rkcg->rkcg_group_assignment->cnt);
+
+ if (rkcg->rkcg_group_assignment->cnt == 0) {
+ rd_kafka_topic_partition_list_destroy(
+ rkcg->rkcg_group_assignment);
+ rkcg->rkcg_group_assignment = NULL;
+
+ } else if (add)
+ rd_kafka_topic_partition_list_sort_by_topic(
+ rkcg->rkcg_group_assignment);
+
+ rd_kafka_wrlock(rkcg->rkcg_rk);
+ rkcg->rkcg_c.assignment_size =
+ rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0;
+ rd_kafka_wrunlock(rkcg->rkcg_rk);
+
+ if (rkcg->rkcg_group_assignment)
+ rd_kafka_topic_partition_list_log(
+ rkcg->rkcg_rk, "GRPASSIGNMENT", RD_KAFKA_DBG_CGRP,
+ rkcg->rkcg_group_assignment);
+}
+
+
+/**
+ * @brief Handle a rebalance-triggered partition assignment.
+ *
+ * If a rebalance_cb has been registered we enqueue an op for the app
+ * and let the app perform the actual assign() call. Otherwise we
+ * assign() directly from here.
+ *
+ * This provides the most flexibility, allowing the app to perform any
+ * operation it seem fit (e.g., offset writes or reads) before actually
+ * updating the assign():ment.
+ */
+static void
+rd_kafka_cgrp_handle_assignment(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *assignment) {
+
+ if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) {
+ rd_kafka_cgrp_handle_assignment_cooperative(rkcg, assignment);
+ } else {
+
+ rd_kafka_rebalance_op(rkcg,
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ assignment, "new assignment");
+ }
+}
+
+
+/**
+ * Clean up any group-leader related resources.
+ *
+ * Locality: cgrp thread
+ */
+static void rd_kafka_cgrp_group_leader_reset(rd_kafka_cgrp_t *rkcg,
+ const char *reason) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "GRPLEADER",
+ "Group \"%.*s\": resetting group leader info: %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), reason);
+
+ if (rkcg->rkcg_group_leader.members) {
+ int i;
+
+ for (i = 0; i < rkcg->rkcg_group_leader.member_cnt; i++)
+ rd_kafka_group_member_clear(
+ &rkcg->rkcg_group_leader.members[i]);
+ rkcg->rkcg_group_leader.member_cnt = 0;
+ rd_free(rkcg->rkcg_group_leader.members);
+ rkcg->rkcg_group_leader.members = NULL;
+ }
+}
+
+
+/**
+ * @brief React to a RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS broker response.
+ */
+static void rd_kafka_cgrp_group_is_rebalancing(rd_kafka_cgrp_t *rkcg) {
+
+ if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_EAGER) {
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_false /*lost*/,
+ rd_false /*initiating*/,
+ "rebalance in progress");
+ return;
+ }
+
+
+ /* In the COOPERATIVE case, simply rejoin the group
+ * - partitions are unassigned on SyncGroup response,
+ * not prior to JoinGroup as with the EAGER case. */
+
+ if (RD_KAFKA_CGRP_REBALANCING(rkcg)) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE",
+ "Group \"%.*s\": skipping "
+ "COOPERATIVE rebalance in state %s "
+ "(join-state %s)%s%s%s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)
+ ? " (awaiting assign call)"
+ : "",
+ (rkcg->rkcg_rebalance_incr_assignment != NULL)
+ ? " (incremental assignment pending)"
+ : "",
+ rkcg->rkcg_rebalance_rejoin ? " (rebalance rejoin)" : "");
+ return;
+ }
+
+ rd_kafka_cgrp_rejoin(rkcg, "Group is rebalancing");
+}
+
+
+
+/**
+ * @brief Triggers the application rebalance callback if required to
+ * revoke partitions, and transition to INIT state for (eventual)
+ * rejoin. Does nothing if a rebalance workflow is already in
+ * progress
+ */
+static void rd_kafka_cgrp_revoke_all_rejoin_maybe(rd_kafka_cgrp_t *rkcg,
+ rd_bool_t assignment_lost,
+ rd_bool_t initiating,
+ const char *reason) {
+ if (RD_KAFKA_CGRP_REBALANCING(rkcg)) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE",
+ "Group \"%.*s\": rebalance (%s) "
+ "already in progress, skipping in state %s "
+ "(join-state %s) with %d assigned partition(s)%s%s%s: "
+ "%s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_rebalance_protocol2str(
+ rd_kafka_cgrp_rebalance_protocol(rkcg)),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ rkcg->rkcg_group_assignment
+ ? rkcg->rkcg_group_assignment->cnt
+ : 0,
+ assignment_lost ? " (lost)" : "",
+ rkcg->rkcg_rebalance_incr_assignment
+ ? ", incremental assignment in progress"
+ : "",
+ rkcg->rkcg_rebalance_rejoin ? ", rejoin on rebalance" : "",
+ reason);
+ return;
+ }
+
+ rd_kafka_cgrp_revoke_all_rejoin(rkcg, assignment_lost, initiating,
+ reason);
+}
+
+
+/**
+ * @brief Triggers the application rebalance callback if required to
+ * revoke partitions, and transition to INIT state for (eventual)
+ * rejoin.
+ */
+static void rd_kafka_cgrp_revoke_all_rejoin(rd_kafka_cgrp_t *rkcg,
+ rd_bool_t assignment_lost,
+ rd_bool_t initiating,
+ const char *reason) {
+
+ rd_kafka_rebalance_protocol_t protocol =
+ rd_kafka_cgrp_rebalance_protocol(rkcg);
+
+ rd_bool_t terminating =
+ unlikely(rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE);
+
+
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP, "REBALANCE",
+ "Group \"%.*s\" %s (%s) in state %s (join-state %s) "
+ "with %d assigned partition(s)%s: %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ initiating ? "initiating rebalance" : "is rebalancing",
+ rd_kafka_rebalance_protocol2str(protocol),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ rkcg->rkcg_group_assignment ? rkcg->rkcg_group_assignment->cnt : 0,
+ assignment_lost ? " (lost)" : "", reason);
+
+ rd_snprintf(rkcg->rkcg_c.rebalance_reason,
+ sizeof(rkcg->rkcg_c.rebalance_reason), "%s", reason);
+
+
+ if (protocol == RD_KAFKA_REBALANCE_PROTOCOL_EAGER ||
+ protocol == RD_KAFKA_REBALANCE_PROTOCOL_NONE) {
+ /* EAGER case (or initial subscribe) - revoke partitions which
+ * will be followed by rejoin, if required. */
+
+ if (assignment_lost)
+ rd_kafka_cgrp_assignment_set_lost(
+ rkcg, "%s: revoking assignment and rejoining",
+ reason);
+
+ /* Schedule application rebalance op if there is an existing
+ * assignment (albeit perhaps empty) and there is no
+ * outstanding rebalance op in progress. */
+ if (rkcg->rkcg_group_assignment &&
+ !RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg)) {
+ rd_kafka_rebalance_op(
+ rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ rkcg->rkcg_group_assignment, reason);
+ } else {
+ /* Skip the join backoff */
+ rd_interval_reset(&rkcg->rkcg_join_intvl);
+
+ rd_kafka_cgrp_rejoin(rkcg, "%s", reason);
+ }
+
+ return;
+ }
+
+
+ /* COOPERATIVE case. */
+
+ /* All partitions should never be revoked unless terminating, leaving
+ * the group, or on assignment lost. Another scenario represents a
+ * logic error. Fail fast in this case. */
+ if (!(terminating || assignment_lost ||
+ (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE))) {
+ rd_kafka_log(rkcg->rkcg_rk, LOG_ERR, "REBALANCE",
+ "Group \"%s\": unexpected instruction to revoke "
+ "current assignment and rebalance "
+ "(terminating=%d, assignment_lost=%d, "
+ "LEAVE_ON_UNASSIGN_DONE=%d)",
+ rkcg->rkcg_group_id->str, terminating,
+ assignment_lost,
+ (rkcg->rkcg_flags &
+ RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE));
+ rd_dassert(!*"BUG: unexpected instruction to revoke "
+ "current assignment and rebalance");
+ }
+
+ if (rkcg->rkcg_group_assignment &&
+ rkcg->rkcg_group_assignment->cnt > 0) {
+ if (assignment_lost)
+ rd_kafka_cgrp_assignment_set_lost(
+ rkcg,
+ "%s: revoking incremental assignment "
+ "and rejoining",
+ reason);
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
+ "REBALANCE",
+ "Group \"%.*s\": revoking "
+ "all %d partition(s)%s%s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rkcg->rkcg_group_assignment->cnt,
+ terminating ? " (terminating)" : "",
+ assignment_lost ? " (assignment lost)" : "");
+
+ rd_kafka_rebalance_op_incr(
+ rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ rkcg->rkcg_group_assignment,
+ terminating ? rd_false : rd_true /*rejoin*/, reason);
+
+ return;
+ }
+
+ if (terminating) {
+ /* If terminating, then don't rejoin group. */
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
+ "REBALANCE",
+ "Group \"%.*s\": consumer is "
+ "terminating, skipping rejoin",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
+ return;
+ }
+
+ rd_kafka_cgrp_rejoin(rkcg, "Current assignment is empty");
+}
+
+
+/**
+ * @brief `max.poll.interval.ms` enforcement check timer.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void
+rd_kafka_cgrp_max_poll_interval_check_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_cgrp_t *rkcg = arg;
+ rd_kafka_t *rk = rkcg->rkcg_rk;
+ int exceeded;
+
+ exceeded = rd_kafka_max_poll_exceeded(rk);
+
+ if (likely(!exceeded))
+ return;
+
+ rd_kafka_log(rk, LOG_WARNING, "MAXPOLL",
+ "Application maximum poll interval (%dms) "
+ "exceeded by %dms "
+ "(adjust max.poll.interval.ms for "
+ "long-running message processing): "
+ "leaving group",
+ rk->rk_conf.max_poll_interval_ms, exceeded);
+
+ rd_kafka_consumer_err(rkcg->rkcg_q, RD_KAFKA_NODEID_UA,
+ RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED, 0, NULL,
+ NULL, RD_KAFKA_OFFSET_INVALID,
+ "Application maximum poll interval (%dms) "
+ "exceeded by %dms",
+ rk->rk_conf.max_poll_interval_ms, exceeded);
+
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED;
+
+ rd_kafka_timer_stop(rkts, &rkcg->rkcg_max_poll_interval_tmr,
+ 1 /*lock*/);
+
+ /* Leave the group before calling rebalance since the standard leave
+ * will be triggered first after the rebalance callback has been served.
+ * But since the application is blocked still doing processing
+ * that leave will be further delayed.
+ *
+ * KIP-345: static group members should continue to respect
+ * `max.poll.interval.ms` but should not send a LeaveGroupRequest.
+ */
+ if (!RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg))
+ rd_kafka_cgrp_leave(rkcg);
+
+ /* Timing out or leaving the group invalidates the member id, reset it
+ * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */
+ rd_kafka_cgrp_set_member_id(rkcg, "");
+
+ /* Trigger rebalance */
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
+ rd_true /*initiating*/,
+ "max.poll.interval.ms exceeded");
+}
+
+
+/**
+ * @brief Generate consumer errors for each topic in the list.
+ *
+ * Also replaces the list of last reported topic errors so that repeated
+ * errors are silenced.
+ *
+ * @param errored Errored topics.
+ * @param error_prefix Error message prefix.
+ *
+ * @remark Assumes ownership of \p errored.
+ */
+static void rd_kafka_propagate_consumer_topic_errors(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *errored,
+ const char *error_prefix) {
+ int i;
+
+ for (i = 0; i < errored->cnt; i++) {
+ rd_kafka_topic_partition_t *topic = &errored->elems[i];
+ rd_kafka_topic_partition_t *prev;
+
+ rd_assert(topic->err);
+
+ /* Normalize error codes, unknown topic may be
+ * reported by the broker, or the lack of a topic in
+ * metadata response is figured out by the client.
+ * Make sure the application only sees one error code
+ * for both these cases. */
+ if (topic->err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ topic->err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ /* Check if this topic errored previously */
+ prev = rd_kafka_topic_partition_list_find(
+ rkcg->rkcg_errored_topics, topic->topic,
+ RD_KAFKA_PARTITION_UA);
+
+ if (prev && prev->err == topic->err)
+ continue; /* This topic already reported same error */
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_TOPIC,
+ "TOPICERR", "%s: %s: %s", error_prefix,
+ topic->topic, rd_kafka_err2str(topic->err));
+
+ /* Send consumer error to application */
+ rd_kafka_consumer_err(
+ rkcg->rkcg_q, RD_KAFKA_NODEID_UA, topic->err, 0,
+ topic->topic, NULL, RD_KAFKA_OFFSET_INVALID, "%s: %s: %s",
+ error_prefix, topic->topic, rd_kafka_err2str(topic->err));
+ }
+
+ rd_kafka_topic_partition_list_destroy(rkcg->rkcg_errored_topics);
+ rkcg->rkcg_errored_topics = errored;
+}
+
+
+/**
+ * @brief Work out the topics currently subscribed to that do not
+ * match any pattern in \p subscription.
+ */
+static rd_kafka_topic_partition_list_t *rd_kafka_cgrp_get_unsubscribing_topics(
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *subscription) {
+ int i;
+ rd_kafka_topic_partition_list_t *result;
+
+ result = rd_kafka_topic_partition_list_new(
+ rkcg->rkcg_subscribed_topics->rl_cnt);
+
+ /* TODO: Something that isn't O(N*M) */
+ for (i = 0; i < rkcg->rkcg_subscribed_topics->rl_cnt; i++) {
+ int j;
+ const char *topic =
+ ((rd_kafka_topic_info_t *)
+ rkcg->rkcg_subscribed_topics->rl_elems[i])
+ ->topic;
+
+ for (j = 0; j < subscription->cnt; j++) {
+ const char *pattern = subscription->elems[j].topic;
+ if (rd_kafka_topic_match(rkcg->rkcg_rk, pattern,
+ topic)) {
+ break;
+ }
+ }
+
+ if (j == subscription->cnt)
+ rd_kafka_topic_partition_list_add(
+ result, topic, RD_KAFKA_PARTITION_UA);
+ }
+
+ if (result->cnt == 0) {
+ rd_kafka_topic_partition_list_destroy(result);
+ return NULL;
+ }
+
+ return result;
+}
+
+
+/**
+ * @brief Determine the partitions to revoke, given the topics being
+ * unassigned.
+ */
+static rd_kafka_topic_partition_list_t *
+rd_kafka_cgrp_calculate_subscribe_revoking_partitions(
+ rd_kafka_cgrp_t *rkcg,
+ const rd_kafka_topic_partition_list_t *unsubscribing) {
+ rd_kafka_topic_partition_list_t *revoking;
+ const rd_kafka_topic_partition_t *rktpar;
+
+ if (!unsubscribing)
+ return NULL;
+
+ if (!rkcg->rkcg_group_assignment ||
+ rkcg->rkcg_group_assignment->cnt == 0)
+ return NULL;
+
+ revoking =
+ rd_kafka_topic_partition_list_new(rkcg->rkcg_group_assignment->cnt);
+
+ /* TODO: Something that isn't O(N*M). */
+ RD_KAFKA_TPLIST_FOREACH(rktpar, unsubscribing) {
+ const rd_kafka_topic_partition_t *assigned;
+
+ RD_KAFKA_TPLIST_FOREACH(assigned, rkcg->rkcg_group_assignment) {
+ if (!strcmp(assigned->topic, rktpar->topic)) {
+ rd_kafka_topic_partition_list_add(
+ revoking, assigned->topic,
+ assigned->partition);
+ continue;
+ }
+ }
+ }
+
+ if (revoking->cnt == 0) {
+ rd_kafka_topic_partition_list_destroy(revoking);
+ revoking = NULL;
+ }
+
+ return revoking;
+}
+
+
+/**
+ * @brief Handle a new subscription that is modifying an existing subscription
+ * in the COOPERATIVE case.
+ *
+ * @remark Assumes ownership of \p rktparlist.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_cgrp_modify_subscription(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *rktparlist) {
+ rd_kafka_topic_partition_list_t *unsubscribing_topics;
+ rd_kafka_topic_partition_list_t *revoking;
+ rd_list_t *tinfos;
+ rd_kafka_topic_partition_list_t *errored;
+ int metadata_age;
+ int old_cnt = rkcg->rkcg_subscription->cnt;
+
+ rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION;
+
+ if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0)
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION;
+
+ /* Topics in rkcg_subscribed_topics that don't match any pattern in
+ the new subscription. */
+ unsubscribing_topics =
+ rd_kafka_cgrp_get_unsubscribing_topics(rkcg, rktparlist);
+
+ /* Currently assigned topic partitions that are no longer desired. */
+ revoking = rd_kafka_cgrp_calculate_subscribe_revoking_partitions(
+ rkcg, unsubscribing_topics);
+
+ rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription);
+ rkcg->rkcg_subscription = rktparlist;
+
+ if (rd_kafka_cgrp_metadata_refresh(rkcg, &metadata_age,
+ "modify subscription") == 1) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER,
+ "MODSUB",
+ "Group \"%.*s\": postponing join until "
+ "up-to-date metadata is available",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id));
+
+ rd_assert(
+ rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT ||
+ /* Possible via rd_kafka_cgrp_modify_subscription */
+ rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY);
+
+ rd_kafka_cgrp_set_join_state(
+ rkcg, RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
+
+
+ /* Revoke/join will occur after metadata refresh completes */
+ if (revoking)
+ rd_kafka_topic_partition_list_destroy(revoking);
+ if (unsubscribing_topics)
+ rd_kafka_topic_partition_list_destroy(
+ unsubscribing_topics);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE",
+ "Group \"%.*s\": modifying subscription of size %d to "
+ "new subscription of size %d, removing %d topic(s), "
+ "revoking %d partition(s) (join-state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), old_cnt,
+ rkcg->rkcg_subscription->cnt,
+ unsubscribing_topics ? unsubscribing_topics->cnt : 0,
+ revoking ? revoking->cnt : 0,
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ if (unsubscribing_topics)
+ rd_kafka_topic_partition_list_destroy(unsubscribing_topics);
+
+ /* Create a list of the topics in metadata that matches the new
+ * subscription */
+ tinfos = rd_list_new(rkcg->rkcg_subscription->cnt,
+ (void *)rd_kafka_topic_info_destroy);
+
+ /* Unmatched topics will be added to the errored list. */
+ errored = rd_kafka_topic_partition_list_new(0);
+
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION)
+ rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos,
+ rkcg->rkcg_subscription, errored);
+ else
+ rd_kafka_metadata_topic_filter(
+ rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, errored);
+
+ /* Propagate consumer errors for any non-existent or errored topics.
+ * The function takes ownership of errored. */
+ rd_kafka_propagate_consumer_topic_errors(
+ rkcg, errored, "Subscribed topic not available");
+
+ if (rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos) && !revoking) {
+ rd_kafka_cgrp_rejoin(rkcg, "Subscription modified");
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ if (revoking) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CONSUMER | RD_KAFKA_DBG_CGRP,
+ "REBALANCE",
+ "Group \"%.*s\" revoking "
+ "%d of %d partition(s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ revoking->cnt, rkcg->rkcg_group_assignment->cnt);
+
+ rd_kafka_rebalance_op_incr(
+ rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, revoking,
+ rd_true /*rejoin*/, "subscribe");
+
+ rd_kafka_topic_partition_list_destroy(revoking);
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * Remove existing topic subscription.
+ */
+static rd_kafka_resp_err_t rd_kafka_cgrp_unsubscribe(rd_kafka_cgrp_t *rkcg,
+ rd_bool_t leave_group) {
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "UNSUBSCRIBE",
+ "Group \"%.*s\": unsubscribe from current %ssubscription "
+ "of size %d (leave group=%s, has joined=%s, %s, "
+ "join-state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rkcg->rkcg_subscription ? "" : "unset ",
+ rkcg->rkcg_subscription ? rkcg->rkcg_subscription->cnt : 0,
+ RD_STR_ToF(leave_group),
+ RD_STR_ToF(RD_KAFKA_CGRP_HAS_JOINED(rkcg)),
+ rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str : "n/a",
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ rd_kafka_timer_stop(&rkcg->rkcg_rk->rk_timers,
+ &rkcg->rkcg_max_poll_interval_tmr, 1 /*lock*/);
+
+ if (rkcg->rkcg_subscription) {
+ rd_kafka_topic_partition_list_destroy(rkcg->rkcg_subscription);
+ rkcg->rkcg_subscription = NULL;
+ }
+
+ rd_kafka_cgrp_update_subscribed_topics(rkcg, NULL);
+
+ /*
+ * Clean-up group leader duties, if any.
+ */
+ rd_kafka_cgrp_group_leader_reset(rkcg, "unsubscribe");
+
+ if (leave_group && RD_KAFKA_CGRP_HAS_JOINED(rkcg))
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE;
+
+ /* FIXME: Why are we only revoking if !assignment_lost ? */
+ if (!rd_kafka_cgrp_assignment_is_lost(rkcg))
+ rd_kafka_cgrp_revoke_all_rejoin(rkcg, rd_false /*not lost*/,
+ rd_true /*initiating*/,
+ "unsubscribe");
+
+ rkcg->rkcg_flags &= ~(RD_KAFKA_CGRP_F_SUBSCRIPTION |
+ RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * Set new atomic topic subscription.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_cgrp_subscribe(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_topic_partition_list_t *rktparlist) {
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE",
+ "Group \"%.*s\": subscribe to new %ssubscription "
+ "of %d topics (join-state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rktparlist ? "" : "unset ",
+ rktparlist ? rktparlist->cnt : 0,
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ if (rkcg->rkcg_rk->rk_conf.enabled_assignor_cnt == 0)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ /* If the consumer has raised a fatal error treat all subscribes as
+ unsubscribe */
+ if (rd_kafka_fatal_error_code(rkcg->rkcg_rk)) {
+ if (rkcg->rkcg_subscription)
+ rd_kafka_cgrp_unsubscribe(rkcg,
+ rd_true /*leave group*/);
+ return RD_KAFKA_RESP_ERR__FATAL;
+ }
+
+ /* Clear any existing postponed subscribe. */
+ if (rkcg->rkcg_next_subscription)
+ rd_kafka_topic_partition_list_destroy_free(
+ rkcg->rkcg_next_subscription);
+ rkcg->rkcg_next_subscription = NULL;
+ rkcg->rkcg_next_unsubscribe = rd_false;
+
+ if (RD_KAFKA_CGRP_REBALANCING(rkcg)) {
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER, "SUBSCRIBE",
+ "Group \"%.*s\": postponing "
+ "subscribe until previous rebalance "
+ "completes (join-state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ if (!rktparlist)
+ rkcg->rkcg_next_unsubscribe = rd_true;
+ else
+ rkcg->rkcg_next_subscription = rktparlist;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
+ rktparlist && rkcg->rkcg_subscription)
+ return rd_kafka_cgrp_modify_subscription(rkcg, rktparlist);
+
+ /* Remove existing subscription first */
+ if (rkcg->rkcg_subscription)
+ rd_kafka_cgrp_unsubscribe(
+ rkcg,
+ rktparlist
+ ? rd_false /* don't leave group if new subscription */
+ : rd_true /* leave group if no new subscription */);
+
+ if (!rktparlist)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_SUBSCRIPTION;
+
+ if (rd_kafka_topic_partition_list_regex_cnt(rktparlist) > 0)
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION;
+
+ rkcg->rkcg_subscription = rktparlist;
+
+ rd_kafka_cgrp_join(rkcg);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * Same as cgrp_terminate() but called from the cgrp/main thread upon receiving
+ * the op 'rko' from cgrp_terminate().
+ *
+ * NOTE: Takes ownership of 'rko'
+ *
+ * Locality: main thread
+ */
+void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko) {
+
+ rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread));
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTERM",
+ "Terminating group \"%.*s\" in state %s "
+ "with %d partition(s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_list_cnt(&rkcg->rkcg_toppars));
+
+ if (unlikely(rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_TERM ||
+ (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) ||
+ rkcg->rkcg_reply_rko != NULL)) {
+ /* Already terminating or handling a previous terminate */
+ if (rko) {
+ rd_kafka_q_t *rkq = rko->rko_replyq.q;
+ rko->rko_replyq.q = NULL;
+ rd_kafka_consumer_err(
+ rkq, RD_KAFKA_NODEID_UA,
+ RD_KAFKA_RESP_ERR__IN_PROGRESS,
+ rko->rko_replyq.version, NULL, NULL,
+ RD_KAFKA_OFFSET_INVALID, "Group is %s",
+ rkcg->rkcg_reply_rko ? "terminating"
+ : "terminated");
+ rd_kafka_q_destroy(rkq);
+ rd_kafka_op_destroy(rko);
+ }
+ return;
+ }
+
+ /* Mark for stopping, the actual state transition
+ * is performed when all toppars have left. */
+ rkcg->rkcg_flags |= RD_KAFKA_CGRP_F_TERMINATE;
+ rkcg->rkcg_ts_terminate = rd_clock();
+ rkcg->rkcg_reply_rko = rko;
+
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION)
+ rd_kafka_cgrp_unsubscribe(
+ rkcg,
+ /* Leave group if this is a controlled shutdown */
+ !rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk));
+
+ /* Reset the wait-for-LeaveGroup flag if there is an outstanding
+ * LeaveGroupRequest being waited on (from a prior unsubscribe), but
+ * the destroy flags have NO_CONSUMER_CLOSE set, which calls
+ * for immediate termination. */
+ if (rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk))
+ rkcg->rkcg_flags &= ~RD_KAFKA_CGRP_F_WAIT_LEAVE;
+
+ /* If there's an oustanding rebalance which has not yet been
+ * served by the application it will be served from consumer_close().
+ * If the instance is being terminated with NO_CONSUMER_CLOSE we
+ * trigger unassign directly to avoid stalling on rebalance callback
+ * queues that are no longer served by the application. */
+ if (!RD_KAFKA_CGRP_WAIT_ASSIGN_CALL(rkcg) ||
+ rd_kafka_destroy_flags_no_consumer_close(rkcg->rkcg_rk))
+ rd_kafka_cgrp_unassign(rkcg);
+
+ /* Serve assignment so it can start to decommission */
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+
+ /* Try to terminate right away if all preconditions are met. */
+ rd_kafka_cgrp_try_terminate(rkcg);
+}
+
+
+/**
+ * Terminate and decommission a cgrp asynchronously.
+ *
+ * Locality: any thread
+ */
+void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq) {
+ rd_kafka_assert(NULL, !thrd_is_current(rkcg->rkcg_rk->rk_thread));
+ rd_kafka_cgrp_op(rkcg, NULL, replyq, RD_KAFKA_OP_TERMINATE, 0);
+}
+
+
+struct _op_timeout_offset_commit {
+ rd_ts_t now;
+ rd_kafka_t *rk;
+ rd_list_t expired;
+};
+
+/**
+ * q_filter callback for expiring OFFSET_COMMIT timeouts.
+ */
+static int rd_kafka_op_offset_commit_timeout_check(rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ void *opaque) {
+ struct _op_timeout_offset_commit *state =
+ (struct _op_timeout_offset_commit *)opaque;
+
+ if (likely(rko->rko_type != RD_KAFKA_OP_OFFSET_COMMIT ||
+ rko->rko_u.offset_commit.ts_timeout == 0 ||
+ rko->rko_u.offset_commit.ts_timeout > state->now)) {
+ return 0;
+ }
+
+ rd_kafka_q_deq0(rkq, rko);
+
+ /* Add to temporary list to avoid recursive
+ * locking of rkcg_wait_coord_q. */
+ rd_list_add(&state->expired, rko);
+ return 1;
+}
+
+
+/**
+ * Scan for various timeouts.
+ */
+static void rd_kafka_cgrp_timeout_scan(rd_kafka_cgrp_t *rkcg, rd_ts_t now) {
+ struct _op_timeout_offset_commit ofc_state;
+ int i, cnt = 0;
+ rd_kafka_op_t *rko;
+
+ ofc_state.now = now;
+ ofc_state.rk = rkcg->rkcg_rk;
+ rd_list_init(&ofc_state.expired, 0, NULL);
+
+ cnt += rd_kafka_q_apply(rkcg->rkcg_wait_coord_q,
+ rd_kafka_op_offset_commit_timeout_check,
+ &ofc_state);
+
+ RD_LIST_FOREACH(rko, &ofc_state.expired, i)
+ rd_kafka_cgrp_op_handle_OffsetCommit(rkcg->rkcg_rk, NULL,
+ RD_KAFKA_RESP_ERR__WAIT_COORD,
+ NULL, NULL, rko);
+
+ rd_list_destroy(&ofc_state.expired);
+
+ if (cnt > 0)
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "CGRPTIMEOUT",
+ "Group \"%.*s\": timed out %d op(s), %d remain",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id), cnt,
+ rd_kafka_q_len(rkcg->rkcg_wait_coord_q));
+}
+
+
+/**
+ * @brief Handle an assign op.
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_cgrp_handle_assign_op(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error = NULL;
+
+ if (rd_kafka_fatal_error_code(rkcg->rkcg_rk) ||
+ rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE) {
+ /* Treat all assignments as unassign when a fatal error is
+ * raised or the cgrp is terminating. */
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP | RD_KAFKA_DBG_CONSUMER,
+ "ASSIGN",
+ "Group \"%s\": Consumer %s: "
+ "treating assign as unassign",
+ rkcg->rkcg_group_id->str,
+ rd_kafka_fatal_error_code(rkcg->rkcg_rk)
+ ? "has raised a fatal error"
+ : "is terminating");
+
+ if (rko->rko_u.assign.partitions) {
+ rd_kafka_topic_partition_list_destroy(
+ rko->rko_u.assign.partitions);
+ rko->rko_u.assign.partitions = NULL;
+ }
+ rko->rko_u.assign.method = RD_KAFKA_ASSIGN_METHOD_ASSIGN;
+
+ } else if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE &&
+ !(rko->rko_u.assign.method ==
+ RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN ||
+ rko->rko_u.assign.method ==
+ RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN))
+ error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE,
+ "Changes to the current assignment "
+ "must be made using "
+ "incremental_assign() or "
+ "incremental_unassign() "
+ "when rebalance protocol type is "
+ "COOPERATIVE");
+
+ else if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_EAGER &&
+ !(rko->rko_u.assign.method == RD_KAFKA_ASSIGN_METHOD_ASSIGN))
+ error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__STATE,
+ "Changes to the current assignment "
+ "must be made using "
+ "assign() when rebalance "
+ "protocol type is EAGER");
+
+ if (!error) {
+ switch (rko->rko_u.assign.method) {
+ case RD_KAFKA_ASSIGN_METHOD_ASSIGN:
+ /* New atomic assignment (partitions != NULL),
+ * or unassignment (partitions == NULL) */
+ if (rko->rko_u.assign.partitions)
+ error = rd_kafka_cgrp_assign(
+ rkcg, rko->rko_u.assign.partitions);
+ else
+ error = rd_kafka_cgrp_unassign(rkcg);
+ break;
+ case RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN:
+ error = rd_kafka_cgrp_incremental_assign(
+ rkcg, rko->rko_u.assign.partitions);
+ break;
+ case RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN:
+ error = rd_kafka_cgrp_incremental_unassign(
+ rkcg, rko->rko_u.assign.partitions);
+ break;
+ default:
+ RD_NOTREACHED();
+ break;
+ }
+
+ /* If call succeeded serve the assignment */
+ if (!error)
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+ }
+
+ if (error) {
+ /* Log error since caller might not check
+ * *assign() return value. */
+ rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "ASSIGN",
+ "Group \"%s\": application *assign() call "
+ "failed: %s",
+ rkcg->rkcg_group_id->str,
+ rd_kafka_error_string(error));
+ }
+
+ rd_kafka_op_error_reply(rko, error);
+}
+
+
+/**
+ * @brief Handle cgrp queue op.
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static rd_kafka_op_res_t rd_kafka_cgrp_op_serve(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque) {
+ rd_kafka_cgrp_t *rkcg = opaque;
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_resp_err_t err;
+ const int silent_op = rko->rko_type == RD_KAFKA_OP_RECV_BUF;
+
+ rktp = rko->rko_rktp;
+
+ if (rktp && !silent_op)
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP, "CGRPOP",
+ "Group \"%.*s\" received op %s in state %s "
+ "(join-state %s) for %.*s [%" PRId32 "]",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_op2str(rko->rko_type),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+ else if (!silent_op)
+ rd_kafka_dbg(
+ rkcg->rkcg_rk, CGRP, "CGRPOP",
+ "Group \"%.*s\" received op %s in state %s "
+ "(join-state %s)",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_op2str(rko->rko_type),
+ rd_kafka_cgrp_state_names[rkcg->rkcg_state],
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ switch ((int)rko->rko_type) {
+ case RD_KAFKA_OP_NAME:
+ /* Return the currently assigned member id. */
+ if (rkcg->rkcg_member_id)
+ rko->rko_u.name.str =
+ RD_KAFKAP_STR_DUP(rkcg->rkcg_member_id);
+ rd_kafka_op_reply(rko, 0);
+ rko = NULL;
+ break;
+
+ case RD_KAFKA_OP_CG_METADATA:
+ /* Return the current consumer group metadata. */
+ rko->rko_u.cg_metadata =
+ rkcg->rkcg_member_id
+ ? rd_kafka_consumer_group_metadata_new_with_genid(
+ rkcg->rkcg_rk->rk_conf.group_id_str,
+ rkcg->rkcg_generation_id,
+ rkcg->rkcg_member_id->str,
+ rkcg->rkcg_rk->rk_conf.group_instance_id)
+ : NULL;
+ rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
+ rko = NULL;
+ break;
+
+ case RD_KAFKA_OP_OFFSET_FETCH:
+ if (rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP ||
+ (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)) {
+ rd_kafka_op_handle_OffsetFetch(
+ rkcg->rkcg_rk, NULL, RD_KAFKA_RESP_ERR__WAIT_COORD,
+ NULL, NULL, rko);
+ rko = NULL; /* rko freed by handler */
+ break;
+ }
+
+ rd_kafka_OffsetFetchRequest(
+ rkcg->rkcg_coord, rk->rk_group_id->str,
+ rko->rko_u.offset_fetch.partitions,
+ rko->rko_u.offset_fetch.require_stable_offsets,
+ 0, /* Timeout */
+ RD_KAFKA_REPLYQ(rkcg->rkcg_ops, 0),
+ rd_kafka_op_handle_OffsetFetch, rko);
+ rko = NULL; /* rko now owned by request */
+ break;
+
+ case RD_KAFKA_OP_PARTITION_JOIN:
+ rd_kafka_cgrp_partition_add(rkcg, rktp);
+
+ /* If terminating tell the partition to leave */
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_TERMINATE)
+ rd_kafka_toppar_op_fetch_stop(rktp, RD_KAFKA_NO_REPLYQ);
+ break;
+
+ case RD_KAFKA_OP_PARTITION_LEAVE:
+ rd_kafka_cgrp_partition_del(rkcg, rktp);
+ break;
+
+ case RD_KAFKA_OP_OFFSET_COMMIT:
+ /* Trigger offsets commit. */
+ rd_kafka_cgrp_offsets_commit(rkcg, rko,
+ /* only set offsets
+ * if no partitions were
+ * specified. */
+ rko->rko_u.offset_commit.partitions
+ ? 0
+ : 1 /* set_offsets*/,
+ rko->rko_u.offset_commit.reason);
+ rko = NULL; /* rko now owned by request */
+ break;
+
+ case RD_KAFKA_OP_COORD_QUERY:
+ rd_kafka_cgrp_coord_query(
+ rkcg,
+ rko->rko_err ? rd_kafka_err2str(rko->rko_err) : "from op");
+ break;
+
+ case RD_KAFKA_OP_SUBSCRIBE:
+ rd_kafka_app_polled(rk);
+
+ /* New atomic subscription (may be NULL) */
+ err =
+ rd_kafka_cgrp_subscribe(rkcg, rko->rko_u.subscribe.topics);
+
+ if (!err) /* now owned by rkcg */
+ rko->rko_u.subscribe.topics = NULL;
+
+ rd_kafka_op_reply(rko, err);
+ rko = NULL;
+ break;
+
+ case RD_KAFKA_OP_ASSIGN:
+ rd_kafka_cgrp_handle_assign_op(rkcg, rko);
+ rko = NULL;
+ break;
+
+ case RD_KAFKA_OP_GET_SUBSCRIPTION:
+ if (rkcg->rkcg_next_subscription)
+ rko->rko_u.subscribe.topics =
+ rd_kafka_topic_partition_list_copy(
+ rkcg->rkcg_next_subscription);
+ else if (rkcg->rkcg_next_unsubscribe)
+ rko->rko_u.subscribe.topics = NULL;
+ else if (rkcg->rkcg_subscription)
+ rko->rko_u.subscribe.topics =
+ rd_kafka_topic_partition_list_copy(
+ rkcg->rkcg_subscription);
+ rd_kafka_op_reply(rko, 0);
+ rko = NULL;
+ break;
+
+ case RD_KAFKA_OP_GET_ASSIGNMENT:
+ /* This is the consumer assignment, not the group assignment. */
+ rko->rko_u.assign.partitions =
+ rd_kafka_topic_partition_list_copy(
+ rkcg->rkcg_rk->rk_consumer.assignment.all);
+
+ rd_kafka_op_reply(rko, 0);
+ rko = NULL;
+ break;
+
+ case RD_KAFKA_OP_GET_REBALANCE_PROTOCOL:
+ rko->rko_u.rebalance_protocol.str =
+ rd_kafka_rebalance_protocol2str(
+ rd_kafka_cgrp_rebalance_protocol(rkcg));
+ rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
+ rko = NULL;
+ break;
+
+ case RD_KAFKA_OP_TERMINATE:
+ rd_kafka_cgrp_terminate0(rkcg, rko);
+ rko = NULL; /* terminate0() takes ownership */
+ break;
+
+ default:
+ rd_kafka_assert(rkcg->rkcg_rk, !*"unknown type");
+ break;
+ }
+
+ if (rko)
+ rd_kafka_op_destroy(rko);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @returns true if the session timeout has expired (due to no successful
+ * Heartbeats in session.timeout.ms) and triggers a rebalance.
+ */
+static rd_bool_t rd_kafka_cgrp_session_timeout_check(rd_kafka_cgrp_t *rkcg,
+ rd_ts_t now) {
+ rd_ts_t delta;
+ char buf[256];
+
+ if (unlikely(!rkcg->rkcg_ts_session_timeout))
+ return rd_true; /* Session has expired */
+
+ delta = now - rkcg->rkcg_ts_session_timeout;
+ if (likely(delta < 0))
+ return rd_false;
+
+ delta += rkcg->rkcg_rk->rk_conf.group_session_timeout_ms * 1000;
+
+ rd_snprintf(buf, sizeof(buf),
+ "Consumer group session timed out (in join-state %s) after "
+ "%" PRId64
+ " ms without a successful response from the "
+ "group coordinator (broker %" PRId32 ", last error was %s)",
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state],
+ delta / 1000, rkcg->rkcg_coord_id,
+ rd_kafka_err2str(rkcg->rkcg_last_heartbeat_err));
+
+ rkcg->rkcg_last_heartbeat_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rd_kafka_log(rkcg->rkcg_rk, LOG_WARNING, "SESSTMOUT",
+ "%s: revoking assignment and rejoining group", buf);
+
+ /* Prevent further rebalances */
+ rkcg->rkcg_ts_session_timeout = 0;
+
+ /* Timing out invalidates the member id, reset it
+ * now to avoid an ERR_UNKNOWN_MEMBER_ID on the next join. */
+ rd_kafka_cgrp_set_member_id(rkcg, "");
+
+ /* Revoke and rebalance */
+ rd_kafka_cgrp_revoke_all_rejoin_maybe(rkcg, rd_true /*lost*/,
+ rd_true /*initiating*/, buf);
+
+ return rd_true;
+}
+
+
+/**
+ * @brief Apply the next waiting subscribe/unsubscribe, if any.
+ */
+static void rd_kafka_cgrp_apply_next_subscribe(rd_kafka_cgrp_t *rkcg) {
+ rd_assert(rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_INIT);
+
+ if (rkcg->rkcg_next_subscription) {
+ rd_kafka_topic_partition_list_t *next_subscription =
+ rkcg->rkcg_next_subscription;
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE",
+ "Group \"%s\": invoking waiting postponed "
+ "subscribe",
+ rkcg->rkcg_group_id->str);
+ rkcg->rkcg_next_subscription = NULL;
+ rd_kafka_cgrp_subscribe(rkcg, next_subscription);
+
+ } else if (rkcg->rkcg_next_unsubscribe) {
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "SUBSCRIBE",
+ "Group \"%s\": invoking waiting postponed "
+ "unsubscribe",
+ rkcg->rkcg_group_id->str);
+ rkcg->rkcg_next_unsubscribe = rd_false;
+ rd_kafka_cgrp_unsubscribe(rkcg, rd_true /*Leave*/);
+ }
+}
+
+/**
+ * Client group's join state handling
+ */
+static void rd_kafka_cgrp_join_state_serve(rd_kafka_cgrp_t *rkcg) {
+ rd_ts_t now = rd_clock();
+
+ if (unlikely(rd_kafka_fatal_error_code(rkcg->rkcg_rk)))
+ return;
+
+ switch (rkcg->rkcg_join_state) {
+ case RD_KAFKA_CGRP_JOIN_STATE_INIT:
+ if (unlikely(rd_kafka_cgrp_awaiting_response(rkcg)))
+ break;
+
+ /* If there is a next subscription, apply it. */
+ rd_kafka_cgrp_apply_next_subscribe(rkcg);
+
+ /* If we have a subscription start the join process. */
+ if (!rkcg->rkcg_subscription)
+ break;
+
+ if (rd_interval_immediate(&rkcg->rkcg_join_intvl, 1000 * 1000,
+ now) > 0)
+ rd_kafka_cgrp_join(rkcg);
+ break;
+
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN:
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA:
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC:
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE:
+ /* FIXME: I think we might have to send heartbeats in
+ * in WAIT_INCR_UNASSIGN, yes-no? */
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE:
+ break;
+
+ case RD_KAFKA_CGRP_JOIN_STATE_STEADY:
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL:
+ case RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL:
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_SUBSCRIPTION &&
+ rd_interval(
+ &rkcg->rkcg_heartbeat_intvl,
+ rkcg->rkcg_rk->rk_conf.group_heartbeat_intvl_ms * 1000,
+ now) > 0)
+ rd_kafka_cgrp_heartbeat(rkcg);
+ break;
+ }
+}
+/**
+ * Client group handling.
+ * Called from main thread to serve the operational aspects of a cgrp.
+ */
+void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg) {
+ rd_kafka_broker_t *rkb = rkcg->rkcg_coord;
+ int rkb_state = RD_KAFKA_BROKER_STATE_INIT;
+ rd_ts_t now;
+
+ if (rkb) {
+ rd_kafka_broker_lock(rkb);
+ rkb_state = rkb->rkb_state;
+ rd_kafka_broker_unlock(rkb);
+
+ /* Go back to querying state if we lost the current coordinator
+ * connection. */
+ if (rkb_state < RD_KAFKA_BROKER_STATE_UP &&
+ rkcg->rkcg_state == RD_KAFKA_CGRP_STATE_UP)
+ rd_kafka_cgrp_set_state(
+ rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
+ }
+
+ now = rd_clock();
+
+ /* Check for cgrp termination */
+ if (unlikely(rd_kafka_cgrp_try_terminate(rkcg))) {
+ rd_kafka_cgrp_terminated(rkcg);
+ return; /* cgrp terminated */
+ }
+
+ /* Bail out if we're terminating. */
+ if (unlikely(rd_kafka_terminating(rkcg->rkcg_rk)))
+ return;
+
+ /* Check session timeout regardless of current coordinator
+ * connection state (rkcg_state) */
+ if (rkcg->rkcg_join_state == RD_KAFKA_CGRP_JOIN_STATE_STEADY)
+ rd_kafka_cgrp_session_timeout_check(rkcg, now);
+
+retry:
+ switch (rkcg->rkcg_state) {
+ case RD_KAFKA_CGRP_STATE_TERM:
+ break;
+
+ case RD_KAFKA_CGRP_STATE_INIT:
+ rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_QUERY_COORD);
+ /* FALLTHRU */
+
+ case RD_KAFKA_CGRP_STATE_QUERY_COORD:
+ /* Query for coordinator. */
+ if (rd_interval_immediate(&rkcg->rkcg_coord_query_intvl,
+ 500 * 1000, now) > 0)
+ rd_kafka_cgrp_coord_query(rkcg,
+ "intervaled in "
+ "state query-coord");
+ break;
+
+ case RD_KAFKA_CGRP_STATE_WAIT_COORD:
+ /* Waiting for FindCoordinator response */
+ break;
+
+ case RD_KAFKA_CGRP_STATE_WAIT_BROKER:
+ /* See if the group should be reassigned to another broker. */
+ if (rd_kafka_cgrp_coord_update(rkcg, rkcg->rkcg_coord_id))
+ goto retry; /* Coordinator changed, retry state-machine
+ * to speed up next transition. */
+
+ /* Coordinator query */
+ if (rd_interval(&rkcg->rkcg_coord_query_intvl, 1000 * 1000,
+ now) > 0)
+ rd_kafka_cgrp_coord_query(rkcg,
+ "intervaled in "
+ "state wait-broker");
+ break;
+
+ case RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT:
+ /* Waiting for broker transport to come up.
+ * Also make sure broker supports groups. */
+ if (rkb_state < RD_KAFKA_BROKER_STATE_UP || !rkb ||
+ !rd_kafka_broker_supports(
+ rkb, RD_KAFKA_FEATURE_BROKER_GROUP_COORD)) {
+ /* Coordinator query */
+ if (rd_interval(&rkcg->rkcg_coord_query_intvl,
+ 1000 * 1000, now) > 0)
+ rd_kafka_cgrp_coord_query(
+ rkcg,
+ "intervaled in state "
+ "wait-broker-transport");
+
+ } else {
+ rd_kafka_cgrp_set_state(rkcg, RD_KAFKA_CGRP_STATE_UP);
+
+ /* Serve join state to trigger (re)join */
+ rd_kafka_cgrp_join_state_serve(rkcg);
+
+ /* Serve any pending partitions in the assignment */
+ rd_kafka_assignment_serve(rkcg->rkcg_rk);
+ }
+ break;
+
+ case RD_KAFKA_CGRP_STATE_UP:
+ /* Move any ops awaiting the coordinator to the ops queue
+ * for reprocessing. */
+ rd_kafka_q_concat(rkcg->rkcg_ops, rkcg->rkcg_wait_coord_q);
+
+ /* Relaxed coordinator queries. */
+ if (rd_interval(&rkcg->rkcg_coord_query_intvl,
+ rkcg->rkcg_rk->rk_conf.coord_query_intvl_ms *
+ 1000,
+ now) > 0)
+ rd_kafka_cgrp_coord_query(rkcg,
+ "intervaled in state up");
+
+ rd_kafka_cgrp_join_state_serve(rkcg);
+ break;
+ }
+
+ if (unlikely(rkcg->rkcg_state != RD_KAFKA_CGRP_STATE_UP &&
+ rd_interval(&rkcg->rkcg_timeout_scan_intvl, 1000 * 1000,
+ now) > 0))
+ rd_kafka_cgrp_timeout_scan(rkcg, now);
+}
+
+
+
+/**
+ * Send an op to a cgrp.
+ *
+ * Locality: any thread
+ */
+void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_op_type_t type,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(type);
+ rko->rko_err = err;
+ rko->rko_replyq = replyq;
+
+ if (rktp)
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_q_enq(rkcg->rkcg_ops, rko);
+}
+
+
+
+void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id) {
+ if (rkcg->rkcg_member_id && member_id &&
+ !rd_kafkap_str_cmp_str(rkcg->rkcg_member_id, member_id))
+ return; /* No change */
+
+ rd_kafka_dbg(rkcg->rkcg_rk, CGRP, "MEMBERID",
+ "Group \"%.*s\": updating member id \"%s\" -> \"%s\"",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rkcg->rkcg_member_id ? rkcg->rkcg_member_id->str
+ : "(not-set)",
+ member_id ? member_id : "(not-set)");
+
+ if (rkcg->rkcg_member_id) {
+ rd_kafkap_str_destroy(rkcg->rkcg_member_id);
+ rkcg->rkcg_member_id = NULL;
+ }
+
+ if (member_id)
+ rkcg->rkcg_member_id = rd_kafkap_str_new(member_id, -1);
+}
+
+
+/**
+ * @brief Determine owned partitions that no longer exist (partitions in
+ * deleted or re-created topics).
+ */
+static rd_kafka_topic_partition_list_t *
+rd_kafka_cgrp_owned_but_not_exist_partitions(rd_kafka_cgrp_t *rkcg) {
+ rd_kafka_topic_partition_list_t *result = NULL;
+ const rd_kafka_topic_partition_t *curr;
+
+ if (!rkcg->rkcg_group_assignment)
+ return NULL;
+
+ RD_KAFKA_TPLIST_FOREACH(curr, rkcg->rkcg_group_assignment) {
+ if (rd_list_find(rkcg->rkcg_subscribed_topics, curr->topic,
+ rd_kafka_topic_info_topic_cmp))
+ continue;
+
+ if (!result)
+ result = rd_kafka_topic_partition_list_new(
+ rkcg->rkcg_group_assignment->cnt);
+
+ rd_kafka_topic_partition_list_add_copy(result, curr);
+ }
+
+ return result;
+}
+
+
+/**
+ * @brief Check if the latest metadata affects the current subscription:
+ * - matched topic added
+ * - matched topic removed
+ * - matched topic's partition count change
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg,
+ rd_bool_t do_join) {
+ rd_list_t *tinfos;
+ rd_kafka_topic_partition_list_t *errored;
+ rd_bool_t changed;
+
+ rd_kafka_assert(NULL, thrd_is_current(rkcg->rkcg_rk->rk_thread));
+
+ if (!rkcg->rkcg_subscription || rkcg->rkcg_subscription->cnt == 0)
+ return;
+
+ /*
+ * Unmatched topics will be added to the errored list.
+ */
+ errored = rd_kafka_topic_partition_list_new(0);
+
+ /*
+ * Create a list of the topics in metadata that matches our subscription
+ */
+ tinfos = rd_list_new(rkcg->rkcg_subscription->cnt,
+ (void *)rd_kafka_topic_info_destroy);
+
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION)
+ rd_kafka_metadata_topic_match(rkcg->rkcg_rk, tinfos,
+ rkcg->rkcg_subscription, errored);
+ else
+ rd_kafka_metadata_topic_filter(
+ rkcg->rkcg_rk, tinfos, rkcg->rkcg_subscription, errored);
+
+
+ /*
+ * Propagate consumer errors for any non-existent or errored topics.
+ * The function takes ownership of errored.
+ */
+ rd_kafka_propagate_consumer_topic_errors(
+ rkcg, errored, "Subscribed topic not available");
+
+ /*
+ * Update effective list of topics (takes ownership of \c tinfos)
+ */
+ changed = rd_kafka_cgrp_update_subscribed_topics(rkcg, tinfos);
+
+ if (!do_join ||
+ (!changed &&
+ /* If we get the same effective list of topics as last time around,
+ * but the join is waiting for this metadata query to complete,
+ * then we should not return here but follow through with the
+ * (re)join below. */
+ rkcg->rkcg_join_state != RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA))
+ return;
+
+ /* List of subscribed topics changed, trigger rejoin. */
+ rd_kafka_dbg(rkcg->rkcg_rk,
+ CGRP | RD_KAFKA_DBG_METADATA | RD_KAFKA_DBG_CONSUMER,
+ "REJOIN",
+ "Group \"%.*s\": "
+ "subscription updated from metadata change: "
+ "rejoining group in state %s",
+ RD_KAFKAP_STR_PR(rkcg->rkcg_group_id),
+ rd_kafka_cgrp_join_state_names[rkcg->rkcg_join_state]);
+
+ if (rd_kafka_cgrp_rebalance_protocol(rkcg) ==
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE) {
+
+ /* Partitions from deleted topics */
+ rd_kafka_topic_partition_list_t *owned_but_not_exist =
+ rd_kafka_cgrp_owned_but_not_exist_partitions(rkcg);
+
+ if (owned_but_not_exist) {
+ rd_kafka_cgrp_assignment_set_lost(
+ rkcg, "%d subscribed topic(s) no longer exist",
+ owned_but_not_exist->cnt);
+
+ rd_kafka_rebalance_op_incr(
+ rkcg, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ owned_but_not_exist,
+ rkcg->rkcg_group_leader.members != NULL
+ /* Rejoin group following revoke's
+ * unassign if we are leader */
+ ,
+ "topics not available");
+ rd_kafka_topic_partition_list_destroy(
+ owned_but_not_exist);
+
+ } else {
+ /* Nothing to revoke, rejoin group if we are the
+ * leader.
+ * The KIP says to rejoin the group on metadata
+ * change only if we're the leader. But what if a
+ * non-leader is subscribed to a regex that the others
+ * aren't?
+ * Going against the KIP and rejoining here. */
+ rd_kafka_cgrp_rejoin(
+ rkcg,
+ "Metadata for subscribed topic(s) has "
+ "changed");
+ }
+
+ } else {
+ /* EAGER */
+ rd_kafka_cgrp_revoke_rejoin(rkcg,
+ "Metadata for subscribed topic(s) "
+ "has changed");
+ }
+
+ /* We shouldn't get stuck in this state. */
+ rd_dassert(rkcg->rkcg_join_state !=
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA);
+}
+
+
+rd_kafka_consumer_group_metadata_t *
+rd_kafka_consumer_group_metadata_new(const char *group_id) {
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new_with_genid(
+ group_id, -1, "", NULL);
+
+ return cgmetadata;
+}
+
+rd_kafka_consumer_group_metadata_t *
+rd_kafka_consumer_group_metadata_new_with_genid(const char *group_id,
+ int32_t generation_id,
+ const char *member_id,
+ const char *group_instance_id) {
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ cgmetadata = rd_calloc(1, sizeof(*cgmetadata));
+ cgmetadata->group_id = rd_strdup(group_id);
+ cgmetadata->generation_id = generation_id;
+ cgmetadata->member_id = rd_strdup(member_id);
+ if (group_instance_id)
+ cgmetadata->group_instance_id = rd_strdup(group_instance_id);
+
+ return cgmetadata;
+}
+
+rd_kafka_consumer_group_metadata_t *
+rd_kafka_consumer_group_metadata(rd_kafka_t *rk) {
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ rd_kafka_op_t *rko;
+ rd_kafka_cgrp_t *rkcg;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return NULL;
+
+ rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_CG_METADATA);
+ if (!rko)
+ return NULL;
+
+ cgmetadata = rko->rko_u.cg_metadata;
+ rko->rko_u.cg_metadata = NULL;
+ rd_kafka_op_destroy(rko);
+
+ return cgmetadata;
+}
+
+void rd_kafka_consumer_group_metadata_destroy(
+ rd_kafka_consumer_group_metadata_t *cgmetadata) {
+ rd_free(cgmetadata->group_id);
+ rd_free(cgmetadata->member_id);
+ if (cgmetadata->group_instance_id)
+ rd_free(cgmetadata->group_instance_id);
+ rd_free(cgmetadata);
+}
+
+rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup(
+ const rd_kafka_consumer_group_metadata_t *cgmetadata) {
+ rd_kafka_consumer_group_metadata_t *ret;
+
+ ret = rd_calloc(1, sizeof(*cgmetadata));
+ ret->group_id = rd_strdup(cgmetadata->group_id);
+ ret->generation_id = cgmetadata->generation_id;
+ ret->member_id = rd_strdup(cgmetadata->member_id);
+ if (cgmetadata->group_instance_id)
+ ret->group_instance_id =
+ rd_strdup(cgmetadata->group_instance_id);
+
+ return ret;
+}
+
+/*
+ * Consumer group metadata serialization format v2:
+ * "CGMDv2:"<generation_id><group_id>"\0"<member_id>"\0" \
+ * <group_instance_id_is_null>[<group_instance_id>"\0"]
+ * Where <group_id> is the group_id string.
+ */
+static const char rd_kafka_consumer_group_metadata_magic[7] = "CGMDv2:";
+
+rd_kafka_error_t *rd_kafka_consumer_group_metadata_write(
+ const rd_kafka_consumer_group_metadata_t *cgmd,
+ void **bufferp,
+ size_t *sizep) {
+ char *buf;
+ size_t size;
+ size_t of = 0;
+ size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic);
+ size_t groupid_len = strlen(cgmd->group_id) + 1;
+ size_t generationid_len = sizeof(cgmd->generation_id);
+ size_t member_id_len = strlen(cgmd->member_id) + 1;
+ int8_t group_instance_id_is_null = cgmd->group_instance_id ? 0 : 1;
+ size_t group_instance_id_is_null_len =
+ sizeof(group_instance_id_is_null);
+ size_t group_instance_id_len =
+ cgmd->group_instance_id ? strlen(cgmd->group_instance_id) + 1 : 0;
+
+ size = magic_len + groupid_len + generationid_len + member_id_len +
+ group_instance_id_is_null_len + group_instance_id_len;
+
+ buf = rd_malloc(size);
+
+ memcpy(buf, rd_kafka_consumer_group_metadata_magic, magic_len);
+ of += magic_len;
+
+ memcpy(buf + of, &cgmd->generation_id, generationid_len);
+ of += generationid_len;
+
+ memcpy(buf + of, cgmd->group_id, groupid_len);
+ of += groupid_len;
+
+ memcpy(buf + of, cgmd->member_id, member_id_len);
+ of += member_id_len;
+
+ memcpy(buf + of, &group_instance_id_is_null,
+ group_instance_id_is_null_len);
+ of += group_instance_id_is_null_len;
+
+ if (!group_instance_id_is_null)
+ memcpy(buf + of, cgmd->group_instance_id,
+ group_instance_id_len);
+ of += group_instance_id_len;
+
+ rd_assert(of == size);
+
+ *bufferp = buf;
+ *sizep = size;
+
+ return NULL;
+}
+
+
+/*
+ * Check that a string is printable, returning NULL if not or
+ * a pointer immediately after the end of the string NUL
+ * terminator if so.
+ **/
+static const char *str_is_printable(const char *s, const char *end) {
+ const char *c;
+ for (c = s; *c && c != end; c++)
+ if (!isprint((int)*c))
+ return NULL;
+ return c + 1;
+}
+
+
+rd_kafka_error_t *rd_kafka_consumer_group_metadata_read(
+ rd_kafka_consumer_group_metadata_t **cgmdp,
+ const void *buffer,
+ size_t size) {
+ const char *buf = (const char *)buffer;
+ const char *end = buf + size;
+ const char *next;
+ size_t magic_len = sizeof(rd_kafka_consumer_group_metadata_magic);
+ int32_t generation_id;
+ size_t generationid_len = sizeof(generation_id);
+ const char *group_id;
+ const char *member_id;
+ int8_t group_instance_id_is_null;
+ const char *group_instance_id = NULL;
+
+ if (size < magic_len + generationid_len + 1 + 1 + 1)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
+ "Input buffer is too short");
+
+ if (memcmp(buffer, rd_kafka_consumer_group_metadata_magic, magic_len))
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
+ "Input buffer is not a serialized "
+ "consumer group metadata object");
+ memcpy(&generation_id, buf + magic_len, generationid_len);
+
+ group_id = buf + magic_len + generationid_len;
+ next = str_is_printable(group_id, end);
+ if (!next)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
+ "Input buffer group id is not safe");
+
+ member_id = next;
+ next = str_is_printable(member_id, end);
+ if (!next)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
+ "Input buffer member id is not "
+ "safe");
+
+ group_instance_id_is_null = (int8_t) * (next++);
+ if (!group_instance_id_is_null) {
+ group_instance_id = next;
+ next = str_is_printable(group_instance_id, end);
+ if (!next)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
+ "Input buffer group "
+ "instance id is not safe");
+ }
+
+ if (next != end)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__BAD_MSG,
+ "Input buffer bad length");
+
+ *cgmdp = rd_kafka_consumer_group_metadata_new_with_genid(
+ group_id, generation_id, member_id, group_instance_id);
+
+ return NULL;
+}
+
+
+static int
+unittest_consumer_group_metadata_iteration(const char *group_id,
+ int32_t generation_id,
+ const char *member_id,
+ const char *group_instance_id) {
+ rd_kafka_consumer_group_metadata_t *cgmd;
+ void *buffer, *buffer2;
+ size_t size, size2;
+ rd_kafka_error_t *error;
+
+ cgmd = rd_kafka_consumer_group_metadata_new_with_genid(
+ group_id, generation_id, member_id, group_instance_id);
+ RD_UT_ASSERT(cgmd != NULL, "failed to create metadata");
+
+ error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer, &size);
+ RD_UT_ASSERT(!error, "metadata_write failed: %s",
+ rd_kafka_error_string(error));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmd);
+
+ cgmd = NULL;
+ error = rd_kafka_consumer_group_metadata_read(&cgmd, buffer, size);
+ RD_UT_ASSERT(!error, "metadata_read failed: %s",
+ rd_kafka_error_string(error));
+
+ /* Serialize again and compare buffers */
+ error = rd_kafka_consumer_group_metadata_write(cgmd, &buffer2, &size2);
+ RD_UT_ASSERT(!error, "metadata_write failed: %s",
+ rd_kafka_error_string(error));
+
+ RD_UT_ASSERT(size == size2 && !memcmp(buffer, buffer2, size),
+ "metadata_read/write size or content mismatch: "
+ "size %" PRIusz ", size2 %" PRIusz,
+ size, size2);
+
+ rd_kafka_consumer_group_metadata_destroy(cgmd);
+ rd_free(buffer);
+ rd_free(buffer2);
+
+ return 0;
+}
+
+
+static int unittest_consumer_group_metadata(void) {
+ const char *ids[] = {
+ "mY. random id:.",
+ "0",
+ "2222222222222222222222221111111111111111111111111111112222",
+ "",
+ "NULL",
+ NULL,
+ };
+ int i, j, k, gen_id;
+ int ret;
+ const char *group_id;
+ const char *member_id;
+ const char *group_instance_id;
+
+ for (i = 0; ids[i]; i++) {
+ for (j = 0; ids[j]; j++) {
+ for (k = 0; ids[k]; k++) {
+ for (gen_id = -1; gen_id < 1; gen_id++) {
+ group_id = ids[i];
+ member_id = ids[j];
+ group_instance_id = ids[k];
+ if (strcmp(group_instance_id, "NULL") ==
+ 0)
+ group_instance_id = NULL;
+ ret =
+ unittest_consumer_group_metadata_iteration(
+ group_id, gen_id, member_id,
+ group_instance_id);
+ if (ret)
+ return ret;
+ }
+ }
+ }
+ }
+
+ RD_UT_PASS();
+}
+
+
+static int unittest_set_intersect(void) {
+ size_t par_cnt = 10;
+ map_toppar_member_info_t *dst;
+ rd_kafka_topic_partition_t *toppar;
+ PartitionMemberInfo_t *v;
+ char *id = "id";
+ rd_kafkap_str_t id1 = RD_KAFKAP_STR_INITIALIZER;
+ rd_kafkap_str_t id2 = RD_KAFKAP_STR_INITIALIZER;
+ rd_kafka_group_member_t *gm1;
+ rd_kafka_group_member_t *gm2;
+
+ id1.len = 2;
+ id1.str = id;
+ id2.len = 2;
+ id2.str = id;
+
+ map_toppar_member_info_t a = RD_MAP_INITIALIZER(
+ par_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
+
+ map_toppar_member_info_t b = RD_MAP_INITIALIZER(
+ par_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
+
+ gm1 = rd_calloc(1, sizeof(*gm1));
+ gm1->rkgm_member_id = &id1;
+ gm1->rkgm_group_instance_id = &id1;
+ gm2 = rd_calloc(1, sizeof(*gm2));
+ gm2->rkgm_member_id = &id2;
+ gm2->rkgm_group_instance_id = &id2;
+
+ RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4),
+ PartitionMemberInfo_new(gm1, rd_false));
+ RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 4),
+ PartitionMemberInfo_new(gm1, rd_false));
+ RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 7),
+ PartitionMemberInfo_new(gm1, rd_false));
+
+ RD_MAP_SET(&b, rd_kafka_topic_partition_new("t2", 7),
+ PartitionMemberInfo_new(gm1, rd_false));
+ RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4),
+ PartitionMemberInfo_new(gm2, rd_false));
+
+ dst = rd_kafka_member_partitions_intersect(&a, &b);
+
+ RD_UT_ASSERT(RD_MAP_CNT(&a) == 3, "expected a cnt to be 3 not %d",
+ (int)RD_MAP_CNT(&a));
+ RD_UT_ASSERT(RD_MAP_CNT(&b) == 2, "expected b cnt to be 2 not %d",
+ (int)RD_MAP_CNT(&b));
+ RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d",
+ (int)RD_MAP_CNT(dst));
+
+ toppar = rd_kafka_topic_partition_new("t1", 4);
+ RD_UT_ASSERT((v = RD_MAP_GET(dst, toppar)), "unexpected element");
+ RD_UT_ASSERT(v->members_match, "expected members to match");
+ rd_kafka_topic_partition_destroy(toppar);
+
+ RD_MAP_DESTROY(&a);
+ RD_MAP_DESTROY(&b);
+ RD_MAP_DESTROY(dst);
+ rd_free(dst);
+
+ rd_free(gm1);
+ rd_free(gm2);
+
+ RD_UT_PASS();
+}
+
+
+static int unittest_set_subtract(void) {
+ size_t par_cnt = 10;
+ rd_kafka_topic_partition_t *toppar;
+ map_toppar_member_info_t *dst;
+
+ map_toppar_member_info_t a = RD_MAP_INITIALIZER(
+ par_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
+
+ map_toppar_member_info_t b = RD_MAP_INITIALIZER(
+ par_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
+
+ RD_MAP_SET(&a, rd_kafka_topic_partition_new("t1", 4),
+ PartitionMemberInfo_new(NULL, rd_false));
+ RD_MAP_SET(&a, rd_kafka_topic_partition_new("t2", 7),
+ PartitionMemberInfo_new(NULL, rd_false));
+
+ RD_MAP_SET(&b, rd_kafka_topic_partition_new("t2", 4),
+ PartitionMemberInfo_new(NULL, rd_false));
+ RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 4),
+ PartitionMemberInfo_new(NULL, rd_false));
+ RD_MAP_SET(&b, rd_kafka_topic_partition_new("t1", 7),
+ PartitionMemberInfo_new(NULL, rd_false));
+
+ dst = rd_kafka_member_partitions_subtract(&a, &b);
+
+ RD_UT_ASSERT(RD_MAP_CNT(&a) == 2, "expected a cnt to be 2 not %d",
+ (int)RD_MAP_CNT(&a));
+ RD_UT_ASSERT(RD_MAP_CNT(&b) == 3, "expected b cnt to be 3 not %d",
+ (int)RD_MAP_CNT(&b));
+ RD_UT_ASSERT(RD_MAP_CNT(dst) == 1, "expected dst cnt to be 1 not %d",
+ (int)RD_MAP_CNT(dst));
+
+ toppar = rd_kafka_topic_partition_new("t2", 7);
+ RD_UT_ASSERT(RD_MAP_GET(dst, toppar), "unexpected element");
+ rd_kafka_topic_partition_destroy(toppar);
+
+ RD_MAP_DESTROY(&a);
+ RD_MAP_DESTROY(&b);
+ RD_MAP_DESTROY(dst);
+ rd_free(dst);
+
+ RD_UT_PASS();
+}
+
+
+static int unittest_map_to_list(void) {
+ rd_kafka_topic_partition_list_t *list;
+
+ map_toppar_member_info_t map = RD_MAP_INITIALIZER(
+ 10, rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free, PartitionMemberInfo_free);
+
+ RD_MAP_SET(&map, rd_kafka_topic_partition_new("t1", 101),
+ PartitionMemberInfo_new(NULL, rd_false));
+
+ list = rd_kafka_toppar_member_info_map_to_list(&map);
+
+ RD_UT_ASSERT(list->cnt == 1, "expecting list size of 1 not %d.",
+ list->cnt);
+ RD_UT_ASSERT(list->elems[0].partition == 101,
+ "expecting partition 101 not %d",
+ list->elems[0].partition);
+ RD_UT_ASSERT(!strcmp(list->elems[0].topic, "t1"),
+ "expecting topic 't1', not %s", list->elems[0].topic);
+
+ rd_kafka_topic_partition_list_destroy(list);
+ RD_MAP_DESTROY(&map);
+
+ RD_UT_PASS();
+}
+
+
+static int unittest_list_to_map(void) {
+ rd_kafka_topic_partition_t *toppar;
+ map_toppar_member_info_t *map;
+ rd_kafka_topic_partition_list_t *list =
+ rd_kafka_topic_partition_list_new(1);
+
+ rd_kafka_topic_partition_list_add(list, "topic1", 201);
+ rd_kafka_topic_partition_list_add(list, "topic2", 202);
+
+ map = rd_kafka_toppar_list_to_toppar_member_info_map(list);
+
+ RD_UT_ASSERT(RD_MAP_CNT(map) == 2, "expected map cnt to be 2 not %d",
+ (int)RD_MAP_CNT(map));
+ toppar = rd_kafka_topic_partition_new("topic1", 201);
+ RD_UT_ASSERT(RD_MAP_GET(map, toppar),
+ "expected topic1 [201] to exist in map");
+ rd_kafka_topic_partition_destroy(toppar);
+ toppar = rd_kafka_topic_partition_new("topic2", 202);
+ RD_UT_ASSERT(RD_MAP_GET(map, toppar),
+ "expected topic2 [202] to exist in map");
+ rd_kafka_topic_partition_destroy(toppar);
+
+ RD_MAP_DESTROY(map);
+ rd_free(map);
+ rd_kafka_topic_partition_list_destroy(list);
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Consumer group unit tests
+ */
+int unittest_cgrp(void) {
+ int fails = 0;
+
+ fails += unittest_consumer_group_metadata();
+ fails += unittest_set_intersect();
+ fails += unittest_set_subtract();
+ fails += unittest_map_to_list();
+ fails += unittest_list_to_map();
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h
new file mode 100644
index 000000000..4fa51e548
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_cgrp.h
@@ -0,0 +1,383 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_CGRP_H_
+#define _RDKAFKA_CGRP_H_
+
+#include "rdinterval.h"
+
+#include "rdkafka_assignor.h"
+
+
+/**
+ * Client groups implementation
+ *
+ * Client groups handling for a single cgrp is assigned to a single
+ * rd_kafka_broker_t object at any given time.
+ * The main thread will call cgrp_serve() to serve its cgrps.
+ *
+ * This means that the cgrp itself does not need to be locked since it
+ * is only ever used from the main thread.
+ *
+ */
+
+
+extern const char *rd_kafka_cgrp_join_state_names[];
+
+/**
+ * Client group
+ */
+typedef struct rd_kafka_cgrp_s {
+ const rd_kafkap_str_t *rkcg_group_id;
+ rd_kafkap_str_t *rkcg_member_id; /* Last assigned MemberId */
+ rd_kafkap_str_t *rkcg_group_instance_id;
+ const rd_kafkap_str_t *rkcg_client_id;
+
+ enum {
+ /* Init state */
+ RD_KAFKA_CGRP_STATE_INIT,
+
+ /* Cgrp has been stopped. This is a final state */
+ RD_KAFKA_CGRP_STATE_TERM,
+
+ /* Query for group coordinator */
+ RD_KAFKA_CGRP_STATE_QUERY_COORD,
+
+ /* Outstanding query, awaiting response */
+ RD_KAFKA_CGRP_STATE_WAIT_COORD,
+
+ /* Wait ack from assigned cgrp manager broker thread */
+ RD_KAFKA_CGRP_STATE_WAIT_BROKER,
+
+ /* Wait for manager broker thread to connect to broker */
+ RD_KAFKA_CGRP_STATE_WAIT_BROKER_TRANSPORT,
+
+ /* Coordinator is up and manager is assigned. */
+ RD_KAFKA_CGRP_STATE_UP,
+ } rkcg_state;
+ rd_ts_t rkcg_ts_statechange; /* Timestamp of last
+ * state change. */
+
+
+ enum {
+ /* all: join or rejoin, possibly with an existing assignment. */
+ RD_KAFKA_CGRP_JOIN_STATE_INIT,
+
+ /* all: JoinGroupRequest sent, awaiting response. */
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_JOIN,
+
+ /* all: MetadataRequest sent, awaiting response.
+ * While metadata requests may be issued at any time,
+ * this state is only set upon a proper (re)join. */
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_METADATA,
+
+ /* Follower: SyncGroupRequest sent, awaiting response. */
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_SYNC,
+
+ /* all: waiting for application to call *_assign() */
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_ASSIGN_CALL,
+
+ /* all: waiting for application to call *_unassign() */
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_CALL,
+
+ /* all: waiting for full assignment to decommission */
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_UNASSIGN_TO_COMPLETE,
+
+ /* all: waiting for partial assignment to decommission */
+ RD_KAFKA_CGRP_JOIN_STATE_WAIT_INCR_UNASSIGN_TO_COMPLETE,
+
+ /* all: synchronized and assigned
+ * may be an empty assignment. */
+ RD_KAFKA_CGRP_JOIN_STATE_STEADY,
+ } rkcg_join_state;
+
+ /* State when group leader */
+ struct {
+ rd_kafka_group_member_t *members;
+ int member_cnt;
+ } rkcg_group_leader;
+
+ rd_kafka_q_t *rkcg_q; /* Application poll queue */
+ rd_kafka_q_t *rkcg_ops; /* Manager ops queue */
+ rd_kafka_q_t *rkcg_wait_coord_q; /* Ops awaiting coord */
+ int rkcg_flags;
+#define RD_KAFKA_CGRP_F_TERMINATE 0x1 /* Terminate cgrp (async) */
+#define RD_KAFKA_CGRP_F_LEAVE_ON_UNASSIGN_DONE \
+ 0x8 /* Send LeaveGroup when \
+ * unassign is done */
+#define RD_KAFKA_CGRP_F_SUBSCRIPTION \
+ 0x10 /* If set: \
+ * subscription \
+ * else: \
+ * static assignment */
+#define RD_KAFKA_CGRP_F_HEARTBEAT_IN_TRANSIT \
+ 0x20 /* A Heartbeat request \
+ * is in transit, dont \
+ * send a new one. */
+#define RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION \
+ 0x40 /* Subscription contains \
+ * wildcards. */
+#define RD_KAFKA_CGRP_F_WAIT_LEAVE \
+ 0x80 /* Wait for LeaveGroup \
+ * to be sent. \
+ * This is used to stall \
+ * termination until \
+ * the LeaveGroupRequest \
+ * is responded to, \
+ * otherwise it risks \
+ * being dropped in the \
+ * output queue when \
+ * the broker is destroyed. \
+ */
+#define RD_KAFKA_CGRP_F_MAX_POLL_EXCEEDED \
+ 0x100 /**< max.poll.interval.ms \
+ * was exceeded and we \
+ * left the group. \
+ * Do not rejoin until \
+ * the application has \
+ * polled again. */
+
+ rd_interval_t rkcg_coord_query_intvl; /* Coordinator query intvl*/
+ rd_interval_t rkcg_heartbeat_intvl; /* Heartbeat intvl */
+ rd_interval_t rkcg_join_intvl; /* JoinGroup interval */
+ rd_interval_t rkcg_timeout_scan_intvl; /* Timeout scanner */
+
+ rd_ts_t rkcg_ts_session_timeout; /**< Absolute session
+ * timeout enforced by
+ * the consumer, this
+ * value is updated on
+ * Heartbeat success,
+ * etc. */
+ rd_kafka_resp_err_t rkcg_last_heartbeat_err; /**< Last Heartbeat error,
+ * used for logging. */
+
+ TAILQ_HEAD(, rd_kafka_topic_s) rkcg_topics; /* Topics subscribed to */
+
+ rd_list_t rkcg_toppars; /* Toppars subscribed to*/
+
+ int32_t rkcg_generation_id; /* Current generation id */
+
+ rd_kafka_assignor_t *rkcg_assignor; /**< The current partition
+ * assignor. used by both
+ * leader and members. */
+ void *rkcg_assignor_state; /**< current partition
+ * assignor state */
+
+ int32_t rkcg_coord_id; /**< Current coordinator id,
+ * or -1 if not known. */
+
+ rd_kafka_broker_t *rkcg_curr_coord; /**< Current coordinator
+ * broker handle, or NULL.
+ * rkcg_coord's nodename is
+ * updated to this broker's
+ * nodename when there is a
+ * coordinator change. */
+ rd_kafka_broker_t *rkcg_coord; /**< The dedicated coordinator
+ * broker handle.
+ * Will be updated when the
+ * coordinator changes. */
+
+ int16_t rkcg_wait_resp; /**< Awaiting response for this
+ * ApiKey.
+ * Makes sure only one
+ * JoinGroup or SyncGroup
+ * request is outstanding.
+ * Unset value is -1. */
+
+ /** Current subscription */
+ rd_kafka_topic_partition_list_t *rkcg_subscription;
+ /** The actual topics subscribed (after metadata+wildcard matching).
+ * Sorted. */
+ rd_list_t *rkcg_subscribed_topics; /**< (rd_kafka_topic_info_t *) */
+ /** Subscribed topics that are errored/not available. */
+ rd_kafka_topic_partition_list_t *rkcg_errored_topics;
+ /** If a SUBSCRIBE op is received during a COOPERATIVE rebalance,
+ * actioning this will be postponed until after the rebalance
+ * completes. The waiting subscription is stored here.
+ * Mutually exclusive with rkcg_next_subscription. */
+ rd_kafka_topic_partition_list_t *rkcg_next_subscription;
+ /** If a (un)SUBSCRIBE op is received during a COOPERATIVE rebalance,
+ * actioning this will be posponed until after the rebalance
+ * completes. This flag is used to signal a waiting unsubscribe
+ * operation. Mutually exclusive with rkcg_next_subscription. */
+ rd_bool_t rkcg_next_unsubscribe;
+
+ /** Assignment considered lost */
+ rd_atomic32_t rkcg_assignment_lost;
+
+ /** Current assignment of partitions from last SyncGroup response.
+ * NULL means no assignment, else empty or non-empty assignment.
+ *
+ * This group assignment is the actual set of partitions that were
+ * assigned to our consumer by the consumer group leader and should
+ * not be confused with the rk_consumer.assignment which is the
+ * partitions assigned by the application using assign(), et.al.
+ *
+ * The group assignment and the consumer assignment are typically
+ * identical, but not necessarily since an application is free to
+ * assign() any partition, not just the partitions it is handed
+ * through the rebalance callback.
+ *
+ * Yes, this nomenclature is ambigious but has historical reasons,
+ * so for now just try to remember that:
+ * - group assignment == consumer group assignment.
+ * - assignment == actual used assignment, i.e., fetched partitions.
+ *
+ * @remark This list is always sorted.
+ */
+ rd_kafka_topic_partition_list_t *rkcg_group_assignment;
+
+ /** The partitions to incrementally assign following a
+ * currently in-progress incremental unassign. */
+ rd_kafka_topic_partition_list_t *rkcg_rebalance_incr_assignment;
+
+ /** Rejoin the group following a currently in-progress
+ * incremental unassign. */
+ rd_bool_t rkcg_rebalance_rejoin;
+
+ rd_kafka_resp_err_t rkcg_last_err; /* Last error propagated to
+ * application.
+ * This is for silencing
+ * same errors. */
+
+ rd_kafka_timer_t rkcg_offset_commit_tmr; /* Offset commit timer */
+ rd_kafka_timer_t rkcg_max_poll_interval_tmr; /**< Enforce the max
+ * poll interval. */
+
+ rd_kafka_t *rkcg_rk;
+
+ rd_kafka_op_t *rkcg_reply_rko; /* Send reply for op
+ * (OP_TERMINATE)
+ * to this rko's queue. */
+
+ rd_ts_t rkcg_ts_terminate; /* Timestamp of when
+ * cgrp termination was
+ * initiated. */
+
+ rd_atomic32_t rkcg_terminated; /**< Consumer has been closed */
+
+ /* Protected by rd_kafka_*lock() */
+ struct {
+ rd_ts_t ts_rebalance; /* Timestamp of
+ * last rebalance */
+ int rebalance_cnt; /* Number of
+ rebalances */
+ char rebalance_reason[256]; /**< Last rebalance
+ * reason */
+ int assignment_size; /* Partition count
+ * of last rebalance
+ * assignment */
+ } rkcg_c;
+
+} rd_kafka_cgrp_t;
+
+
+
+/* Check if broker is the coordinator */
+#define RD_KAFKA_CGRP_BROKER_IS_COORD(rkcg, rkb) \
+ ((rkcg)->rkcg_coord_id != -1 && \
+ (rkcg)->rkcg_coord_id == (rkb)->rkb_nodeid)
+
+/**
+ * @returns true if cgrp is using static group membership
+ */
+#define RD_KAFKA_CGRP_IS_STATIC_MEMBER(rkcg) \
+ !RD_KAFKAP_STR_IS_NULL((rkcg)->rkcg_group_instance_id)
+
+extern const char *rd_kafka_cgrp_state_names[];
+extern const char *rd_kafka_cgrp_join_state_names[];
+
+void rd_kafka_cgrp_destroy_final(rd_kafka_cgrp_t *rkcg);
+rd_kafka_cgrp_t *rd_kafka_cgrp_new(rd_kafka_t *rk,
+ const rd_kafkap_str_t *group_id,
+ const rd_kafkap_str_t *client_id);
+void rd_kafka_cgrp_serve(rd_kafka_cgrp_t *rkcg);
+
+void rd_kafka_cgrp_op(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_op_type_t type,
+ rd_kafka_resp_err_t err);
+void rd_kafka_cgrp_terminate0(rd_kafka_cgrp_t *rkcg, rd_kafka_op_t *rko);
+void rd_kafka_cgrp_terminate(rd_kafka_cgrp_t *rkcg, rd_kafka_replyq_t replyq);
+
+
+rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_del(rd_kafka_cgrp_t *rkcg,
+ const char *pattern);
+rd_kafka_resp_err_t rd_kafka_cgrp_topic_pattern_add(rd_kafka_cgrp_t *rkcg,
+ const char *pattern);
+
+int rd_kafka_cgrp_topic_check(rd_kafka_cgrp_t *rkcg, const char *topic);
+
+void rd_kafka_cgrp_set_member_id(rd_kafka_cgrp_t *rkcg, const char *member_id);
+
+void rd_kafka_cgrp_set_join_state(rd_kafka_cgrp_t *rkcg, int join_state);
+
+rd_kafka_broker_t *rd_kafka_cgrp_get_coord(rd_kafka_cgrp_t *rkcg);
+void rd_kafka_cgrp_coord_query(rd_kafka_cgrp_t *rkcg, const char *reason);
+void rd_kafka_cgrp_coord_dead(rd_kafka_cgrp_t *rkcg,
+ rd_kafka_resp_err_t err,
+ const char *reason);
+void rd_kafka_cgrp_metadata_update_check(rd_kafka_cgrp_t *rkcg,
+ rd_bool_t do_join);
+#define rd_kafka_cgrp_get(rk) ((rk)->rk_cgrp)
+
+
+void rd_kafka_cgrp_assigned_offsets_commit(
+ rd_kafka_cgrp_t *rkcg,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_bool_t set_offsets,
+ const char *reason);
+
+void rd_kafka_cgrp_assignment_done(rd_kafka_cgrp_t *rkcg);
+
+rd_bool_t rd_kafka_cgrp_assignment_is_lost(rd_kafka_cgrp_t *rkcg);
+
+
+struct rd_kafka_consumer_group_metadata_s {
+ char *group_id;
+ int32_t generation_id;
+ char *member_id;
+ char *group_instance_id; /**< Optional (NULL) */
+};
+
+rd_kafka_consumer_group_metadata_t *rd_kafka_consumer_group_metadata_dup(
+ const rd_kafka_consumer_group_metadata_t *cgmetadata);
+
+static RD_UNUSED const char *
+rd_kafka_rebalance_protocol2str(rd_kafka_rebalance_protocol_t protocol) {
+ switch (protocol) {
+ case RD_KAFKA_REBALANCE_PROTOCOL_EAGER:
+ return "EAGER";
+ case RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE:
+ return "COOPERATIVE";
+ default:
+ return "NONE";
+ }
+}
+
+#endif /* _RDKAFKA_CGRP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c
new file mode 100644
index 000000000..e481f4dd8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.c
@@ -0,0 +1,4362 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2022 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rd.h"
+#include "rdfloat.h"
+
+#include <stdlib.h>
+#include <ctype.h>
+#include <stddef.h>
+
+#include "rdkafka_int.h"
+#include "rdkafka_feature.h"
+#include "rdkafka_interceptor.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_assignor.h"
+#include "rdkafka_sasl_oauthbearer.h"
+#if WITH_PLUGINS
+#include "rdkafka_plugin.h"
+#endif
+#include "rdunittest.h"
+
+#ifndef _WIN32
+#include <netinet/tcp.h>
+#else
+
+#ifndef WIN32_MEAN_AND_LEAN
+#define WIN32_MEAN_AND_LEAN
+#endif
+#include <windows.h>
+#endif
+
+struct rd_kafka_property {
+ rd_kafka_conf_scope_t scope;
+ const char *name;
+ enum { _RK_C_STR,
+ _RK_C_INT,
+ _RK_C_DBL, /* Double */
+ _RK_C_S2I, /* String to Integer mapping.
+ * Supports limited canonical str->int mappings
+ * using s2i[] */
+ _RK_C_S2F, /* CSV String to Integer flag mapping (OR:ed) */
+ _RK_C_BOOL,
+ _RK_C_PTR, /* Only settable through special set functions */
+ _RK_C_PATLIST, /* Pattern list */
+ _RK_C_KSTR, /* Kafka string */
+ _RK_C_ALIAS, /* Alias: points to other property through .sdef */
+ _RK_C_INTERNAL, /* Internal, don't expose to application */
+ _RK_C_INVALID, /* Invalid property, used to catch known
+ * but unsupported Java properties. */
+ } type;
+ int offset;
+ const char *desc;
+ int vmin;
+ int vmax;
+ int vdef; /* Default value (int) */
+ const char *sdef; /* Default value (string) */
+ void *pdef; /* Default value (pointer) */
+ double ddef; /* Default value (double) */
+ double dmin;
+ double dmax;
+ struct {
+ int val;
+ const char *str;
+ const char *unsupported; /**< Reason for value not being
+ * supported in this build. */
+ } s2i[20]; /* _RK_C_S2I and _RK_C_S2F */
+
+ const char *unsupported; /**< Reason for propery not being supported
+ * in this build.
+ * Will be included in the conf_set()
+ * error string. */
+
+ /* Value validator (STR) */
+ int (*validate)(const struct rd_kafka_property *prop,
+ const char *val,
+ int ival);
+
+ /* Configuration object constructors and destructor for use when
+ * the property value itself is not used, or needs extra care. */
+ void (*ctor)(int scope, void *pconf);
+ void (*dtor)(int scope, void *pconf);
+ void (*copy)(int scope,
+ void *pdst,
+ const void *psrc,
+ void *dstptr,
+ const void *srcptr,
+ size_t filter_cnt,
+ const char **filter);
+
+ rd_kafka_conf_res_t (*set)(int scope,
+ void *pconf,
+ const char *name,
+ const char *value,
+ void *dstptr,
+ rd_kafka_conf_set_mode_t set_mode,
+ char *errstr,
+ size_t errstr_size);
+};
+
+
+#define _RK(field) offsetof(rd_kafka_conf_t, field)
+#define _RKT(field) offsetof(rd_kafka_topic_conf_t, field)
+
+#if WITH_SSL
+#define _UNSUPPORTED_SSL .unsupported = NULL
+#else
+#define _UNSUPPORTED_SSL .unsupported = "OpenSSL not available at build time"
+#endif
+
+#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && defined(WITH_SSL) && \
+ !defined(LIBRESSL_VERSION_NUMBER)
+#define _UNSUPPORTED_OPENSSL_1_0_2 .unsupported = NULL
+#else
+#define _UNSUPPORTED_OPENSSL_1_0_2 \
+ .unsupported = "OpenSSL >= 1.0.2 not available at build time"
+#endif
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000 && defined(WITH_SSL) && \
+ !defined(LIBRESSL_VERSION_NUMBER)
+#define _UNSUPPORTED_OPENSSL_1_1_0 .unsupported = NULL
+#else
+#define _UNSUPPORTED_OPENSSL_1_1_0 \
+ .unsupported = "OpenSSL >= 1.1.0 not available at build time"
+#endif
+
+#if WITH_SSL_ENGINE
+#define _UNSUPPORTED_SSL_ENGINE .unsupported = NULL
+#else
+#define _UNSUPPORTED_SSL_ENGINE \
+ .unsupported = "OpenSSL >= 1.1.x not available at build time"
+#endif
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000 && defined(WITH_SSL)
+#define _UNSUPPORTED_SSL_3 .unsupported = NULL
+#else
+#define _UNSUPPORTED_SSL_3 \
+ .unsupported = "OpenSSL >= 3.0.0 not available at build time"
+#endif
+
+
+#if WITH_ZLIB
+#define _UNSUPPORTED_ZLIB .unsupported = NULL
+#else
+#define _UNSUPPORTED_ZLIB .unsupported = "zlib not available at build time"
+#endif
+
+#if WITH_SNAPPY
+#define _UNSUPPORTED_SNAPPY .unsupported = NULL
+#else
+#define _UNSUPPORTED_SNAPPY .unsupported = "snappy not enabled at build time"
+#endif
+
+#if WITH_ZSTD
+#define _UNSUPPORTED_ZSTD .unsupported = NULL
+#else
+#define _UNSUPPORTED_ZSTD .unsupported = "libzstd not available at build time"
+#endif
+
+#if WITH_CURL
+#define _UNSUPPORTED_HTTP .unsupported = NULL
+#else
+#define _UNSUPPORTED_HTTP .unsupported = "libcurl not available at build time"
+#endif
+
+#if WITH_OAUTHBEARER_OIDC
+#define _UNSUPPORTED_OIDC .unsupported = NULL
+#else
+#define _UNSUPPORTED_OIDC \
+ .unsupported = \
+ "OAuth/OIDC depends on libcurl and OpenSSL which were not " \
+ "available at build time"
+#endif
+
+#ifdef _WIN32
+#define _UNSUPPORTED_WIN32_GSSAPI \
+ .unsupported = \
+ "Kerberos keytabs are not supported on Windows, " \
+ "instead the logged on " \
+ "user's credentials are used through native SSPI"
+#else
+#define _UNSUPPORTED_WIN32_GSSAPI .unsupported = NULL
+#endif
+
+#if defined(_WIN32) || defined(WITH_SASL_CYRUS)
+#define _UNSUPPORTED_GSSAPI .unsupported = NULL
+#else
+#define _UNSUPPORTED_GSSAPI \
+ .unsupported = "cyrus-sasl/libsasl2 not available at build time"
+#endif
+
+#define _UNSUPPORTED_OAUTHBEARER _UNSUPPORTED_SSL
+
+
+static rd_kafka_conf_res_t
+rd_kafka_anyconf_get0(const void *conf,
+ const struct rd_kafka_property *prop,
+ char *dest,
+ size_t *dest_size);
+
+
+
+/**
+ * @returns a unique index for property \p prop, using the byte position
+ * of the field.
+ */
+static RD_INLINE int rd_kafka_prop2idx(const struct rd_kafka_property *prop) {
+ return prop->offset;
+}
+
+
+
+/**
+ * @brief Set the property as modified.
+ *
+ * We do this by mapping the property's conf struct field byte offset
+ * to a bit in a bit vector.
+ * If the bit is set the property has been modified, otherwise it is
+ * at its default unmodified value.
+ *
+ * \p is_modified 1: set as modified, 0: clear modified
+ */
+static void rd_kafka_anyconf_set_modified(void *conf,
+ const struct rd_kafka_property *prop,
+ int is_modified) {
+ int idx = rd_kafka_prop2idx(prop);
+ int bkt = idx / 64;
+ uint64_t bit = (uint64_t)1 << (idx % 64);
+ struct rd_kafka_anyconf_hdr *confhdr = conf;
+
+ rd_assert(idx < RD_KAFKA_CONF_PROPS_IDX_MAX &&
+ *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX");
+
+ if (is_modified)
+ confhdr->modified[bkt] |= bit;
+ else
+ confhdr->modified[bkt] &= ~bit;
+}
+
+/**
+ * @brief Clear is_modified for all properties.
+ * @warning Does NOT clear/reset the value.
+ */
+static void rd_kafka_anyconf_clear_all_is_modified(void *conf) {
+ struct rd_kafka_anyconf_hdr *confhdr = conf;
+
+ memset(confhdr, 0, sizeof(*confhdr));
+}
+
+
+/**
+ * @returns true of the property has been set/modified, else false.
+ */
+static rd_bool_t
+rd_kafka_anyconf_is_modified(const void *conf,
+ const struct rd_kafka_property *prop) {
+ int idx = rd_kafka_prop2idx(prop);
+ int bkt = idx / 64;
+ uint64_t bit = (uint64_t)1 << (idx % 64);
+ const struct rd_kafka_anyconf_hdr *confhdr = conf;
+
+ return !!(confhdr->modified[bkt] & bit);
+}
+
+/**
+ * @returns true if any property in \p conf has been set/modified.
+ */
+static rd_bool_t rd_kafka_anyconf_is_any_modified(const void *conf) {
+ const struct rd_kafka_anyconf_hdr *confhdr = conf;
+ int i;
+
+ for (i = 0; i < (int)RD_ARRAYSIZE(confhdr->modified); i++)
+ if (confhdr->modified[i])
+ return rd_true;
+
+ return rd_false;
+}
+
+
+
+/**
+ * @brief Validate \p broker.version.fallback property.
+ */
+static int
+rd_kafka_conf_validate_broker_version(const struct rd_kafka_property *prop,
+ const char *val,
+ int ival) {
+ struct rd_kafka_ApiVersion *apis;
+ size_t api_cnt;
+ return rd_kafka_get_legacy_ApiVersions(val, &apis, &api_cnt, NULL);
+}
+
+/**
+ * @brief Validate that string is a single item, without delimters (, space).
+ */
+static RD_UNUSED int
+rd_kafka_conf_validate_single(const struct rd_kafka_property *prop,
+ const char *val,
+ int ival) {
+ return !strchr(val, ',') && !strchr(val, ' ');
+}
+
+/**
+ * @brief Validate builtin partitioner string
+ */
+static RD_UNUSED int
+rd_kafka_conf_validate_partitioner(const struct rd_kafka_property *prop,
+ const char *val,
+ int ival) {
+ return !strcmp(val, "random") || !strcmp(val, "consistent") ||
+ !strcmp(val, "consistent_random") || !strcmp(val, "murmur2") ||
+ !strcmp(val, "murmur2_random") || !strcmp(val, "fnv1a") ||
+ !strcmp(val, "fnv1a_random");
+}
+
+
+/**
+ * librdkafka configuration property definitions.
+ */
+static const struct rd_kafka_property rd_kafka_properties[] = {
+ /* Global properties */
+ {_RK_GLOBAL, "builtin.features", _RK_C_S2F, _RK(builtin_features),
+ "Indicates the builtin features for this build of librdkafka. "
+ "An application can either query this value or attempt to set it "
+ "with its list of required features to check for library support.",
+ 0, 0x7fffffff, 0xffff,
+ .s2i = {{0x1, "gzip", _UNSUPPORTED_ZLIB},
+ {0x2, "snappy", _UNSUPPORTED_SNAPPY},
+ {0x4, "ssl", _UNSUPPORTED_SSL},
+ {0x8, "sasl"},
+ {0x10, "regex"},
+ {0x20, "lz4"},
+ {0x40, "sasl_gssapi", _UNSUPPORTED_GSSAPI},
+ {0x80, "sasl_plain"},
+ {0x100, "sasl_scram", _UNSUPPORTED_SSL},
+ {0x200, "plugins"
+#if !WITH_PLUGINS
+ ,
+ .unsupported = "libdl/dlopen(3) not available at "
+ "build time"
+#endif
+ },
+ {0x400, "zstd", _UNSUPPORTED_ZSTD},
+ {0x800, "sasl_oauthbearer", _UNSUPPORTED_SSL},
+ {0x1000, "http", _UNSUPPORTED_HTTP},
+ {0x2000, "oidc", _UNSUPPORTED_OIDC},
+ {0, NULL}}},
+ {_RK_GLOBAL, "client.id", _RK_C_STR, _RK(client_id_str),
+ "Client identifier.", .sdef = "rdkafka"},
+ {_RK_GLOBAL | _RK_HIDDEN, "client.software.name", _RK_C_STR, _RK(sw_name),
+ "Client software name as reported to broker version >= v2.4.0. "
+ "Broker-side character restrictions apply, as of broker version "
+ "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client "
+ "will replace any other character with `-` and strip leading and "
+ "trailing non-alphanumeric characters before tranmission to "
+ "the broker. "
+ "This property should only be set by high-level language "
+ "librdkafka client bindings.",
+ .sdef = "librdkafka"},
+ {
+ _RK_GLOBAL | _RK_HIDDEN,
+ "client.software.version",
+ _RK_C_STR,
+ _RK(sw_version),
+ "Client software version as reported to broker version >= v2.4.0. "
+ "Broker-side character restrictions apply, as of broker version "
+ "v2.4.0 the allowed characters are `a-zA-Z0-9.-`. The local client "
+ "will replace any other character with `-` and strip leading and "
+ "trailing non-alphanumeric characters before tranmission to "
+ "the broker. "
+ "This property should only be set by high-level language "
+ "librdkafka client bindings."
+ "If changing this property it is highly recommended to append the "
+ "librdkafka version.",
+ },
+ {_RK_GLOBAL | _RK_HIGH, "metadata.broker.list", _RK_C_STR, _RK(brokerlist),
+ "Initial list of brokers as a CSV list of broker host or host:port. "
+ "The application may also use `rd_kafka_brokers_add()` to add "
+ "brokers during runtime."},
+ {_RK_GLOBAL | _RK_HIGH, "bootstrap.servers", _RK_C_ALIAS, 0,
+ "See metadata.broker.list", .sdef = "metadata.broker.list"},
+ {_RK_GLOBAL | _RK_MED, "message.max.bytes", _RK_C_INT, _RK(max_msg_size),
+ "Maximum Kafka protocol request message size. "
+ "Due to differing framing overhead between protocol versions the "
+ "producer is unable to reliably enforce a strict max message limit "
+ "at produce time and may exceed the maximum size by one message in "
+ "protocol ProduceRequests, the broker will enforce the the topic's "
+ "`max.message.bytes` limit (see Apache Kafka documentation).",
+ 1000, 1000000000, 1000000},
+ {_RK_GLOBAL, "message.copy.max.bytes", _RK_C_INT, _RK(msg_copy_max_size),
+ "Maximum size for message to be copied to buffer. "
+ "Messages larger than this will be passed by reference (zero-copy) "
+ "at the expense of larger iovecs.",
+ 0, 1000000000, 0xffff},
+ {_RK_GLOBAL | _RK_MED, "receive.message.max.bytes", _RK_C_INT,
+ _RK(recv_max_msg_size),
+ "Maximum Kafka protocol response message size. "
+ "This serves as a safety precaution to avoid memory exhaustion in "
+ "case of protocol hickups. "
+ "This value must be at least `fetch.max.bytes` + 512 to allow "
+ "for protocol overhead; the value is adjusted automatically "
+ "unless the configuration property is explicitly set.",
+ 1000, INT_MAX, 100000000},
+ {_RK_GLOBAL, "max.in.flight.requests.per.connection", _RK_C_INT,
+ _RK(max_inflight),
+ "Maximum number of in-flight requests per broker connection. "
+ "This is a generic property applied to all broker communication, "
+ "however it is primarily relevant to produce requests. "
+ "In particular, note that other mechanisms limit the number "
+ "of outstanding consumer fetch request per broker to one.",
+ 1, 1000000, 1000000},
+ {_RK_GLOBAL, "max.in.flight", _RK_C_ALIAS,
+ .sdef = "max.in.flight.requests.per.connection"},
+ {_RK_GLOBAL | _RK_DEPRECATED | _RK_HIDDEN, "metadata.request.timeout.ms",
+ _RK_C_INT, _RK(metadata_request_timeout_ms), "Not used.", 10, 900 * 1000,
+ 10},
+ {_RK_GLOBAL, "topic.metadata.refresh.interval.ms", _RK_C_INT,
+ _RK(metadata_refresh_interval_ms),
+ "Period of time in milliseconds at which topic and broker "
+ "metadata is refreshed in order to proactively discover any new "
+ "brokers, topics, partitions or partition leader changes. "
+ "Use -1 to disable the intervalled refresh (not recommended). "
+ "If there are no locally referenced topics "
+ "(no topic objects created, no messages produced, "
+ "no subscription or no assignment) then only the broker list will "
+ "be refreshed every interval but no more often than every 10s.",
+ -1, 3600 * 1000, 5 * 60 * 1000},
+ {_RK_GLOBAL, "metadata.max.age.ms", _RK_C_INT, _RK(metadata_max_age_ms),
+ "Metadata cache max age. "
+ "Defaults to topic.metadata.refresh.interval.ms * 3",
+ 1, 24 * 3600 * 1000, 5 * 60 * 1000 * 3},
+ {_RK_GLOBAL, "topic.metadata.refresh.fast.interval.ms", _RK_C_INT,
+ _RK(metadata_refresh_fast_interval_ms),
+ "When a topic loses its leader a new metadata request will be "
+ "enqueued with this initial interval, exponentially increasing "
+ "until the topic metadata has been refreshed. "
+ "This is used to recover quickly from transitioning leader brokers.",
+ 1, 60 * 1000, 250},
+ {_RK_GLOBAL | _RK_DEPRECATED, "topic.metadata.refresh.fast.cnt", _RK_C_INT,
+ _RK(metadata_refresh_fast_cnt), "No longer used.", 0, 1000, 10},
+ {_RK_GLOBAL, "topic.metadata.refresh.sparse", _RK_C_BOOL,
+ _RK(metadata_refresh_sparse),
+ "Sparse metadata requests (consumes less network bandwidth)", 0, 1, 1},
+ {_RK_GLOBAL, "topic.metadata.propagation.max.ms", _RK_C_INT,
+ _RK(metadata_propagation_max_ms),
+ "Apache Kafka topic creation is asynchronous and it takes some "
+ "time for a new topic to propagate throughout the cluster to all "
+ "brokers. "
+ "If a client requests topic metadata after manual topic creation but "
+ "before the topic has been fully propagated to the broker the "
+ "client is requesting metadata from, the topic will seem to be "
+ "non-existent and the client will mark the topic as such, "
+ "failing queued produced messages with `ERR__UNKNOWN_TOPIC`. "
+ "This setting delays marking a topic as non-existent until the "
+ "configured propagation max time has passed. "
+ "The maximum propagation time is calculated from the time the "
+ "topic is first referenced in the client, e.g., on produce().",
+ 0, 60 * 60 * 1000, 30 * 1000},
+ {_RK_GLOBAL, "topic.blacklist", _RK_C_PATLIST, _RK(topic_blacklist),
+ "Topic blacklist, a comma-separated list of regular expressions "
+ "for matching topic names that should be ignored in "
+ "broker metadata information as if the topics did not exist."},
+ {_RK_GLOBAL | _RK_MED, "debug", _RK_C_S2F, _RK(debug),
+ "A comma-separated list of debug contexts to enable. "
+ "Detailed Producer debugging: broker,topic,msg. "
+ "Consumer: consumer,cgrp,topic,fetch",
+ .s2i = {{RD_KAFKA_DBG_GENERIC, "generic"},
+ {RD_KAFKA_DBG_BROKER, "broker"},
+ {RD_KAFKA_DBG_TOPIC, "topic"},
+ {RD_KAFKA_DBG_METADATA, "metadata"},
+ {RD_KAFKA_DBG_FEATURE, "feature"},
+ {RD_KAFKA_DBG_QUEUE, "queue"},
+ {RD_KAFKA_DBG_MSG, "msg"},
+ {RD_KAFKA_DBG_PROTOCOL, "protocol"},
+ {RD_KAFKA_DBG_CGRP, "cgrp"},
+ {RD_KAFKA_DBG_SECURITY, "security"},
+ {RD_KAFKA_DBG_FETCH, "fetch"},
+ {RD_KAFKA_DBG_INTERCEPTOR, "interceptor"},
+ {RD_KAFKA_DBG_PLUGIN, "plugin"},
+ {RD_KAFKA_DBG_CONSUMER, "consumer"},
+ {RD_KAFKA_DBG_ADMIN, "admin"},
+ {RD_KAFKA_DBG_EOS, "eos"},
+ {RD_KAFKA_DBG_MOCK, "mock"},
+ {RD_KAFKA_DBG_ASSIGNOR, "assignor"},
+ {RD_KAFKA_DBG_CONF, "conf"},
+ {RD_KAFKA_DBG_ALL, "all"}}},
+ {_RK_GLOBAL, "socket.timeout.ms", _RK_C_INT, _RK(socket_timeout_ms),
+ "Default timeout for network requests. "
+ "Producer: ProduceRequests will use the lesser value of "
+ "`socket.timeout.ms` and remaining `message.timeout.ms` for the "
+ "first message in the batch. "
+ "Consumer: FetchRequests will use "
+ "`fetch.wait.max.ms` + `socket.timeout.ms`. "
+ "Admin: Admin requests will use `socket.timeout.ms` or explicitly "
+ "set `rd_kafka_AdminOptions_set_operation_timeout()` value.",
+ 10, 300 * 1000, 60 * 1000},
+ {_RK_GLOBAL | _RK_DEPRECATED, "socket.blocking.max.ms", _RK_C_INT,
+ _RK(socket_blocking_max_ms), "No longer used.", 1, 60 * 1000, 1000},
+ {_RK_GLOBAL, "socket.send.buffer.bytes", _RK_C_INT, _RK(socket_sndbuf_size),
+ "Broker socket send buffer size. System default is used if 0.", 0,
+ 100000000, 0},
+ {_RK_GLOBAL, "socket.receive.buffer.bytes", _RK_C_INT,
+ _RK(socket_rcvbuf_size),
+ "Broker socket receive buffer size. System default is used if 0.", 0,
+ 100000000, 0},
+ {_RK_GLOBAL, "socket.keepalive.enable", _RK_C_BOOL, _RK(socket_keepalive),
+ "Enable TCP keep-alives (SO_KEEPALIVE) on broker sockets", 0, 1, 0
+#ifndef SO_KEEPALIVE
+ ,
+ .unsupported = "SO_KEEPALIVE not available at build time"
+#endif
+ },
+ {_RK_GLOBAL, "socket.nagle.disable", _RK_C_BOOL, _RK(socket_nagle_disable),
+ "Disable the Nagle algorithm (TCP_NODELAY) on broker sockets.", 0, 1, 0
+#ifndef TCP_NODELAY
+ ,
+ .unsupported = "TCP_NODELAY not available at build time"
+#endif
+ },
+ {_RK_GLOBAL, "socket.max.fails", _RK_C_INT, _RK(socket_max_fails),
+ "Disconnect from broker when this number of send failures "
+ "(e.g., timed out requests) is reached. Disable with 0. "
+ "WARNING: It is highly recommended to leave this setting at "
+ "its default value of 1 to avoid the client and broker to "
+ "become desynchronized in case of request timeouts. "
+ "NOTE: The connection is automatically re-established.",
+ 0, 1000000, 1},
+ {_RK_GLOBAL, "broker.address.ttl", _RK_C_INT, _RK(broker_addr_ttl),
+ "How long to cache the broker address resolving "
+ "results (milliseconds).",
+ 0, 86400 * 1000, 1 * 1000},
+ {_RK_GLOBAL, "broker.address.family", _RK_C_S2I, _RK(broker_addr_family),
+ "Allowed broker IP address families: any, v4, v6", .vdef = AF_UNSPEC,
+ .s2i =
+ {
+ {AF_UNSPEC, "any"},
+ {AF_INET, "v4"},
+ {AF_INET6, "v6"},
+ }},
+ {_RK_GLOBAL | _RK_MED, "socket.connection.setup.timeout.ms", _RK_C_INT,
+ _RK(socket_connection_setup_timeout_ms),
+ "Maximum time allowed for broker connection setup "
+ "(TCP connection setup as well SSL and SASL handshake). "
+ "If the connection to the broker is not fully functional after this "
+ "the connection will be closed and retried.",
+ 1000, INT_MAX, 30 * 1000 /* 30s */},
+ {_RK_GLOBAL | _RK_MED, "connections.max.idle.ms", _RK_C_INT,
+ _RK(connections_max_idle_ms),
+ "Close broker connections after the specified time of "
+ "inactivity. "
+ "Disable with 0. "
+ "If this property is left at its default value some heuristics are "
+ "performed to determine a suitable default value, this is currently "
+ "limited to identifying brokers on Azure "
+ "(see librdkafka issue #3109 for more info).",
+ 0, INT_MAX, 0},
+ {_RK_GLOBAL | _RK_MED | _RK_HIDDEN, "enable.sparse.connections", _RK_C_BOOL,
+ _RK(sparse_connections),
+ "When enabled the client will only connect to brokers "
+ "it needs to communicate with. When disabled the client "
+ "will maintain connections to all brokers in the cluster.",
+ 0, 1, 1},
+ {_RK_GLOBAL | _RK_DEPRECATED, "reconnect.backoff.jitter.ms", _RK_C_INT,
+ _RK(reconnect_jitter_ms),
+ "No longer used. See `reconnect.backoff.ms` and "
+ "`reconnect.backoff.max.ms`.",
+ 0, 60 * 60 * 1000, 0},
+ {_RK_GLOBAL | _RK_MED, "reconnect.backoff.ms", _RK_C_INT,
+ _RK(reconnect_backoff_ms),
+ "The initial time to wait before reconnecting to a broker "
+ "after the connection has been closed. "
+ "The time is increased exponentially until "
+ "`reconnect.backoff.max.ms` is reached. "
+ "-25% to +50% jitter is applied to each reconnect backoff. "
+ "A value of 0 disables the backoff and reconnects immediately.",
+ 0, 60 * 60 * 1000, 100},
+ {_RK_GLOBAL | _RK_MED, "reconnect.backoff.max.ms", _RK_C_INT,
+ _RK(reconnect_backoff_max_ms),
+ "The maximum time to wait before reconnecting to a broker "
+ "after the connection has been closed.",
+ 0, 60 * 60 * 1000, 10 * 1000},
+ {_RK_GLOBAL | _RK_HIGH, "statistics.interval.ms", _RK_C_INT,
+ _RK(stats_interval_ms),
+ "librdkafka statistics emit interval. The application also needs to "
+ "register a stats callback using `rd_kafka_conf_set_stats_cb()`. "
+ "The granularity is 1000ms. A value of 0 disables statistics.",
+ 0, 86400 * 1000, 0},
+ {_RK_GLOBAL, "enabled_events", _RK_C_INT, _RK(enabled_events),
+ "See `rd_kafka_conf_set_events()`", 0, 0x7fffffff, 0},
+ {_RK_GLOBAL, "error_cb", _RK_C_PTR, _RK(error_cb),
+ "Error callback (set with rd_kafka_conf_set_error_cb())"},
+ {_RK_GLOBAL, "throttle_cb", _RK_C_PTR, _RK(throttle_cb),
+ "Throttle callback (set with rd_kafka_conf_set_throttle_cb())"},
+ {_RK_GLOBAL, "stats_cb", _RK_C_PTR, _RK(stats_cb),
+ "Statistics callback (set with rd_kafka_conf_set_stats_cb())"},
+ {_RK_GLOBAL, "log_cb", _RK_C_PTR, _RK(log_cb),
+ "Log callback (set with rd_kafka_conf_set_log_cb())",
+ .pdef = rd_kafka_log_print},
+ {_RK_GLOBAL, "log_level", _RK_C_INT, _RK(log_level),
+ "Logging level (syslog(3) levels)", 0, 7, 6},
+ {_RK_GLOBAL, "log.queue", _RK_C_BOOL, _RK(log_queue),
+ "Disable spontaneous log_cb from internal librdkafka "
+ "threads, instead enqueue log messages on queue set with "
+ "`rd_kafka_set_log_queue()` and serve log callbacks or "
+ "events through the standard poll APIs. "
+ "**NOTE**: Log messages will linger in a temporary queue "
+ "until the log queue has been set.",
+ 0, 1, 0},
+ {_RK_GLOBAL, "log.thread.name", _RK_C_BOOL, _RK(log_thread_name),
+ "Print internal thread name in log messages "
+ "(useful for debugging librdkafka internals)",
+ 0, 1, 1},
+ {_RK_GLOBAL, "enable.random.seed", _RK_C_BOOL, _RK(enable_random_seed),
+ "If enabled librdkafka will initialize the PRNG "
+ "with srand(current_time.milliseconds) on the first invocation of "
+ "rd_kafka_new() (required only if rand_r() is not available on your "
+ "platform). "
+ "If disabled the application must call srand() prior to calling "
+ "rd_kafka_new().",
+ 0, 1, 1},
+ {_RK_GLOBAL, "log.connection.close", _RK_C_BOOL, _RK(log_connection_close),
+ "Log broker disconnects. "
+ "It might be useful to turn this off when interacting with "
+ "0.9 brokers with an aggressive `connections.max.idle.ms` value.",
+ 0, 1, 1},
+ {_RK_GLOBAL, "background_event_cb", _RK_C_PTR, _RK(background_event_cb),
+ "Background queue event callback "
+ "(set with rd_kafka_conf_set_background_event_cb())"},
+ {_RK_GLOBAL, "socket_cb", _RK_C_PTR, _RK(socket_cb),
+ "Socket creation callback to provide race-free CLOEXEC",
+ .pdef =
+#ifdef __linux__
+ rd_kafka_socket_cb_linux
+#else
+ rd_kafka_socket_cb_generic
+#endif
+ },
+ {
+ _RK_GLOBAL,
+ "connect_cb",
+ _RK_C_PTR,
+ _RK(connect_cb),
+ "Socket connect callback",
+ },
+ {
+ _RK_GLOBAL,
+ "closesocket_cb",
+ _RK_C_PTR,
+ _RK(closesocket_cb),
+ "Socket close callback",
+ },
+ {_RK_GLOBAL, "open_cb", _RK_C_PTR, _RK(open_cb),
+ "File open callback to provide race-free CLOEXEC",
+ .pdef =
+#ifdef __linux__
+ rd_kafka_open_cb_linux
+#else
+ rd_kafka_open_cb_generic
+#endif
+ },
+ {_RK_GLOBAL, "resolve_cb", _RK_C_PTR, _RK(resolve_cb),
+ "Address resolution callback (set with rd_kafka_conf_set_resolve_cb())."},
+ {_RK_GLOBAL, "opaque", _RK_C_PTR, _RK(opaque),
+ "Application opaque (set with rd_kafka_conf_set_opaque())"},
+ {_RK_GLOBAL, "default_topic_conf", _RK_C_PTR, _RK(topic_conf),
+ "Default topic configuration for automatically subscribed topics"},
+ {_RK_GLOBAL, "internal.termination.signal", _RK_C_INT, _RK(term_sig),
+ "Signal that librdkafka will use to quickly terminate on "
+ "rd_kafka_destroy(). If this signal is not set then there will be a "
+ "delay before rd_kafka_wait_destroyed() returns true "
+ "as internal threads are timing out their system calls. "
+ "If this signal is set however the delay will be minimal. "
+ "The application should mask this signal as an internal "
+ "signal handler is installed.",
+ 0, 128, 0},
+ {_RK_GLOBAL | _RK_HIGH, "api.version.request", _RK_C_BOOL,
+ _RK(api_version_request),
+ "Request broker's supported API versions to adjust functionality to "
+ "available protocol features. If set to false, or the "
+ "ApiVersionRequest fails, the fallback version "
+ "`broker.version.fallback` will be used. "
+ "**NOTE**: Depends on broker version >=0.10.0. If the request is not "
+ "supported by (an older) broker the `broker.version.fallback` fallback is "
+ "used.",
+ 0, 1, 1},
+ {_RK_GLOBAL, "api.version.request.timeout.ms", _RK_C_INT,
+ _RK(api_version_request_timeout_ms),
+ "Timeout for broker API version requests.", 1, 5 * 60 * 1000, 10 * 1000},
+ {_RK_GLOBAL | _RK_MED, "api.version.fallback.ms", _RK_C_INT,
+ _RK(api_version_fallback_ms),
+ "Dictates how long the `broker.version.fallback` fallback is used "
+ "in the case the ApiVersionRequest fails. "
+ "**NOTE**: The ApiVersionRequest is only issued when a new connection "
+ "to the broker is made (such as after an upgrade).",
+ 0, 86400 * 7 * 1000, 0},
+
+ {_RK_GLOBAL | _RK_MED, "broker.version.fallback", _RK_C_STR,
+ _RK(broker_version_fallback),
+ "Older broker versions (before 0.10.0) provide no way for a client to "
+ "query "
+ "for supported protocol features "
+ "(ApiVersionRequest, see `api.version.request`) making it impossible "
+ "for the client to know what features it may use. "
+ "As a workaround a user may set this property to the expected broker "
+ "version and the client will automatically adjust its feature set "
+ "accordingly if the ApiVersionRequest fails (or is disabled). "
+ "The fallback broker version will be used for `api.version.fallback.ms`. "
+ "Valid values are: 0.9.0, 0.8.2, 0.8.1, 0.8.0. "
+ "Any other value >= 0.10, such as 0.10.2.1, "
+ "enables ApiVersionRequests.",
+ .sdef = "0.10.0", .validate = rd_kafka_conf_validate_broker_version},
+ {_RK_GLOBAL, "allow.auto.create.topics", _RK_C_BOOL,
+ _RK(allow_auto_create_topics),
+ "Allow automatic topic creation on the broker when subscribing to "
+ "or assigning non-existent topics. "
+ "The broker must also be configured with "
+ "`auto.create.topics.enable=true` for this configuration to "
+ "take effect. "
+ "Note: the default value (true) for the producer is "
+ "different from the default value (false) for the consumer. "
+ "Further, the consumer default value is different from the Java "
+ "consumer (true), and this property is not supported by the Java "
+ "producer. Requires broker version >= 0.11.0.0, for older broker "
+ "versions only the broker configuration applies.",
+ 0, 1, 0},
+
+ /* Security related global properties */
+ {_RK_GLOBAL | _RK_HIGH, "security.protocol", _RK_C_S2I,
+ _RK(security_protocol), "Protocol used to communicate with brokers.",
+ .vdef = RD_KAFKA_PROTO_PLAINTEXT,
+ .s2i = {{RD_KAFKA_PROTO_PLAINTEXT, "plaintext"},
+ {RD_KAFKA_PROTO_SSL, "ssl", _UNSUPPORTED_SSL},
+ {RD_KAFKA_PROTO_SASL_PLAINTEXT, "sasl_plaintext"},
+ {RD_KAFKA_PROTO_SASL_SSL, "sasl_ssl", _UNSUPPORTED_SSL},
+ {0, NULL}}},
+
+ {_RK_GLOBAL, "ssl.cipher.suites", _RK_C_STR, _RK(ssl.cipher_suites),
+ "A cipher suite is a named combination of authentication, "
+ "encryption, MAC and key exchange algorithm used to negotiate the "
+ "security settings for a network connection using TLS or SSL network "
+ "protocol. See manual page for `ciphers(1)` and "
+ "`SSL_CTX_set_cipher_list(3).",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl.curves.list", _RK_C_STR, _RK(ssl.curves_list),
+ "The supported-curves extension in the TLS ClientHello message specifies "
+ "the curves (standard/named, or 'explicit' GF(2^k) or GF(p)) the client "
+ "is willing to have the server use. See manual page for "
+ "`SSL_CTX_set1_curves_list(3)`. OpenSSL >= 1.0.2 required.",
+ _UNSUPPORTED_OPENSSL_1_0_2},
+ {_RK_GLOBAL, "ssl.sigalgs.list", _RK_C_STR, _RK(ssl.sigalgs_list),
+ "The client uses the TLS ClientHello signature_algorithms extension "
+ "to indicate to the server which signature/hash algorithm pairs "
+ "may be used in digital signatures. See manual page for "
+ "`SSL_CTX_set1_sigalgs_list(3)`. OpenSSL >= 1.0.2 required.",
+ _UNSUPPORTED_OPENSSL_1_0_2},
+ {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.location", _RK_C_STR,
+ _RK(ssl.key_location),
+ "Path to client's private key (PEM) used for authentication.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.password", _RK_C_STR,
+ _RK(ssl.key_password),
+ "Private key passphrase (for use with `ssl.key.location` "
+ "and `set_ssl_cert()`)",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL | _RK_SENSITIVE, "ssl.key.pem", _RK_C_STR, _RK(ssl.key_pem),
+ "Client's private key string (PEM format) used for authentication.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL | _RK_SENSITIVE, "ssl_key", _RK_C_INTERNAL, _RK(ssl.key),
+ "Client's private key as set by rd_kafka_conf_set_ssl_cert()",
+ .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy,
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl.certificate.location", _RK_C_STR, _RK(ssl.cert_location),
+ "Path to client's public key (PEM) used for authentication.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl.certificate.pem", _RK_C_STR, _RK(ssl.cert_pem),
+ "Client's public key string (PEM format) used for authentication.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl_certificate", _RK_C_INTERNAL, _RK(ssl.key),
+ "Client's public key as set by rd_kafka_conf_set_ssl_cert()",
+ .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy,
+ _UNSUPPORTED_SSL},
+
+ {_RK_GLOBAL, "ssl.ca.location", _RK_C_STR, _RK(ssl.ca_location),
+ "File or directory path to CA certificate(s) for verifying "
+ "the broker's key. "
+ "Defaults: "
+ "On Windows the system's CA certificates are automatically looked "
+ "up in the Windows Root certificate store. "
+ "On Mac OSX this configuration defaults to `probe`. "
+ "It is recommended to install openssl using Homebrew, "
+ "to provide CA certificates. "
+ "On Linux install the distribution's ca-certificates package. "
+ "If OpenSSL is statically linked or `ssl.ca.location` is set to "
+ "`probe` a list of standard paths will be probed and the first one "
+ "found will be used as the default CA certificate location path. "
+ "If OpenSSL is dynamically linked the OpenSSL library's default "
+ "path will be used (see `OPENSSLDIR` in `openssl version -a`).",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL | _RK_SENSITIVE, "ssl.ca.pem", _RK_C_STR, _RK(ssl.ca_pem),
+ "CA certificate string (PEM format) for verifying the broker's key.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl_ca", _RK_C_INTERNAL, _RK(ssl.ca),
+ "CA certificate as set by rd_kafka_conf_set_ssl_cert()",
+ .dtor = rd_kafka_conf_cert_dtor, .copy = rd_kafka_conf_cert_copy,
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl.ca.certificate.stores", _RK_C_STR,
+ _RK(ssl.ca_cert_stores),
+ "Comma-separated list of Windows Certificate stores to load "
+ "CA certificates from. Certificates will be loaded in the same "
+ "order as stores are specified. If no certificates can be loaded "
+ "from any of the specified stores an error is logged and the "
+ "OpenSSL library's default CA location is used instead. "
+ "Store names are typically one or more of: MY, Root, Trust, CA.",
+ .sdef = "Root",
+#if !defined(_WIN32)
+ .unsupported = "configuration only valid on Windows"
+#endif
+ },
+
+ {_RK_GLOBAL, "ssl.crl.location", _RK_C_STR, _RK(ssl.crl_location),
+ "Path to CRL for verifying broker's certificate validity.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl.keystore.location", _RK_C_STR, _RK(ssl.keystore_location),
+ "Path to client's keystore (PKCS#12) used for authentication.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL | _RK_SENSITIVE, "ssl.keystore.password", _RK_C_STR,
+ _RK(ssl.keystore_password), "Client's keystore (PKCS#12) password.",
+ _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl.providers", _RK_C_STR, _RK(ssl.providers),
+ "Comma-separated list of OpenSSL 3.0.x implementation providers. "
+ "E.g., \"default,legacy\".",
+ _UNSUPPORTED_SSL_3},
+ {_RK_GLOBAL | _RK_DEPRECATED, "ssl.engine.location", _RK_C_STR,
+ _RK(ssl.engine_location),
+ "Path to OpenSSL engine library. OpenSSL >= 1.1.x required. "
+ "DEPRECATED: OpenSSL engine support is deprecated and should be "
+ "replaced by OpenSSL 3 providers.",
+ _UNSUPPORTED_SSL_ENGINE},
+ {_RK_GLOBAL, "ssl.engine.id", _RK_C_STR, _RK(ssl.engine_id),
+ "OpenSSL engine id is the name used for loading engine.",
+ .sdef = "dynamic", _UNSUPPORTED_SSL_ENGINE},
+ {_RK_GLOBAL, "ssl_engine_callback_data", _RK_C_PTR,
+ _RK(ssl.engine_callback_data),
+ "OpenSSL engine callback data (set "
+ "with rd_kafka_conf_set_engine_callback_data()).",
+ _UNSUPPORTED_SSL_ENGINE},
+ {_RK_GLOBAL, "enable.ssl.certificate.verification", _RK_C_BOOL,
+ _RK(ssl.enable_verify),
+ "Enable OpenSSL's builtin broker (server) certificate verification. "
+ "This verification can be extended by the application by "
+ "implementing a certificate_verify_cb.",
+ 0, 1, 1, _UNSUPPORTED_SSL},
+ {_RK_GLOBAL, "ssl.endpoint.identification.algorithm", _RK_C_S2I,
+ _RK(ssl.endpoint_identification),
+ "Endpoint identification algorithm to validate broker "
+ "hostname using broker certificate. "
+ "https - Server (broker) hostname verification as "
+ "specified in RFC2818. "
+ "none - No endpoint verification. "
+ "OpenSSL >= 1.0.2 required.",
+ .vdef = RD_KAFKA_SSL_ENDPOINT_ID_HTTPS,
+ .s2i = {{RD_KAFKA_SSL_ENDPOINT_ID_NONE, "none"},
+ {RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, "https"}},
+ _UNSUPPORTED_OPENSSL_1_0_2},
+ {_RK_GLOBAL, "ssl.certificate.verify_cb", _RK_C_PTR,
+ _RK(ssl.cert_verify_cb),
+ "Callback to verify the broker certificate chain.", _UNSUPPORTED_SSL},
+
+ /* Point user in the right direction if they try to apply
+ * Java client SSL / JAAS properties. */
+ {_RK_GLOBAL, "ssl.truststore.location", _RK_C_INVALID, _RK(dummy),
+ "Java TrustStores are not supported, use `ssl.ca.location` "
+ "and a certificate file instead. "
+ "See "
+ "https://github.com/edenhill/librdkafka/wiki/Using-SSL-with-librdkafka "
+ "for more information."},
+ {_RK_GLOBAL, "sasl.jaas.config", _RK_C_INVALID, _RK(dummy),
+ "Java JAAS configuration is not supported, see "
+ "https://github.com/edenhill/librdkafka/wiki/Using-SASL-with-librdkafka "
+ "for more information."},
+
+ {_RK_GLOBAL | _RK_HIGH, "sasl.mechanisms", _RK_C_STR, _RK(sasl.mechanisms),
+ "SASL mechanism to use for authentication. "
+ "Supported: GSSAPI, PLAIN, SCRAM-SHA-256, SCRAM-SHA-512, OAUTHBEARER. "
+ "**NOTE**: Despite the name only one mechanism must be configured.",
+ .sdef = "GSSAPI", .validate = rd_kafka_conf_validate_single},
+ {_RK_GLOBAL | _RK_HIGH, "sasl.mechanism", _RK_C_ALIAS,
+ .sdef = "sasl.mechanisms"},
+ {_RK_GLOBAL, "sasl.kerberos.service.name", _RK_C_STR,
+ _RK(sasl.service_name),
+ "Kerberos principal name that Kafka runs as, "
+ "not including /hostname@REALM",
+ .sdef = "kafka"},
+ {_RK_GLOBAL, "sasl.kerberos.principal", _RK_C_STR, _RK(sasl.principal),
+ "This client's Kerberos principal name. "
+ "(Not supported on Windows, will use the logon user's principal).",
+ .sdef = "kafkaclient"},
+ {_RK_GLOBAL, "sasl.kerberos.kinit.cmd", _RK_C_STR, _RK(sasl.kinit_cmd),
+ "Shell command to refresh or acquire the client's Kerberos ticket. "
+ "This command is executed on client creation and every "
+ "sasl.kerberos.min.time.before.relogin (0=disable). "
+ "%{config.prop.name} is replaced by corresponding config "
+ "object value.",
+ .sdef =
+ /* First attempt to refresh, else acquire. */
+ "kinit -R -t \"%{sasl.kerberos.keytab}\" "
+ "-k %{sasl.kerberos.principal} || "
+ "kinit -t \"%{sasl.kerberos.keytab}\" -k %{sasl.kerberos.principal}",
+ _UNSUPPORTED_WIN32_GSSAPI},
+ {_RK_GLOBAL, "sasl.kerberos.keytab", _RK_C_STR, _RK(sasl.keytab),
+ "Path to Kerberos keytab file. "
+ "This configuration property is only used as a variable in "
+ "`sasl.kerberos.kinit.cmd` as "
+ "` ... -t \"%{sasl.kerberos.keytab}\"`.",
+ _UNSUPPORTED_WIN32_GSSAPI},
+ {_RK_GLOBAL, "sasl.kerberos.min.time.before.relogin", _RK_C_INT,
+ _RK(sasl.relogin_min_time),
+ "Minimum time in milliseconds between key refresh attempts. "
+ "Disable automatic key refresh by setting this property to 0.",
+ 0, 86400 * 1000, 60 * 1000, _UNSUPPORTED_WIN32_GSSAPI},
+ {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.username", _RK_C_STR,
+ _RK(sasl.username),
+ "SASL username for use with the PLAIN and SASL-SCRAM-.. mechanisms"},
+ {_RK_GLOBAL | _RK_HIGH | _RK_SENSITIVE, "sasl.password", _RK_C_STR,
+ _RK(sasl.password),
+ "SASL password for use with the PLAIN and SASL-SCRAM-.. mechanism"},
+ {_RK_GLOBAL | _RK_SENSITIVE, "sasl.oauthbearer.config", _RK_C_STR,
+ _RK(sasl.oauthbearer_config),
+ "SASL/OAUTHBEARER configuration. The format is "
+ "implementation-dependent and must be parsed accordingly. The "
+ "default unsecured token implementation (see "
+ "https://tools.ietf.org/html/rfc7515#appendix-A.5) recognizes "
+ "space-separated name=value pairs with valid names including "
+ "principalClaimName, principal, scopeClaimName, scope, and "
+ "lifeSeconds. The default value for principalClaimName is \"sub\", "
+ "the default value for scopeClaimName is \"scope\", and the default "
+ "value for lifeSeconds is 3600. The scope value is CSV format with "
+ "the default value being no/empty scope. For example: "
+ "`principalClaimName=azp principal=admin scopeClaimName=roles "
+ "scope=role1,role2 lifeSeconds=600`. In addition, SASL extensions "
+ "can be communicated to the broker via "
+ "`extension_NAME=value`. For example: "
+ "`principal=admin extension_traceId=123`",
+ _UNSUPPORTED_OAUTHBEARER},
+ {_RK_GLOBAL, "enable.sasl.oauthbearer.unsecure.jwt", _RK_C_BOOL,
+ _RK(sasl.enable_oauthbearer_unsecure_jwt),
+ "Enable the builtin unsecure JWT OAUTHBEARER token handler "
+ "if no oauthbearer_refresh_cb has been set. "
+ "This builtin handler should only be used for development "
+ "or testing, and not in production.",
+ 0, 1, 0, _UNSUPPORTED_OAUTHBEARER},
+ {_RK_GLOBAL, "oauthbearer_token_refresh_cb", _RK_C_PTR,
+ _RK(sasl.oauthbearer.token_refresh_cb),
+ "SASL/OAUTHBEARER token refresh callback (set with "
+ "rd_kafka_conf_set_oauthbearer_token_refresh_cb(), triggered by "
+ "rd_kafka_poll(), et.al. "
+ "This callback will be triggered when it is time to refresh "
+ "the client's OAUTHBEARER token. "
+ "Also see `rd_kafka_conf_enable_sasl_queue()`.",
+ _UNSUPPORTED_OAUTHBEARER},
+ {
+ _RK_GLOBAL | _RK_HIDDEN,
+ "enable_sasl_queue",
+ _RK_C_BOOL,
+ _RK(sasl.enable_callback_queue),
+ "Enable the SASL callback queue "
+ "(set with rd_kafka_conf_enable_sasl_queue()).",
+ 0,
+ 1,
+ 0,
+ },
+ {_RK_GLOBAL, "sasl.oauthbearer.method", _RK_C_S2I,
+ _RK(sasl.oauthbearer.method),
+ "Set to \"default\" or \"oidc\" to control which login method "
+ "to be used. If set to \"oidc\", the following properties must also be "
+ "be specified: "
+ "`sasl.oauthbearer.client.id`, `sasl.oauthbearer.client.secret`, "
+ "and `sasl.oauthbearer.token.endpoint.url`.",
+ .vdef = RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT,
+ .s2i = {{RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT, "default"},
+ {RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC, "oidc"}},
+ _UNSUPPORTED_OIDC},
+ {_RK_GLOBAL, "sasl.oauthbearer.client.id", _RK_C_STR,
+ _RK(sasl.oauthbearer.client_id),
+ "Public identifier for the application. "
+ "Must be unique across all clients that the "
+ "authorization server handles. "
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
+ _UNSUPPORTED_OIDC},
+ {_RK_GLOBAL, "sasl.oauthbearer.client.secret", _RK_C_STR,
+ _RK(sasl.oauthbearer.client_secret),
+ "Client secret only known to the application and the "
+ "authorization server. This should be a sufficiently random string "
+ "that is not guessable. "
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
+ _UNSUPPORTED_OIDC},
+ {_RK_GLOBAL, "sasl.oauthbearer.scope", _RK_C_STR,
+ _RK(sasl.oauthbearer.scope),
+ "Client use this to specify the scope of the access request to the "
+ "broker. "
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
+ _UNSUPPORTED_OIDC},
+ {_RK_GLOBAL, "sasl.oauthbearer.extensions", _RK_C_STR,
+ _RK(sasl.oauthbearer.extensions_str),
+ "Allow additional information to be provided to the broker. "
+ "Comma-separated list of key=value pairs. "
+ "E.g., \"supportFeatureX=true,organizationId=sales-emea\"."
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
+ _UNSUPPORTED_OIDC},
+ {_RK_GLOBAL, "sasl.oauthbearer.token.endpoint.url", _RK_C_STR,
+ _RK(sasl.oauthbearer.token_endpoint_url),
+ "OAuth/OIDC issuer token endpoint HTTP(S) URI used to retrieve token. "
+ "Only used when `sasl.oauthbearer.method` is set to \"oidc\".",
+ _UNSUPPORTED_OIDC},
+
+ /* Plugins */
+ {_RK_GLOBAL, "plugin.library.paths", _RK_C_STR, _RK(plugin_paths),
+ "List of plugin libraries to load (; separated). "
+ "The library search path is platform dependent (see dlopen(3) for "
+ "Unix and LoadLibrary() for Windows). If no filename extension is "
+ "specified the platform-specific extension (such as .dll or .so) "
+ "will be appended automatically.",
+#if WITH_PLUGINS
+ .set = rd_kafka_plugins_conf_set
+#else
+ .unsupported = "libdl/dlopen(3) not available at build time"
+#endif
+ },
+
+ /* Interceptors are added through specific API and not exposed
+ * as configuration properties.
+ * The interceptor property must be defined after plugin.library.paths
+ * so that the plugin libraries are properly loaded before
+ * interceptors are configured when duplicating configuration objects.*/
+ {_RK_GLOBAL, "interceptors", _RK_C_INTERNAL, _RK(interceptors),
+ "Interceptors added through rd_kafka_conf_interceptor_add_..() "
+ "and any configuration handled by interceptors.",
+ .ctor = rd_kafka_conf_interceptor_ctor,
+ .dtor = rd_kafka_conf_interceptor_dtor,
+ .copy = rd_kafka_conf_interceptor_copy},
+
+ /* Test mocks. */
+ {_RK_GLOBAL | _RK_HIDDEN, "test.mock.num.brokers", _RK_C_INT,
+ _RK(mock.broker_cnt),
+ "Number of mock brokers to create. "
+ "This will automatically overwrite `bootstrap.servers` with the "
+ "mock broker list.",
+ 0, 10000, 0},
+ {_RK_GLOBAL | _RK_HIDDEN, "test.mock.broker.rtt", _RK_C_INT,
+ _RK(mock.broker_rtt), "Simulated mock broker latency in milliseconds.", 0,
+ 60 * 60 * 1000 /*1h*/, 0},
+
+ /* Unit test interfaces.
+ * These are not part of the public API and may change at any time.
+ * Only to be used by the librdkafka tests. */
+ {_RK_GLOBAL | _RK_HIDDEN, "ut_handle_ProduceResponse", _RK_C_PTR,
+ _RK(ut.handle_ProduceResponse),
+ "ProduceResponse handler: "
+ "rd_kafka_resp_err_t (*cb) (rd_kafka_t *rk, "
+ "int32_t brokerid, uint64_t msgid, rd_kafka_resp_err_t err)"},
+
+ /* Global consumer group properties */
+ {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "group.id", _RK_C_STR, _RK(group_id_str),
+ "Client group id string. All clients sharing the same group.id "
+ "belong to the same group."},
+ {_RK_GLOBAL | _RK_CGRP | _RK_MED, "group.instance.id", _RK_C_STR,
+ _RK(group_instance_id),
+ "Enable static group membership. "
+ "Static group members are able to leave and rejoin a group "
+ "within the configured `session.timeout.ms` without prompting a "
+ "group rebalance. This should be used in combination with a larger "
+ "`session.timeout.ms` to avoid group rebalances caused by transient "
+ "unavailability (e.g. process restarts). "
+ "Requires broker version >= 2.3.0."},
+ {_RK_GLOBAL | _RK_CGRP | _RK_MED, "partition.assignment.strategy",
+ _RK_C_STR, _RK(partition_assignment_strategy),
+ "The name of one or more partition assignment strategies. The "
+ "elected group leader will use a strategy supported by all "
+ "members of the group to assign partitions to group members. If "
+ "there is more than one eligible strategy, preference is "
+ "determined by the order of this list (strategies earlier in the "
+ "list have higher priority). "
+ "Cooperative and non-cooperative (eager) strategies must not be "
+ "mixed. "
+ "Available strategies: range, roundrobin, cooperative-sticky.",
+ .sdef = "range,roundrobin"},
+ {_RK_GLOBAL | _RK_CGRP | _RK_HIGH, "session.timeout.ms", _RK_C_INT,
+ _RK(group_session_timeout_ms),
+ "Client group session and failure detection timeout. "
+ "The consumer sends periodic heartbeats (heartbeat.interval.ms) "
+ "to indicate its liveness to the broker. If no hearts are "
+ "received by the broker for a group member within the "
+ "session timeout, the broker will remove the consumer from "
+ "the group and trigger a rebalance. "
+ "The allowed range is configured with the **broker** configuration "
+ "properties `group.min.session.timeout.ms` and "
+ "`group.max.session.timeout.ms`. "
+ "Also see `max.poll.interval.ms`.",
+ 1, 3600 * 1000, 45 * 1000},
+ {_RK_GLOBAL | _RK_CGRP, "heartbeat.interval.ms", _RK_C_INT,
+ _RK(group_heartbeat_intvl_ms),
+ "Group session keepalive heartbeat interval.", 1, 3600 * 1000, 3 * 1000},
+ {_RK_GLOBAL | _RK_CGRP, "group.protocol.type", _RK_C_KSTR,
+ _RK(group_protocol_type),
+ "Group protocol type. NOTE: Currently, the only supported group "
+ "protocol type is `consumer`.",
+ .sdef = "consumer"},
+ {_RK_GLOBAL | _RK_CGRP, "coordinator.query.interval.ms", _RK_C_INT,
+ _RK(coord_query_intvl_ms),
+ "How often to query for the current client group coordinator. "
+ "If the currently assigned coordinator is down the configured "
+ "query interval will be divided by ten to more quickly recover "
+ "in case of coordinator reassignment.",
+ 1, 3600 * 1000, 10 * 60 * 1000},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "max.poll.interval.ms", _RK_C_INT,
+ _RK(max_poll_interval_ms),
+ "Maximum allowed time between calls to consume messages "
+ "(e.g., rd_kafka_consumer_poll()) for high-level consumers. "
+ "If this interval is exceeded the consumer is considered failed "
+ "and the group will rebalance in order to reassign the "
+ "partitions to another consumer group member. "
+ "Warning: Offset commits may be not possible at this point. "
+ "Note: It is recommended to set `enable.auto.offset.store=false` "
+ "for long-time processing applications and then explicitly store "
+ "offsets (using offsets_store()) *after* message processing, to "
+ "make sure offsets are not auto-committed prior to processing "
+ "has finished. "
+ "The interval is checked two times per second. "
+ "See KIP-62 for more information.",
+ 1, 86400 * 1000, 300000},
+
+ /* Global consumer properties */
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.commit", _RK_C_BOOL,
+ _RK(enable_auto_commit),
+ "Automatically and periodically commit offsets in the background. "
+ "Note: setting this to false does not prevent the consumer from "
+ "fetching previously committed start offsets. To circumvent this "
+ "behaviour set specific start offsets per partition in the call "
+ "to assign().",
+ 0, 1, 1},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "auto.commit.interval.ms", _RK_C_INT,
+ _RK(auto_commit_interval_ms),
+ "The frequency in milliseconds that the consumer offsets "
+ "are committed (written) to offset storage. (0 = disable). "
+ "This setting is used by the high-level consumer.",
+ 0, 86400 * 1000, 5 * 1000},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "enable.auto.offset.store",
+ _RK_C_BOOL, _RK(enable_auto_offset_store),
+ "Automatically store offset of last message provided to "
+ "application. "
+ "The offset store is an in-memory store of the next offset to "
+ "(auto-)commit for each partition.",
+ 0, 1, 1},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.min.messages", _RK_C_INT,
+ _RK(queued_min_msgs),
+ "Minimum number of messages per topic+partition "
+ "librdkafka tries to maintain in the local consumer queue.",
+ 1, 10000000, 100000},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "queued.max.messages.kbytes",
+ _RK_C_INT, _RK(queued_max_msg_kbytes),
+ "Maximum number of kilobytes of queued pre-fetched messages "
+ "in the local consumer queue. "
+ "If using the high-level consumer this setting applies to the "
+ "single consumer queue, regardless of the number of partitions. "
+ "When using the legacy simple consumer or when separate "
+ "partition queues are used this setting applies per partition. "
+ "This value may be overshot by fetch.message.max.bytes. "
+ "This property has higher priority than queued.min.messages.",
+ 1, INT_MAX / 1024, 0x10000 /*64MB*/},
+ {_RK_GLOBAL | _RK_CONSUMER, "fetch.wait.max.ms", _RK_C_INT,
+ _RK(fetch_wait_max_ms),
+ "Maximum time the broker may wait to fill the Fetch response "
+ "with fetch.min.bytes of messages.",
+ 0, 300 * 1000, 500},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.message.max.bytes", _RK_C_INT,
+ _RK(fetch_msg_max_bytes),
+ "Initial maximum number of bytes per topic+partition to request when "
+ "fetching messages from the broker. "
+ "If the client encounters a message larger than this value "
+ "it will gradually try to increase it until the "
+ "entire message can be fetched.",
+ 1, 1000000000, 1024 * 1024},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "max.partition.fetch.bytes",
+ _RK_C_ALIAS, .sdef = "fetch.message.max.bytes"},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.max.bytes", _RK_C_INT,
+ _RK(fetch_max_bytes),
+ "Maximum amount of data the broker shall return for a Fetch request. "
+ "Messages are fetched in batches by the consumer and if the first "
+ "message batch in the first non-empty partition of the Fetch request "
+ "is larger than this value, then the message batch will still be "
+ "returned to ensure the consumer can make progress. "
+ "The maximum message batch size accepted by the broker is defined "
+ "via `message.max.bytes` (broker config) or "
+ "`max.message.bytes` (broker topic config). "
+ "`fetch.max.bytes` is automatically adjusted upwards to be "
+ "at least `message.max.bytes` (consumer config).",
+ 0, INT_MAX - 512, 50 * 1024 * 1024 /* 50MB */},
+ {_RK_GLOBAL | _RK_CONSUMER, "fetch.min.bytes", _RK_C_INT,
+ _RK(fetch_min_bytes),
+ "Minimum number of bytes the broker responds with. "
+ "If fetch.wait.max.ms expires the accumulated data will "
+ "be sent to the client regardless of this setting.",
+ 1, 100000000, 1},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "fetch.error.backoff.ms", _RK_C_INT,
+ _RK(fetch_error_backoff_ms),
+ "How long to postpone the next fetch request for a "
+ "topic+partition in case of a fetch error.",
+ 0, 300 * 1000, 500},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method",
+ _RK_C_S2I, _RK(offset_store_method),
+ "Offset commit store method: "
+ "'file' - DEPRECATED: local file store (offset.store.path, et.al), "
+ "'broker' - broker commit store "
+ "(requires Apache Kafka 0.8.2 or later on the broker).",
+ .vdef = RD_KAFKA_OFFSET_METHOD_BROKER,
+ .s2i = {{RD_KAFKA_OFFSET_METHOD_NONE, "none"},
+ {RD_KAFKA_OFFSET_METHOD_FILE, "file"},
+ {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_HIGH, "isolation.level", _RK_C_S2I,
+ _RK(isolation_level),
+ "Controls how to read messages written transactionally: "
+ "`read_committed` - only return transactional messages which have "
+ "been committed. `read_uncommitted` - return all messages, even "
+ "transactional messages which have been aborted.",
+ .vdef = RD_KAFKA_READ_COMMITTED,
+ .s2i = {{RD_KAFKA_READ_UNCOMMITTED, "read_uncommitted"},
+ {RD_KAFKA_READ_COMMITTED, "read_committed"}}},
+ {_RK_GLOBAL | _RK_CONSUMER, "consume_cb", _RK_C_PTR, _RK(consume_cb),
+ "Message consume callback (set with rd_kafka_conf_set_consume_cb())"},
+ {_RK_GLOBAL | _RK_CONSUMER, "rebalance_cb", _RK_C_PTR, _RK(rebalance_cb),
+ "Called after consumer group has been rebalanced "
+ "(set with rd_kafka_conf_set_rebalance_cb())"},
+ {_RK_GLOBAL | _RK_CONSUMER, "offset_commit_cb", _RK_C_PTR,
+ _RK(offset_commit_cb),
+ "Offset commit result propagation callback. "
+ "(set with rd_kafka_conf_set_offset_commit_cb())"},
+ {_RK_GLOBAL | _RK_CONSUMER, "enable.partition.eof", _RK_C_BOOL,
+ _RK(enable_partition_eof),
+ "Emit RD_KAFKA_RESP_ERR__PARTITION_EOF event whenever the "
+ "consumer reaches the end of a partition.",
+ 0, 1, 0},
+ {_RK_GLOBAL | _RK_CONSUMER | _RK_MED, "check.crcs", _RK_C_BOOL,
+ _RK(check_crcs),
+ "Verify CRC32 of consumed messages, ensuring no on-the-wire or "
+ "on-disk corruption to the messages occurred. This check comes "
+ "at slightly increased CPU usage.",
+ 0, 1, 0},
+ {_RK_GLOBAL, "client.rack", _RK_C_KSTR, _RK(client_rack),
+ "A rack identifier for this client. This can be any string value "
+ "which indicates where this client is physically located. It "
+ "corresponds with the broker config `broker.rack`.",
+ .sdef = ""},
+
+ /* Global producer properties */
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "transactional.id", _RK_C_STR,
+ _RK(eos.transactional_id),
+ "Enables the transactional producer. "
+ "The transactional.id is used to identify the same transactional "
+ "producer instance across process restarts. "
+ "It allows the producer to guarantee that transactions corresponding "
+ "to earlier instances of the same producer have been finalized "
+ "prior to starting any new transactions, and that any "
+ "zombie instances are fenced off. "
+ "If no transactional.id is provided, then the producer is limited "
+ "to idempotent delivery (if enable.idempotence is set). "
+ "Requires broker version >= 0.11.0."},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "transaction.timeout.ms", _RK_C_INT,
+ _RK(eos.transaction_timeout_ms),
+ "The maximum amount of time in milliseconds that the transaction "
+ "coordinator will wait for a transaction status update from the "
+ "producer before proactively aborting the ongoing transaction. "
+ "If this value is larger than the `transaction.max.timeout.ms` "
+ "setting in the broker, the init_transactions() call will fail with "
+ "ERR_INVALID_TRANSACTION_TIMEOUT. "
+ "The transaction timeout automatically adjusts "
+ "`message.timeout.ms` and `socket.timeout.ms`, unless explicitly "
+ "configured in which case they must not exceed the "
+ "transaction timeout (`socket.timeout.ms` must be at least 100ms "
+ "lower than `transaction.timeout.ms`). "
+ "This is also the default timeout value if no timeout (-1) is "
+ "supplied to the transactional API methods.",
+ 1000, INT_MAX, 60000},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "enable.idempotence", _RK_C_BOOL,
+ _RK(eos.idempotence),
+ "When set to `true`, the producer will ensure that messages are "
+ "successfully produced exactly once and in the original produce "
+ "order. "
+ "The following configuration properties are adjusted automatically "
+ "(if not modified by the user) when idempotence is enabled: "
+ "`max.in.flight.requests.per.connection=" RD_KAFKA_IDEMP_MAX_INFLIGHT_STR
+ "` (must be less than or "
+ "equal to " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "), `retries=INT32_MAX` "
+ "(must be greater than 0), `acks=all`, `queuing.strategy=fifo`. "
+ "Producer instantation will fail if user-supplied configuration "
+ "is incompatible.",
+ 0, 1, 0},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_EXPERIMENTAL, "enable.gapless.guarantee",
+ _RK_C_BOOL, _RK(eos.gapless),
+ "When set to `true`, any error that could result in a gap "
+ "in the produced message series when a batch of messages fails, "
+ "will raise a fatal error (ERR__GAPLESS_GUARANTEE) and stop "
+ "the producer. "
+ "Messages failing due to `message.timeout.ms` are not covered "
+ "by this guarantee. "
+ "Requires `enable.idempotence=true`.",
+ 0, 1, 0},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.messages",
+ _RK_C_INT, _RK(queue_buffering_max_msgs),
+ "Maximum number of messages allowed on the producer queue. "
+ "This queue is shared by all topics and partitions. A value of 0 disables "
+ "this limit.",
+ 0, INT_MAX, 100000},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.kbytes",
+ _RK_C_INT, _RK(queue_buffering_max_kbytes),
+ "Maximum total message size sum allowed on the producer queue. "
+ "This queue is shared by all topics and partitions. "
+ "This property has higher priority than queue.buffering.max.messages.",
+ 1, INT_MAX, 0x100000 /*1GB*/},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "queue.buffering.max.ms", _RK_C_DBL,
+ _RK(buffering_max_ms_dbl),
+ "Delay in milliseconds to wait for messages in the producer queue "
+ "to accumulate before constructing message batches (MessageSets) to "
+ "transmit to brokers. "
+ "A higher value allows larger and more effective "
+ "(less overhead, improved compression) batches of messages to "
+ "accumulate at the expense of increased message delivery latency.",
+ .dmin = 0, .dmax = 900.0 * 1000.0, .ddef = 5.0},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "linger.ms", _RK_C_ALIAS,
+ .sdef = "queue.buffering.max.ms"},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_HIGH, "message.send.max.retries",
+ _RK_C_INT, _RK(max_retries),
+ "How many times to retry sending a failing Message. "
+ "**Note:** retrying may cause reordering unless "
+ "`enable.idempotence` is set to true.",
+ 0, INT32_MAX, INT32_MAX},
+ {_RK_GLOBAL | _RK_PRODUCER, "retries", _RK_C_ALIAS,
+ .sdef = "message.send.max.retries"},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "retry.backoff.ms", _RK_C_INT,
+ _RK(retry_backoff_ms),
+ "The backoff time in milliseconds before retrying a protocol request.", 1,
+ 300 * 1000, 100},
+
+ {_RK_GLOBAL | _RK_PRODUCER, "queue.buffering.backpressure.threshold",
+ _RK_C_INT, _RK(queue_backpressure_thres),
+ "The threshold of outstanding not yet transmitted broker requests "
+ "needed to backpressure the producer's message accumulator. "
+ "If the number of not yet transmitted requests equals or exceeds "
+ "this number, produce request creation that would have otherwise "
+ "been triggered (for example, in accordance with linger.ms) will be "
+ "delayed. A lower number yields larger and more effective batches. "
+ "A higher value can improve latency when using compression on slow "
+ "machines.",
+ 1, 1000000, 1},
+
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.codec", _RK_C_S2I,
+ _RK(compression_codec),
+ "compression codec to use for compressing message sets. "
+ "This is the default value for all topics, may be overridden by "
+ "the topic configuration property `compression.codec`. ",
+ .vdef = RD_KAFKA_COMPRESSION_NONE,
+ .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"},
+ {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB},
+ {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY},
+ {RD_KAFKA_COMPRESSION_LZ4, "lz4"},
+ {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD},
+ {0}}},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "compression.type", _RK_C_ALIAS,
+ .sdef = "compression.codec"},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.num.messages", _RK_C_INT,
+ _RK(batch_num_messages),
+ "Maximum number of messages batched in one MessageSet. "
+ "The total MessageSet size is also limited by batch.size and "
+ "message.max.bytes.",
+ 1, 1000000, 10000},
+ {_RK_GLOBAL | _RK_PRODUCER | _RK_MED, "batch.size", _RK_C_INT,
+ _RK(batch_size),
+ "Maximum size (in bytes) of all messages batched in one MessageSet, "
+ "including protocol framing overhead. "
+ "This limit is applied after the first message has been added "
+ "to the batch, regardless of the first message's size, this is to "
+ "ensure that messages that exceed batch.size are produced. "
+ "The total MessageSet size is also limited by batch.num.messages and "
+ "message.max.bytes.",
+ 1, INT_MAX, 1000000},
+ {_RK_GLOBAL | _RK_PRODUCER, "delivery.report.only.error", _RK_C_BOOL,
+ _RK(dr_err_only), "Only provide delivery reports for failed messages.", 0,
+ 1, 0},
+ {_RK_GLOBAL | _RK_PRODUCER, "dr_cb", _RK_C_PTR, _RK(dr_cb),
+ "Delivery report callback (set with rd_kafka_conf_set_dr_cb())"},
+ {_RK_GLOBAL | _RK_PRODUCER, "dr_msg_cb", _RK_C_PTR, _RK(dr_msg_cb),
+ "Delivery report callback (set with rd_kafka_conf_set_dr_msg_cb())"},
+ {_RK_GLOBAL | _RK_PRODUCER, "sticky.partitioning.linger.ms", _RK_C_INT,
+ _RK(sticky_partition_linger_ms),
+ "Delay in milliseconds to wait to assign new sticky partitions for "
+ "each topic. "
+ "By default, set to double the time of linger.ms. To disable sticky "
+ "behavior, set to 0. "
+ "This behavior affects messages with the key NULL in all cases, and "
+ "messages with key lengths of zero when the consistent_random "
+ "partitioner is in use. "
+ "These messages would otherwise be assigned randomly. "
+ "A higher value allows for more effective batching of these "
+ "messages.",
+ 0, 900000, 10},
+
+
+ /*
+ * Topic properties
+ */
+
+ /* Topic producer properties */
+ {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "request.required.acks", _RK_C_INT,
+ _RKT(required_acks),
+ "This field indicates the number of acknowledgements the leader "
+ "broker must receive from ISR brokers before responding to the "
+ "request: "
+ "*0*=Broker does not send any response/ack to client, "
+ "*-1* or *all*=Broker will block until message is committed by all "
+ "in sync replicas (ISRs). If there are less than "
+ "`min.insync.replicas` (broker configuration) in the ISR set the "
+ "produce request will fail.",
+ -1, 1000, -1,
+ .s2i =
+ {
+ {-1, "all"},
+ }},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "acks", _RK_C_ALIAS,
+ .sdef = "request.required.acks"},
+
+ {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "request.timeout.ms", _RK_C_INT,
+ _RKT(request_timeout_ms),
+ "The ack timeout of the producer request in milliseconds. "
+ "This value is only enforced by the broker and relies "
+ "on `request.required.acks` being != 0.",
+ 1, 900 * 1000, 30 * 1000},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "message.timeout.ms", _RK_C_INT,
+ _RKT(message_timeout_ms),
+ "Local message timeout. "
+ "This value is only enforced locally and limits the time a "
+ "produced message waits for successful delivery. "
+ "A time of 0 is infinite. "
+ "This is the maximum time librdkafka may use to deliver a message "
+ "(including retries). Delivery error occurs when either the retry "
+ "count or the message timeout are exceeded. "
+ "The message timeout is automatically adjusted to "
+ "`transaction.timeout.ms` if `transactional.id` is configured.",
+ 0, INT32_MAX, 300 * 1000},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "delivery.timeout.ms", _RK_C_ALIAS,
+ .sdef = "message.timeout.ms"},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL,
+ "queuing.strategy", _RK_C_S2I, _RKT(queuing_strategy),
+ "Producer queuing strategy. FIFO preserves produce ordering, "
+ "while LIFO prioritizes new messages.",
+ .vdef = 0,
+ .s2i = {{RD_KAFKA_QUEUE_FIFO, "fifo"}, {RD_KAFKA_QUEUE_LIFO, "lifo"}}},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED, "produce.offset.report",
+ _RK_C_BOOL, _RKT(produce_offset_report), "No longer used.", 0, 1, 0},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "partitioner", _RK_C_STR,
+ _RKT(partitioner_str),
+ "Partitioner: "
+ "`random` - random distribution, "
+ "`consistent` - CRC32 hash of key (Empty and NULL keys are mapped to "
+ "single partition), "
+ "`consistent_random` - CRC32 hash of key (Empty and NULL keys are "
+ "randomly partitioned), "
+ "`murmur2` - Java Producer compatible Murmur2 hash of key (NULL keys are "
+ "mapped to single partition), "
+ "`murmur2_random` - Java Producer compatible Murmur2 hash of key "
+ "(NULL keys are randomly partitioned. This is functionally equivalent "
+ "to the default partitioner in the Java Producer.), "
+ "`fnv1a` - FNV-1a hash of key (NULL keys are mapped to single partition), "
+ "`fnv1a_random` - FNV-1a hash of key (NULL keys are randomly "
+ "partitioned).",
+ .sdef = "consistent_random",
+ .validate = rd_kafka_conf_validate_partitioner},
+ {_RK_TOPIC | _RK_PRODUCER, "partitioner_cb", _RK_C_PTR, _RKT(partitioner),
+ "Custom partitioner callback "
+ "(set with rd_kafka_topic_conf_set_partitioner_cb())"},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_DEPRECATED | _RK_EXPERIMENTAL,
+ "msg_order_cmp", _RK_C_PTR, _RKT(msg_order_cmp),
+ "Message queue ordering comparator "
+ "(set with rd_kafka_topic_conf_set_msg_order_cmp()). "
+ "Also see `queuing.strategy`."},
+ {_RK_TOPIC, "opaque", _RK_C_PTR, _RKT(opaque),
+ "Application opaque (set with rd_kafka_topic_conf_set_opaque())"},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.codec", _RK_C_S2I,
+ _RKT(compression_codec),
+ "Compression codec to use for compressing message sets. "
+ "inherit = inherit global compression.codec configuration.",
+ .vdef = RD_KAFKA_COMPRESSION_INHERIT,
+ .s2i = {{RD_KAFKA_COMPRESSION_NONE, "none"},
+ {RD_KAFKA_COMPRESSION_GZIP, "gzip", _UNSUPPORTED_ZLIB},
+ {RD_KAFKA_COMPRESSION_SNAPPY, "snappy", _UNSUPPORTED_SNAPPY},
+ {RD_KAFKA_COMPRESSION_LZ4, "lz4"},
+ {RD_KAFKA_COMPRESSION_ZSTD, "zstd", _UNSUPPORTED_ZSTD},
+ {RD_KAFKA_COMPRESSION_INHERIT, "inherit"},
+ {0}}},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_HIGH, "compression.type", _RK_C_ALIAS,
+ .sdef = "compression.codec"},
+ {_RK_TOPIC | _RK_PRODUCER | _RK_MED, "compression.level", _RK_C_INT,
+ _RKT(compression_level),
+ "Compression level parameter for algorithm selected by configuration "
+ "property `compression.codec`. Higher values will result in better "
+ "compression at the cost of more CPU usage. Usable range is "
+ "algorithm-dependent: [0-9] for gzip; [0-12] for lz4; only 0 for snappy; "
+ "-1 = codec-dependent default compression level.",
+ RD_KAFKA_COMPLEVEL_MIN, RD_KAFKA_COMPLEVEL_MAX,
+ RD_KAFKA_COMPLEVEL_DEFAULT},
+
+
+ /* Topic consumer properties */
+ {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "auto.commit.enable",
+ _RK_C_BOOL, _RKT(auto_commit),
+ "[**LEGACY PROPERTY:** This property is used by the simple legacy "
+ "consumer only. When using the high-level KafkaConsumer, the global "
+ "`enable.auto.commit` property must be used instead]. "
+ "If true, periodically commit offset of the last message handed "
+ "to the application. This committed offset will be used when the "
+ "process restarts to pick up where it left off. "
+ "If false, the application will have to call "
+ "`rd_kafka_offset_store()` to store an offset (optional). "
+ "Offsets will be written to broker or local file according to "
+ "offset.store.method.",
+ 0, 1, 1},
+ {_RK_TOPIC | _RK_CONSUMER, "enable.auto.commit", _RK_C_ALIAS,
+ .sdef = "auto.commit.enable"},
+ {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.commit.interval.ms", _RK_C_INT,
+ _RKT(auto_commit_interval_ms),
+ "[**LEGACY PROPERTY:** This setting is used by the simple legacy "
+ "consumer only. When using the high-level KafkaConsumer, the "
+ "global `auto.commit.interval.ms` property must be used instead]. "
+ "The frequency in milliseconds that the consumer offsets "
+ "are committed (written) to offset storage.",
+ 10, 86400 * 1000, 60 * 1000},
+ {_RK_TOPIC | _RK_CONSUMER | _RK_HIGH, "auto.offset.reset", _RK_C_S2I,
+ _RKT(auto_offset_reset),
+ "Action to take when there is no initial offset in offset store "
+ "or the desired offset is out of range: "
+ "'smallest','earliest' - automatically reset the offset to the smallest "
+ "offset, "
+ "'largest','latest' - automatically reset the offset to the largest "
+ "offset, "
+ "'error' - trigger an error (ERR__AUTO_OFFSET_RESET) which is "
+ "retrieved by consuming messages and checking 'message->err'.",
+ .vdef = RD_KAFKA_OFFSET_END,
+ .s2i =
+ {
+ {RD_KAFKA_OFFSET_BEGINNING, "smallest"},
+ {RD_KAFKA_OFFSET_BEGINNING, "earliest"},
+ {RD_KAFKA_OFFSET_BEGINNING, "beginning"},
+ {RD_KAFKA_OFFSET_END, "largest"},
+ {RD_KAFKA_OFFSET_END, "latest"},
+ {RD_KAFKA_OFFSET_END, "end"},
+ {RD_KAFKA_OFFSET_INVALID, "error"},
+ }},
+ {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.path", _RK_C_STR,
+ _RKT(offset_store_path),
+ "Path to local file for storing offsets. If the path is a directory "
+ "a filename will be automatically generated in that directory based "
+ "on the topic and partition. "
+ "File-based offset storage will be removed in a future version.",
+ .sdef = "."},
+
+ {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.sync.interval.ms",
+ _RK_C_INT, _RKT(offset_store_sync_interval_ms),
+ "fsync() interval for the offset file, in milliseconds. "
+ "Use -1 to disable syncing, and 0 for immediate sync after "
+ "each write. "
+ "File-based offset storage will be removed in a future version.",
+ -1, 86400 * 1000, -1},
+
+ {_RK_TOPIC | _RK_CONSUMER | _RK_DEPRECATED, "offset.store.method",
+ _RK_C_S2I, _RKT(offset_store_method),
+ "Offset commit store method: "
+ "'file' - DEPRECATED: local file store (offset.store.path, et.al), "
+ "'broker' - broker commit store "
+ "(requires \"group.id\" to be configured and "
+ "Apache Kafka 0.8.2 or later on the broker.).",
+ .vdef = RD_KAFKA_OFFSET_METHOD_BROKER,
+ .s2i = {{RD_KAFKA_OFFSET_METHOD_FILE, "file"},
+ {RD_KAFKA_OFFSET_METHOD_BROKER, "broker"}}},
+
+ {_RK_TOPIC | _RK_CONSUMER, "consume.callback.max.messages", _RK_C_INT,
+ _RKT(consume_callback_max_msgs),
+ "Maximum number of messages to dispatch in "
+ "one `rd_kafka_consume_callback*()` call (0 = unlimited)",
+ 0, 1000000, 0},
+
+ {0, /* End */}};
+
+/**
+ * @returns the property object for \p name in \p scope, or NULL if not found.
+ * @remark does not work with interceptor configs.
+ */
+const struct rd_kafka_property *rd_kafka_conf_prop_find(int scope,
+ const char *name) {
+ const struct rd_kafka_property *prop;
+
+restart:
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+
+ if (!(prop->scope & scope))
+ continue;
+
+ if (strcmp(prop->name, name))
+ continue;
+
+ if (prop->type == _RK_C_ALIAS) {
+ /* Caller supplied an alias, restart
+ * search for real name. */
+ name = prop->sdef;
+ goto restart;
+ }
+
+ return prop;
+ }
+
+ return NULL;
+}
+
+/**
+ * @returns rd_true if property has been set/modified, else rd_false.
+ *
+ * @warning Asserts if the property does not exist.
+ */
+rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf,
+ const char *name) {
+ const struct rd_kafka_property *prop;
+
+ if (!(prop = rd_kafka_conf_prop_find(_RK_GLOBAL, name)))
+ RD_BUG("Configuration property \"%s\" does not exist", name);
+
+ return rd_kafka_anyconf_is_modified(conf, prop);
+}
+
+
+/**
+ * @returns true if property has been set/modified, else 0.
+ *
+ * @warning Asserts if the property does not exist.
+ */
+static rd_bool_t
+rd_kafka_topic_conf_is_modified(const rd_kafka_topic_conf_t *conf,
+ const char *name) {
+ const struct rd_kafka_property *prop;
+
+ if (!(prop = rd_kafka_conf_prop_find(_RK_TOPIC, name)))
+ RD_BUG("Topic configuration property \"%s\" does not exist",
+ name);
+
+ return rd_kafka_anyconf_is_modified(conf, prop);
+}
+
+
+
+static rd_kafka_conf_res_t
+rd_kafka_anyconf_set_prop0(int scope,
+ void *conf,
+ const struct rd_kafka_property *prop,
+ const char *istr,
+ int ival,
+ rd_kafka_conf_set_mode_t set_mode,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_conf_res_t res;
+
+#define _RK_PTR(TYPE, BASE, OFFSET) (TYPE)(void *)(((char *)(BASE)) + (OFFSET))
+
+ /* Try interceptors first (only for GLOBAL config) */
+ if (scope & _RK_GLOBAL) {
+ if (prop->type == _RK_C_PTR || prop->type == _RK_C_INTERNAL)
+ res = RD_KAFKA_CONF_UNKNOWN;
+ else
+ res = rd_kafka_interceptors_on_conf_set(
+ conf, prop->name, istr, errstr, errstr_size);
+ if (res != RD_KAFKA_CONF_UNKNOWN)
+ return res;
+ }
+
+
+ if (prop->set) {
+ /* Custom setter */
+
+ res = prop->set(scope, conf, prop->name, istr,
+ _RK_PTR(void *, conf, prop->offset), set_mode,
+ errstr, errstr_size);
+
+ if (res != RD_KAFKA_CONF_OK)
+ return res;
+
+ /* FALLTHRU so that property value is set. */
+ }
+
+ switch (prop->type) {
+ case _RK_C_STR: {
+ char **str = _RK_PTR(char **, conf, prop->offset);
+ if (*str)
+ rd_free(*str);
+ if (istr)
+ *str = rd_strdup(istr);
+ else
+ *str = prop->sdef ? rd_strdup(prop->sdef) : NULL;
+ break;
+ }
+ case _RK_C_KSTR: {
+ rd_kafkap_str_t **kstr =
+ _RK_PTR(rd_kafkap_str_t **, conf, prop->offset);
+ if (*kstr)
+ rd_kafkap_str_destroy(*kstr);
+ if (istr)
+ *kstr = rd_kafkap_str_new(istr, -1);
+ else
+ *kstr = prop->sdef ? rd_kafkap_str_new(prop->sdef, -1)
+ : NULL;
+ break;
+ }
+ case _RK_C_PTR:
+ *_RK_PTR(const void **, conf, prop->offset) = istr;
+ break;
+ case _RK_C_BOOL:
+ case _RK_C_INT:
+ case _RK_C_S2I:
+ case _RK_C_S2F: {
+ int *val = _RK_PTR(int *, conf, prop->offset);
+
+ if (prop->type == _RK_C_S2F) {
+ switch (set_mode) {
+ case _RK_CONF_PROP_SET_REPLACE:
+ *val = ival;
+ break;
+ case _RK_CONF_PROP_SET_ADD:
+ *val |= ival;
+ break;
+ case _RK_CONF_PROP_SET_DEL:
+ *val &= ~ival;
+ break;
+ }
+ } else {
+ /* Single assignment */
+ *val = ival;
+ }
+ break;
+ }
+ case _RK_C_DBL: {
+ double *val = _RK_PTR(double *, conf, prop->offset);
+ if (istr) {
+ char *endptr;
+ double new_val = strtod(istr, &endptr);
+ /* This is verified in set_prop() */
+ rd_assert(endptr != istr);
+ *val = new_val;
+ } else
+ *val = prop->ddef;
+ break;
+ }
+
+ case _RK_C_PATLIST: {
+ /* Split comma-separated list into individual regex expressions
+ * that are verified and then append to the provided list. */
+ rd_kafka_pattern_list_t **plist;
+
+ plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset);
+
+ if (*plist)
+ rd_kafka_pattern_list_destroy(*plist);
+
+ if (istr) {
+ if (!(*plist = rd_kafka_pattern_list_new(
+ istr, errstr, (int)errstr_size)))
+ return RD_KAFKA_CONF_INVALID;
+ } else
+ *plist = NULL;
+
+ break;
+ }
+
+ case _RK_C_INTERNAL:
+ /* Probably handled by setter */
+ break;
+
+ default:
+ rd_kafka_assert(NULL, !*"unknown conf type");
+ }
+
+
+ rd_kafka_anyconf_set_modified(conf, prop, 1 /*modified*/);
+ return RD_KAFKA_CONF_OK;
+}
+
+
+/**
+ * @brief Find s2i (string-to-int mapping) entry and return its array index,
+ * or -1 on miss.
+ */
+static int rd_kafka_conf_s2i_find(const struct rd_kafka_property *prop,
+ const char *value) {
+ int j;
+
+ for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
+ if (prop->s2i[j].str && !rd_strcasecmp(prop->s2i[j].str, value))
+ return j;
+ }
+
+ return -1;
+}
+
+
+/**
+ * @brief Set configuration property.
+ *
+ * @param allow_specific Allow rd_kafka_*conf_set_..() to be set,
+ * such as rd_kafka_conf_set_log_cb().
+ * Should not be allowed from the conf_set() string interface.
+ */
+static rd_kafka_conf_res_t
+rd_kafka_anyconf_set_prop(int scope,
+ void *conf,
+ const struct rd_kafka_property *prop,
+ const char *value,
+ int allow_specific,
+ char *errstr,
+ size_t errstr_size) {
+ int ival;
+
+ if (prop->unsupported) {
+ rd_snprintf(errstr, errstr_size,
+ "Configuration property \"%s\" not supported "
+ "in this build: %s",
+ prop->name, prop->unsupported);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ switch (prop->type) {
+ case _RK_C_STR:
+ /* Left-trim string(likes) */
+ if (value)
+ while (isspace((int)*value))
+ value++;
+
+ /* FALLTHRU */
+ case _RK_C_KSTR:
+ if (prop->s2i[0].str) {
+ int match;
+
+ if (!value || (match = rd_kafka_conf_s2i_find(
+ prop, value)) == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value for "
+ "configuration property \"%s\": "
+ "%s",
+ prop->name, value);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ /* Replace value string with canonical form */
+ value = prop->s2i[match].str;
+ }
+ /* FALLTHRU */
+ case _RK_C_PATLIST:
+ if (prop->validate &&
+ (!value || !prop->validate(prop, value, -1))) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value for "
+ "configuration property \"%s\": %s",
+ prop->name, value);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0,
+ _RK_CONF_PROP_SET_REPLACE,
+ errstr, errstr_size);
+
+ case _RK_C_PTR:
+ /* Allow hidden internal unit test properties to
+ * be set from generic conf_set() interface. */
+ if (!allow_specific && !(prop->scope & _RK_HIDDEN)) {
+ rd_snprintf(errstr, errstr_size,
+ "Property \"%s\" must be set through "
+ "dedicated .._set_..() function",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+ return rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0,
+ _RK_CONF_PROP_SET_REPLACE,
+ errstr, errstr_size);
+
+ case _RK_C_BOOL:
+ if (!value) {
+ rd_snprintf(errstr, errstr_size,
+ "Bool configuration property \"%s\" cannot "
+ "be set to empty value",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+
+ if (!rd_strcasecmp(value, "true") ||
+ !rd_strcasecmp(value, "t") || !strcmp(value, "1"))
+ ival = 1;
+ else if (!rd_strcasecmp(value, "false") ||
+ !rd_strcasecmp(value, "f") || !strcmp(value, "0"))
+ ival = 0;
+ else {
+ rd_snprintf(errstr, errstr_size,
+ "Expected bool value for \"%s\": "
+ "true or false",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival,
+ _RK_CONF_PROP_SET_REPLACE, errstr,
+ errstr_size);
+ return RD_KAFKA_CONF_OK;
+
+ case _RK_C_INT: {
+ const char *end;
+
+ if (!value) {
+ rd_snprintf(errstr, errstr_size,
+ "Integer configuration "
+ "property \"%s\" cannot be set "
+ "to empty value",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ ival = (int)strtol(value, (char **)&end, 0);
+ if (end == value) {
+ /* Non numeric, check s2i for string mapping */
+ int match = rd_kafka_conf_s2i_find(prop, value);
+
+ if (match == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value for "
+ "configuration property \"%s\"",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ if (prop->s2i[match].unsupported) {
+ rd_snprintf(errstr, errstr_size,
+ "Unsupported value \"%s\" for "
+ "configuration property \"%s\": %s",
+ value, prop->name,
+ prop->s2i[match].unsupported);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ ival = prop->s2i[match].val;
+ }
+
+ if (ival < prop->vmin || ival > prop->vmax) {
+ rd_snprintf(errstr, errstr_size,
+ "Configuration property \"%s\" value "
+ "%i is outside allowed range %i..%i\n",
+ prop->name, ival, prop->vmin, prop->vmax);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ rd_kafka_anyconf_set_prop0(scope, conf, prop, value, ival,
+ _RK_CONF_PROP_SET_REPLACE, errstr,
+ errstr_size);
+ return RD_KAFKA_CONF_OK;
+ }
+
+ case _RK_C_DBL: {
+ const char *end;
+ double dval;
+
+ if (!value) {
+ rd_snprintf(errstr, errstr_size,
+ "Float configuration "
+ "property \"%s\" cannot be set "
+ "to empty value",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ dval = strtod(value, (char **)&end);
+ if (end == value) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value for "
+ "configuration property \"%s\"",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ if (dval < prop->dmin || dval > prop->dmax) {
+ rd_snprintf(errstr, errstr_size,
+ "Configuration property \"%s\" value "
+ "%g is outside allowed range %g..%g\n",
+ prop->name, dval, prop->dmin, prop->dmax);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ rd_kafka_anyconf_set_prop0(scope, conf, prop, value, 0,
+ _RK_CONF_PROP_SET_REPLACE, errstr,
+ errstr_size);
+ return RD_KAFKA_CONF_OK;
+ }
+
+ case _RK_C_S2I:
+ case _RK_C_S2F: {
+ int j;
+ const char *next;
+
+ if (!value) {
+ rd_snprintf(errstr, errstr_size,
+ "Configuration "
+ "property \"%s\" cannot be set "
+ "to empty value",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ next = value;
+ while (next && *next) {
+ const char *s, *t;
+ rd_kafka_conf_set_mode_t set_mode =
+ _RK_CONF_PROP_SET_ADD; /* S2F */
+
+ s = next;
+
+ if (prop->type == _RK_C_S2F && (t = strchr(s, ','))) {
+ /* CSV flag field */
+ next = t + 1;
+ } else {
+ /* Single string */
+ t = s + strlen(s);
+ next = NULL;
+ }
+
+
+ /* Left trim */
+ while (s < t && isspace((int)*s))
+ s++;
+
+ /* Right trim */
+ while (t > s && isspace((int)*t))
+ t--;
+
+ /* S2F: +/- prefix */
+ if (prop->type == _RK_C_S2F) {
+ if (*s == '+') {
+ set_mode = _RK_CONF_PROP_SET_ADD;
+ s++;
+ } else if (*s == '-') {
+ set_mode = _RK_CONF_PROP_SET_DEL;
+ s++;
+ }
+ }
+
+ /* Empty string? */
+ if (s == t)
+ continue;
+
+ /* Match string to s2i table entry */
+ for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
+ int new_val;
+
+ if (!prop->s2i[j].str)
+ continue;
+
+ if (strlen(prop->s2i[j].str) ==
+ (size_t)(t - s) &&
+ !rd_strncasecmp(prop->s2i[j].str, s,
+ (int)(t - s)))
+ new_val = prop->s2i[j].val;
+ else
+ continue;
+
+ if (prop->s2i[j].unsupported) {
+ rd_snprintf(
+ errstr, errstr_size,
+ "Unsupported value \"%.*s\" "
+ "for configuration property "
+ "\"%s\": %s",
+ (int)(t - s), s, prop->name,
+ prop->s2i[j].unsupported);
+ return RD_KAFKA_CONF_INVALID;
+ }
+
+ rd_kafka_anyconf_set_prop0(
+ scope, conf, prop, value, new_val, set_mode,
+ errstr, errstr_size);
+
+ if (prop->type == _RK_C_S2F) {
+ /* Flags: OR it in: do next */
+ break;
+ } else {
+ /* Single assignment */
+ return RD_KAFKA_CONF_OK;
+ }
+ }
+
+ /* S2F: Good match: continue with next */
+ if (j < (int)RD_ARRAYSIZE(prop->s2i))
+ continue;
+
+ /* No match */
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value \"%.*s\" for "
+ "configuration property \"%s\"",
+ (int)(t - s), s, prop->name);
+ return RD_KAFKA_CONF_INVALID;
+ }
+ return RD_KAFKA_CONF_OK;
+ }
+
+ case _RK_C_INTERNAL:
+ rd_snprintf(errstr, errstr_size,
+ "Internal property \"%s\" not settable",
+ prop->name);
+ return RD_KAFKA_CONF_INVALID;
+
+ case _RK_C_INVALID:
+ rd_snprintf(errstr, errstr_size, "%s", prop->desc);
+ return RD_KAFKA_CONF_INVALID;
+
+ default:
+ rd_kafka_assert(NULL, !*"unknown conf type");
+ }
+
+ /* not reachable */
+ return RD_KAFKA_CONF_INVALID;
+}
+
+
+
+static void rd_kafka_defaultconf_set(int scope, void *conf) {
+ const struct rd_kafka_property *prop;
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+ if (!(prop->scope & scope))
+ continue;
+
+ if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
+ continue;
+
+ if (prop->ctor)
+ prop->ctor(scope, conf);
+
+ if (prop->sdef || prop->vdef || prop->pdef ||
+ !rd_dbl_zero(prop->ddef))
+ rd_kafka_anyconf_set_prop0(
+ scope, conf, prop,
+ prop->sdef ? prop->sdef : prop->pdef, prop->vdef,
+ _RK_CONF_PROP_SET_REPLACE, NULL, 0);
+ }
+}
+
+rd_kafka_conf_t *rd_kafka_conf_new(void) {
+ rd_kafka_conf_t *conf = rd_calloc(1, sizeof(*conf));
+ rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*conf) &&
+ *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX");
+ rd_kafka_defaultconf_set(_RK_GLOBAL, conf);
+ rd_kafka_anyconf_clear_all_is_modified(conf);
+ return conf;
+}
+
+rd_kafka_topic_conf_t *rd_kafka_topic_conf_new(void) {
+ rd_kafka_topic_conf_t *tconf = rd_calloc(1, sizeof(*tconf));
+ rd_assert(RD_KAFKA_CONF_PROPS_IDX_MAX > sizeof(*tconf) &&
+ *"Increase RD_KAFKA_CONF_PROPS_IDX_MAX");
+ rd_kafka_defaultconf_set(_RK_TOPIC, tconf);
+ rd_kafka_anyconf_clear_all_is_modified(tconf);
+ return tconf;
+}
+
+
+static int rd_kafka_anyconf_set(int scope,
+ void *conf,
+ const char *name,
+ const char *value,
+ char *errstr,
+ size_t errstr_size) {
+ char estmp[1];
+ const struct rd_kafka_property *prop;
+ rd_kafka_conf_res_t res;
+
+ if (!errstr) {
+ errstr = estmp;
+ errstr_size = 0;
+ }
+
+ if (value && !*value)
+ value = NULL;
+
+ /* Try interceptors first (only for GLOBAL config for now) */
+ if (scope & _RK_GLOBAL) {
+ res = rd_kafka_interceptors_on_conf_set(
+ (rd_kafka_conf_t *)conf, name, value, errstr, errstr_size);
+ /* Handled (successfully or not) by interceptor. */
+ if (res != RD_KAFKA_CONF_UNKNOWN)
+ return res;
+ }
+
+ /* Then global config */
+
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+
+ if (!(prop->scope & scope))
+ continue;
+
+ if (strcmp(prop->name, name))
+ continue;
+
+ if (prop->type == _RK_C_ALIAS)
+ return rd_kafka_anyconf_set(scope, conf, prop->sdef,
+ value, errstr, errstr_size);
+
+ return rd_kafka_anyconf_set_prop(scope, conf, prop, value,
+ 0 /*don't allow specifics*/,
+ errstr, errstr_size);
+ }
+
+ rd_snprintf(errstr, errstr_size,
+ "No such configuration property: \"%s\"", name);
+
+ return RD_KAFKA_CONF_UNKNOWN;
+}
+
+
+/**
+ * @brief Set a rd_kafka_*_conf_set_...() specific property, such as
+ * rd_kafka_conf_set_error_cb().
+ *
+ * @warning Will not call interceptor's on_conf_set.
+ * @warning Asserts if \p name is not known or value is incorrect.
+ *
+ * Implemented as a macro to have rd_assert() print the original function.
+ */
+
+#define rd_kafka_anyconf_set_internal(SCOPE, CONF, NAME, VALUE) \
+ do { \
+ const struct rd_kafka_property *_prop; \
+ rd_kafka_conf_res_t _res; \
+ _prop = rd_kafka_conf_prop_find(SCOPE, NAME); \
+ rd_assert(_prop && * "invalid property name"); \
+ _res = rd_kafka_anyconf_set_prop( \
+ SCOPE, CONF, _prop, (const void *)VALUE, \
+ 1 /*allow-specifics*/, NULL, 0); \
+ rd_assert(_res == RD_KAFKA_CONF_OK); \
+ } while (0)
+
+
+rd_kafka_conf_res_t rd_kafka_conf_set(rd_kafka_conf_t *conf,
+ const char *name,
+ const char *value,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_conf_res_t res;
+
+ res = rd_kafka_anyconf_set(_RK_GLOBAL, conf, name, value, errstr,
+ errstr_size);
+ if (res != RD_KAFKA_CONF_UNKNOWN)
+ return res;
+
+ /* Fallthru:
+ * If the global property was unknown, try setting it on the
+ * default topic config. */
+ if (!conf->topic_conf) {
+ /* Create topic config, might be over-written by application
+ * later. */
+ rd_kafka_conf_set_default_topic_conf(conf,
+ rd_kafka_topic_conf_new());
+ }
+
+ return rd_kafka_topic_conf_set(conf->topic_conf, name, value, errstr,
+ errstr_size);
+}
+
+
+rd_kafka_conf_res_t rd_kafka_topic_conf_set(rd_kafka_topic_conf_t *conf,
+ const char *name,
+ const char *value,
+ char *errstr,
+ size_t errstr_size) {
+ if (!strncmp(name, "topic.", strlen("topic.")))
+ name += strlen("topic.");
+
+ return rd_kafka_anyconf_set(_RK_TOPIC, conf, name, value, errstr,
+ errstr_size);
+}
+
+
+/**
+ * @brief Overwrites the contents of \p str up until but not including
+ * the nul-term.
+ */
+void rd_kafka_desensitize_str(char *str) {
+ size_t len;
+ static const char redacted[] = "(REDACTED)";
+
+#ifdef _WIN32
+ len = strlen(str);
+ SecureZeroMemory(str, len);
+#else
+ volatile char *volatile s;
+
+ for (s = str; *s; s++)
+ *s = '\0';
+
+ len = (size_t)(s - str);
+#endif
+
+ if (len > sizeof(redacted))
+ memcpy(str, redacted, sizeof(redacted));
+}
+
+
+
+/**
+ * @brief Overwrite the value of \p prop, if sensitive.
+ */
+static RD_INLINE void
+rd_kafka_anyconf_prop_desensitize(int scope,
+ void *conf,
+ const struct rd_kafka_property *prop) {
+ if (likely(!(prop->scope & _RK_SENSITIVE)))
+ return;
+
+ switch (prop->type) {
+ case _RK_C_STR: {
+ char **str = _RK_PTR(char **, conf, prop->offset);
+ if (*str)
+ rd_kafka_desensitize_str(*str);
+ break;
+ }
+
+ case _RK_C_INTERNAL:
+ /* This is typically a pointer to something, the
+ * _RK_SENSITIVE flag is set to get it redacted in
+ * ..dump_dbg(), but we don't have to desensitize
+ * anything here. */
+ break;
+
+ default:
+ rd_assert(!*"BUG: Don't know how to desensitize prop type");
+ break;
+ }
+}
+
+
+/**
+ * @brief Desensitize all sensitive properties in \p conf
+ */
+static void rd_kafka_anyconf_desensitize(int scope, void *conf) {
+ const struct rd_kafka_property *prop;
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+ if (!(prop->scope & scope))
+ continue;
+
+ rd_kafka_anyconf_prop_desensitize(scope, conf, prop);
+ }
+}
+
+/**
+ * @brief Overwrite the values of sensitive properties
+ */
+void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf) {
+ if (conf->topic_conf)
+ rd_kafka_anyconf_desensitize(_RK_TOPIC, conf->topic_conf);
+ rd_kafka_anyconf_desensitize(_RK_GLOBAL, conf);
+}
+
+/**
+ * @brief Overwrite the values of sensitive properties
+ */
+void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf) {
+ rd_kafka_anyconf_desensitize(_RK_TOPIC, tconf);
+}
+
+
+static void rd_kafka_anyconf_clear(int scope,
+ void *conf,
+ const struct rd_kafka_property *prop) {
+
+ rd_kafka_anyconf_prop_desensitize(scope, conf, prop);
+
+ switch (prop->type) {
+ case _RK_C_STR: {
+ char **str = _RK_PTR(char **, conf, prop->offset);
+
+ if (*str) {
+ if (prop->set) {
+ prop->set(scope, conf, prop->name, NULL, *str,
+ _RK_CONF_PROP_SET_DEL, NULL, 0);
+ /* FALLTHRU */
+ }
+ rd_free(*str);
+ *str = NULL;
+ }
+ } break;
+
+ case _RK_C_KSTR: {
+ rd_kafkap_str_t **kstr =
+ _RK_PTR(rd_kafkap_str_t **, conf, prop->offset);
+ if (*kstr) {
+ rd_kafkap_str_destroy(*kstr);
+ *kstr = NULL;
+ }
+ } break;
+
+ case _RK_C_PATLIST: {
+ rd_kafka_pattern_list_t **plist;
+ plist = _RK_PTR(rd_kafka_pattern_list_t **, conf, prop->offset);
+ if (*plist) {
+ rd_kafka_pattern_list_destroy(*plist);
+ *plist = NULL;
+ }
+ } break;
+
+ case _RK_C_PTR:
+ if (_RK_PTR(void *, conf, prop->offset) != NULL) {
+ if (!strcmp(prop->name, "default_topic_conf")) {
+ rd_kafka_topic_conf_t **tconf;
+
+ tconf = _RK_PTR(rd_kafka_topic_conf_t **, conf,
+ prop->offset);
+ if (*tconf) {
+ rd_kafka_topic_conf_destroy(*tconf);
+ *tconf = NULL;
+ }
+ }
+ }
+ break;
+
+ default:
+ break;
+ }
+
+ if (prop->dtor)
+ prop->dtor(scope, conf);
+}
+
+void rd_kafka_anyconf_destroy(int scope, void *conf) {
+ const struct rd_kafka_property *prop;
+
+ /* Call on_conf_destroy() interceptors */
+ if (scope == _RK_GLOBAL)
+ rd_kafka_interceptors_on_conf_destroy(conf);
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+ if (!(prop->scope & scope))
+ continue;
+
+ rd_kafka_anyconf_clear(scope, conf, prop);
+ }
+}
+
+
+void rd_kafka_conf_destroy(rd_kafka_conf_t *conf) {
+ rd_kafka_anyconf_destroy(_RK_GLOBAL, conf);
+ // FIXME: partition_assignors
+ rd_free(conf);
+}
+
+void rd_kafka_topic_conf_destroy(rd_kafka_topic_conf_t *topic_conf) {
+ rd_kafka_anyconf_destroy(_RK_TOPIC, topic_conf);
+ rd_free(topic_conf);
+}
+
+
+
+static void rd_kafka_anyconf_copy(int scope,
+ void *dst,
+ const void *src,
+ size_t filter_cnt,
+ const char **filter) {
+ const struct rd_kafka_property *prop;
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+ const char *val = NULL;
+ int ival = 0;
+ char *valstr;
+ size_t valsz;
+ size_t fi;
+ size_t nlen;
+
+ if (!(prop->scope & scope))
+ continue;
+
+ if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
+ continue;
+
+ /* Skip properties that have not been set,
+ * unless it is an internal one which requires
+ * extra logic, such as the interceptors. */
+ if (!rd_kafka_anyconf_is_modified(src, prop) &&
+ prop->type != _RK_C_INTERNAL)
+ continue;
+
+ /* Apply filter, if any. */
+ nlen = strlen(prop->name);
+ for (fi = 0; fi < filter_cnt; fi++) {
+ size_t flen = strlen(filter[fi]);
+ if (nlen >= flen &&
+ !strncmp(filter[fi], prop->name, flen))
+ break;
+ }
+ if (fi < filter_cnt)
+ continue; /* Filter matched */
+
+ switch (prop->type) {
+ case _RK_C_STR:
+ case _RK_C_PTR:
+ val = *_RK_PTR(const char **, src, prop->offset);
+
+ if (!strcmp(prop->name, "default_topic_conf") && val)
+ val = (void *)rd_kafka_topic_conf_dup(
+ (const rd_kafka_topic_conf_t *)(void *)val);
+ break;
+ case _RK_C_KSTR: {
+ rd_kafkap_str_t **kstr =
+ _RK_PTR(rd_kafkap_str_t **, src, prop->offset);
+ if (*kstr)
+ val = (*kstr)->str;
+ break;
+ }
+
+ case _RK_C_BOOL:
+ case _RK_C_INT:
+ case _RK_C_S2I:
+ case _RK_C_S2F:
+ ival = *_RK_PTR(const int *, src, prop->offset);
+
+ /* Get string representation of configuration value. */
+ valsz = 0;
+ rd_kafka_anyconf_get0(src, prop, NULL, &valsz);
+ valstr = rd_alloca(valsz);
+ rd_kafka_anyconf_get0(src, prop, valstr, &valsz);
+ val = valstr;
+ break;
+ case _RK_C_DBL:
+ /* Get string representation of configuration value. */
+ valsz = 0;
+ rd_kafka_anyconf_get0(src, prop, NULL, &valsz);
+ valstr = rd_alloca(valsz);
+ rd_kafka_anyconf_get0(src, prop, valstr, &valsz);
+ val = valstr;
+ break;
+ case _RK_C_PATLIST: {
+ const rd_kafka_pattern_list_t **plist;
+ plist = _RK_PTR(const rd_kafka_pattern_list_t **, src,
+ prop->offset);
+ if (*plist)
+ val = (*plist)->rkpl_orig;
+ break;
+ }
+ case _RK_C_INTERNAL:
+ /* Handled by ->copy() below. */
+ break;
+ default:
+ continue;
+ }
+
+ if (prop->copy)
+ prop->copy(scope, dst, src,
+ _RK_PTR(void *, dst, prop->offset),
+ _RK_PTR(const void *, src, prop->offset),
+ filter_cnt, filter);
+
+ rd_kafka_anyconf_set_prop0(scope, dst, prop, val, ival,
+ _RK_CONF_PROP_SET_REPLACE, NULL, 0);
+ }
+}
+
+
+rd_kafka_conf_t *rd_kafka_conf_dup(const rd_kafka_conf_t *conf) {
+ rd_kafka_conf_t *new = rd_kafka_conf_new();
+
+ rd_kafka_interceptors_on_conf_dup(new, conf, 0, NULL);
+
+ rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, 0, NULL);
+
+ return new;
+}
+
+rd_kafka_conf_t *rd_kafka_conf_dup_filter(const rd_kafka_conf_t *conf,
+ size_t filter_cnt,
+ const char **filter) {
+ rd_kafka_conf_t *new = rd_kafka_conf_new();
+
+ rd_kafka_interceptors_on_conf_dup(new, conf, filter_cnt, filter);
+
+ rd_kafka_anyconf_copy(_RK_GLOBAL, new, conf, filter_cnt, filter);
+
+ return new;
+}
+
+
+rd_kafka_topic_conf_t *
+rd_kafka_topic_conf_dup(const rd_kafka_topic_conf_t *conf) {
+ rd_kafka_topic_conf_t *new = rd_kafka_topic_conf_new();
+
+ rd_kafka_anyconf_copy(_RK_TOPIC, new, conf, 0, NULL);
+
+ return new;
+}
+
+rd_kafka_topic_conf_t *rd_kafka_default_topic_conf_dup(rd_kafka_t *rk) {
+ if (rk->rk_conf.topic_conf)
+ return rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf);
+ else
+ return rd_kafka_topic_conf_new();
+}
+
+void rd_kafka_conf_set_events(rd_kafka_conf_t *conf, int events) {
+ char tmp[32];
+ rd_snprintf(tmp, sizeof(tmp), "%d", events);
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enabled_events", tmp);
+}
+
+void rd_kafka_conf_set_background_event_cb(
+ rd_kafka_conf_t *conf,
+ void (*event_cb)(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "background_event_cb",
+ event_cb);
+}
+
+
+void rd_kafka_conf_set_dr_cb(rd_kafka_conf_t *conf,
+ void (*dr_cb)(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_cb", dr_cb);
+}
+
+
+void rd_kafka_conf_set_dr_msg_cb(
+ rd_kafka_conf_t *conf,
+ void (*dr_msg_cb)(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "dr_msg_cb", dr_msg_cb);
+}
+
+
+void rd_kafka_conf_set_consume_cb(
+ rd_kafka_conf_t *conf,
+ void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "consume_cb",
+ consume_cb);
+}
+
+void rd_kafka_conf_set_rebalance_cb(
+ rd_kafka_conf_t *conf,
+ void (*rebalance_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "rebalance_cb",
+ rebalance_cb);
+}
+
+void rd_kafka_conf_set_offset_commit_cb(
+ rd_kafka_conf_t *conf,
+ void (*offset_commit_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "offset_commit_cb",
+ offset_commit_cb);
+}
+
+
+
+void rd_kafka_conf_set_error_cb(rd_kafka_conf_t *conf,
+ void (*error_cb)(rd_kafka_t *rk,
+ int err,
+ const char *reason,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "error_cb", error_cb);
+}
+
+
+void rd_kafka_conf_set_throttle_cb(rd_kafka_conf_t *conf,
+ void (*throttle_cb)(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int throttle_time_ms,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "throttle_cb",
+ throttle_cb);
+}
+
+
+void rd_kafka_conf_set_log_cb(rd_kafka_conf_t *conf,
+ void (*log_cb)(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf)) {
+#if !WITH_SYSLOG
+ if (log_cb == rd_kafka_log_syslog)
+ rd_assert(!*"syslog support not enabled in this build");
+#endif
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "log_cb", log_cb);
+}
+
+
+void rd_kafka_conf_set_stats_cb(rd_kafka_conf_t *conf,
+ int (*stats_cb)(rd_kafka_t *rk,
+ char *json,
+ size_t json_len,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "stats_cb", stats_cb);
+}
+
+void rd_kafka_conf_set_oauthbearer_token_refresh_cb(
+ rd_kafka_conf_t *conf,
+ void (*oauthbearer_token_refresh_cb)(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque)) {
+#if WITH_SASL_OAUTHBEARER
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf,
+ "oauthbearer_token_refresh_cb",
+ oauthbearer_token_refresh_cb);
+#endif
+}
+
+void rd_kafka_conf_enable_sasl_queue(rd_kafka_conf_t *conf, int enable) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "enable_sasl_queue",
+ (enable ? "true" : "false"));
+}
+
+void rd_kafka_conf_set_socket_cb(
+ rd_kafka_conf_t *conf,
+ int (*socket_cb)(int domain, int type, int protocol, void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "socket_cb", socket_cb);
+}
+
+void rd_kafka_conf_set_connect_cb(rd_kafka_conf_t *conf,
+ int (*connect_cb)(int sockfd,
+ const struct sockaddr *addr,
+ int addrlen,
+ const char *id,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "connect_cb",
+ connect_cb);
+}
+
+void rd_kafka_conf_set_closesocket_cb(rd_kafka_conf_t *conf,
+ int (*closesocket_cb)(int sockfd,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "closesocket_cb",
+ closesocket_cb);
+}
+
+
+
+#ifndef _WIN32
+void rd_kafka_conf_set_open_cb(rd_kafka_conf_t *conf,
+ int (*open_cb)(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "open_cb", open_cb);
+}
+#endif
+
+void rd_kafka_conf_set_resolve_cb(
+ rd_kafka_conf_t *conf,
+ int (*resolve_cb)(const char *node,
+ const char *service,
+ const struct addrinfo *hints,
+ struct addrinfo **res,
+ void *opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "resolve_cb",
+ resolve_cb);
+}
+
+rd_kafka_conf_res_t rd_kafka_conf_set_ssl_cert_verify_cb(
+ rd_kafka_conf_t *conf,
+ int (*ssl_cert_verify_cb)(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int *x509_set_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque)) {
+#if !WITH_SSL
+ return RD_KAFKA_CONF_INVALID;
+#else
+ rd_kafka_anyconf_set_internal(
+ _RK_GLOBAL, conf, "ssl.certificate.verify_cb", ssl_cert_verify_cb);
+ return RD_KAFKA_CONF_OK;
+#endif
+}
+
+
+void rd_kafka_conf_set_opaque(rd_kafka_conf_t *conf, void *opaque) {
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "opaque", opaque);
+}
+
+
+void rd_kafka_conf_set_engine_callback_data(rd_kafka_conf_t *conf,
+ void *callback_data) {
+ rd_kafka_anyconf_set_internal(
+ _RK_GLOBAL, conf, "ssl_engine_callback_data", callback_data);
+}
+
+
+void rd_kafka_conf_set_default_topic_conf(rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *tconf) {
+ if (conf->topic_conf) {
+ if (rd_kafka_anyconf_is_any_modified(conf->topic_conf))
+ conf->warn.default_topic_conf_overwritten = rd_true;
+ rd_kafka_topic_conf_destroy(conf->topic_conf);
+ }
+
+ rd_kafka_anyconf_set_internal(_RK_GLOBAL, conf, "default_topic_conf",
+ tconf);
+}
+
+rd_kafka_topic_conf_t *
+rd_kafka_conf_get_default_topic_conf(rd_kafka_conf_t *conf) {
+ return conf->topic_conf;
+}
+
+
+void rd_kafka_topic_conf_set_partitioner_cb(
+ rd_kafka_topic_conf_t *topic_conf,
+ int32_t (*partitioner)(const rd_kafka_topic_t *rkt,
+ const void *keydata,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque)) {
+ rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "partitioner_cb",
+ partitioner);
+}
+
+void rd_kafka_topic_conf_set_msg_order_cmp(
+ rd_kafka_topic_conf_t *topic_conf,
+ int (*msg_order_cmp)(const rd_kafka_message_t *a,
+ const rd_kafka_message_t *b)) {
+ rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "msg_order_cmp",
+ msg_order_cmp);
+}
+
+void rd_kafka_topic_conf_set_opaque(rd_kafka_topic_conf_t *topic_conf,
+ void *opaque) {
+ rd_kafka_anyconf_set_internal(_RK_TOPIC, topic_conf, "opaque", opaque);
+}
+
+
+
+/**
+ * @brief Convert flags \p ival to csv-string using S2F property \p prop.
+ *
+ * This function has two modes: size query and write.
+ * To query for needed size call with dest==NULL,
+ * to write to buffer of size dest_size call with dest!=NULL.
+ *
+ * An \p ival of -1 means all.
+ *
+ * @param include_unsupported Include flag values that are unsupported
+ * due to missing dependencies at build time.
+ *
+ * @returns the number of bytes written to \p dest (if not NULL), else the
+ * total number of bytes needed.
+ *
+ */
+static size_t rd_kafka_conf_flags2str(char *dest,
+ size_t dest_size,
+ const char *delim,
+ const struct rd_kafka_property *prop,
+ int ival,
+ rd_bool_t include_unsupported) {
+ size_t of = 0;
+ int j;
+
+ if (dest && dest_size > 0)
+ *dest = '\0';
+
+ /* Phase 1: scan for set flags, accumulate needed size.
+ * Phase 2: write to dest */
+ for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i) && prop->s2i[j].str; j++) {
+ if (prop->type == _RK_C_S2F && ival != -1 &&
+ (ival & prop->s2i[j].val) != prop->s2i[j].val)
+ continue;
+ else if (prop->type == _RK_C_S2I && ival != -1 &&
+ prop->s2i[j].val != ival)
+ continue;
+ else if (prop->s2i[j].unsupported && !include_unsupported)
+ continue;
+
+ if (!dest)
+ of += strlen(prop->s2i[j].str) + (of > 0 ? 1 : 0);
+ else {
+ size_t r;
+ r = rd_snprintf(dest + of, dest_size - of, "%s%s",
+ of > 0 ? delim : "", prop->s2i[j].str);
+ if (r > dest_size - of) {
+ r = dest_size - of;
+ break;
+ }
+ of += r;
+ }
+ }
+
+ return of + 1 /*nul*/;
+}
+
+
+/**
+ * Return "original"(re-created) configuration value string
+ */
+static rd_kafka_conf_res_t
+rd_kafka_anyconf_get0(const void *conf,
+ const struct rd_kafka_property *prop,
+ char *dest,
+ size_t *dest_size) {
+ char tmp[22];
+ const char *val = NULL;
+ size_t val_len = 0;
+ int j;
+
+ switch (prop->type) {
+ case _RK_C_STR:
+ val = *_RK_PTR(const char **, conf, prop->offset);
+ break;
+
+ case _RK_C_KSTR: {
+ const rd_kafkap_str_t **kstr =
+ _RK_PTR(const rd_kafkap_str_t **, conf, prop->offset);
+ if (*kstr)
+ val = (*kstr)->str;
+ break;
+ }
+
+ case _RK_C_PTR:
+ val = *_RK_PTR(const void **, conf, prop->offset);
+ if (val) {
+ rd_snprintf(tmp, sizeof(tmp), "%p", (void *)val);
+ val = tmp;
+ }
+ break;
+
+ case _RK_C_BOOL:
+ val = (*_RK_PTR(int *, conf, prop->offset) ? "true" : "false");
+ break;
+
+ case _RK_C_INT:
+ rd_snprintf(tmp, sizeof(tmp), "%i",
+ *_RK_PTR(int *, conf, prop->offset));
+ val = tmp;
+ break;
+
+ case _RK_C_DBL:
+ rd_snprintf(tmp, sizeof(tmp), "%g",
+ *_RK_PTR(double *, conf, prop->offset));
+ val = tmp;
+ break;
+
+ case _RK_C_S2I:
+ for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
+ if (prop->s2i[j].val ==
+ *_RK_PTR(int *, conf, prop->offset)) {
+ val = prop->s2i[j].str;
+ break;
+ }
+ }
+ break;
+
+ case _RK_C_S2F: {
+ const int ival = *_RK_PTR(const int *, conf, prop->offset);
+
+ val_len = rd_kafka_conf_flags2str(dest, dest ? *dest_size : 0,
+ ",", prop, ival,
+ rd_false /*only supported*/);
+ if (dest) {
+ val_len = 0;
+ val = dest;
+ dest = NULL;
+ }
+ break;
+ }
+
+ case _RK_C_PATLIST: {
+ const rd_kafka_pattern_list_t **plist;
+ plist = _RK_PTR(const rd_kafka_pattern_list_t **, conf,
+ prop->offset);
+ if (*plist)
+ val = (*plist)->rkpl_orig;
+ break;
+ }
+
+ default:
+ break;
+ }
+
+ if (val_len) {
+ *dest_size = val_len + 1;
+ return RD_KAFKA_CONF_OK;
+ }
+
+ if (!val)
+ return RD_KAFKA_CONF_INVALID;
+
+ val_len = strlen(val);
+
+ if (dest) {
+ size_t use_len = RD_MIN(val_len, (*dest_size) - 1);
+ memcpy(dest, val, use_len);
+ dest[use_len] = '\0';
+ }
+
+ /* Return needed size */
+ *dest_size = val_len + 1;
+
+ return RD_KAFKA_CONF_OK;
+}
+
+
+static rd_kafka_conf_res_t rd_kafka_anyconf_get(int scope,
+ const void *conf,
+ const char *name,
+ char *dest,
+ size_t *dest_size) {
+ const struct rd_kafka_property *prop;
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+
+ if (!(prop->scope & scope) || strcmp(prop->name, name))
+ continue;
+
+ if (prop->type == _RK_C_ALIAS)
+ return rd_kafka_anyconf_get(scope, conf, prop->sdef,
+ dest, dest_size);
+
+ if (rd_kafka_anyconf_get0(conf, prop, dest, dest_size) ==
+ RD_KAFKA_CONF_OK)
+ return RD_KAFKA_CONF_OK;
+ }
+
+ return RD_KAFKA_CONF_UNKNOWN;
+}
+
+rd_kafka_conf_res_t rd_kafka_topic_conf_get(const rd_kafka_topic_conf_t *conf,
+ const char *name,
+ char *dest,
+ size_t *dest_size) {
+ return rd_kafka_anyconf_get(_RK_TOPIC, conf, name, dest, dest_size);
+}
+
+rd_kafka_conf_res_t rd_kafka_conf_get(const rd_kafka_conf_t *conf,
+ const char *name,
+ char *dest,
+ size_t *dest_size) {
+ rd_kafka_conf_res_t res;
+ res = rd_kafka_anyconf_get(_RK_GLOBAL, conf, name, dest, dest_size);
+ if (res != RD_KAFKA_CONF_UNKNOWN || !conf->topic_conf)
+ return res;
+
+ /* Fallthru:
+ * If the global property was unknown, try getting it from the
+ * default topic config, if any. */
+ return rd_kafka_topic_conf_get(conf->topic_conf, name, dest, dest_size);
+}
+
+
+static const char **rd_kafka_anyconf_dump(int scope,
+ const void *conf,
+ size_t *cntp,
+ rd_bool_t only_modified,
+ rd_bool_t redact_sensitive) {
+ const struct rd_kafka_property *prop;
+ char **arr;
+ int cnt = 0;
+
+ arr = rd_calloc(sizeof(char *), RD_ARRAYSIZE(rd_kafka_properties) * 2);
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+ char *val = NULL;
+ size_t val_size;
+
+ if (!(prop->scope & scope))
+ continue;
+
+ if (only_modified && !rd_kafka_anyconf_is_modified(conf, prop))
+ continue;
+
+ /* Skip aliases, show original property instead.
+ * Skip invalids. */
+ if (prop->type == _RK_C_ALIAS || prop->type == _RK_C_INVALID)
+ continue;
+
+ if (redact_sensitive && (prop->scope & _RK_SENSITIVE)) {
+ val = rd_strdup("[redacted]");
+ } else {
+ /* Query value size */
+ if (rd_kafka_anyconf_get0(conf, prop, NULL,
+ &val_size) !=
+ RD_KAFKA_CONF_OK)
+ continue;
+
+ /* Get value */
+ val = rd_malloc(val_size);
+ rd_kafka_anyconf_get0(conf, prop, val, &val_size);
+ }
+
+ arr[cnt++] = rd_strdup(prop->name);
+ arr[cnt++] = val;
+ }
+
+ *cntp = cnt;
+
+ return (const char **)arr;
+}
+
+
+const char **rd_kafka_conf_dump(rd_kafka_conf_t *conf, size_t *cntp) {
+ return rd_kafka_anyconf_dump(_RK_GLOBAL, conf, cntp, rd_false /*all*/,
+ rd_false /*don't redact*/);
+}
+
+const char **rd_kafka_topic_conf_dump(rd_kafka_topic_conf_t *conf,
+ size_t *cntp) {
+ return rd_kafka_anyconf_dump(_RK_TOPIC, conf, cntp, rd_false /*all*/,
+ rd_false /*don't redact*/);
+}
+
+void rd_kafka_conf_dump_free(const char **arr, size_t cnt) {
+ char **_arr = (char **)arr;
+ unsigned int i;
+
+ for (i = 0; i < cnt; i++)
+ if (_arr[i])
+ rd_free(_arr[i]);
+
+ rd_free(_arr);
+}
+
+
+
+/**
+ * @brief Dump configured properties to debug log.
+ */
+void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk,
+ int scope,
+ const void *conf,
+ const char *description) {
+ const char **arr;
+ size_t cnt;
+ size_t i;
+
+ arr =
+ rd_kafka_anyconf_dump(scope, conf, &cnt, rd_true /*modified only*/,
+ rd_true /*redact sensitive*/);
+ if (cnt > 0)
+ rd_kafka_dbg(rk, CONF, "CONF", "%s:", description);
+ for (i = 0; i < cnt; i += 2)
+ rd_kafka_dbg(rk, CONF, "CONF", " %s = %s", arr[i], arr[i + 1]);
+
+ rd_kafka_conf_dump_free(arr, cnt);
+}
+
+void rd_kafka_conf_properties_show(FILE *fp) {
+ const struct rd_kafka_property *prop0;
+ int last = 0;
+ int j;
+ char tmp[512];
+ const char *dash80 =
+ "----------------------------------------"
+ "----------------------------------------";
+
+ for (prop0 = rd_kafka_properties; prop0->name; prop0++) {
+ const char *typeinfo = "";
+ const char *importance;
+ const struct rd_kafka_property *prop = prop0;
+
+ /* Skip hidden properties. */
+ if (prop->scope & _RK_HIDDEN)
+ continue;
+
+ /* Skip invalid properties. */
+ if (prop->type == _RK_C_INVALID)
+ continue;
+
+ if (!(prop->scope & last)) {
+ fprintf(fp, "%s## %s configuration properties\n\n",
+ last ? "\n\n" : "",
+ prop->scope == _RK_GLOBAL ? "Global" : "Topic");
+
+ fprintf(fp,
+ "%-40s | %3s | %-15s | %13s | %-10s | %-25s\n"
+ "%.*s-|-%.*s-|-%.*s-|-%.*s:|-%.*s-| -%.*s\n",
+ "Property", "C/P", "Range", "Default",
+ "Importance", "Description", 40, dash80, 3,
+ dash80, 15, dash80, 13, dash80, 10, dash80, 25,
+ dash80);
+
+ last = prop->scope & (_RK_GLOBAL | _RK_TOPIC);
+ }
+
+ fprintf(fp, "%-40s | ", prop->name);
+
+ /* For aliases, use the aliased property from here on
+ * so that the alias property shows up with proper
+ * ranges, defaults, etc. */
+ if (prop->type == _RK_C_ALIAS) {
+ prop = rd_kafka_conf_prop_find(prop->scope, prop->sdef);
+ rd_assert(prop && *"BUG: "
+ "alias points to unknown config property");
+ }
+
+ fprintf(fp, "%3s | ",
+ (!(prop->scope & _RK_PRODUCER) ==
+ !(prop->scope & _RK_CONSUMER)
+ ? " * "
+ : ((prop->scope & _RK_PRODUCER) ? " P " : " C ")));
+
+ switch (prop->type) {
+ case _RK_C_STR:
+ case _RK_C_KSTR:
+ typeinfo = "string";
+ case _RK_C_PATLIST:
+ if (prop->type == _RK_C_PATLIST)
+ typeinfo = "pattern list";
+ if (prop->s2i[0].str) {
+ rd_kafka_conf_flags2str(
+ tmp, sizeof(tmp), ", ", prop, -1,
+ rd_true /*include unsupported*/);
+ fprintf(fp, "%-15s | %13s", tmp,
+ prop->sdef ? prop->sdef : "");
+ } else {
+ fprintf(fp, "%-15s | %13s", "",
+ prop->sdef ? prop->sdef : "");
+ }
+ break;
+ case _RK_C_BOOL:
+ typeinfo = "boolean";
+ fprintf(fp, "%-15s | %13s", "true, false",
+ prop->vdef ? "true" : "false");
+ break;
+ case _RK_C_INT:
+ typeinfo = "integer";
+ rd_snprintf(tmp, sizeof(tmp), "%d .. %d", prop->vmin,
+ prop->vmax);
+ fprintf(fp, "%-15s | %13i", tmp, prop->vdef);
+ break;
+ case _RK_C_DBL:
+ typeinfo = "float"; /* more user-friendly than double */
+ rd_snprintf(tmp, sizeof(tmp), "%g .. %g", prop->dmin,
+ prop->dmax);
+ fprintf(fp, "%-15s | %13g", tmp, prop->ddef);
+ break;
+ case _RK_C_S2I:
+ typeinfo = "enum value";
+ rd_kafka_conf_flags2str(
+ tmp, sizeof(tmp), ", ", prop, -1,
+ rd_true /*include unsupported*/);
+ fprintf(fp, "%-15s | ", tmp);
+
+ for (j = 0; j < (int)RD_ARRAYSIZE(prop->s2i); j++) {
+ if (prop->s2i[j].val == prop->vdef) {
+ fprintf(fp, "%13s", prop->s2i[j].str);
+ break;
+ }
+ }
+ if (j == RD_ARRAYSIZE(prop->s2i))
+ fprintf(fp, "%13s", " ");
+ break;
+
+ case _RK_C_S2F:
+ typeinfo = "CSV flags";
+ /* Dont duplicate builtin.features value in
+ * both Range and Default */
+ if (!strcmp(prop->name, "builtin.features"))
+ *tmp = '\0';
+ else
+ rd_kafka_conf_flags2str(
+ tmp, sizeof(tmp), ", ", prop, -1,
+ rd_true /*include unsupported*/);
+ fprintf(fp, "%-15s | ", tmp);
+ rd_kafka_conf_flags2str(
+ tmp, sizeof(tmp), ", ", prop, prop->vdef,
+ rd_true /*include unsupported*/);
+ fprintf(fp, "%13s", tmp);
+
+ break;
+ case _RK_C_PTR:
+ case _RK_C_INTERNAL:
+ typeinfo = "see dedicated API";
+ /* FALLTHRU */
+ default:
+ fprintf(fp, "%-15s | %-13s", "", " ");
+ break;
+ }
+
+ if (prop->scope & _RK_HIGH)
+ importance = "high";
+ else if (prop->scope & _RK_MED)
+ importance = "medium";
+ else
+ importance = "low";
+
+ fprintf(fp, " | %-10s | ", importance);
+
+ if (prop->scope & _RK_EXPERIMENTAL)
+ fprintf(fp,
+ "**EXPERIMENTAL**: "
+ "subject to change or removal. ");
+
+ if (prop->scope & _RK_DEPRECATED)
+ fprintf(fp, "**DEPRECATED** ");
+
+ /* If the original property is an alias, prefix the
+ * description saying so. */
+ if (prop0->type == _RK_C_ALIAS)
+ fprintf(fp, "Alias for `%s`: ", prop0->sdef);
+
+ fprintf(fp, "%s <br>*Type: %s*\n", prop->desc, typeinfo);
+ }
+ fprintf(fp, "\n");
+ fprintf(fp, "### C/P legend: C = Consumer, P = Producer, * = both\n");
+}
+
+
+
+/**
+ * @name Configuration value methods
+ *
+ * @remark This generic interface will eventually replace the config property
+ * used above.
+ * @{
+ */
+
+
+/**
+ * @brief Set up an INT confval.
+ *
+ * @oaram name Property name, must be a const static string (will not be copied)
+ */
+void rd_kafka_confval_init_int(rd_kafka_confval_t *confval,
+ const char *name,
+ int vmin,
+ int vmax,
+ int vdef) {
+ confval->name = name;
+ confval->is_enabled = 1;
+ confval->valuetype = RD_KAFKA_CONFVAL_INT;
+ confval->u.INT.vmin = vmin;
+ confval->u.INT.vmax = vmax;
+ confval->u.INT.vdef = vdef;
+ confval->u.INT.v = vdef;
+}
+
+/**
+ * @brief Set up a PTR confval.
+ *
+ * @oaram name Property name, must be a const static string (will not be copied)
+ */
+void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name) {
+ confval->name = name;
+ confval->is_enabled = 1;
+ confval->valuetype = RD_KAFKA_CONFVAL_PTR;
+ confval->u.PTR = NULL;
+}
+
+/**
+ * @brief Set up but disable an intval, attempt to set this confval will fail.
+ *
+ * @oaram name Property name, must be a const static string (will not be copied)
+ */
+void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name) {
+ confval->name = name;
+ confval->is_enabled = 0;
+}
+
+/**
+ * @brief Set confval's value to \p valuep, verifying the passed
+ * \p valuetype matches (or can be cast to) \p confval's type.
+ *
+ * @param dispname is the display name for the configuration value and is
+ * included in error strings.
+ * @param valuep is a pointer to the value, or NULL to revert to default.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the new value was set, or
+ * RD_KAFKA_RESP_ERR__INVALID_ARG if the value was of incorrect type,
+ * out of range, or otherwise not a valid value.
+ */
+rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval,
+ rd_kafka_confval_type_t valuetype,
+ const void *valuep,
+ char *errstr,
+ size_t errstr_size) {
+
+ if (!confval->is_enabled) {
+ rd_snprintf(errstr, errstr_size,
+ "\"%s\" is not supported for this operation",
+ confval->name);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ switch (confval->valuetype) {
+ case RD_KAFKA_CONFVAL_INT: {
+ int v;
+ const char *end;
+
+ if (!valuep) {
+ /* Revert to default */
+ confval->u.INT.v = confval->u.INT.vdef;
+ confval->is_set = 0;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ switch (valuetype) {
+ case RD_KAFKA_CONFVAL_INT:
+ v = *(const int *)valuep;
+ break;
+ case RD_KAFKA_CONFVAL_STR:
+ v = (int)strtol((const char *)valuep, (char **)&end, 0);
+ if (end == (const char *)valuep) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value type for \"%s\": "
+ "expecting integer",
+ confval->name);
+ return RD_KAFKA_RESP_ERR__INVALID_TYPE;
+ }
+ break;
+ default:
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value type for \"%s\": "
+ "expecting integer",
+ confval->name);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+
+ if ((confval->u.INT.vmin || confval->u.INT.vmax) &&
+ (v < confval->u.INT.vmin || v > confval->u.INT.vmax)) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value type for \"%s\": "
+ "expecting integer in range %d..%d",
+ confval->name, confval->u.INT.vmin,
+ confval->u.INT.vmax);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ confval->u.INT.v = v;
+ confval->is_set = 1;
+ } break;
+
+ case RD_KAFKA_CONFVAL_STR: {
+ size_t vlen;
+ const char *v = (const char *)valuep;
+
+ if (!valuep) {
+ confval->is_set = 0;
+ if (confval->u.STR.vdef)
+ confval->u.STR.v =
+ rd_strdup(confval->u.STR.vdef);
+ else
+ confval->u.STR.v = NULL;
+ }
+
+ if (valuetype != RD_KAFKA_CONFVAL_STR) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value type for \"%s\": "
+ "expecting string",
+ confval->name);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ vlen = strlen(v);
+ if ((confval->u.STR.minlen || confval->u.STR.maxlen) &&
+ (vlen < confval->u.STR.minlen ||
+ vlen > confval->u.STR.maxlen)) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value for \"%s\": "
+ "expecting string with length "
+ "%" PRIusz "..%" PRIusz,
+ confval->name, confval->u.STR.minlen,
+ confval->u.STR.maxlen);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ if (confval->u.STR.v)
+ rd_free(confval->u.STR.v);
+
+ confval->u.STR.v = rd_strdup(v);
+ } break;
+
+ case RD_KAFKA_CONFVAL_PTR:
+ confval->u.PTR = (void *)valuep;
+ break;
+
+ default:
+ RD_NOTREACHED();
+ return RD_KAFKA_RESP_ERR__NOENT;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval) {
+ rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_INT);
+ return confval->u.INT.v;
+}
+
+
+const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval) {
+ rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_STR);
+ return confval->u.STR.v;
+}
+
+void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval) {
+ rd_assert(confval->valuetype == RD_KAFKA_CONFVAL_PTR);
+ return confval->u.PTR;
+}
+
+
+#define _is_alphanum(C) \
+ (((C) >= 'a' && (C) <= 'z') || ((C) >= 'A' && (C) <= 'Z') || \
+ ((C) >= '0' && (C) <= '9'))
+
+/**
+ * @returns true if the string is KIP-511 safe, else false.
+ */
+static rd_bool_t rd_kafka_sw_str_is_safe(const char *str) {
+ const char *s;
+
+ if (!*str)
+ return rd_true;
+
+ for (s = str; *s; s++) {
+ int c = (int)*s;
+
+ if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.')))
+ return rd_false;
+ }
+
+ /* Verify that the string begins and ends with a-zA-Z0-9 */
+ if (!_is_alphanum(*str))
+ return rd_false;
+ if (!_is_alphanum(*(s - 1)))
+ return rd_false;
+
+ return rd_true;
+}
+
+
+/**
+ * @brief Sanitize KIP-511 software name/version strings in-place,
+ * replacing unaccepted characters with "-".
+ *
+ * @warning The \p str is modified in-place.
+ */
+static void rd_kafka_sw_str_sanitize_inplace(char *str) {
+ char *s = str, *d = str;
+
+ /* Strip any leading non-alphanums */
+ while (!_is_alphanum(*s))
+ s++;
+
+ for (; *s; s++) {
+ int c = (int)*s;
+
+ if (unlikely(!(_is_alphanum(c) || c == '-' || c == '.')))
+ *d = '-';
+ else
+ *d = *s;
+ d++;
+ }
+
+ *d = '\0';
+
+ /* Strip any trailing non-alphanums */
+ for (d = d - 1; d >= str && !_is_alphanum(*d); d--)
+ *d = '\0';
+}
+
+#undef _is_alphanum
+
+
+/**
+ * @brief Create a staggered array of key-value pairs from
+ * an array of "key=value" strings (typically from rd_string_split()).
+ *
+ * The output array will have element 0 being key0 and element 1 being
+ * value0. Element 2 being key1 and element 3 being value1, and so on.
+ * E.g.:
+ * input { "key0=value0", "key1=value1" } incnt=2
+ * returns { "key0", "value0", "key1", "value1" } cntp=4
+ *
+ * @returns NULL on error (no '=' separator), or a newly allocated array
+ * on success. The array count is returned in \p cntp.
+ * The returned pointer must be freed with rd_free().
+ */
+char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp) {
+ size_t i;
+ char **out, *p;
+ size_t lens = 0;
+ size_t outcnt = 0;
+
+ /* First calculate total length needed for key-value strings. */
+ for (i = 0; i < incnt; i++) {
+ const char *t = strchr(input[i], '=');
+
+ /* No "=", or "=" at beginning of string. */
+ if (!t || t == input[i])
+ return NULL;
+
+ /* Length of key, '=' (will be \0), value, and \0 */
+ lens += strlen(input[i]) + 1;
+ }
+
+ /* Allocate array along with elements in one go */
+ out = rd_malloc((sizeof(*out) * incnt * 2) + lens);
+ p = (char *)(&out[incnt * 2]);
+
+ for (i = 0; i < incnt; i++) {
+ const char *t = strchr(input[i], '=');
+ size_t namelen = (size_t)(t - input[i]);
+ size_t valuelen = strlen(t + 1);
+
+ /* Copy name */
+ out[outcnt++] = p;
+ memcpy(p, input[i], namelen);
+ p += namelen;
+ *(p++) = '\0';
+
+ /* Copy value */
+ out[outcnt++] = p;
+ memcpy(p, t + 1, valuelen + 1);
+ p += valuelen;
+ *(p++) = '\0';
+ }
+
+
+ *cntp = outcnt;
+ return out;
+}
+
+
+/**
+ * @brief Verify configuration \p conf is
+ * correct/non-conflicting and finalize the configuration
+ * settings for use.
+ *
+ * @returns an error string if configuration is incorrect, else NULL.
+ */
+const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
+ rd_kafka_conf_t *conf) {
+ const char *errstr;
+
+ if (!conf->sw_name)
+ rd_kafka_conf_set(conf, "client.software.name", "librdkafka",
+ NULL, 0);
+ if (!conf->sw_version)
+ rd_kafka_conf_set(conf, "client.software.version",
+ rd_kafka_version_str(), NULL, 0);
+
+ /* The client.software.name and .version are sent to the broker
+ * with the ApiVersionRequest starting with AK 2.4.0 (KIP-511).
+ * These strings need to be sanitized or the broker will reject them,
+ * so modify them in-place here. */
+ rd_assert(conf->sw_name && conf->sw_version);
+ rd_kafka_sw_str_sanitize_inplace(conf->sw_name);
+ rd_kafka_sw_str_sanitize_inplace(conf->sw_version);
+
+ /* Verify mandatory configuration */
+ if (!conf->socket_cb)
+ return "Mandatory config property `socket_cb` not set";
+
+ if (!conf->open_cb)
+ return "Mandatory config property `open_cb` not set";
+
+#if WITH_SSL
+ if (conf->ssl.keystore_location && !conf->ssl.keystore_password)
+ return "`ssl.keystore.password` is mandatory when "
+ "`ssl.keystore.location` is set";
+ if (conf->ssl.ca && (conf->ssl.ca_location || conf->ssl.ca_pem))
+ return "`ssl.ca.location` or `ssl.ca.pem`, and memory-based "
+ "set_ssl_cert(CERT_CA) are mutually exclusive.";
+#ifdef __APPLE__
+ else if (!conf->ssl.ca && !conf->ssl.ca_location && !conf->ssl.ca_pem)
+ /* Default ssl.ca.location to 'probe' on OSX */
+ rd_kafka_conf_set(conf, "ssl.ca.location", "probe", NULL, 0);
+#endif
+#endif
+
+#if WITH_SASL_OAUTHBEARER
+ if (!rd_strcasecmp(conf->sasl.mechanisms, "OAUTHBEARER")) {
+ if (conf->sasl.enable_oauthbearer_unsecure_jwt &&
+ conf->sasl.oauthbearer.token_refresh_cb)
+ return "`enable.sasl.oauthbearer.unsecure.jwt` and "
+ "`oauthbearer_token_refresh_cb` are "
+ "mutually exclusive";
+
+ if (conf->sasl.enable_oauthbearer_unsecure_jwt &&
+ conf->sasl.oauthbearer.method ==
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC)
+ return "`enable.sasl.oauthbearer.unsecure.jwt` and "
+ "`sasl.oauthbearer.method=oidc` are "
+ "mutually exclusive";
+
+ if (conf->sasl.oauthbearer.method ==
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC) {
+ if (!conf->sasl.oauthbearer.client_id)
+ return "`sasl.oauthbearer.client.id` is "
+ "mandatory when "
+ "`sasl.oauthbearer.method=oidc` is set";
+
+ if (!conf->sasl.oauthbearer.client_secret) {
+ return "`sasl.oauthbearer.client.secret` is "
+ "mandatory when "
+ "`sasl.oauthbearer.method=oidc` is set";
+ }
+
+ if (!conf->sasl.oauthbearer.token_endpoint_url) {
+ return "`sasl.oauthbearer.token.endpoint.url` "
+ "is mandatory when "
+ "`sasl.oauthbearer.method=oidc` is set";
+ }
+ }
+
+ /* Enable background thread for the builtin OIDC handler,
+ * unless a refresh callback has been set. */
+ if (conf->sasl.oauthbearer.method ==
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
+ !conf->sasl.oauthbearer.token_refresh_cb) {
+ conf->enabled_events |= RD_KAFKA_EVENT_BACKGROUND;
+ conf->sasl.enable_callback_queue = 1;
+ }
+ }
+
+#endif
+
+ if (cltype == RD_KAFKA_CONSUMER) {
+
+ /* Automatically adjust `fetch.max.bytes` to be >=
+ * `message.max.bytes` and <= `queued.max.message.kbytes`
+ * unless set by user. */
+ if (rd_kafka_conf_is_modified(conf, "fetch.max.bytes")) {
+ if (conf->fetch_max_bytes < conf->max_msg_size)
+ return "`fetch.max.bytes` must be >= "
+ "`message.max.bytes`";
+ } else {
+ conf->fetch_max_bytes =
+ RD_MAX(RD_MIN(conf->fetch_max_bytes,
+ conf->queued_max_msg_kbytes * 1024),
+ conf->max_msg_size);
+ }
+
+ /* Automatically adjust 'receive.message.max.bytes' to
+ * be 512 bytes larger than 'fetch.max.bytes' to have enough
+ * room for protocol framing (including topic name), unless
+ * set by user. */
+ if (rd_kafka_conf_is_modified(conf,
+ "receive.message.max.bytes")) {
+ if (conf->fetch_max_bytes + 512 >
+ conf->recv_max_msg_size)
+ return "`receive.message.max.bytes` must be >= "
+ "`fetch.max.bytes` + 512";
+ } else {
+ conf->recv_max_msg_size =
+ RD_MAX(conf->recv_max_msg_size,
+ conf->fetch_max_bytes + 512);
+ }
+
+ if (conf->max_poll_interval_ms < conf->group_session_timeout_ms)
+ return "`max.poll.interval.ms`must be >= "
+ "`session.timeout.ms`";
+
+ /* Simplifies rd_kafka_is_idempotent() which is producer-only */
+ conf->eos.idempotence = 0;
+
+ } else if (cltype == RD_KAFKA_PRODUCER) {
+ if (conf->eos.transactional_id) {
+ if (!conf->eos.idempotence) {
+ /* Auto enable idempotence unless
+ * explicitly disabled */
+ if (rd_kafka_conf_is_modified(
+ conf, "enable.idempotence"))
+ return "`transactional.id` requires "
+ "`enable.idempotence=true`";
+
+ conf->eos.idempotence = rd_true;
+ }
+
+ /* Make sure at least one request can be sent
+ * before the transaction times out. */
+ if (!rd_kafka_conf_is_modified(conf,
+ "socket.timeout.ms"))
+ conf->socket_timeout_ms = RD_MAX(
+ conf->eos.transaction_timeout_ms - 100,
+ 900);
+ else if (conf->eos.transaction_timeout_ms + 100 <
+ conf->socket_timeout_ms)
+ return "`socket.timeout.ms` must be set <= "
+ "`transaction.timeout.ms` + 100";
+ }
+
+ if (conf->eos.idempotence) {
+ /* Adjust configuration values for idempotent producer*/
+
+ if (rd_kafka_conf_is_modified(conf, "max.in.flight")) {
+ if (conf->max_inflight >
+ RD_KAFKA_IDEMP_MAX_INFLIGHT)
+ return "`max.in.flight` must be "
+ "set "
+ "<="
+ " " RD_KAFKA_IDEMP_MAX_INFLIGHT_STR
+ " when `enable.idempotence` "
+ "is true";
+ } else {
+ conf->max_inflight =
+ RD_MIN(conf->max_inflight,
+ RD_KAFKA_IDEMP_MAX_INFLIGHT);
+ }
+
+
+ if (rd_kafka_conf_is_modified(conf, "retries")) {
+ if (conf->max_retries < 1)
+ return "`retries` must be set >= 1 "
+ "when `enable.idempotence` is "
+ "true";
+ } else {
+ conf->max_retries = INT32_MAX;
+ }
+
+
+ if (rd_kafka_conf_is_modified(
+ conf,
+ "queue.buffering.backpressure.threshold") &&
+ conf->queue_backpressure_thres > 1)
+ return "`queue.buffering.backpressure."
+ "threshold` "
+ "must be set to 1 when "
+ "`enable.idempotence` is true";
+ else
+ conf->queue_backpressure_thres = 1;
+
+ /* acks=all and queuing.strategy are set
+ * in topic_conf_finalize() */
+
+ } else {
+ if (conf->eos.gapless &&
+ rd_kafka_conf_is_modified(
+ conf, "enable.gapless.guarantee"))
+ return "`enable.gapless.guarantee` requires "
+ "`enable.idempotence` to be enabled";
+ }
+
+ if (!rd_kafka_conf_is_modified(conf,
+ "sticky.partitioning.linger.ms"))
+ conf->sticky_partition_linger_ms = (int)RD_MIN(
+ 900000, (rd_ts_t)(2 * conf->buffering_max_ms_dbl));
+ }
+
+
+ if (!rd_kafka_conf_is_modified(conf, "metadata.max.age.ms") &&
+ conf->metadata_refresh_interval_ms > 0)
+ conf->metadata_max_age_ms =
+ conf->metadata_refresh_interval_ms * 3;
+
+ if (conf->reconnect_backoff_max_ms < conf->reconnect_backoff_ms)
+ return "`reconnect.backoff.max.ms` must be >= "
+ "`reconnect.max.ms`";
+
+ if (conf->sparse_connections) {
+ /* Set sparse connection random selection interval to
+ * 10 < reconnect.backoff.ms / 2 < 1000. */
+ conf->sparse_connect_intvl =
+ RD_MAX(11, RD_MIN(conf->reconnect_backoff_ms / 2, 1000));
+ }
+
+ if (!rd_kafka_conf_is_modified(conf, "connections.max.idle.ms") &&
+ conf->brokerlist && rd_strcasestr(conf->brokerlist, "azure")) {
+ /* Issue #3109:
+ * Default connections.max.idle.ms to <4 minutes on Azure. */
+ conf->connections_max_idle_ms = (4 * 60 - 10) * 1000;
+ }
+
+ if (!rd_kafka_conf_is_modified(conf, "allow.auto.create.topics")) {
+ /* Consumer: Do not allow auto create by default.
+ * Producer: Allow auto create by default. */
+ if (cltype == RD_KAFKA_CONSUMER)
+ conf->allow_auto_create_topics = rd_false;
+ else if (cltype == RD_KAFKA_PRODUCER)
+ conf->allow_auto_create_topics = rd_true;
+ }
+
+ /* Finalize and verify the default.topic.config */
+ if (conf->topic_conf) {
+
+ if (cltype == RD_KAFKA_PRODUCER) {
+ rd_kafka_topic_conf_t *tconf = conf->topic_conf;
+
+ if (tconf->message_timeout_ms != 0 &&
+ (double)tconf->message_timeout_ms <=
+ conf->buffering_max_ms_dbl) {
+ if (rd_kafka_conf_is_modified(conf,
+ "linger.ms"))
+ return "`message.timeout.ms` must be "
+ "greater than `linger.ms`";
+ else /* Auto adjust linger.ms to be lower
+ * than message.timeout.ms */
+ conf->buffering_max_ms_dbl =
+ (double)tconf->message_timeout_ms -
+ 0.1;
+ }
+ }
+
+ errstr = rd_kafka_topic_conf_finalize(cltype, conf,
+ conf->topic_conf);
+ if (errstr)
+ return errstr;
+ }
+
+ /* Convert double linger.ms to internal int microseconds after
+ * finalizing default_topic_conf since it may
+ * update buffering_max_ms_dbl. */
+ conf->buffering_max_us = (rd_ts_t)(conf->buffering_max_ms_dbl * 1000);
+
+
+ return NULL;
+}
+
+
+/**
+ * @brief Verify topic configuration \p tconf is
+ * correct/non-conflicting and finalize the configuration
+ * settings for use.
+ *
+ * @returns an error string if configuration is incorrect, else NULL.
+ */
+const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype,
+ const rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *tconf) {
+
+ if (cltype != RD_KAFKA_PRODUCER)
+ return NULL;
+
+ if (conf->eos.idempotence) {
+ /* Ensure acks=all */
+ if (rd_kafka_topic_conf_is_modified(tconf, "acks")) {
+ if (tconf->required_acks != -1)
+ return "`acks` must be set to `all` when "
+ "`enable.idempotence` is true";
+ } else {
+ tconf->required_acks = -1; /* all */
+ }
+
+ /* Ensure FIFO queueing */
+ if (rd_kafka_topic_conf_is_modified(tconf,
+ "queuing.strategy")) {
+ if (tconf->queuing_strategy != RD_KAFKA_QUEUE_FIFO)
+ return "`queuing.strategy` must be set to "
+ "`fifo` when `enable.idempotence` is "
+ "true";
+ } else {
+ tconf->queuing_strategy = RD_KAFKA_QUEUE_FIFO;
+ }
+
+ /* Ensure message.timeout.ms <= transaction.timeout.ms */
+ if (conf->eos.transactional_id) {
+ if (!rd_kafka_topic_conf_is_modified(
+ tconf, "message.timeout.ms"))
+ tconf->message_timeout_ms =
+ conf->eos.transaction_timeout_ms;
+ else if (tconf->message_timeout_ms >
+ conf->eos.transaction_timeout_ms)
+ return "`message.timeout.ms` must be set <= "
+ "`transaction.timeout.ms`";
+ }
+ }
+
+ if (tconf->message_timeout_ms != 0 &&
+ (double)tconf->message_timeout_ms <= conf->buffering_max_ms_dbl &&
+ rd_kafka_conf_is_modified(conf, "linger.ms"))
+ return "`message.timeout.ms` must be greater than `linger.ms`";
+
+ return NULL;
+}
+
+
+/**
+ * @brief Log warnings for set deprecated or experimental
+ * configuration properties.
+ * @returns the number of warnings logged.
+ */
+static int rd_kafka_anyconf_warn_deprecated(rd_kafka_t *rk,
+ rd_kafka_conf_scope_t scope,
+ const void *conf) {
+ const struct rd_kafka_property *prop;
+ int warn_type =
+ rk->rk_type == RD_KAFKA_PRODUCER ? _RK_CONSUMER : _RK_PRODUCER;
+ int warn_on = _RK_DEPRECATED | _RK_EXPERIMENTAL | warn_type;
+
+ int cnt = 0;
+
+ for (prop = rd_kafka_properties; prop->name; prop++) {
+ int match = prop->scope & warn_on;
+
+ if (likely(!(prop->scope & scope) || !match))
+ continue;
+
+ if (likely(!rd_kafka_anyconf_is_modified(conf, prop)))
+ continue;
+
+ if (match != warn_type)
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Configuration property %s is %s%s%s: %s",
+ prop->name,
+ match & _RK_DEPRECATED ? "deprecated" : "",
+ match == warn_on ? " and " : "",
+ match & _RK_EXPERIMENTAL ? "experimental"
+ : "",
+ prop->desc);
+
+ if (match & warn_type)
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Configuration property %s "
+ "is a %s property and will be ignored by "
+ "this %s instance",
+ prop->name,
+ warn_type == _RK_PRODUCER ? "producer"
+ : "consumer",
+ warn_type == _RK_PRODUCER ? "consumer"
+ : "producer");
+
+ cnt++;
+ }
+
+ return cnt;
+}
+
+
+/**
+ * @brief Log configuration warnings (deprecated configuration properties,
+ * unrecommended combinations, etc).
+ *
+ * @returns the number of warnings logged.
+ *
+ * @locality any
+ * @locks none
+ */
+int rd_kafka_conf_warn(rd_kafka_t *rk) {
+ int cnt = 0;
+
+ cnt = rd_kafka_anyconf_warn_deprecated(rk, _RK_GLOBAL, &rk->rk_conf);
+ if (rk->rk_conf.topic_conf)
+ cnt += rd_kafka_anyconf_warn_deprecated(rk, _RK_TOPIC,
+ rk->rk_conf.topic_conf);
+
+ if (rk->rk_conf.warn.default_topic_conf_overwritten)
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Topic configuration properties set in the "
+ "global configuration were overwritten by "
+ "explicitly setting a default_topic_conf: "
+ "recommend not using set_default_topic_conf");
+
+ /* Additional warnings */
+ if (rk->rk_type == RD_KAFKA_CONSUMER) {
+ if (rk->rk_conf.fetch_wait_max_ms + 1000 >
+ rk->rk_conf.socket_timeout_ms)
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Configuration property "
+ "`fetch.wait.max.ms` (%d) should be "
+ "set lower than `socket.timeout.ms` (%d) "
+ "by at least 1000ms to avoid blocking "
+ "and timing out sub-sequent requests",
+ rk->rk_conf.fetch_wait_max_ms,
+ rk->rk_conf.socket_timeout_ms);
+ }
+
+ if (rd_kafka_conf_is_modified(&rk->rk_conf, "sasl.mechanisms") &&
+ !(rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_SSL ||
+ rk->rk_conf.security_protocol == RD_KAFKA_PROTO_SASL_PLAINTEXT)) {
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Configuration property `sasl.mechanism` set to "
+ "`%s` but `security.protocol` is not configured "
+ "for SASL: recommend setting "
+ "`security.protocol` to SASL_SSL or "
+ "SASL_PLAINTEXT",
+ rk->rk_conf.sasl.mechanisms);
+ }
+
+ if (rd_kafka_conf_is_modified(&rk->rk_conf, "sasl.username") &&
+ !(!strncmp(rk->rk_conf.sasl.mechanisms, "SCRAM", 5) ||
+ !strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")))
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Configuration property `sasl.username` only "
+ "applies when `sasl.mechanism` is set to "
+ "PLAIN or SCRAM-SHA-..");
+
+ if (rd_kafka_conf_is_modified(&rk->rk_conf, "client.software.name") &&
+ !rd_kafka_sw_str_is_safe(rk->rk_conf.sw_name))
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Configuration property `client.software.name` "
+ "may only contain 'a-zA-Z0-9.-', other characters "
+ "will be replaced with '-'");
+
+ if (rd_kafka_conf_is_modified(&rk->rk_conf,
+ "client.software.version") &&
+ !rd_kafka_sw_str_is_safe(rk->rk_conf.sw_version))
+ rd_kafka_log(rk, LOG_WARNING, "CONFWARN",
+ "Configuration property `client.software.verison` "
+ "may only contain 'a-zA-Z0-9.-', other characters "
+ "will be replaced with '-'");
+
+ if (rd_atomic32_get(&rk->rk_broker_cnt) == 0)
+ rd_kafka_log(rk, LOG_NOTICE, "CONFWARN",
+ "No `bootstrap.servers` configured: "
+ "client will not be able to connect "
+ "to Kafka cluster");
+
+ return cnt;
+}
+
+
+const rd_kafka_conf_t *rd_kafka_conf(rd_kafka_t *rk) {
+ return &rk->rk_conf;
+}
+
+
+/**
+ * @brief Unittests
+ */
+int unittest_conf(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_conf_res_t res, res2;
+ char errstr[128];
+ int iteration;
+ const struct rd_kafka_property *prop;
+ char readval[512];
+ size_t readlen;
+ const char *errstr2;
+
+ conf = rd_kafka_conf_new();
+ tconf = rd_kafka_topic_conf_new();
+
+ res = rd_kafka_conf_set(conf, "unknown.thing", "foo", errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(res == RD_KAFKA_CONF_UNKNOWN, "fail");
+ RD_UT_ASSERT(*errstr, "fail");
+
+ for (iteration = 0; iteration < 5; iteration++) {
+ int cnt;
+
+
+ /* Iterations:
+ * 0 - Check is_modified
+ * 1 - Set every other config property, read back and verify.
+ * 2 - Check is_modified.
+ * 3 - Set all config properties, read back and verify.
+ * 4 - Check is_modified. */
+ for (prop = rd_kafka_properties, cnt = 0; prop->name;
+ prop++, cnt++) {
+ const char *val;
+ char tmp[64];
+ int odd = cnt & 1;
+ int do_set = iteration == 3 || (iteration == 1 && odd);
+ rd_bool_t is_modified;
+ int exp_is_modified =
+ !prop->unsupported &&
+ (iteration >= 3 ||
+ (iteration > 0 && (do_set || odd)));
+
+ readlen = sizeof(readval);
+
+ /* Avoid some special configs */
+ if (!strcmp(prop->name, "plugin.library.paths") ||
+ !strcmp(prop->name, "builtin.features"))
+ continue;
+
+ switch (prop->type) {
+ case _RK_C_STR:
+ case _RK_C_KSTR:
+ case _RK_C_PATLIST:
+ if (prop->sdef)
+ val = prop->sdef;
+ else
+ val = "test";
+ break;
+
+ case _RK_C_BOOL:
+ val = "true";
+ break;
+
+ case _RK_C_INT:
+ rd_snprintf(tmp, sizeof(tmp), "%d", prop->vdef);
+ val = tmp;
+ break;
+
+ case _RK_C_DBL:
+ rd_snprintf(tmp, sizeof(tmp), "%g", prop->ddef);
+ val = tmp;
+ break;
+
+ case _RK_C_S2F:
+ case _RK_C_S2I:
+ val = prop->s2i[0].str;
+ break;
+
+ case _RK_C_PTR:
+ case _RK_C_ALIAS:
+ case _RK_C_INVALID:
+ case _RK_C_INTERNAL:
+ default:
+ continue;
+ }
+
+
+ if (prop->scope & _RK_GLOBAL) {
+ if (do_set)
+ res = rd_kafka_conf_set(
+ conf, prop->name, val, errstr,
+ sizeof(errstr));
+
+ res2 = rd_kafka_conf_get(conf, prop->name,
+ readval, &readlen);
+
+ is_modified =
+ rd_kafka_conf_is_modified(conf, prop->name);
+
+
+ } else if (prop->scope & _RK_TOPIC) {
+ if (do_set)
+ res = rd_kafka_topic_conf_set(
+ tconf, prop->name, val, errstr,
+ sizeof(errstr));
+
+ res2 = rd_kafka_topic_conf_get(
+ tconf, prop->name, readval, &readlen);
+
+ is_modified = rd_kafka_topic_conf_is_modified(
+ tconf, prop->name);
+
+ } else {
+ RD_NOTREACHED();
+ }
+
+
+
+ if (do_set && prop->unsupported) {
+ RD_UT_ASSERT(res == RD_KAFKA_CONF_INVALID,
+ "conf_set %s should've failed "
+ "with CONF_INVALID, not %d: %s",
+ prop->name, res, errstr);
+
+ } else if (do_set) {
+ RD_UT_ASSERT(res == RD_KAFKA_CONF_OK,
+ "conf_set %s failed: %d: %s",
+ prop->name, res, errstr);
+ RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK,
+ "conf_get %s failed: %d",
+ prop->name, res2);
+
+ RD_UT_ASSERT(!strcmp(readval, val),
+ "conf_get %s "
+ "returned \"%s\": "
+ "expected \"%s\"",
+ prop->name, readval, val);
+
+ RD_UT_ASSERT(is_modified,
+ "Property %s was set but "
+ "is_modified=%d",
+ prop->name, is_modified);
+ }
+
+ assert(is_modified == exp_is_modified);
+ RD_UT_ASSERT(is_modified == exp_is_modified,
+ "Property %s is_modified=%d, "
+ "exp_is_modified=%d "
+ "(iter %d, odd %d, do_set %d)",
+ prop->name, is_modified, exp_is_modified,
+ iteration, odd, do_set);
+ }
+ }
+
+ /* Set an alias and make sure is_modified() works for it. */
+ res = rd_kafka_conf_set(conf, "max.in.flight", "19", NULL, 0);
+ RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res);
+
+ RD_UT_ASSERT(rd_kafka_conf_is_modified(conf, "max.in.flight") ==
+ rd_true,
+ "fail");
+ RD_UT_ASSERT(rd_kafka_conf_is_modified(
+ conf, "max.in.flight.requests.per.connection") ==
+ rd_true,
+ "fail");
+
+ rd_kafka_conf_destroy(conf);
+ rd_kafka_topic_conf_destroy(tconf);
+
+
+ /* Verify that software.client.* string-safing works */
+ conf = rd_kafka_conf_new();
+ res = rd_kafka_conf_set(conf, "client.software.name",
+ " .~aba. va! !.~~", NULL, 0);
+ RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res);
+ res = rd_kafka_conf_set(conf, "client.software.version",
+ "!1.2.3.4.5!!! a", NULL, 0);
+ RD_UT_ASSERT(res == RD_KAFKA_CONF_OK, "%d", res);
+
+ errstr2 = rd_kafka_conf_finalize(RD_KAFKA_PRODUCER, conf);
+ RD_UT_ASSERT(!errstr2, "conf_finalize() failed: %s", errstr2);
+
+ readlen = sizeof(readval);
+ res2 =
+ rd_kafka_conf_get(conf, "client.software.name", readval, &readlen);
+ RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2);
+ RD_UT_ASSERT(!strcmp(readval, "aba.-va"),
+ "client.software.* safification failed: \"%s\"", readval);
+ RD_UT_SAY("Safified client.software.name=\"%s\"", readval);
+
+ readlen = sizeof(readval);
+ res2 = rd_kafka_conf_get(conf, "client.software.version", readval,
+ &readlen);
+ RD_UT_ASSERT(res2 == RD_KAFKA_CONF_OK, "%d", res2);
+ RD_UT_ASSERT(!strcmp(readval, "1.2.3.4.5----a"),
+ "client.software.* safification failed: \"%s\"", readval);
+ RD_UT_SAY("Safified client.software.version=\"%s\"", readval);
+
+ rd_kafka_conf_destroy(conf);
+
+ RD_UT_PASS();
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h
new file mode 100644
index 000000000..161d6e469
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_conf.h
@@ -0,0 +1,650 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014-2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_CONF_H_
+#define _RDKAFKA_CONF_H_
+
+#include "rdlist.h"
+#include "rdkafka_cert.h"
+
+#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 && \
+ !defined(OPENSSL_IS_BORINGSSL)
+#define WITH_SSL_ENGINE 1
+/* Deprecated in OpenSSL 3 */
+#include <openssl/engine.h>
+#endif /* WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000 */
+
+/**
+ * Forward declarations
+ */
+struct rd_kafka_transport_s;
+
+
+/**
+ * MessageSet compression codecs
+ */
+typedef enum {
+ RD_KAFKA_COMPRESSION_NONE,
+ RD_KAFKA_COMPRESSION_GZIP = RD_KAFKA_MSG_ATTR_GZIP,
+ RD_KAFKA_COMPRESSION_SNAPPY = RD_KAFKA_MSG_ATTR_SNAPPY,
+ RD_KAFKA_COMPRESSION_LZ4 = RD_KAFKA_MSG_ATTR_LZ4,
+ RD_KAFKA_COMPRESSION_ZSTD = RD_KAFKA_MSG_ATTR_ZSTD,
+ RD_KAFKA_COMPRESSION_INHERIT, /* Inherit setting from global conf */
+ RD_KAFKA_COMPRESSION_NUM
+} rd_kafka_compression_t;
+
+static RD_INLINE RD_UNUSED const char *
+rd_kafka_compression2str(rd_kafka_compression_t compr) {
+ static const char *names[RD_KAFKA_COMPRESSION_NUM] = {
+ [RD_KAFKA_COMPRESSION_NONE] = "none",
+ [RD_KAFKA_COMPRESSION_GZIP] = "gzip",
+ [RD_KAFKA_COMPRESSION_SNAPPY] = "snappy",
+ [RD_KAFKA_COMPRESSION_LZ4] = "lz4",
+ [RD_KAFKA_COMPRESSION_ZSTD] = "zstd",
+ [RD_KAFKA_COMPRESSION_INHERIT] = "inherit"};
+ static RD_TLS char ret[32];
+
+ if ((int)compr < 0 || compr >= RD_KAFKA_COMPRESSION_NUM) {
+ rd_snprintf(ret, sizeof(ret), "codec0x%x?", (int)compr);
+ return ret;
+ }
+
+ return names[compr];
+}
+
+/**
+ * MessageSet compression levels
+ */
+typedef enum {
+ RD_KAFKA_COMPLEVEL_DEFAULT = -1,
+ RD_KAFKA_COMPLEVEL_MIN = -1,
+ RD_KAFKA_COMPLEVEL_GZIP_MAX = 9,
+ RD_KAFKA_COMPLEVEL_LZ4_MAX = 12,
+ RD_KAFKA_COMPLEVEL_SNAPPY_MAX = 0,
+ RD_KAFKA_COMPLEVEL_ZSTD_MAX = 22,
+ RD_KAFKA_COMPLEVEL_MAX = 12
+} rd_kafka_complevel_t;
+
+typedef enum {
+ RD_KAFKA_PROTO_PLAINTEXT,
+ RD_KAFKA_PROTO_SSL,
+ RD_KAFKA_PROTO_SASL_PLAINTEXT,
+ RD_KAFKA_PROTO_SASL_SSL,
+ RD_KAFKA_PROTO_NUM,
+} rd_kafka_secproto_t;
+
+
+typedef enum {
+ RD_KAFKA_CONFIGURED,
+ RD_KAFKA_LEARNED,
+ RD_KAFKA_INTERNAL,
+ RD_KAFKA_LOGICAL
+} rd_kafka_confsource_t;
+
+static RD_INLINE RD_UNUSED const char *
+rd_kafka_confsource2str(rd_kafka_confsource_t source) {
+ static const char *names[] = {"configured", "learned", "internal",
+ "logical"};
+
+ return names[source];
+}
+
+
+typedef enum {
+ _RK_GLOBAL = 0x1,
+ _RK_PRODUCER = 0x2,
+ _RK_CONSUMER = 0x4,
+ _RK_TOPIC = 0x8,
+ _RK_CGRP = 0x10,
+ _RK_DEPRECATED = 0x20,
+ _RK_HIDDEN = 0x40,
+ _RK_HIGH = 0x80, /* High Importance */
+ _RK_MED = 0x100, /* Medium Importance */
+ _RK_EXPERIMENTAL = 0x200, /* Experimental (unsupported) property */
+ _RK_SENSITIVE = 0x400 /* The configuration property's value
+ * might contain sensitive information. */
+} rd_kafka_conf_scope_t;
+
+/**< While the client groups is a generic concept, it is currently
+ * only implemented for consumers in librdkafka. */
+#define _RK_CGRP _RK_CONSUMER
+
+typedef enum {
+ _RK_CONF_PROP_SET_REPLACE, /* Replace current value (default) */
+ _RK_CONF_PROP_SET_ADD, /* Add value (S2F) */
+ _RK_CONF_PROP_SET_DEL /* Remove value (S2F) */
+} rd_kafka_conf_set_mode_t;
+
+
+
+typedef enum {
+ RD_KAFKA_OFFSET_METHOD_NONE,
+ RD_KAFKA_OFFSET_METHOD_FILE,
+ RD_KAFKA_OFFSET_METHOD_BROKER
+} rd_kafka_offset_method_t;
+
+typedef enum {
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_DEFAULT,
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC
+} rd_kafka_oauthbearer_method_t;
+
+typedef enum {
+ RD_KAFKA_SSL_ENDPOINT_ID_NONE,
+ RD_KAFKA_SSL_ENDPOINT_ID_HTTPS, /**< RFC2818 */
+} rd_kafka_ssl_endpoint_id_t;
+
+/* Increase in steps of 64 as needed.
+ * This must be larger than sizeof(rd_kafka_[topic_]conf_t) */
+#define RD_KAFKA_CONF_PROPS_IDX_MAX (64 * 33)
+
+/**
+ * @struct rd_kafka_anyconf_t
+ * @brief The anyconf header must be the first field in the
+ * rd_kafka_conf_t and rd_kafka_topic_conf_t structs.
+ * It provides a way to track which property has been modified.
+ */
+struct rd_kafka_anyconf_hdr {
+ uint64_t modified[RD_KAFKA_CONF_PROPS_IDX_MAX / 64];
+};
+
+
+/**
+ * Optional configuration struct passed to rd_kafka_new*().
+ *
+ * The struct is populated ted through string properties
+ * by calling rd_kafka_conf_set().
+ *
+ */
+struct rd_kafka_conf_s {
+ struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */
+
+ /*
+ * Generic configuration
+ */
+ int enabled_events;
+ int max_msg_size;
+ int msg_copy_max_size;
+ int recv_max_msg_size;
+ int max_inflight;
+ int metadata_request_timeout_ms;
+ int metadata_refresh_interval_ms;
+ int metadata_refresh_fast_cnt;
+ int metadata_refresh_fast_interval_ms;
+ int metadata_refresh_sparse;
+ int metadata_max_age_ms;
+ int metadata_propagation_max_ms;
+ int debug;
+ int broker_addr_ttl;
+ int broker_addr_family;
+ int socket_timeout_ms;
+ int socket_blocking_max_ms;
+ int socket_sndbuf_size;
+ int socket_rcvbuf_size;
+ int socket_keepalive;
+ int socket_nagle_disable;
+ int socket_max_fails;
+ char *client_id_str;
+ char *brokerlist;
+ int stats_interval_ms;
+ int term_sig;
+ int reconnect_backoff_ms;
+ int reconnect_backoff_max_ms;
+ int reconnect_jitter_ms;
+ int socket_connection_setup_timeout_ms;
+ int connections_max_idle_ms;
+ int sparse_connections;
+ int sparse_connect_intvl;
+ int api_version_request;
+ int api_version_request_timeout_ms;
+ int api_version_fallback_ms;
+ char *broker_version_fallback;
+ rd_kafka_secproto_t security_protocol;
+
+ struct {
+#if WITH_SSL
+ SSL_CTX *ctx;
+#endif
+ char *cipher_suites;
+ char *curves_list;
+ char *sigalgs_list;
+ char *key_location;
+ char *key_pem;
+ rd_kafka_cert_t *key;
+ char *key_password;
+ char *cert_location;
+ char *cert_pem;
+ rd_kafka_cert_t *cert;
+ char *ca_location;
+ char *ca_pem;
+ rd_kafka_cert_t *ca;
+ /** CSV list of Windows certificate stores */
+ char *ca_cert_stores;
+ char *crl_location;
+#if WITH_SSL && OPENSSL_VERSION_NUMBER >= 0x10100000
+ ENGINE *engine;
+#endif
+ char *engine_location;
+ char *engine_id;
+ void *engine_callback_data;
+ char *providers;
+ rd_list_t loaded_providers; /**< (SSL_PROVIDER*) */
+ char *keystore_location;
+ char *keystore_password;
+ int endpoint_identification;
+ int enable_verify;
+ int (*cert_verify_cb)(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque);
+ } ssl;
+
+ struct {
+ const struct rd_kafka_sasl_provider *provider;
+ char *principal;
+ char *mechanisms;
+ char *service_name;
+ char *kinit_cmd;
+ char *keytab;
+ int relogin_min_time;
+ /** Protects .username and .password access after client
+ * instance has been created (see sasl_set_credentials()). */
+ mtx_t lock;
+ char *username;
+ char *password;
+#if WITH_SASL_SCRAM
+ /* SCRAM EVP-wrapped hash function
+ * (return value from EVP_shaX()) */
+ const void /*EVP_MD*/ *scram_evp;
+ /* SCRAM direct hash function (e.g., SHA256()) */
+ unsigned char *(*scram_H)(const unsigned char *d,
+ size_t n,
+ unsigned char *md);
+ /* Hash size */
+ size_t scram_H_size;
+#endif
+ char *oauthbearer_config;
+ int enable_oauthbearer_unsecure_jwt;
+ int enable_callback_queue;
+ struct {
+ rd_kafka_oauthbearer_method_t method;
+ char *token_endpoint_url;
+ char *client_id;
+ char *client_secret;
+ char *scope;
+ char *extensions_str;
+ /* SASL/OAUTHBEARER token refresh event callback */
+ void (*token_refresh_cb)(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque);
+ } oauthbearer;
+ } sasl;
+
+ char *plugin_paths;
+#if WITH_PLUGINS
+ rd_list_t plugins;
+#endif
+
+ /* Interceptors */
+ struct {
+ /* rd_kafka_interceptor_method_t lists */
+ rd_list_t on_conf_set; /* on_conf_set interceptors
+ * (not copied on conf_dup()) */
+ rd_list_t on_conf_dup; /* .. (not copied) */
+ rd_list_t on_conf_destroy; /* .. (not copied) */
+ rd_list_t on_new; /* .. (copied) */
+ rd_list_t on_destroy; /* .. (copied) */
+ rd_list_t on_send; /* .. (copied) */
+ rd_list_t on_acknowledgement; /* .. (copied) */
+ rd_list_t on_consume; /* .. (copied) */
+ rd_list_t on_commit; /* .. (copied) */
+ rd_list_t on_request_sent; /* .. (copied) */
+ rd_list_t on_response_received; /* .. (copied) */
+ rd_list_t on_thread_start; /* .. (copied) */
+ rd_list_t on_thread_exit; /* .. (copied) */
+ rd_list_t on_broker_state_change; /* .. (copied) */
+
+ /* rd_strtup_t list */
+ rd_list_t config; /* Configuration name=val's
+ * handled by interceptors. */
+ } interceptors;
+
+ /* Client group configuration */
+ int coord_query_intvl_ms;
+ int max_poll_interval_ms;
+
+ int builtin_features;
+ /*
+ * Consumer configuration
+ */
+ int check_crcs;
+ int queued_min_msgs;
+ int queued_max_msg_kbytes;
+ int64_t queued_max_msg_bytes;
+ int fetch_wait_max_ms;
+ int fetch_msg_max_bytes;
+ int fetch_max_bytes;
+ int fetch_min_bytes;
+ int fetch_error_backoff_ms;
+ char *group_id_str;
+ char *group_instance_id;
+ int allow_auto_create_topics;
+
+ rd_kafka_pattern_list_t *topic_blacklist;
+ struct rd_kafka_topic_conf_s *topic_conf; /* Default topic config
+ * for automatically
+ * subscribed topics. */
+ int enable_auto_commit;
+ int enable_auto_offset_store;
+ int auto_commit_interval_ms;
+ int group_session_timeout_ms;
+ int group_heartbeat_intvl_ms;
+ rd_kafkap_str_t *group_protocol_type;
+ char *partition_assignment_strategy;
+ rd_list_t partition_assignors;
+ int enabled_assignor_cnt;
+
+ void (*rebalance_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque);
+
+ void (*offset_commit_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque);
+
+ rd_kafka_offset_method_t offset_store_method;
+
+ rd_kafka_isolation_level_t isolation_level;
+
+ int enable_partition_eof;
+
+ rd_kafkap_str_t *client_rack;
+
+ /*
+ * Producer configuration
+ */
+ struct {
+ /*
+ * Idempotence
+ */
+ int idempotence; /**< Enable Idempotent Producer */
+ rd_bool_t gapless; /**< Raise fatal error if
+ * gapless guarantee can't be
+ * satisfied. */
+ /*
+ * Transactions
+ */
+ char *transactional_id; /**< Transactional Id */
+ int transaction_timeout_ms; /**< Transaction timeout */
+ } eos;
+ int queue_buffering_max_msgs;
+ int queue_buffering_max_kbytes;
+ double buffering_max_ms_dbl; /**< This is the configured value */
+ rd_ts_t buffering_max_us; /**< This is the value used in the code */
+ int queue_backpressure_thres;
+ int max_retries;
+ int retry_backoff_ms;
+ int batch_num_messages;
+ int batch_size;
+ rd_kafka_compression_t compression_codec;
+ int dr_err_only;
+ int sticky_partition_linger_ms;
+
+ /* Message delivery report callback.
+ * Called once for each produced message, either on
+ * successful and acknowledged delivery to the broker in which
+ * case 'err' is 0, or if the message could not be delivered
+ * in which case 'err' is non-zero (use rd_kafka_err2str()
+ * to obtain a human-readable error reason).
+ *
+ * If the message was produced with neither RD_KAFKA_MSG_F_FREE
+ * or RD_KAFKA_MSG_F_COPY set then 'payload' is the original
+ * pointer provided to rd_kafka_produce().
+ * rdkafka will not perform any further actions on 'payload'
+ * at this point and the application may rd_free the payload data
+ * at this point.
+ *
+ * 'opaque' is 'conf.opaque', while 'msg_opaque' is
+ * the opaque pointer provided in the rd_kafka_produce() call.
+ */
+ void (*dr_cb)(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque);
+
+ void (*dr_msg_cb)(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque);
+
+ /* Consume callback */
+ void (*consume_cb)(rd_kafka_message_t *rkmessage, void *opaque);
+
+ /* Log callback */
+ void (*log_cb)(const rd_kafka_t *rk,
+ int level,
+ const char *fac,
+ const char *buf);
+ int log_level;
+ int log_queue;
+ int log_thread_name;
+ int log_connection_close;
+
+ /* PRNG seeding */
+ int enable_random_seed;
+
+ /* Error callback */
+ void (*error_cb)(rd_kafka_t *rk,
+ int err,
+ const char *reason,
+ void *opaque);
+
+ /* Throttle callback */
+ void (*throttle_cb)(rd_kafka_t *rk,
+ const char *broker_name,
+ int32_t broker_id,
+ int throttle_time_ms,
+ void *opaque);
+
+ /* Stats callback */
+ int (*stats_cb)(rd_kafka_t *rk,
+ char *json,
+ size_t json_len,
+ void *opaque);
+
+ /* Socket creation callback */
+ int (*socket_cb)(int domain, int type, int protocol, void *opaque);
+
+ /* Connect callback */
+ int (*connect_cb)(int sockfd,
+ const struct sockaddr *addr,
+ int addrlen,
+ const char *id,
+ void *opaque);
+
+ /* Close socket callback */
+ int (*closesocket_cb)(int sockfd, void *opaque);
+
+ /* File open callback */
+ int (*open_cb)(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque);
+
+ /* Address resolution callback */
+ int (*resolve_cb)(const char *node,
+ const char *service,
+ const struct addrinfo *hints,
+ struct addrinfo **res,
+ void *opaque);
+
+ /* Background queue event callback */
+ void (*background_event_cb)(rd_kafka_t *rk,
+ rd_kafka_event_t *rkev,
+ void *opaque);
+
+
+ /* Opaque passed to callbacks. */
+ void *opaque;
+
+ /* For use with value-less properties. */
+ int dummy;
+
+
+ /* Admin client defaults */
+ struct {
+ int request_timeout_ms; /* AdminOptions.request_timeout */
+ } admin;
+
+
+ /*
+ * Test mocks
+ */
+ struct {
+ int broker_cnt; /**< Number of mock brokers */
+ int broker_rtt; /**< Broker RTT */
+ } mock;
+
+ /*
+ * Unit test pluggable interfaces
+ */
+ struct {
+ /**< Inject errors in ProduceResponse handler */
+ rd_kafka_resp_err_t (*handle_ProduceResponse)(
+ rd_kafka_t *rk,
+ int32_t brokerid,
+ uint64_t msgid,
+ rd_kafka_resp_err_t err);
+ } ut;
+
+ char *sw_name; /**< Software/client name */
+ char *sw_version; /**< Software/client version */
+
+ struct {
+ /** Properties on (implicit pass-thru) default_topic_conf were
+ * overwritten by passing an explicit default_topic_conf. */
+ rd_bool_t default_topic_conf_overwritten;
+ } warn;
+};
+
+int rd_kafka_socket_cb_linux(int domain, int type, int protocol, void *opaque);
+int rd_kafka_socket_cb_generic(int domain,
+ int type,
+ int protocol,
+ void *opaque);
+#ifndef _WIN32
+int rd_kafka_open_cb_linux(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque);
+#endif
+int rd_kafka_open_cb_generic(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque);
+
+
+
+struct rd_kafka_topic_conf_s {
+ struct rd_kafka_anyconf_hdr hdr; /**< Must be first field */
+
+ int required_acks;
+ int32_t request_timeout_ms;
+ int message_timeout_ms;
+
+ int32_t (*partitioner)(const rd_kafka_topic_t *rkt,
+ const void *keydata,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque);
+ char *partitioner_str;
+
+ rd_bool_t random_partitioner; /**< rd_true - random
+ * rd_false - sticky */
+
+ int queuing_strategy; /* RD_KAFKA_QUEUE_FIFO|LIFO */
+ int (*msg_order_cmp)(const void *a, const void *b);
+
+ rd_kafka_compression_t compression_codec;
+ rd_kafka_complevel_t compression_level;
+ int produce_offset_report;
+
+ int consume_callback_max_msgs;
+ int auto_commit;
+ int auto_commit_interval_ms;
+ int auto_offset_reset;
+ char *offset_store_path;
+ int offset_store_sync_interval_ms;
+
+ rd_kafka_offset_method_t offset_store_method;
+
+ /* Application provided opaque pointer (this is rkt_opaque) */
+ void *opaque;
+};
+
+
+char **rd_kafka_conf_kv_split(const char **input, size_t incnt, size_t *cntp);
+
+void rd_kafka_anyconf_destroy(int scope, void *conf);
+
+rd_bool_t rd_kafka_conf_is_modified(const rd_kafka_conf_t *conf,
+ const char *name);
+
+void rd_kafka_desensitize_str(char *str);
+
+void rd_kafka_conf_desensitize(rd_kafka_conf_t *conf);
+void rd_kafka_topic_conf_desensitize(rd_kafka_topic_conf_t *tconf);
+
+const char *rd_kafka_conf_finalize(rd_kafka_type_t cltype,
+ rd_kafka_conf_t *conf);
+const char *rd_kafka_topic_conf_finalize(rd_kafka_type_t cltype,
+ const rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *tconf);
+
+
+int rd_kafka_conf_warn(rd_kafka_t *rk);
+
+void rd_kafka_anyconf_dump_dbg(rd_kafka_t *rk,
+ int scope,
+ const void *conf,
+ const char *description);
+
+#include "rdkafka_confval.h"
+
+int unittest_conf(void);
+
+#endif /* _RDKAFKA_CONF_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h
new file mode 100644
index 000000000..3f2bad549
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_confval.h
@@ -0,0 +1,97 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2014-2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_CONFVAL_H_
+#define _RDKAFKA_CONFVAL_H_
+/**
+ * @name Next generation configuration values
+ * @{
+ *
+ */
+
+/**
+ * @brief Configuration value type
+ */
+typedef enum rd_kafka_confval_type_t {
+ RD_KAFKA_CONFVAL_INT,
+ RD_KAFKA_CONFVAL_STR,
+ RD_KAFKA_CONFVAL_PTR,
+} rd_kafka_confval_type_t;
+
+/**
+ * @brief Configuration value (used by AdminOption).
+ * Comes with a type, backed by a union, and a flag to indicate
+ * if the value has been set or not.
+ */
+typedef struct rd_kafka_confval_s {
+ const char *name; /**< Property name */
+ rd_kafka_confval_type_t valuetype; /**< Value type, maps to union.*/
+ int is_set; /**< Value has been set. */
+ int is_enabled; /**< Confval is enabled. */
+ union {
+ struct {
+ int v; /**< Current value */
+ int vmin; /**< Minimum value (inclusive) */
+ int vmax; /**< Maximum value (inclusive) */
+ int vdef; /**< Default value */
+ } INT;
+ struct {
+ char *v; /**< Current value */
+ int allowempty; /**< Allow empty string as value */
+ size_t minlen; /**< Minimum string length excl \0 */
+ size_t maxlen; /**< Maximum string length excl \0 */
+ const char *vdef; /**< Default value */
+ } STR;
+ void *PTR; /**< Pointer */
+ } u;
+} rd_kafka_confval_t;
+
+
+
+void rd_kafka_confval_init_int(rd_kafka_confval_t *confval,
+ const char *name,
+ int vmin,
+ int vmax,
+ int vdef);
+void rd_kafka_confval_init_ptr(rd_kafka_confval_t *confval, const char *name);
+void rd_kafka_confval_disable(rd_kafka_confval_t *confval, const char *name);
+
+rd_kafka_resp_err_t rd_kafka_confval_set_type(rd_kafka_confval_t *confval,
+ rd_kafka_confval_type_t valuetype,
+ const void *valuep,
+ char *errstr,
+ size_t errstr_size);
+
+int rd_kafka_confval_get_int(const rd_kafka_confval_t *confval);
+const char *rd_kafka_confval_get_str(const rd_kafka_confval_t *confval);
+void *rd_kafka_confval_get_ptr(const rd_kafka_confval_t *confval);
+
+/**@}*/
+
+
+#endif /* _RDKAFKA_CONFVAL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c
new file mode 100644
index 000000000..9e41bab72
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.c
@@ -0,0 +1,623 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rdkafka_int.h"
+#include "rdkafka_request.h"
+#include "rdkafka_coord.h"
+
+
+/**
+ * @name Coordinator cache
+ * @{
+ *
+ */
+void rd_kafka_coord_cache_entry_destroy(rd_kafka_coord_cache_t *cc,
+ rd_kafka_coord_cache_entry_t *cce) {
+ rd_assert(cc->cc_cnt > 0);
+ rd_free(cce->cce_coordkey);
+ rd_kafka_broker_destroy(cce->cce_rkb);
+ TAILQ_REMOVE(&cc->cc_entries, cce, cce_link);
+ cc->cc_cnt--;
+ rd_free(cce);
+}
+
+
+/**
+ * @brief Delete any expired cache entries
+ *
+ * @locality rdkafka main thread
+ */
+void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc) {
+ rd_kafka_coord_cache_entry_t *cce, *next;
+ rd_ts_t expire = rd_clock() - cc->cc_expire_thres;
+
+ next = TAILQ_LAST(&cc->cc_entries, rd_kafka_coord_cache_head_s);
+ while (next) {
+ cce = next;
+
+ if (cce->cce_ts_used > expire)
+ break;
+
+ next = TAILQ_PREV(cce, rd_kafka_coord_cache_head_s, cce_link);
+ rd_kafka_coord_cache_entry_destroy(cc, cce);
+ }
+}
+
+
+static rd_kafka_coord_cache_entry_t *
+rd_kafka_coord_cache_find(rd_kafka_coord_cache_t *cc,
+ rd_kafka_coordtype_t coordtype,
+ const char *coordkey) {
+ rd_kafka_coord_cache_entry_t *cce;
+
+ TAILQ_FOREACH(cce, &cc->cc_entries, cce_link) {
+ if (cce->cce_coordtype == coordtype &&
+ !strcmp(cce->cce_coordkey, coordkey)) {
+ /* Match */
+ cce->cce_ts_used = rd_clock();
+ if (TAILQ_FIRST(&cc->cc_entries) != cce) {
+ /* Move to head of list */
+ TAILQ_REMOVE(&cc->cc_entries, cce, cce_link);
+ TAILQ_INSERT_HEAD(&cc->cc_entries, cce,
+ cce_link);
+ }
+ return cce;
+ }
+ }
+
+ return NULL;
+}
+
+
+rd_kafka_broker_t *rd_kafka_coord_cache_get(rd_kafka_coord_cache_t *cc,
+ rd_kafka_coordtype_t coordtype,
+ const char *coordkey) {
+ rd_kafka_coord_cache_entry_t *cce;
+
+ cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey);
+ if (!cce)
+ return NULL;
+
+ rd_kafka_broker_keep(cce->cce_rkb);
+ return cce->cce_rkb;
+}
+
+
+
+static void rd_kafka_coord_cache_add(rd_kafka_coord_cache_t *cc,
+ rd_kafka_coordtype_t coordtype,
+ const char *coordkey,
+ rd_kafka_broker_t *rkb) {
+ rd_kafka_coord_cache_entry_t *cce;
+
+ if (!(cce = rd_kafka_coord_cache_find(cc, coordtype, coordkey))) {
+ if (cc->cc_cnt > 10) {
+ /* Not enough room in cache, remove least used entry */
+ rd_kafka_coord_cache_entry_t *rem = TAILQ_LAST(
+ &cc->cc_entries, rd_kafka_coord_cache_head_s);
+ rd_kafka_coord_cache_entry_destroy(cc, rem);
+ }
+
+ cce = rd_calloc(1, sizeof(*cce));
+ cce->cce_coordtype = coordtype;
+ cce->cce_coordkey = rd_strdup(coordkey);
+ cce->cce_ts_used = rd_clock();
+
+ TAILQ_INSERT_HEAD(&cc->cc_entries, cce, cce_link);
+ cc->cc_cnt++;
+ }
+
+ if (cce->cce_rkb != rkb) {
+ if (cce->cce_rkb)
+ rd_kafka_broker_destroy(cce->cce_rkb);
+ cce->cce_rkb = rkb;
+ rd_kafka_broker_keep(rkb);
+ }
+}
+
+
+/**
+ * @brief Evict any cache entries for broker \p rkb.
+ *
+ * Use this when a request returns ERR_NOT_COORDINATOR_FOR...
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc,
+ rd_kafka_broker_t *rkb) {
+ rd_kafka_coord_cache_entry_t *cce, *tmp;
+
+ TAILQ_FOREACH_SAFE(cce, &cc->cc_entries, cce_link, tmp) {
+ if (cce->cce_rkb == rkb)
+ rd_kafka_coord_cache_entry_destroy(cc, cce);
+ }
+}
+
+/**
+ * @brief Destroy all coord cache entries.
+ */
+void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc) {
+ rd_kafka_coord_cache_entry_t *cce;
+
+ while ((cce = TAILQ_FIRST(&cc->cc_entries)))
+ rd_kafka_coord_cache_entry_destroy(cc, cce);
+}
+
+
+/**
+ * @brief Initialize the coord cache.
+ *
+ * Locking of the coord-cache is up to the owner.
+ */
+void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc,
+ int expire_thres_ms) {
+ TAILQ_INIT(&cc->cc_entries);
+ cc->cc_cnt = 0;
+ cc->cc_expire_thres = expire_thres_ms * 1000;
+}
+
+/**@}*/
+
+
+/**
+ * @name Asynchronous coordinator requests
+ * @{
+ *
+ */
+
+
+
+static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq);
+
+/**
+ * @brief Timer callback for delayed coord requests.
+ */
+static void rd_kafka_coord_req_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_coord_req_t *creq = arg;
+
+ rd_kafka_coord_req_fsm(rkts->rkts_rk, creq);
+}
+
+
+/**
+ * @brief Look up coordinator for \p coordtype and \p coordkey
+ * (either from cache or by FindCoordinator), make sure there is
+ * a connection to the coordinator, and then call \p send_req_cb,
+ * passing the coordinator broker instance and \p rko
+ * to send the request.
+ * These steps may be performed by this function, or asynchronously
+ * at a later time.
+ *
+ * @param delay_ms If non-zero, delay scheduling of the coord request
+ * for this long. The passed \p timeout_ms is automatically
+ * adjusted to + \p delay_ms.
+ *
+ * Response, or error, is sent on \p replyq with callback \p rkbuf_cb.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_coord_req(rd_kafka_t *rk,
+ rd_kafka_coordtype_t coordtype,
+ const char *coordkey,
+ rd_kafka_send_req_cb_t *send_req_cb,
+ rd_kafka_op_t *rko,
+ int delay_ms,
+ int timeout_ms,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *reply_opaque) {
+ rd_kafka_coord_req_t *creq;
+
+ creq = rd_calloc(1, sizeof(*creq));
+ creq->creq_coordtype = coordtype;
+ creq->creq_coordkey = rd_strdup(coordkey);
+ creq->creq_ts_timeout = rd_timeout_init(delay_ms + timeout_ms);
+ creq->creq_send_req_cb = send_req_cb;
+ creq->creq_rko = rko;
+ creq->creq_replyq = replyq;
+ creq->creq_resp_cb = resp_cb;
+ creq->creq_reply_opaque = reply_opaque;
+ creq->creq_refcnt = 1;
+ creq->creq_done = rd_false;
+ rd_interval_init(&creq->creq_query_intvl);
+
+ TAILQ_INSERT_TAIL(&rk->rk_coord_reqs, creq, creq_link);
+
+ if (delay_ms)
+ rd_kafka_timer_start_oneshot(&rk->rk_timers, &creq->creq_tmr,
+ rd_true, (rd_ts_t)delay_ms * 1000,
+ rd_kafka_coord_req_tmr_cb, creq);
+ else
+ rd_kafka_coord_req_fsm(rk, creq);
+}
+
+
+/**
+ * @brief Decrease refcount of creq and free it if no more references.
+ *
+ * @param done Mark creq as done, having performed its duties. There may still
+ * be lingering references.
+ *
+ * @returns true if creq was destroyed, else false.
+ */
+static rd_bool_t rd_kafka_coord_req_destroy(rd_kafka_t *rk,
+ rd_kafka_coord_req_t *creq,
+ rd_bool_t done) {
+
+ rd_assert(creq->creq_refcnt > 0);
+
+ if (done) {
+ /* Request has been performed, remove from rk_coord_reqs
+ * list so creq won't be triggered again by state broadcasts,
+ * etc. */
+ rd_dassert(!creq->creq_done);
+ TAILQ_REMOVE(&rk->rk_coord_reqs, creq, creq_link);
+ creq->creq_done = rd_true;
+
+ rd_kafka_timer_stop(&rk->rk_timers, &creq->creq_tmr,
+ RD_DO_LOCK);
+ }
+
+ if (--creq->creq_refcnt > 0)
+ return rd_false;
+
+ rd_dassert(creq->creq_done);
+
+ /* Clear out coordinator we were waiting for. */
+ if (creq->creq_rkb) {
+ rd_kafka_broker_persistent_connection_del(
+ creq->creq_rkb, &creq->creq_rkb->rkb_persistconn.coord);
+ rd_kafka_broker_destroy(creq->creq_rkb);
+ creq->creq_rkb = NULL;
+ }
+
+ rd_kafka_replyq_destroy(&creq->creq_replyq);
+ rd_free(creq->creq_coordkey);
+ rd_free(creq);
+
+ return rd_true;
+}
+
+static void rd_kafka_coord_req_keep(rd_kafka_coord_req_t *creq) {
+ creq->creq_refcnt++;
+}
+
+static void rd_kafka_coord_req_fail(rd_kafka_t *rk,
+ rd_kafka_coord_req_t *creq,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_op_t *reply;
+ rd_kafka_buf_t *rkbuf;
+
+ reply = rd_kafka_op_new(RD_KAFKA_OP_RECV_BUF);
+ reply->rko_rk = rk; /* Set rk since the rkbuf will not have a rkb
+ * to reach it. */
+ reply->rko_err = err;
+
+ /* Need a dummy rkbuf to pass state to the buf resp_cb */
+ rkbuf = rd_kafka_buf_new(0, 0);
+ rkbuf->rkbuf_cb = creq->creq_resp_cb;
+ rkbuf->rkbuf_opaque = creq->creq_reply_opaque;
+ reply->rko_u.xbuf.rkbuf = rkbuf;
+
+ rd_kafka_replyq_enq(&creq->creq_replyq, reply, 0);
+
+ rd_kafka_coord_req_destroy(rk, creq, rd_true /*done*/);
+}
+
+
+static void rd_kafka_coord_req_handle_FindCoordinator(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_coord_req_t *creq = opaque;
+ int16_t ErrorCode;
+ rd_kafkap_str_t Host;
+ int32_t NodeId, Port;
+ char errstr[256] = "";
+ int actions;
+ rd_kafka_broker_t *coord;
+ rd_kafka_metadata_broker_t mdb = RD_ZERO_INIT;
+
+ /* If creq has finished (possibly because of an earlier FindCoordinator
+ * response or a broker state broadcast we simply ignore the
+ * response. */
+ if (creq->creq_done)
+ err = RD_KAFKA_RESP_ERR__DESTROY;
+
+ if (err)
+ goto err;
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1) {
+ rd_kafkap_str_t ErrorMsg;
+ rd_kafka_buf_read_str(rkbuf, &ErrorMsg);
+ if (ErrorCode)
+ rd_snprintf(errstr, sizeof(errstr), "%.*s",
+ RD_KAFKAP_STR_PR(&ErrorMsg));
+ }
+
+ if ((err = ErrorCode))
+ goto err;
+
+ rd_kafka_buf_read_i32(rkbuf, &NodeId);
+ rd_kafka_buf_read_str(rkbuf, &Host);
+ rd_kafka_buf_read_i32(rkbuf, &Port);
+
+ mdb.id = NodeId;
+ RD_KAFKAP_STR_DUPA(&mdb.host, &Host);
+ mdb.port = Port;
+
+ /* Find, update or add broker */
+ rd_kafka_broker_update(rk, rkb->rkb_proto, &mdb, &coord);
+
+ if (!coord) {
+ err = RD_KAFKA_RESP_ERR__FAIL;
+ rd_snprintf(errstr, sizeof(errstr),
+ "Failed to add broker: "
+ "instance is probably terminating");
+ goto err;
+ }
+
+
+ rd_kafka_coord_cache_add(&rk->rk_coord_cache, creq->creq_coordtype,
+ creq->creq_coordkey, coord);
+ rd_kafka_broker_destroy(coord); /* refcnt from broker_update() */
+
+ rd_kafka_coord_req_fsm(rk, creq);
+
+ /* Drop refcount from req_fsm() */
+ rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/);
+
+ return;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+ actions = rd_kafka_err_action(
+ rkb, err, request,
+
+ RD_KAFKA_ERR_ACTION_SPECIAL, RD_KAFKA_RESP_ERR__DESTROY,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED,
+
+ RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR__TRANSPORT,
+
+ RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+
+ RD_KAFKA_ERR_ACTION_END);
+
+ if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) {
+ rd_kafka_coord_req_fail(rk, creq, err);
+ return;
+
+ } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+ rd_kafka_buf_retry(rkb, request);
+ return; /* Keep refcnt from req_fsm() and retry */
+ }
+
+ /* Rely on state broadcast to trigger retry */
+
+ /* Drop refcount from req_fsm() */
+ rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/);
+}
+
+
+
+/**
+ * @brief State machine for async coordinator requests.
+ *
+ * @remark May destroy the \p creq.
+ *
+ * @locality any
+ * @locks none
+ */
+static void rd_kafka_coord_req_fsm(rd_kafka_t *rk, rd_kafka_coord_req_t *creq) {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_resp_err_t err;
+
+ if (creq->creq_done)
+ /* crqeq has already performed its actions, this is a
+ * lingering reference, e.g., a late FindCoordinator response.
+ * Just ignore. */
+ return;
+
+ if (unlikely(rd_kafka_terminating(rk))) {
+ rd_kafka_coord_req_fail(rk, creq, RD_KAFKA_RESP_ERR__DESTROY);
+ return;
+ }
+
+ /* Do nothing if creq is delayed and the delay time hasn't expired yet.
+ * We will be called again by the timer once it expires.*/
+ if (rd_kafka_timer_next(&rk->rk_timers, &creq->creq_tmr, RD_DO_LOCK) >
+ 0)
+ return;
+
+ /* Check cache first */
+ rkb = rd_kafka_coord_cache_get(
+ &rk->rk_coord_cache, creq->creq_coordtype, creq->creq_coordkey);
+
+ if (rkb) {
+ if (rd_kafka_broker_is_up(rkb)) {
+ /* Cached coordinator is up, send request */
+ rd_kafka_replyq_t replyq;
+
+ /* Clear out previous coordinator we waited for. */
+ if (creq->creq_rkb) {
+ rd_kafka_broker_persistent_connection_del(
+ creq->creq_rkb,
+ &creq->creq_rkb->rkb_persistconn.coord);
+ rd_kafka_broker_destroy(creq->creq_rkb);
+ creq->creq_rkb = NULL;
+ }
+
+ rd_kafka_replyq_copy(&replyq, &creq->creq_replyq);
+ err = creq->creq_send_req_cb(rkb, creq->creq_rko,
+ replyq, creq->creq_resp_cb,
+ creq->creq_reply_opaque);
+
+ if (err) {
+ /* Permanent error, e.g., request not
+ * supported by broker. */
+ rd_kafka_replyq_destroy(&replyq);
+ rd_kafka_coord_req_fail(rk, creq, err);
+ } else {
+ rd_kafka_coord_req_destroy(rk, creq,
+ rd_true /*done*/);
+ }
+
+ } else if (creq->creq_rkb == rkb) {
+ /* No change in coordinator, but it is still not up.
+ * Query for coordinator if at least a second has
+ * passed since this coord_req was created or the
+ * last time we queried. */
+ if (rd_interval(&creq->creq_query_intvl,
+ 1000 * 1000 /* 1s */, 0) > 0) {
+ rd_rkb_dbg(rkb, BROKER, "COORD",
+ "Coordinator connection is "
+ "still down: "
+ "querying for new coordinator");
+ rd_kafka_broker_destroy(rkb);
+ goto query_coord;
+ }
+
+ } else {
+ /* No connection yet.
+ * Let broker thread know we need a connection.
+ * We'll be re-triggered on broker state broadcast. */
+
+ if (creq->creq_rkb) {
+ /* Clear previous */
+ rd_kafka_broker_persistent_connection_del(
+ creq->creq_rkb,
+ &creq->creq_rkb->rkb_persistconn.coord);
+ rd_kafka_broker_destroy(creq->creq_rkb);
+ }
+
+ rd_kafka_broker_keep(rkb);
+ creq->creq_rkb = rkb;
+ rd_kafka_broker_persistent_connection_add(
+ rkb, &rkb->rkb_persistconn.coord);
+ }
+
+ rd_kafka_broker_destroy(rkb);
+ return;
+
+ } else if (creq->creq_rkb) {
+ /* No coordinator information, clear out the previous
+ * coordinator we waited for. */
+ rd_kafka_broker_persistent_connection_del(
+ creq->creq_rkb, &creq->creq_rkb->rkb_persistconn.coord);
+ rd_kafka_broker_destroy(creq->creq_rkb);
+ creq->creq_rkb = NULL;
+ }
+
+query_coord:
+ /* Get any usable broker to look up the coordinator */
+ rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT, RD_DO_LOCK,
+ RD_KAFKA_FEATURE_BROKER_GROUP_COORD,
+ "broker to look up coordinator");
+
+ if (!rkb) {
+ /* No available brokers yet, we'll be re-triggered on
+ * broker state broadcast. */
+ return;
+ }
+
+
+ /* Send FindCoordinator request, the handler will continue
+ * the state machine. */
+ rd_kafka_coord_req_keep(creq);
+ err = rd_kafka_FindCoordinatorRequest(
+ rkb, creq->creq_coordtype, creq->creq_coordkey,
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_coord_req_handle_FindCoordinator, creq);
+
+ rd_kafka_broker_destroy(rkb);
+
+ if (err) {
+ rd_kafka_coord_req_fail(rk, creq, err);
+ /* from keep() above */
+ rd_kafka_coord_req_destroy(rk, creq, rd_false /*!done*/);
+ }
+}
+
+
+
+/**
+ * @brief Callback called from rdkafka main thread on each
+ * broker state change from or to UP.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_coord_req_t *creq, *tmp;
+
+ /* Run through all coord_req fsms */
+ TAILQ_FOREACH_SAFE(creq, &rk->rk_coord_reqs, creq_link, tmp) {
+ rd_kafka_coord_req_fsm(rk, creq);
+ }
+}
+
+
+
+/**
+ * @brief Instance is terminating: destroy all coord reqs
+ */
+void rd_kafka_coord_reqs_term(rd_kafka_t *rk) {
+ rd_kafka_coord_req_t *creq;
+
+ while ((creq = TAILQ_FIRST(&rk->rk_coord_reqs)))
+ rd_kafka_coord_req_fail(rk, creq, RD_KAFKA_RESP_ERR__DESTROY);
+}
+
+
+/**
+ * @brief Initialize coord reqs list.
+ */
+void rd_kafka_coord_reqs_init(rd_kafka_t *rk) {
+ TAILQ_INIT(&rk->rk_coord_reqs);
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h
new file mode 100644
index 000000000..4e00a552b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_coord.h
@@ -0,0 +1,132 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_COORD_H_
+#define _RDKAFKA_COORD_H_
+
+
+typedef TAILQ_HEAD(rd_kafka_coord_cache_head_s,
+ rd_kafka_coord_cache_entry_s) rd_kafka_coord_cache_head_t;
+
+/**
+ * @brief Coordinator cache entry
+ */
+typedef struct rd_kafka_coord_cache_entry_s {
+ TAILQ_ENTRY(rd_kafka_coord_cache_entry_s) cce_link;
+ rd_kafka_coordtype_t cce_coordtype; /**< Coordinator type */
+ char *cce_coordkey; /**< Coordinator type key,
+ * e.g the group id */
+ rd_ts_t cce_ts_used; /**< Last used timestamp */
+ rd_kafka_broker_t *cce_rkb; /**< The cached coordinator */
+
+} rd_kafka_coord_cache_entry_t;
+
+/**
+ * @brief Coordinator cache
+ */
+typedef struct rd_kafka_coord_cache_s {
+ rd_kafka_coord_cache_head_t cc_entries; /**< Cache entries */
+ int cc_cnt; /**< Number of entries */
+ rd_ts_t cc_expire_thres; /**< Entries not used in
+ * this long will be
+ * expired */
+} rd_kafka_coord_cache_t;
+
+
+void rd_kafka_coord_cache_expire(rd_kafka_coord_cache_t *cc);
+void rd_kafka_coord_cache_evict(rd_kafka_coord_cache_t *cc,
+ rd_kafka_broker_t *rkb);
+void rd_kafka_coord_cache_destroy(rd_kafka_coord_cache_t *cc);
+void rd_kafka_coord_cache_init(rd_kafka_coord_cache_t *cc, int expire_thres_ms);
+
+
+
+/**
+ * @name Coordinator requests
+ */
+
+/**
+ * @brief Request to be sent to coordinator.
+ * Includes looking up, caching, and connecting to, the coordinator.
+ */
+typedef struct rd_kafka_coord_req_s {
+ TAILQ_ENTRY(rd_kafka_coord_req_s) creq_link; /**< rk_coord_reqs */
+ rd_kafka_coordtype_t creq_coordtype; /**< Coordinator type */
+ char *creq_coordkey; /**< Coordinator key */
+
+ rd_kafka_op_t *creq_rko; /**< Requester's rko that is
+ * provided to creq_send_req_cb
+ * (optional). */
+ rd_kafka_timer_t creq_tmr; /**< Delay timer. */
+ rd_ts_t creq_ts_timeout; /**< Absolute timeout.
+ * Will fail with an error
+ * code pertaining to the
+ * current state */
+ rd_interval_t creq_query_intvl; /**< Coord query interval (1s) */
+
+ rd_kafka_send_req_cb_t *creq_send_req_cb; /**< Sender callback */
+
+ rd_kafka_replyq_t creq_replyq; /**< Reply queue */
+ rd_kafka_resp_cb_t *creq_resp_cb; /**< Reply queue response
+ * parsing callback for the
+ * request sent by
+ * send_req_cb */
+ void *creq_reply_opaque; /**< Opaque passed to
+ * creq_send_req_cb and
+ * creq_resp_cb. */
+
+ int creq_refcnt; /**< Internal reply queue for
+ * FindCoordinator requests
+ * which is forwarded to the
+ * rk_ops queue, but allows
+ * destroying the creq even
+ * with outstanding
+ * FindCoordinator requests. */
+ rd_bool_t creq_done; /**< True if request was sent */
+
+ rd_kafka_broker_t *creq_rkb; /**< creq is waiting for this broker to
+ * come up. */
+} rd_kafka_coord_req_t;
+
+
+void rd_kafka_coord_req(rd_kafka_t *rk,
+ rd_kafka_coordtype_t coordtype,
+ const char *coordkey,
+ rd_kafka_send_req_cb_t *send_req_cb,
+ rd_kafka_op_t *rko,
+ int delay_ms,
+ int timeout_ms,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *reply_opaque);
+
+void rd_kafka_coord_rkb_monitor_cb(rd_kafka_broker_t *rkb);
+
+void rd_kafka_coord_reqs_term(rd_kafka_t *rk);
+void rd_kafka_coord_reqs_init(rd_kafka_t *rk);
+#endif /* _RDKAFKA_COORD_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c
new file mode 100644
index 000000000..4a218daff
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.c
@@ -0,0 +1,228 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name Public API complex error type implementation.
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_error.h"
+
+#include <stdarg.h>
+
+
+void rd_kafka_error_destroy(rd_kafka_error_t *error) {
+ if (error)
+ rd_free(error);
+}
+
+
+/**
+ * @brief Creates a new error object using the optional va-args format list.
+ */
+rd_kafka_error_t *
+rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap) {
+ rd_kafka_error_t *error;
+ ssize_t strsz = 0;
+
+ if (fmt && *fmt) {
+ va_list ap2;
+ va_copy(ap2, ap);
+ strsz = rd_vsnprintf(NULL, 0, fmt, ap2) + 1;
+ va_end(ap2);
+ }
+
+ error = rd_malloc(sizeof(*error) + strsz);
+ error->code = code;
+ error->fatal = rd_false;
+ error->retriable = rd_false;
+ error->txn_requires_abort = rd_false;
+
+ if (strsz > 0) {
+ error->errstr = (char *)(error + 1);
+ rd_vsnprintf(error->errstr, strsz, fmt, ap);
+ } else {
+ error->errstr = NULL;
+ }
+
+ return error;
+}
+
+rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src) {
+ rd_kafka_error_t *error;
+ ssize_t strsz = 0;
+
+ if (src->errstr) {
+ strsz = strlen(src->errstr) + 1;
+ }
+
+ error = rd_malloc(sizeof(*error) + strsz);
+ error->code = src->code;
+ error->fatal = src->fatal;
+ error->retriable = src->retriable;
+ error->txn_requires_abort = src->txn_requires_abort;
+
+ if (strsz > 0) {
+ error->errstr = (char *)(error + 1);
+ rd_strlcpy(error->errstr, src->errstr, strsz);
+ } else {
+ error->errstr = NULL;
+ }
+
+ return error;
+}
+
+/**
+ * @brief Same as rd_kafka_error_copy() but suitable for
+ * rd_list_copy(). The \p opaque is ignored.
+ */
+void *rd_kafka_error_copy_opaque(const void *error, void *opaque) {
+ return rd_kafka_error_copy(error);
+}
+
+
+rd_kafka_error_t *
+rd_kafka_error_new(rd_kafka_resp_err_t code, const char *fmt, ...) {
+ rd_kafka_error_t *error;
+ va_list ap;
+
+ va_start(ap, fmt);
+ error = rd_kafka_error_new_v(code, fmt, ap);
+ va_end(ap);
+
+ return error;
+}
+
+rd_kafka_error_t *
+rd_kafka_error_new_fatal(rd_kafka_resp_err_t code, const char *fmt, ...) {
+ rd_kafka_error_t *error;
+ va_list ap;
+
+ va_start(ap, fmt);
+ error = rd_kafka_error_new_v(code, fmt, ap);
+ va_end(ap);
+
+ rd_kafka_error_set_fatal(error);
+
+ return error;
+}
+
+rd_kafka_error_t *
+rd_kafka_error_new_retriable(rd_kafka_resp_err_t code, const char *fmt, ...) {
+ rd_kafka_error_t *error;
+ va_list ap;
+
+ va_start(ap, fmt);
+ error = rd_kafka_error_new_v(code, fmt, ap);
+ va_end(ap);
+
+ rd_kafka_error_set_retriable(error);
+
+ return error;
+}
+
+rd_kafka_error_t *
+rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code,
+ const char *fmt,
+ ...) {
+ rd_kafka_error_t *error;
+ va_list ap;
+
+ va_start(ap, fmt);
+ error = rd_kafka_error_new_v(code, fmt, ap);
+ va_end(ap);
+
+ rd_kafka_error_set_txn_requires_abort(error);
+
+ return error;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_error_code(const rd_kafka_error_t *error) {
+ return error ? error->code : RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+const char *rd_kafka_error_name(const rd_kafka_error_t *error) {
+ return error ? rd_kafka_err2name(error->code) : "";
+}
+
+const char *rd_kafka_error_string(const rd_kafka_error_t *error) {
+ if (!error)
+ return "";
+ return error->errstr ? error->errstr : rd_kafka_err2str(error->code);
+}
+
+int rd_kafka_error_is_fatal(const rd_kafka_error_t *error) {
+ return error && error->fatal ? 1 : 0;
+}
+
+int rd_kafka_error_is_retriable(const rd_kafka_error_t *error) {
+ return error && error->retriable ? 1 : 0;
+}
+
+int rd_kafka_error_txn_requires_abort(const rd_kafka_error_t *error) {
+ return error && error->txn_requires_abort ? 1 : 0;
+}
+
+
+
+void rd_kafka_error_set_fatal(rd_kafka_error_t *error) {
+ error->fatal = rd_true;
+}
+
+void rd_kafka_error_set_retriable(rd_kafka_error_t *error) {
+ error->retriable = rd_true;
+}
+
+void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error) {
+ error->txn_requires_abort = rd_true;
+}
+
+
+/**
+ * @brief Converts a new style error_t error to the legacy style
+ * resp_err_t code and separate error string, then
+ * destroys the the error object.
+ *
+ * @remark The \p error object is destroyed.
+ */
+rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_resp_err_t err = error->code;
+
+ rd_snprintf(errstr, errstr_size, "%s", rd_kafka_error_string(error));
+
+ rd_kafka_error_destroy(error);
+
+ return err;
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h
new file mode 100644
index 000000000..79984f5ef
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_error.h
@@ -0,0 +1,80 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDKAFKA_ERROR_H_
+#define _RDKAFKA_ERROR_H_
+
+#include <stdarg.h>
+
+/**
+ * @name Public API complex error type implementation.
+ *
+ */
+
+struct rd_kafka_error_s {
+ rd_kafka_resp_err_t code; /**< Error code. */
+ char *errstr; /**< Human readable error string, allocated
+ * with the rd_kafka_error_s struct
+ * after the struct.
+ * Possibly NULL. */
+ rd_bool_t fatal; /**< This error is a fatal error. */
+ rd_bool_t retriable; /**< Operation is retriable. */
+ rd_bool_t
+ txn_requires_abort; /**< This is an abortable transaction error.*/
+};
+
+
+rd_kafka_error_t *
+rd_kafka_error_new_v(rd_kafka_resp_err_t code, const char *fmt, va_list ap);
+
+rd_kafka_error_t *rd_kafka_error_copy(const rd_kafka_error_t *src);
+
+void *rd_kafka_error_copy_opaque(const void *error, void *opaque);
+
+void rd_kafka_error_set_fatal(rd_kafka_error_t *error);
+void rd_kafka_error_set_retriable(rd_kafka_error_t *error);
+void rd_kafka_error_set_txn_requires_abort(rd_kafka_error_t *error);
+
+
+rd_kafka_error_t *rd_kafka_error_new_fatal(rd_kafka_resp_err_t code,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 2, 3);
+rd_kafka_error_t *rd_kafka_error_new_retriable(rd_kafka_resp_err_t code,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 2, 3);
+rd_kafka_error_t *
+rd_kafka_error_new_txn_requires_abort(rd_kafka_resp_err_t code,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 2, 3);
+
+
+rd_kafka_resp_err_t rd_kafka_error_to_legacy(rd_kafka_error_t *error,
+ char *errstr,
+ size_t errstr_size);
+#endif /* _RDKAFKA_ERROR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c
new file mode 100644
index 000000000..ffd1a1780
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.c
@@ -0,0 +1,426 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_event.h"
+#include "rd.h"
+
+rd_kafka_event_type_t rd_kafka_event_type(const rd_kafka_event_t *rkev) {
+ return rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE;
+}
+
+const char *rd_kafka_event_name(const rd_kafka_event_t *rkev) {
+ switch (rkev ? rkev->rko_evtype : RD_KAFKA_EVENT_NONE) {
+ case RD_KAFKA_EVENT_NONE:
+ return "(NONE)";
+ case RD_KAFKA_EVENT_DR:
+ return "DeliveryReport";
+ case RD_KAFKA_EVENT_FETCH:
+ return "Fetch";
+ case RD_KAFKA_EVENT_LOG:
+ return "Log";
+ case RD_KAFKA_EVENT_ERROR:
+ return "Error";
+ case RD_KAFKA_EVENT_REBALANCE:
+ return "Rebalance";
+ case RD_KAFKA_EVENT_OFFSET_COMMIT:
+ return "OffsetCommit";
+ case RD_KAFKA_EVENT_STATS:
+ return "Stats";
+ case RD_KAFKA_EVENT_CREATETOPICS_RESULT:
+ return "CreateTopicsResult";
+ case RD_KAFKA_EVENT_DELETETOPICS_RESULT:
+ return "DeleteTopicsResult";
+ case RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT:
+ return "CreatePartitionsResult";
+ case RD_KAFKA_EVENT_ALTERCONFIGS_RESULT:
+ return "AlterConfigsResult";
+ case RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT:
+ return "DescribeConfigsResult";
+ case RD_KAFKA_EVENT_DELETERECORDS_RESULT:
+ return "DeleteRecordsResult";
+ case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT:
+ return "ListConsumerGroupsResult";
+ case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT:
+ return "DescribeConsumerGroupsResult";
+ case RD_KAFKA_EVENT_DELETEGROUPS_RESULT:
+ return "DeleteGroupsResult";
+ case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT:
+ return "DeleteConsumerGroupOffsetsResult";
+ case RD_KAFKA_EVENT_CREATEACLS_RESULT:
+ return "CreateAclsResult";
+ case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
+ return "DescribeAclsResult";
+ case RD_KAFKA_EVENT_DELETEACLS_RESULT:
+ return "DeleteAclsResult";
+ case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT:
+ return "AlterConsumerGroupOffsetsResult";
+ case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT:
+ return "ListConsumerGroupOffsetsResult";
+ case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
+ return "SaslOAuthBearerTokenRefresh";
+ default:
+ return "?unknown?";
+ }
+}
+
+
+
+void rd_kafka_event_destroy(rd_kafka_event_t *rkev) {
+ if (unlikely(!rkev))
+ return;
+ rd_kafka_op_destroy(rkev);
+}
+
+
+/**
+ * @returns the next message from the event's message queue.
+ * @remark messages will be freed automatically when event is destroyed,
+ * application MUST NOT call rd_kafka_message_destroy()
+ */
+const rd_kafka_message_t *rd_kafka_event_message_next(rd_kafka_event_t *rkev) {
+ rd_kafka_op_t *rko = rkev;
+ rd_kafka_msg_t *rkm;
+ rd_kafka_msgq_t *rkmq, *rkmq2;
+ rd_kafka_message_t *rkmessage;
+
+ switch (rkev->rko_type) {
+ case RD_KAFKA_OP_DR:
+ rkmq = &rko->rko_u.dr.msgq;
+ rkmq2 = &rko->rko_u.dr.msgq2;
+ break;
+
+ case RD_KAFKA_OP_FETCH:
+ /* Just one message */
+ if (rko->rko_u.fetch.evidx++ > 0)
+ return NULL;
+
+ rkmessage = rd_kafka_message_get(rko);
+ if (unlikely(!rkmessage))
+ return NULL;
+
+ /* Store offset, etc. */
+ rd_kafka_fetch_op_app_prepare(NULL, rko);
+
+ return rkmessage;
+
+
+ default:
+ return NULL;
+ }
+
+ if (unlikely(!(rkm = TAILQ_FIRST(&rkmq->rkmq_msgs))))
+ return NULL;
+
+ rd_kafka_msgq_deq(rkmq, rkm, 1);
+
+ /* Put rkm on secondary message queue which will be purged later. */
+ rd_kafka_msgq_enq(rkmq2, rkm);
+
+ return rd_kafka_message_get_from_rkm(rko, rkm);
+}
+
+
+size_t rd_kafka_event_message_array(rd_kafka_event_t *rkev,
+ const rd_kafka_message_t **rkmessages,
+ size_t size) {
+ size_t cnt = 0;
+ const rd_kafka_message_t *rkmessage;
+
+ while (cnt < size && (rkmessage = rd_kafka_event_message_next(rkev)))
+ rkmessages[cnt++] = rkmessage;
+
+ return cnt;
+}
+
+
+size_t rd_kafka_event_message_count(rd_kafka_event_t *rkev) {
+ switch (rkev->rko_evtype) {
+ case RD_KAFKA_EVENT_DR:
+ return (size_t)rkev->rko_u.dr.msgq.rkmq_msg_cnt;
+ case RD_KAFKA_EVENT_FETCH:
+ return 1;
+ default:
+ return 0;
+ }
+}
+
+
+const char *rd_kafka_event_config_string(rd_kafka_event_t *rkev) {
+ switch (rkev->rko_evtype) {
+#if WITH_SASL_OAUTHBEARER
+ case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
+ return rkev->rko_rk->rk_conf.sasl.oauthbearer_config;
+#endif
+ default:
+ return NULL;
+ }
+}
+
+rd_kafka_resp_err_t rd_kafka_event_error(rd_kafka_event_t *rkev) {
+ return rkev->rko_err;
+}
+
+const char *rd_kafka_event_error_string(rd_kafka_event_t *rkev) {
+ switch (rkev->rko_type) {
+ case RD_KAFKA_OP_ERR:
+ case RD_KAFKA_OP_CONSUMER_ERR:
+ if (rkev->rko_u.err.errstr)
+ return rkev->rko_u.err.errstr;
+ break;
+ case RD_KAFKA_OP_ADMIN_RESULT:
+ if (rkev->rko_u.admin_result.errstr)
+ return rkev->rko_u.admin_result.errstr;
+ break;
+ default:
+ break;
+ }
+
+ return rd_kafka_err2str(rkev->rko_err);
+}
+
+int rd_kafka_event_error_is_fatal(rd_kafka_event_t *rkev) {
+ return rkev->rko_u.err.fatal;
+}
+
+
+void *rd_kafka_event_opaque(rd_kafka_event_t *rkev) {
+ switch (rkev->rko_type & ~RD_KAFKA_OP_FLAGMASK) {
+ case RD_KAFKA_OP_OFFSET_COMMIT:
+ return rkev->rko_u.offset_commit.opaque;
+ case RD_KAFKA_OP_ADMIN_RESULT:
+ return rkev->rko_u.admin_result.opaque;
+ default:
+ return NULL;
+ }
+}
+
+
+int rd_kafka_event_log(rd_kafka_event_t *rkev,
+ const char **fac,
+ const char **str,
+ int *level) {
+ if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG))
+ return -1;
+
+ if (likely(fac != NULL))
+ *fac = rkev->rko_u.log.fac;
+ if (likely(str != NULL))
+ *str = rkev->rko_u.log.str;
+ if (likely(level != NULL))
+ *level = rkev->rko_u.log.level;
+
+ return 0;
+}
+
+int rd_kafka_event_debug_contexts(rd_kafka_event_t *rkev,
+ char *dst,
+ size_t dstsize) {
+ static const char *names[] = {
+ "generic", "broker", "topic", "metadata", "feature",
+ "queue", "msg", "protocol", "cgrp", "security",
+ "fetch", "interceptor", "plugin", "consumer", "admin",
+ "eos", "mock", NULL};
+ if (unlikely(rkev->rko_evtype != RD_KAFKA_EVENT_LOG))
+ return -1;
+ rd_flags2str(dst, dstsize, names, rkev->rko_u.log.ctx);
+ return 0;
+}
+
+const char *rd_kafka_event_stats(rd_kafka_event_t *rkev) {
+ return rkev->rko_u.stats.json;
+}
+
+rd_kafka_topic_partition_list_t *
+rd_kafka_event_topic_partition_list(rd_kafka_event_t *rkev) {
+ switch (rkev->rko_evtype) {
+ case RD_KAFKA_EVENT_REBALANCE:
+ return rkev->rko_u.rebalance.partitions;
+ case RD_KAFKA_EVENT_OFFSET_COMMIT:
+ return rkev->rko_u.offset_commit.partitions;
+ default:
+ return NULL;
+ }
+}
+
+
+rd_kafka_topic_partition_t *
+rd_kafka_event_topic_partition(rd_kafka_event_t *rkev) {
+ rd_kafka_topic_partition_t *rktpar;
+
+ if (unlikely(!rkev->rko_rktp))
+ return NULL;
+
+ rktpar = rd_kafka_topic_partition_new_from_rktp(rkev->rko_rktp);
+
+ switch (rkev->rko_type) {
+ case RD_KAFKA_OP_ERR:
+ case RD_KAFKA_OP_CONSUMER_ERR:
+ rktpar->offset = rkev->rko_u.err.offset;
+ break;
+ default:
+ break;
+ }
+
+ rktpar->err = rkev->rko_err;
+
+ return rktpar;
+}
+
+
+
+const rd_kafka_CreateTopics_result_t *
+rd_kafka_event_CreateTopics_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATETOPICS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_CreateTopics_result_t *)rkev;
+}
+
+
+const rd_kafka_DeleteTopics_result_t *
+rd_kafka_event_DeleteTopics_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETETOPICS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DeleteTopics_result_t *)rkev;
+}
+
+
+const rd_kafka_CreatePartitions_result_t *
+rd_kafka_event_CreatePartitions_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_CreatePartitions_result_t *)rkev;
+}
+
+
+const rd_kafka_AlterConfigs_result_t *
+rd_kafka_event_AlterConfigs_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONFIGS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_AlterConfigs_result_t *)rkev;
+}
+
+
+const rd_kafka_DescribeConfigs_result_t *
+rd_kafka_event_DescribeConfigs_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DescribeConfigs_result_t *)rkev;
+}
+
+const rd_kafka_DeleteRecords_result_t *
+rd_kafka_event_DeleteRecords_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETERECORDS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DeleteRecords_result_t *)rkev;
+}
+
+const rd_kafka_ListConsumerGroups_result_t *
+rd_kafka_event_ListConsumerGroups_result(rd_kafka_event_t *rkev) {
+ if (!rkev ||
+ rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_ListConsumerGroups_result_t *)rkev;
+}
+
+const rd_kafka_DescribeConsumerGroups_result_t *
+rd_kafka_event_DescribeConsumerGroups_result(rd_kafka_event_t *rkev) {
+ if (!rkev ||
+ rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DescribeConsumerGroups_result_t *)rkev;
+}
+
+const rd_kafka_DeleteGroups_result_t *
+rd_kafka_event_DeleteGroups_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEGROUPS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DeleteGroups_result_t *)rkev;
+}
+
+const rd_kafka_DeleteConsumerGroupOffsets_result_t *
+rd_kafka_event_DeleteConsumerGroupOffsets_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype !=
+ RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT)
+ return NULL;
+ else
+ return (
+ const rd_kafka_DeleteConsumerGroupOffsets_result_t *)rkev;
+}
+
+const rd_kafka_CreateAcls_result_t *
+rd_kafka_event_CreateAcls_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_CREATEACLS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_CreateAcls_result_t *)rkev;
+}
+
+const rd_kafka_DescribeAcls_result_t *
+rd_kafka_event_DescribeAcls_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DESCRIBEACLS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DescribeAcls_result_t *)rkev;
+}
+
+const rd_kafka_DeleteAcls_result_t *
+rd_kafka_event_DeleteAcls_result(rd_kafka_event_t *rkev) {
+ if (!rkev || rkev->rko_evtype != RD_KAFKA_EVENT_DELETEACLS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_DeleteAcls_result_t *)rkev;
+}
+
+const rd_kafka_AlterConsumerGroupOffsets_result_t *
+rd_kafka_event_AlterConsumerGroupOffsets_result(rd_kafka_event_t *rkev) {
+ if (!rkev ||
+ rkev->rko_evtype != RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
+ return NULL;
+ else
+ return (
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *)rkev;
+}
+
+const rd_kafka_ListConsumerGroupOffsets_result_t *
+rd_kafka_event_ListConsumerGroupOffsets_result(rd_kafka_event_t *rkev) {
+ if (!rkev ||
+ rkev->rko_evtype != RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT)
+ return NULL;
+ else
+ return (const rd_kafka_ListConsumerGroupOffsets_result_t *)rkev;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h
new file mode 100644
index 000000000..3f9c22e34
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_event.h
@@ -0,0 +1,118 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @brief Converts op type to event type.
+ * @returns the event type, or 0 if the op cannot be mapped to an event.
+ */
+static RD_UNUSED RD_INLINE rd_kafka_event_type_t
+rd_kafka_op2event(rd_kafka_op_type_t optype) {
+ static const rd_kafka_event_type_t map[RD_KAFKA_OP__END] = {
+ [RD_KAFKA_OP_DR] = RD_KAFKA_EVENT_DR,
+ [RD_KAFKA_OP_FETCH] = RD_KAFKA_EVENT_FETCH,
+ [RD_KAFKA_OP_ERR] = RD_KAFKA_EVENT_ERROR,
+ [RD_KAFKA_OP_CONSUMER_ERR] = RD_KAFKA_EVENT_ERROR,
+ [RD_KAFKA_OP_REBALANCE] = RD_KAFKA_EVENT_REBALANCE,
+ [RD_KAFKA_OP_OFFSET_COMMIT] = RD_KAFKA_EVENT_OFFSET_COMMIT,
+ [RD_KAFKA_OP_LOG] = RD_KAFKA_EVENT_LOG,
+ [RD_KAFKA_OP_STATS] = RD_KAFKA_EVENT_STATS,
+ [RD_KAFKA_OP_OAUTHBEARER_REFRESH] =
+ RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH};
+
+ return map[(int)optype & ~RD_KAFKA_OP_FLAGMASK];
+}
+
+
+/**
+ * @brief Attempt to set up an event based on rko.
+ * @returns 1 if op is event:able and set up, else 0.
+ */
+static RD_UNUSED RD_INLINE int rd_kafka_event_setup(rd_kafka_t *rk,
+ rd_kafka_op_t *rko) {
+
+ if (unlikely(rko->rko_flags & RD_KAFKA_OP_F_FORCE_CB))
+ return 0;
+
+ if (!rko->rko_evtype)
+ rko->rko_evtype = rd_kafka_op2event(rko->rko_type);
+
+ switch (rko->rko_evtype) {
+ case RD_KAFKA_EVENT_NONE:
+ return 0;
+
+ case RD_KAFKA_EVENT_DR:
+ rko->rko_rk = rk;
+ rd_dassert(!rko->rko_u.dr.do_purge2);
+ rd_kafka_msgq_init(&rko->rko_u.dr.msgq2);
+ rko->rko_u.dr.do_purge2 = 1;
+ return 1;
+
+ case RD_KAFKA_EVENT_ERROR:
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__FATAL) {
+ /* Translate ERR__FATAL to the underlying fatal error
+ * code and string */
+ rd_kafka_resp_err_t ferr;
+ char errstr[512];
+ ferr = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ if (likely(ferr)) {
+ rko->rko_err = ferr;
+ if (rko->rko_u.err.errstr)
+ rd_free(rko->rko_u.err.errstr);
+ rko->rko_u.err.errstr = rd_strdup(errstr);
+ rko->rko_u.err.fatal = 1;
+ }
+ }
+ return 1;
+
+ case RD_KAFKA_EVENT_REBALANCE:
+ case RD_KAFKA_EVENT_LOG:
+ case RD_KAFKA_EVENT_OFFSET_COMMIT:
+ case RD_KAFKA_EVENT_STATS:
+ case RD_KAFKA_EVENT_CREATETOPICS_RESULT:
+ case RD_KAFKA_EVENT_DELETETOPICS_RESULT:
+ case RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT:
+ case RD_KAFKA_EVENT_ALTERCONFIGS_RESULT:
+ case RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT:
+ case RD_KAFKA_EVENT_DELETERECORDS_RESULT:
+ case RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT:
+ case RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT:
+ case RD_KAFKA_EVENT_DELETEGROUPS_RESULT:
+ case RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT:
+ case RD_KAFKA_EVENT_CREATEACLS_RESULT:
+ case RD_KAFKA_EVENT_DESCRIBEACLS_RESULT:
+ case RD_KAFKA_EVENT_DELETEACLS_RESULT:
+ case RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT:
+ case RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT:
+ case RD_KAFKA_EVENT_OAUTHBEARER_TOKEN_REFRESH:
+ return 1;
+
+ default:
+ return 0;
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c
new file mode 100644
index 000000000..a2fc085c5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.c
@@ -0,0 +1,460 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rdkafka_int.h"
+#include "rdkafka_feature.h"
+
+#include <stdlib.h>
+
+static const char *rd_kafka_feature_names[] = {"MsgVer1",
+ "ApiVersion",
+ "BrokerBalancedConsumer",
+ "ThrottleTime",
+ "Sasl",
+ "SaslHandshake",
+ "BrokerGroupCoordinator",
+ "LZ4",
+ "OffsetTime",
+ "MsgVer2",
+ "IdempotentProducer",
+ "ZSTD",
+ "SaslAuthReq",
+ "UnitTest",
+ NULL};
+
+
+static const struct rd_kafka_feature_map {
+ /* RD_KAFKA_FEATURE_... */
+ int feature;
+
+ /* Depends on the following ApiVersions overlapping with
+ * what the broker supports: */
+ struct rd_kafka_ApiVersion depends[RD_KAFKAP__NUM];
+
+} rd_kafka_feature_map[] = {
+ /**
+ * @brief List of features and the ApiVersions they depend on.
+ *
+ * The dependency list consists of the ApiKey followed by this
+ * client's supported minimum and maximum API versions.
+ * As long as this list and its versions overlaps with the
+ * broker supported API versions the feature will be enabled.
+ */
+ {
+
+ /* @brief >=0.10.0: Message.MagicByte version 1:
+ * Relative offsets (KIP-31) and message timestamps (KIP-32). */
+ .feature = RD_KAFKA_FEATURE_MSGVER1,
+ .depends =
+ {
+ {RD_KAFKAP_Produce, 2, 2},
+ {RD_KAFKAP_Fetch, 2, 2},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=0.11.0: Message.MagicByte version 2 */
+ .feature = RD_KAFKA_FEATURE_MSGVER2,
+ .depends =
+ {
+ {RD_KAFKAP_Produce, 3, 3},
+ {RD_KAFKAP_Fetch, 4, 4},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=0.10.0: ApiVersionQuery support.
+ * @remark This is a bit of chicken-and-egg problem but needs to be
+ * set by feature_check() to avoid the feature being cleared
+ * even when broker supports it. */
+ .feature = RD_KAFKA_FEATURE_APIVERSION,
+ .depends =
+ {
+ {RD_KAFKAP_ApiVersion, 0, 0},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=0.8.2.0: Broker-based Group coordinator */
+ .feature = RD_KAFKA_FEATURE_BROKER_GROUP_COORD,
+ .depends =
+ {
+ {RD_KAFKAP_FindCoordinator, 0, 0},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=0.9.0: Broker-based balanced consumer groups. */
+ .feature = RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER,
+ .depends =
+ {
+ {RD_KAFKAP_FindCoordinator, 0, 0},
+ {RD_KAFKAP_OffsetCommit, 1, 2},
+ {RD_KAFKAP_OffsetFetch, 1, 1},
+ {RD_KAFKAP_JoinGroup, 0, 0},
+ {RD_KAFKAP_SyncGroup, 0, 0},
+ {RD_KAFKAP_Heartbeat, 0, 0},
+ {RD_KAFKAP_LeaveGroup, 0, 0},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=0.9.0: ThrottleTime */
+ .feature = RD_KAFKA_FEATURE_THROTTLETIME,
+ .depends =
+ {
+ {RD_KAFKAP_Produce, 1, 2},
+ {RD_KAFKAP_Fetch, 1, 2},
+ {-1},
+ },
+
+ },
+ {
+ /* @brief >=0.9.0: SASL (GSSAPI) authentication.
+ * Since SASL is not using the Kafka protocol
+ * we must use something else to map us to the
+ * proper broker version support:
+ * JoinGroup was released along with SASL in 0.9.0. */
+ .feature = RD_KAFKA_FEATURE_SASL_GSSAPI,
+ .depends =
+ {
+ {RD_KAFKAP_JoinGroup, 0, 0},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=0.10.0: SASL mechanism handshake (KIP-43)
+ * to automatically support other mechanisms
+ * than GSSAPI, such as PLAIN. */
+ .feature = RD_KAFKA_FEATURE_SASL_HANDSHAKE,
+ .depends =
+ {
+ {RD_KAFKAP_SaslHandshake, 0, 0},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=0.8.2: LZ4 compression.
+ * Since LZ4 initially did not rely on a specific API
+ * type or version (it does in >=0.10.0)
+ * we must use something else to map us to the
+ * proper broker version support:
+ * GrooupCoordinator was released in 0.8.2 */
+ .feature = RD_KAFKA_FEATURE_LZ4,
+ .depends =
+ {
+ {RD_KAFKAP_FindCoordinator, 0, 0},
+ {-1},
+ },
+ },
+ {/* @brief >=0.10.1.0: Offset v1 (KIP-79)
+ * Time-based offset requests */
+ .feature = RD_KAFKA_FEATURE_OFFSET_TIME,
+ .depends =
+ {
+ {RD_KAFKAP_ListOffsets, 1, 1},
+ {-1},
+ }},
+ {/* @brief >=0.11.0.0: Idempotent Producer*/
+ .feature = RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER,
+ .depends =
+ {
+ {RD_KAFKAP_InitProducerId, 0, 0},
+ {-1},
+ }},
+ {
+ /* @brief >=2.1.0-IV2: Support ZStandard Compression Codec (KIP-110) */
+ .feature = RD_KAFKA_FEATURE_ZSTD,
+ .depends =
+ {
+ {RD_KAFKAP_Produce, 7, 7},
+ {RD_KAFKAP_Fetch, 10, 10},
+ {-1},
+ },
+ },
+ {
+ /* @brief >=1.0.0: SaslAuthenticateRequest */
+ .feature = RD_KAFKA_FEATURE_SASL_AUTH_REQ,
+ .depends =
+ {
+ {RD_KAFKAP_SaslHandshake, 1, 1},
+ {RD_KAFKAP_SaslAuthenticate, 0, 0},
+ {-1},
+ },
+ },
+ {.feature = 0}, /* sentinel */
+};
+
+
+
+/**
+ * @brief In absence of KIP-35 support in earlier broker versions we provide
+ * hardcoded lists that corresponds to older broker versions.
+ */
+
+/* >= 0.10.0.0: dummy for all future versions that support ApiVersionRequest */
+static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_Queryable[] = {
+ {RD_KAFKAP_ApiVersion, 0, 0}};
+
+
+/* =~ 0.9.0 */
+static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_9_0[] = {
+ {RD_KAFKAP_Produce, 0, 1}, {RD_KAFKAP_Fetch, 0, 1},
+ {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0},
+ {RD_KAFKAP_OffsetCommit, 0, 2}, {RD_KAFKAP_OffsetFetch, 0, 1},
+ {RD_KAFKAP_FindCoordinator, 0, 0}, {RD_KAFKAP_JoinGroup, 0, 0},
+ {RD_KAFKAP_Heartbeat, 0, 0}, {RD_KAFKAP_LeaveGroup, 0, 0},
+ {RD_KAFKAP_SyncGroup, 0, 0}, {RD_KAFKAP_DescribeGroups, 0, 0},
+ {RD_KAFKAP_ListGroups, 0, 0}};
+
+/* =~ 0.8.2 */
+static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_2[] = {
+ {RD_KAFKAP_Produce, 0, 0}, {RD_KAFKAP_Fetch, 0, 0},
+ {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0},
+ {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 1},
+ {RD_KAFKAP_FindCoordinator, 0, 0}};
+
+/* =~ 0.8.1 */
+static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_1[] = {
+ {RD_KAFKAP_Produce, 0, 0}, {RD_KAFKAP_Fetch, 0, 0},
+ {RD_KAFKAP_ListOffsets, 0, 0}, {RD_KAFKAP_Metadata, 0, 0},
+ {RD_KAFKAP_OffsetCommit, 0, 1}, {RD_KAFKAP_OffsetFetch, 0, 0}};
+
+/* =~ 0.8.0 */
+static struct rd_kafka_ApiVersion rd_kafka_ApiVersion_0_8_0[] = {
+ {RD_KAFKAP_Produce, 0, 0},
+ {RD_KAFKAP_Fetch, 0, 0},
+ {RD_KAFKAP_ListOffsets, 0, 0},
+ {RD_KAFKAP_Metadata, 0, 0}};
+
+
+/**
+ * @brief Returns the ApiVersion list for legacy broker versions that do not
+ * support the ApiVersionQuery request. E.g., brokers <0.10.0.
+ *
+ * @param broker_version Broker version to match (longest prefix matching).
+ * @param use_default If no match is found return the default APIs (but return
+ * 0).
+ *
+ * @returns 1 if \p broker_version was recognized: \p *apisp will point to
+ * the ApiVersion list and *api_cntp will be set to its element count.
+ * 0 if \p broker_version was not recognized: \p *apisp remains
+ * unchanged.
+ *
+ */
+int rd_kafka_get_legacy_ApiVersions(const char *broker_version,
+ struct rd_kafka_ApiVersion **apisp,
+ size_t *api_cntp,
+ const char *fallback) {
+ static const struct {
+ const char *pfx;
+ struct rd_kafka_ApiVersion *apis;
+ size_t api_cnt;
+ } vermap[] = {
+#define _VERMAP(PFX, APIS) {PFX, APIS, RD_ARRAYSIZE(APIS)}
+ _VERMAP("0.9.0", rd_kafka_ApiVersion_0_9_0),
+ _VERMAP("0.8.2", rd_kafka_ApiVersion_0_8_2),
+ _VERMAP("0.8.1", rd_kafka_ApiVersion_0_8_1),
+ _VERMAP("0.8.0", rd_kafka_ApiVersion_0_8_0),
+ {"0.7.", NULL}, /* Unsupported */
+ {"0.6.", NULL}, /* Unsupported */
+ _VERMAP("", rd_kafka_ApiVersion_Queryable),
+ {NULL}};
+ int i;
+ int fallback_i = -1;
+ int ret = 0;
+
+ *apisp = NULL;
+ *api_cntp = 0;
+
+ for (i = 0; vermap[i].pfx; i++) {
+ if (!strncmp(vermap[i].pfx, broker_version,
+ strlen(vermap[i].pfx))) {
+ if (!vermap[i].apis)
+ return 0;
+ *apisp = vermap[i].apis;
+ *api_cntp = vermap[i].api_cnt;
+ ret = 1;
+ break;
+ } else if (fallback && !strcmp(vermap[i].pfx, fallback))
+ fallback_i = i;
+ }
+
+ if (!*apisp && fallback) {
+ rd_kafka_assert(NULL, fallback_i != -1);
+ *apisp = vermap[fallback_i].apis;
+ *api_cntp = vermap[fallback_i].api_cnt;
+ }
+
+ return ret;
+}
+
+
+/**
+ * @returns 1 if the provided broker version (probably)
+ * supports api.version.request.
+ */
+int rd_kafka_ApiVersion_is_queryable(const char *broker_version) {
+ struct rd_kafka_ApiVersion *apis;
+ size_t api_cnt;
+
+
+ if (!rd_kafka_get_legacy_ApiVersions(broker_version, &apis, &api_cnt,
+ 0))
+ return 0;
+
+ return apis == rd_kafka_ApiVersion_Queryable;
+}
+
+
+
+/**
+ * @brief Check if match's versions overlaps with \p apis.
+ *
+ * @returns 1 if true, else 0.
+ * @remark \p apis must be sorted using rd_kafka_ApiVersion_key_cmp()
+ */
+static RD_INLINE int
+rd_kafka_ApiVersion_check(const struct rd_kafka_ApiVersion *apis,
+ size_t api_cnt,
+ const struct rd_kafka_ApiVersion *match) {
+ const struct rd_kafka_ApiVersion *api;
+
+ api = bsearch(match, apis, api_cnt, sizeof(*apis),
+ rd_kafka_ApiVersion_key_cmp);
+ if (unlikely(!api))
+ return 0;
+
+ return match->MinVer <= api->MaxVer && api->MinVer <= match->MaxVer;
+}
+
+
+/**
+ * @brief Compare broker's supported API versions to our feature request map
+ * and enable/disable features accordingly.
+ *
+ * @param broker_apis Broker's supported APIs. If NULL the
+ * \p broker.version.fallback configuration property will specify a
+ * default legacy version to use.
+ * @param broker_api_cnt Number of elements in \p broker_apis
+ *
+ * @returns the supported features (bitmask) to enable.
+ */
+int rd_kafka_features_check(rd_kafka_broker_t *rkb,
+ struct rd_kafka_ApiVersion *broker_apis,
+ size_t broker_api_cnt) {
+ int features = 0;
+ int i;
+
+ /* Scan through features. */
+ for (i = 0; rd_kafka_feature_map[i].feature != 0; i++) {
+ const struct rd_kafka_ApiVersion *match;
+ int fails = 0;
+
+ /* For each feature check that all its API dependencies
+ * can be fullfilled. */
+
+ for (match = &rd_kafka_feature_map[i].depends[0];
+ match->ApiKey != -1; match++) {
+ int r;
+
+ r = rd_kafka_ApiVersion_check(broker_apis,
+ broker_api_cnt, match);
+
+ rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
+ " Feature %s: %s (%hd..%hd) "
+ "%ssupported by broker",
+ rd_kafka_features2str(
+ rd_kafka_feature_map[i].feature),
+ rd_kafka_ApiKey2str(match->ApiKey),
+ match->MinVer, match->MaxVer,
+ r ? "" : "NOT ");
+
+ fails += !r;
+ }
+
+ rd_rkb_dbg(
+ rkb, FEATURE, "APIVERSION", "%s feature %s",
+ fails ? "Disabling" : "Enabling",
+ rd_kafka_features2str(rd_kafka_feature_map[i].feature));
+
+
+ if (!fails)
+ features |= rd_kafka_feature_map[i].feature;
+ }
+
+ return features;
+}
+
+
+
+/**
+ * @brief Make an allocated and sorted copy of \p src.
+ */
+void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src,
+ size_t src_cnt,
+ struct rd_kafka_ApiVersion **dstp,
+ size_t *dst_cntp) {
+ *dstp = rd_memdup(src, sizeof(*src) * src_cnt);
+ *dst_cntp = src_cnt;
+ qsort(*dstp, *dst_cntp, sizeof(**dstp), rd_kafka_ApiVersion_key_cmp);
+}
+
+
+
+/**
+ * @returns a human-readable feature flag string.
+ */
+const char *rd_kafka_features2str(int features) {
+ static RD_TLS char ret[4][256];
+ size_t of = 0;
+ static RD_TLS int reti = 0;
+ int i;
+
+ reti = (reti + 1) % 4;
+
+ *ret[reti] = '\0';
+ for (i = 0; rd_kafka_feature_names[i]; i++) {
+ int r;
+ if (!(features & (1 << i)))
+ continue;
+
+ r = rd_snprintf(ret[reti] + of, sizeof(ret[reti]) - of, "%s%s",
+ of == 0 ? "" : ",", rd_kafka_feature_names[i]);
+ if ((size_t)r > sizeof(ret[reti]) - of) {
+ /* Out of space */
+ memcpy(&ret[reti][sizeof(ret[reti]) - 3], "..", 3);
+ break;
+ }
+
+ of += r;
+ }
+
+ return ret[reti];
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h
new file mode 100644
index 000000000..a651a07df
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_feature.h
@@ -0,0 +1,102 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_FEATURE_H_
+#define _RDKAFKA_FEATURE_H_
+
+
+/**
+ * @brief Kafka protocol features
+ */
+
+/* Message version 1 (MagicByte=1):
+ * + relative offsets (KIP-31)
+ * + timestamps (KIP-32) */
+#define RD_KAFKA_FEATURE_MSGVER1 0x1
+
+/* ApiVersionQuery support (KIP-35) */
+#define RD_KAFKA_FEATURE_APIVERSION 0x2
+
+/* >= 0.9: Broker-based Balanced Consumer */
+#define RD_KAFKA_FEATURE_BROKER_BALANCED_CONSUMER 0x4
+
+/* >= 0.9: Produce/Fetch ThrottleTime reporting */
+#define RD_KAFKA_FEATURE_THROTTLETIME 0x8
+
+/* >= 0.9: SASL GSSAPI support */
+#define RD_KAFKA_FEATURE_SASL_GSSAPI 0x10
+
+/* >= 0.10: SaslMechanismRequest (KIP-43) */
+#define RD_KAFKA_FEATURE_SASL_HANDSHAKE 0x20
+
+/* >= 0.8.2.0: Broker-based Group coordinator */
+#define RD_KAFKA_FEATURE_BROKER_GROUP_COORD 0x40
+
+/* >= 0.8.2.0: LZ4 compression (with bad and proper HC checksums) */
+#define RD_KAFKA_FEATURE_LZ4 0x80
+
+/* >= 0.10.1.0: Time-based Offset fetch (KIP-79) */
+#define RD_KAFKA_FEATURE_OFFSET_TIME 0x100
+
+/* >= 0.11.0.0: Message version 2 (MagicByte=2):
+ * + EOS message format KIP-98 */
+#define RD_KAFKA_FEATURE_MSGVER2 0x200
+
+/* >= 0.11.0.0: Idempotent Producer support */
+#define RD_KAFKA_FEATURE_IDEMPOTENT_PRODUCER 0x400
+
+/* >= 2.1.0-IV2: ZSTD compression */
+#define RD_KAFKA_FEATURE_ZSTD 0x800
+
+/* >= 1.0.0: SaslAuthenticateRequest */
+#define RD_KAFKA_FEATURE_SASL_AUTH_REQ 0x1000
+
+/* Unit-test mock broker: broker supports everything.
+ * Should be used with RD_KAFKA_FEATURE_ALL, but not be included in bitmask */
+#define RD_KAFKA_FEATURE_UNITTEST 0x4000
+
+/* All features (except UNITTEST) */
+#define RD_KAFKA_FEATURE_ALL (0xffff & ~RD_KAFKA_FEATURE_UNITTEST)
+
+
+
+int rd_kafka_get_legacy_ApiVersions(const char *broker_version,
+ struct rd_kafka_ApiVersion **apisp,
+ size_t *api_cntp,
+ const char *fallback);
+int rd_kafka_ApiVersion_is_queryable(const char *broker_version);
+void rd_kafka_ApiVersions_copy(const struct rd_kafka_ApiVersion *src,
+ size_t src_cnt,
+ struct rd_kafka_ApiVersion **dstp,
+ size_t *dst_cntp);
+int rd_kafka_features_check(rd_kafka_broker_t *rkb,
+ struct rd_kafka_ApiVersion *broker_apis,
+ size_t broker_api_cnt);
+
+const char *rd_kafka_features2str(int features);
+
+#endif /* _RDKAFKA_FEATURE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c
new file mode 100644
index 000000000..8ee67a420
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.c
@@ -0,0 +1,1145 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2022 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name Fetcher
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_msgset.h"
+#include "rdkafka_fetcher.h"
+
+
+/**
+ * Backoff the next Fetch request (due to error).
+ */
+static void rd_kafka_broker_fetch_backoff(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err) {
+ int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms;
+ rkb->rkb_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000);
+ rd_rkb_dbg(rkb, FETCH, "BACKOFF", "Fetch backoff for %dms: %s",
+ backoff_ms, rd_kafka_err2str(err));
+}
+
+/**
+ * @brief Backoff the next Fetch for specific partition
+ */
+static void rd_kafka_toppar_fetch_backoff(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err) {
+ int backoff_ms = rkb->rkb_rk->rk_conf.fetch_error_backoff_ms;
+
+ /* Don't back off on reaching end of partition */
+ if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
+ return;
+
+ /* Certain errors that may require manual intervention should have
+ * a longer backoff time. */
+ if (err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED)
+ backoff_ms = RD_MAX(1000, backoff_ms * 10);
+
+ rktp->rktp_ts_fetch_backoff = rd_clock() + (backoff_ms * 1000);
+
+ rd_rkb_dbg(rkb, FETCH, "BACKOFF",
+ "%s [%" PRId32 "]: Fetch backoff for %dms%s%s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ backoff_ms, err ? ": " : "",
+ err ? rd_kafka_err2str(err) : "");
+}
+
+
+/**
+ * @brief Handle preferred replica in fetch response.
+ *
+ * @locks rd_kafka_toppar_lock(rktp) and
+ * rd_kafka_rdlock(rk) must NOT be held.
+ *
+ * @locality broker thread
+ */
+static void rd_kafka_fetch_preferred_replica_handle(rd_kafka_toppar_t *rktp,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_broker_t *rkb,
+ int32_t preferred_id) {
+ const rd_ts_t one_minute = 60 * 1000 * 1000;
+ const rd_ts_t five_seconds = 5 * 1000 * 1000;
+ rd_kafka_broker_t *preferred_rkb;
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ rd_ts_t new_intvl =
+ rd_interval_immediate(&rktp->rktp_new_lease_intvl, one_minute, 0);
+
+ if (new_intvl < 0) {
+ /* In lieu of KIP-320, the toppar is delegated back to
+ * the leader in the event of an offset out-of-range
+ * error (KIP-392 error case #4) because this scenario
+ * implies the preferred replica is out-of-sync.
+ *
+ * If program execution reaches here, the leader has
+ * relatively quickly instructed the client back to
+ * a preferred replica, quite possibly the same one
+ * as before (possibly resulting from stale metadata),
+ * so we back off the toppar to slow down potential
+ * back-and-forth.
+ */
+
+ if (rd_interval_immediate(&rktp->rktp_new_lease_log_intvl,
+ one_minute, 0) > 0)
+ rd_rkb_log(rkb, LOG_NOTICE, "FETCH",
+ "%.*s [%" PRId32
+ "]: preferred replica "
+ "(%" PRId32
+ ") lease changing too quickly "
+ "(%" PRId64
+ "s < 60s): possibly due to "
+ "unavailable replica or stale cluster "
+ "state: backing off next fetch",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, preferred_id,
+ (one_minute - -new_intvl) / (1000 * 1000));
+
+ rd_kafka_toppar_fetch_backoff(rkb, rktp,
+ RD_KAFKA_RESP_ERR_NO_ERROR);
+ }
+
+ rd_kafka_rdlock(rk);
+ preferred_rkb = rd_kafka_broker_find_by_nodeid(rk, preferred_id);
+ rd_kafka_rdunlock(rk);
+
+ if (preferred_rkb) {
+ rd_interval_reset_to_now(&rktp->rktp_lease_intvl, 0);
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_toppar_broker_update(rktp, preferred_id, preferred_rkb,
+ "preferred replica updated");
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_broker_destroy(preferred_rkb);
+ return;
+ }
+
+ if (rd_interval_immediate(&rktp->rktp_metadata_intvl, five_seconds, 0) >
+ 0) {
+ rd_rkb_log(rkb, LOG_NOTICE, "FETCH",
+ "%.*s [%" PRId32 "]: preferred replica (%" PRId32
+ ") "
+ "is unknown: refreshing metadata",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, preferred_id);
+
+ rd_kafka_metadata_refresh_brokers(
+ rktp->rktp_rkt->rkt_rk, NULL,
+ "preferred replica unavailable");
+ }
+
+ rd_kafka_toppar_fetch_backoff(rkb, rktp,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE);
+}
+
+
+/**
+ * @brief Handle partition-specific Fetch error.
+ */
+static void rd_kafka_fetch_reply_handle_partition_error(
+ rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const struct rd_kafka_toppar_ver *tver,
+ rd_kafka_resp_err_t err,
+ int64_t HighwaterMarkOffset) {
+
+ rd_rkb_dbg(rkb, FETCH, "FETCHERR",
+ "%.*s [%" PRId32 "]: Fetch failed at %s: %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos),
+ rd_kafka_err2name(err));
+
+ /* Some errors should be passed to the
+ * application while some handled by rdkafka */
+ switch (err) {
+ /* Errors handled by rdkafka */
+ case RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
+ case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_NOT_LEADER_OR_FOLLOWER:
+ case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR:
+ case RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH:
+ case RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH:
+ if (err == RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE) {
+ /* Occurs when:
+ * - Msg exists on broker but
+ * offset > HWM, or:
+ * - HWM is >= offset, but msg not
+ * yet available at that offset
+ * (replica is out of sync).
+ * - partition leader is out of sync.
+ *
+ * Handle by requesting metadata update, changing back
+ * to the leader, and then retrying FETCH
+ * (with backoff).
+ */
+ rd_rkb_dbg(rkb, MSG, "FETCH",
+ "Topic %s [%" PRId32
+ "]: %s not "
+ "available on broker %" PRId32
+ " (leader %" PRId32
+ "): updating metadata and retrying",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(
+ rktp->rktp_offsets.fetch_pos),
+ rktp->rktp_broker_id, rktp->rktp_leader_id);
+ }
+
+ if (err == RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH) {
+ rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_CONSUMER, "FETCH",
+ "Topic %s [%" PRId32
+ "]: Fetch failed at %s: %s: broker %" PRId32
+ "has not yet caught up on latest metadata: "
+ "retrying",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(
+ rktp->rktp_offsets.fetch_pos),
+ rd_kafka_err2str(err), rktp->rktp_broker_id);
+ }
+
+ if (rktp->rktp_broker_id != rktp->rktp_leader_id) {
+ rd_kafka_toppar_delegate_to_leader(rktp);
+ }
+ /* Request metadata information update*/
+ rd_kafka_toppar_leader_unavailable(rktp, "fetch", err);
+ break;
+
+ case RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE: {
+ rd_kafka_fetch_pos_t err_pos;
+
+ if (rktp->rktp_broker_id != rktp->rktp_leader_id &&
+ rktp->rktp_offsets.fetch_pos.offset > HighwaterMarkOffset) {
+ rd_kafka_log(rkb->rkb_rk, LOG_WARNING, "FETCH",
+ "Topic %s [%" PRId32
+ "]: %s "
+ " out of range (HighwaterMark %" PRId64
+ " fetching from "
+ "broker %" PRId32 " (leader %" PRId32
+ "): reverting to leader",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(
+ rktp->rktp_offsets.fetch_pos),
+ HighwaterMarkOffset, rktp->rktp_broker_id,
+ rktp->rktp_leader_id);
+
+ /* Out of range error cannot be taken as definitive
+ * when fetching from follower.
+ * Revert back to the leader in lieu of KIP-320.
+ */
+ rd_kafka_toppar_delegate_to_leader(rktp);
+ break;
+ }
+
+ /* Application error */
+ err_pos = rktp->rktp_offsets.fetch_pos;
+ rktp->rktp_offsets.fetch_pos.offset = RD_KAFKA_OFFSET_INVALID;
+ rktp->rktp_offsets.fetch_pos.leader_epoch = -1;
+ rd_kafka_offset_reset(rktp, rd_kafka_broker_id(rkb), err_pos,
+ err,
+ "fetch failed due to requested offset "
+ "not available on the broker");
+ } break;
+
+ case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
+ /* If we're not authorized to access the
+ * topic mark it as errored to deny
+ * further Fetch requests. */
+ if (rktp->rktp_last_error != err) {
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err,
+ tver->version, NULL, rktp,
+ rktp->rktp_offsets.fetch_pos.offset,
+ "Fetch from broker %" PRId32 " failed: %s",
+ rd_kafka_broker_id(rkb), rd_kafka_err2str(err));
+ rktp->rktp_last_error = err;
+ }
+ break;
+
+
+ /* Application errors */
+ case RD_KAFKA_RESP_ERR__PARTITION_EOF:
+ if (rkb->rkb_rk->rk_conf.enable_partition_eof)
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err,
+ tver->version, NULL, rktp,
+ rktp->rktp_offsets.fetch_pos.offset,
+ "Fetch from broker %" PRId32
+ " reached end of "
+ "partition at offset %" PRId64
+ " (HighwaterMark %" PRId64 ")",
+ rd_kafka_broker_id(rkb),
+ rktp->rktp_offsets.fetch_pos.offset,
+ HighwaterMarkOffset);
+ break;
+
+ case RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE:
+ default: /* and all other errors */
+ rd_dassert(tver->version > 0);
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, rd_kafka_broker_id(rkb), err,
+ tver->version, NULL, rktp,
+ rktp->rktp_offsets.fetch_pos.offset,
+ "Fetch from broker %" PRId32 " failed at %s: %s",
+ rd_kafka_broker_id(rkb),
+ rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos),
+ rd_kafka_err2str(err));
+ break;
+ }
+
+ /* Back off the next fetch for this partition */
+ rd_kafka_toppar_fetch_backoff(rkb, rktp, err);
+}
+
+
+
+/**
+ * @brief Per-partition FetchResponse parsing and handling.
+ *
+ * @returns an error on buffer parse failure, else RD_KAFKA_RESP_ERR_NO_ERROR.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_fetch_reply_handle_partition(rd_kafka_broker_t *rkb,
+ const rd_kafkap_str_t *topic,
+ rd_kafka_topic_t *rkt /*possibly NULL*/,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ int16_t ErrorCode) {
+ const int log_decode_errors = LOG_ERR;
+ struct rd_kafka_toppar_ver *tver, tver_skel;
+ rd_kafka_toppar_t *rktp = NULL;
+ rd_kafka_aborted_txns_t *aborted_txns = NULL;
+ rd_slice_t save_slice;
+ int32_t fetch_version;
+ struct {
+ int32_t Partition;
+ int16_t ErrorCode;
+ int64_t HighwaterMarkOffset;
+ int64_t LastStableOffset; /* v4 */
+ int64_t LogStartOffset; /* v5 */
+ int32_t MessageSetSize;
+ int32_t PreferredReadReplica; /* v11 */
+ } hdr;
+ rd_kafka_resp_err_t err;
+ int64_t end_offset;
+
+ rd_kafka_buf_read_i32(rkbuf, &hdr.Partition);
+ rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode);
+ if (ErrorCode)
+ hdr.ErrorCode = ErrorCode;
+ rd_kafka_buf_read_i64(rkbuf, &hdr.HighwaterMarkOffset);
+
+ end_offset = hdr.HighwaterMarkOffset;
+
+ hdr.LastStableOffset = RD_KAFKA_OFFSET_INVALID;
+ hdr.LogStartOffset = RD_KAFKA_OFFSET_INVALID;
+ if (rd_kafka_buf_ApiVersion(request) >= 4) {
+ int32_t AbortedTxnCnt;
+ rd_kafka_buf_read_i64(rkbuf, &hdr.LastStableOffset);
+ if (rd_kafka_buf_ApiVersion(request) >= 5)
+ rd_kafka_buf_read_i64(rkbuf, &hdr.LogStartOffset);
+
+ rd_kafka_buf_read_i32(rkbuf, &AbortedTxnCnt);
+
+ if (rkb->rkb_rk->rk_conf.isolation_level ==
+ RD_KAFKA_READ_UNCOMMITTED) {
+
+ if (unlikely(AbortedTxnCnt > 0)) {
+ rd_rkb_log(rkb, LOG_ERR, "FETCH",
+ "%.*s [%" PRId32
+ "]: "
+ "%" PRId32
+ " aborted transaction(s) "
+ "encountered in READ_UNCOMMITTED "
+ "fetch response: ignoring.",
+ RD_KAFKAP_STR_PR(topic),
+ hdr.Partition, AbortedTxnCnt);
+
+ rd_kafka_buf_skip(rkbuf,
+ AbortedTxnCnt * (8 + 8));
+ }
+ } else {
+ /* Older brokers may return LSO -1,
+ * in which case we use the HWM. */
+ if (hdr.LastStableOffset >= 0)
+ end_offset = hdr.LastStableOffset;
+
+ if (AbortedTxnCnt > 0) {
+ int k;
+
+ if (unlikely(AbortedTxnCnt > 1000000))
+ rd_kafka_buf_parse_fail(
+ rkbuf,
+ "%.*s [%" PRId32
+ "]: "
+ "invalid AbortedTxnCnt %" PRId32,
+ RD_KAFKAP_STR_PR(topic),
+ hdr.Partition, AbortedTxnCnt);
+
+ aborted_txns =
+ rd_kafka_aborted_txns_new(AbortedTxnCnt);
+ for (k = 0; k < AbortedTxnCnt; k++) {
+ int64_t PID;
+ int64_t FirstOffset;
+ rd_kafka_buf_read_i64(rkbuf, &PID);
+ rd_kafka_buf_read_i64(rkbuf,
+ &FirstOffset);
+ rd_kafka_aborted_txns_add(
+ aborted_txns, PID, FirstOffset);
+ }
+ rd_kafka_aborted_txns_sort(aborted_txns);
+ }
+ }
+ }
+
+ if (rd_kafka_buf_ApiVersion(request) >= 11)
+ rd_kafka_buf_read_i32(rkbuf, &hdr.PreferredReadReplica);
+ else
+ hdr.PreferredReadReplica = -1;
+
+ rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSetSize);
+
+ if (unlikely(hdr.MessageSetSize < 0))
+ rd_kafka_buf_parse_fail(
+ rkbuf,
+ "%.*s [%" PRId32 "]: invalid MessageSetSize %" PRId32,
+ RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize);
+
+ /* Look up topic+partition */
+ if (likely(rkt != NULL)) {
+ rd_kafka_topic_rdlock(rkt);
+ rktp = rd_kafka_toppar_get(rkt, hdr.Partition,
+ 0 /*no ua-on-miss*/);
+ rd_kafka_topic_rdunlock(rkt);
+ }
+
+ if (unlikely(!rkt || !rktp)) {
+ rd_rkb_dbg(rkb, TOPIC, "UNKTOPIC",
+ "Received Fetch response (error %hu) for unknown "
+ "topic %.*s [%" PRId32 "]: ignoring",
+ hdr.ErrorCode, RD_KAFKAP_STR_PR(topic),
+ hdr.Partition);
+ rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
+ if (aborted_txns)
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_kafka_toppar_lock(rktp);
+ rktp->rktp_lo_offset = hdr.LogStartOffset;
+ rktp->rktp_hi_offset = hdr.HighwaterMarkOffset;
+ /* Let the LastStable offset be the effective
+ * end_offset based on protocol version, that is:
+ * if connected to a broker that does not support
+ * LastStableOffset we use the HighwaterMarkOffset. */
+ rktp->rktp_ls_offset = end_offset;
+ rd_kafka_toppar_unlock(rktp);
+
+ if (hdr.PreferredReadReplica != -1) {
+
+ rd_kafka_fetch_preferred_replica_handle(
+ rktp, rkbuf, rkb, hdr.PreferredReadReplica);
+
+ if (unlikely(hdr.MessageSetSize != 0)) {
+ rd_rkb_log(rkb, LOG_WARNING, "FETCH",
+ "%.*s [%" PRId32
+ "]: Fetch response has both preferred read "
+ "replica and non-zero message set size: "
+ "%" PRId32 ": skipping messages",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, hdr.MessageSetSize);
+ rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
+ }
+
+ if (aborted_txns)
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+ rd_kafka_toppar_destroy(rktp); /* from get */
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Make sure toppar hasn't moved to another broker
+ * during the lifetime of the request. */
+ if (unlikely(rktp->rktp_broker != rkb)) {
+ rd_kafka_toppar_unlock(rktp);
+ rd_rkb_dbg(rkb, MSG, "FETCH",
+ "%.*s [%" PRId32
+ "]: partition broker has changed: "
+ "discarding fetch response",
+ RD_KAFKAP_STR_PR(topic), hdr.Partition);
+ rd_kafka_toppar_destroy(rktp); /* from get */
+ rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
+ if (aborted_txns)
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ fetch_version = rktp->rktp_fetch_version;
+ rd_kafka_toppar_unlock(rktp);
+
+ /* Check if this Fetch is for an outdated fetch version,
+ * or the original rktp was removed and a new one
+ * created (due to partition count decreasing and
+ * then increasing again, which can happen in
+ * desynchronized clusters): if so ignore it. */
+ tver_skel.rktp = rktp;
+ tver = rd_list_find(request->rkbuf_rktp_vers, &tver_skel,
+ rd_kafka_toppar_ver_cmp);
+ rd_kafka_assert(NULL, tver);
+ if (tver->rktp != rktp || tver->version < fetch_version) {
+ rd_rkb_dbg(rkb, MSG, "DROP",
+ "%s [%" PRId32
+ "]: dropping outdated fetch response "
+ "(v%d < %d or old rktp)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ tver->version, fetch_version);
+ rd_atomic64_add(&rktp->rktp_c.rx_ver_drops, 1);
+ rd_kafka_toppar_destroy(rktp); /* from get */
+ rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
+ if (aborted_txns)
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_rkb_dbg(rkb, MSG, "FETCH",
+ "Topic %.*s [%" PRId32 "] MessageSet size %" PRId32
+ ", error \"%s\", MaxOffset %" PRId64 ", LSO %" PRId64
+ ", Ver %" PRId32 "/%" PRId32,
+ RD_KAFKAP_STR_PR(topic), hdr.Partition, hdr.MessageSetSize,
+ rd_kafka_err2str(hdr.ErrorCode), hdr.HighwaterMarkOffset,
+ hdr.LastStableOffset, tver->version, fetch_version);
+
+ /* If this is the last message of the queue,
+ * signal EOF back to the application. */
+ if (end_offset == rktp->rktp_offsets.fetch_pos.offset &&
+ rktp->rktp_offsets.eof_offset != end_offset) {
+ hdr.ErrorCode = RD_KAFKA_RESP_ERR__PARTITION_EOF;
+ rktp->rktp_offsets.eof_offset = end_offset;
+ }
+
+ if (unlikely(hdr.ErrorCode != RD_KAFKA_RESP_ERR_NO_ERROR)) {
+ /* Handle partition-level errors. */
+ rd_kafka_fetch_reply_handle_partition_error(
+ rkb, rktp, tver, hdr.ErrorCode, hdr.HighwaterMarkOffset);
+
+ rd_kafka_toppar_destroy(rktp); /* from get()*/
+
+ rd_kafka_buf_skip(rkbuf, hdr.MessageSetSize);
+
+ if (aborted_txns)
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ /* No error, clear any previous fetch error. */
+ rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (unlikely(hdr.MessageSetSize <= 0)) {
+ rd_kafka_toppar_destroy(rktp); /*from get()*/
+ if (aborted_txns)
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ /**
+ * Parse MessageSet
+ */
+ if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice,
+ (size_t)hdr.MessageSetSize))
+ rd_kafka_buf_check_len(rkbuf, hdr.MessageSetSize);
+
+ /* Parse messages */
+ err = rd_kafka_msgset_parse(rkbuf, request, rktp, aborted_txns, tver);
+
+ if (aborted_txns)
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+
+ rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice);
+ /* Continue with next partition regardless of
+ * parse errors (which are partition-specific) */
+
+ /* On error: back off the fetcher for this partition */
+ if (unlikely(err))
+ rd_kafka_toppar_fetch_backoff(rkb, rktp, err);
+
+ rd_kafka_toppar_destroy(rktp); /*from get()*/
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ if (rktp)
+ rd_kafka_toppar_destroy(rktp); /*from get()*/
+
+ return rkbuf->rkbuf_err;
+}
+
+/**
+ * Parses and handles a Fetch reply.
+ * Returns 0 on success or an error code on failure.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_fetch_reply_handle(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request) {
+ int32_t TopicArrayCnt;
+ int i;
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_topic_t *rkt = NULL;
+ int16_t ErrorCode = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (rd_kafka_buf_ApiVersion(request) >= 1) {
+ int32_t Throttle_Time;
+ rd_kafka_buf_read_i32(rkbuf, &Throttle_Time);
+
+ rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep,
+ Throttle_Time);
+ }
+
+ if (rd_kafka_buf_ApiVersion(request) >= 7) {
+ int32_t SessionId;
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ rd_kafka_buf_read_i32(rkbuf, &SessionId);
+ }
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
+ /* Verify that TopicArrayCnt seems to be in line with remaining size */
+ rd_kafka_buf_check_len(rkbuf,
+ TopicArrayCnt * (3 /*topic min size*/ +
+ 4 /*PartitionArrayCnt*/ + 4 +
+ 2 + 8 + 4 /*inner header*/));
+
+ for (i = 0; i < TopicArrayCnt; i++) {
+ rd_kafkap_str_t topic;
+ int32_t PartitionArrayCnt;
+ int j;
+
+ rd_kafka_buf_read_str(rkbuf, &topic);
+ rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt);
+
+ rkt = rd_kafka_topic_find0(rkb->rkb_rk, &topic);
+
+ for (j = 0; j < PartitionArrayCnt; j++) {
+ if (rd_kafka_fetch_reply_handle_partition(
+ rkb, &topic, rkt, rkbuf, request, ErrorCode))
+ goto err_parse;
+ }
+
+ if (rkt) {
+ rd_kafka_topic_destroy0(rkt);
+ rkt = NULL;
+ }
+ }
+
+ if (rd_kafka_buf_read_remain(rkbuf) != 0) {
+ rd_kafka_buf_parse_fail(rkbuf,
+ "Remaining data after message set "
+ "parse: %" PRIusz " bytes",
+ rd_kafka_buf_read_remain(rkbuf));
+ RD_NOTREACHED();
+ }
+
+ return 0;
+
+err_parse:
+ if (rkt)
+ rd_kafka_topic_destroy0(rkt);
+ rd_rkb_dbg(rkb, MSG, "BADMSG",
+ "Bad message (Fetch v%d): "
+ "is broker.version.fallback incorrectly set?",
+ (int)request->rkbuf_reqhdr.ApiVersion);
+ return rkbuf->rkbuf_err;
+}
+
+
+
+/**
+ * @broker FetchResponse handling.
+ *
+ * @locality broker thread (or any thread if err == __DESTROY).
+ */
+static void rd_kafka_broker_fetch_reply(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ return; /* Terminating */
+
+ rd_kafka_assert(rkb->rkb_rk, rkb->rkb_fetching > 0);
+ rkb->rkb_fetching = 0;
+
+ /* Parse and handle the messages (unless the request errored) */
+ if (!err && reply)
+ err = rd_kafka_fetch_reply_handle(rkb, reply, request);
+
+ if (unlikely(err)) {
+ char tmp[128];
+
+ rd_rkb_dbg(rkb, MSG, "FETCH", "Fetch reply: %s",
+ rd_kafka_err2str(err));
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
+ case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION:
+ case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE:
+ /* Request metadata information update */
+ rd_snprintf(tmp, sizeof(tmp), "FetchRequest failed: %s",
+ rd_kafka_err2str(err));
+ rd_kafka_metadata_refresh_known_topics(
+ rkb->rkb_rk, NULL, rd_true /*force*/, tmp);
+ /* FALLTHRU */
+
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
+ case RD_KAFKA_RESP_ERR__MSG_TIMED_OUT:
+ /* The fetch is already intervalled from
+ * consumer_serve() so dont retry. */
+ break;
+
+ default:
+ break;
+ }
+
+ rd_kafka_broker_fetch_backoff(rkb, err);
+ /* FALLTHRU */
+ }
+}
+
+
+
+/**
+ * @brief Build and send a Fetch request message for all underflowed toppars
+ * for a specific broker.
+ *
+ * @returns the number of partitions included in the FetchRequest, if any.
+ *
+ * @locality broker thread
+ */
+int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now) {
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_buf_t *rkbuf;
+ int cnt = 0;
+ size_t of_TopicArrayCnt = 0;
+ int TopicArrayCnt = 0;
+ size_t of_PartitionArrayCnt = 0;
+ int PartitionArrayCnt = 0;
+ rd_kafka_topic_t *rkt_last = NULL;
+ int16_t ApiVersion = 0;
+
+ /* Create buffer and segments:
+ * 1 x ReplicaId MaxWaitTime MinBytes TopicArrayCnt
+ * N x topic name
+ * N x PartitionArrayCnt Partition FetchOffset MaxBytes
+ * where N = number of toppars.
+ * Since we dont keep track of the number of topics served by
+ * this broker, only the partition count, we do a worst-case calc
+ * when allocating and assume each partition is on its own topic
+ */
+
+ if (unlikely(rkb->rkb_active_toppar_cnt == 0))
+ return 0;
+
+ rkbuf = rd_kafka_buf_new_request(
+ rkb, RD_KAFKAP_Fetch, 1,
+ /* ReplicaId+MaxWaitTime+MinBytes+MaxBytes+IsolationLevel+
+ * SessionId+Epoch+TopicCnt */
+ 4 + 4 + 4 + 4 + 1 + 4 + 4 + 4 +
+ /* N x PartCnt+Partition+CurrentLeaderEpoch+FetchOffset+
+ * LogStartOffset+MaxBytes+?TopicNameLen?*/
+ (rkb->rkb_active_toppar_cnt * (4 + 4 + 4 + 8 + 8 + 4 + 40)) +
+ /* ForgottenTopicsCnt */
+ 4 +
+ /* N x ForgottenTopicsData */
+ 0);
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_Fetch,
+ 0, 11, NULL);
+
+ if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2)
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion,
+ RD_KAFKA_FEATURE_MSGVER2);
+ else if (rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1)
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion,
+ RD_KAFKA_FEATURE_MSGVER1);
+ else if (rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion,
+ RD_KAFKA_FEATURE_THROTTLETIME);
+
+
+ /* FetchRequest header */
+ /* ReplicaId */
+ rd_kafka_buf_write_i32(rkbuf, -1);
+ /* MaxWaitTime */
+ rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_wait_max_ms);
+ /* MinBytes */
+ rd_kafka_buf_write_i32(rkbuf, rkb->rkb_rk->rk_conf.fetch_min_bytes);
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 3)
+ /* MaxBytes */
+ rd_kafka_buf_write_i32(rkbuf,
+ rkb->rkb_rk->rk_conf.fetch_max_bytes);
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 4)
+ /* IsolationLevel */
+ rd_kafka_buf_write_i8(rkbuf,
+ rkb->rkb_rk->rk_conf.isolation_level);
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 7) {
+ /* SessionId */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+ /* Epoch */
+ rd_kafka_buf_write_i32(rkbuf, -1);
+ }
+
+ /* Write zero TopicArrayCnt but store pointer for later update */
+ of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* Prepare map for storing the fetch version for each partition,
+ * this will later be checked in Fetch response to purge outdated
+ * responses (e.g., after a seek). */
+ rkbuf->rkbuf_rktp_vers =
+ rd_list_new(0, (void *)rd_kafka_toppar_ver_destroy);
+ rd_list_prealloc_elems(rkbuf->rkbuf_rktp_vers,
+ sizeof(struct rd_kafka_toppar_ver),
+ rkb->rkb_active_toppar_cnt, 0);
+
+ /* Round-robin start of the list. */
+ rktp = rkb->rkb_active_toppar_next;
+ do {
+ struct rd_kafka_toppar_ver *tver;
+
+ if (rkt_last != rktp->rktp_rkt) {
+ if (rkt_last != NULL) {
+ /* Update PartitionArrayCnt */
+ rd_kafka_buf_update_i32(rkbuf,
+ of_PartitionArrayCnt,
+ PartitionArrayCnt);
+ }
+
+ /* Topic name */
+ rd_kafka_buf_write_kstr(rkbuf,
+ rktp->rktp_rkt->rkt_topic);
+ TopicArrayCnt++;
+ rkt_last = rktp->rktp_rkt;
+ /* Partition count */
+ of_PartitionArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0);
+ PartitionArrayCnt = 0;
+ }
+
+ PartitionArrayCnt++;
+
+ /* Partition */
+ rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition);
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 9) {
+ /* CurrentLeaderEpoch */
+ if (rktp->rktp_leader_epoch < 0 &&
+ rd_kafka_has_reliable_leader_epochs(rkb)) {
+ /* If current leader epoch is set to -1 and
+ * the broker has reliable leader epochs,
+ * send 0 instead, so that epoch is checked
+ * and optionally metadata is refreshed.
+ * This can happen if metadata is read initially
+ * without an existing topic (see
+ * rd_kafka_topic_metadata_update2).
+ * TODO: have a private metadata struct that
+ * stores leader epochs before topic creation.
+ */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+ } else {
+ rd_kafka_buf_write_i32(rkbuf,
+ rktp->rktp_leader_epoch);
+ }
+ }
+
+ /* FetchOffset */
+ rd_kafka_buf_write_i64(rkbuf,
+ rktp->rktp_offsets.fetch_pos.offset);
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 5)
+ /* LogStartOffset - only used by follower replica */
+ rd_kafka_buf_write_i64(rkbuf, -1);
+
+ /* MaxBytes */
+ rd_kafka_buf_write_i32(rkbuf, rktp->rktp_fetch_msg_max_bytes);
+
+ rd_rkb_dbg(rkb, FETCH, "FETCH",
+ "Fetch topic %.*s [%" PRId32 "] at offset %" PRId64
+ " (leader epoch %" PRId32
+ ", current leader epoch %" PRId32 ", v%d)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rktp->rktp_offsets.fetch_pos.offset,
+ rktp->rktp_offsets.fetch_pos.leader_epoch,
+ rktp->rktp_leader_epoch, rktp->rktp_fetch_version);
+
+ /* We must have a valid fetch offset when we get here */
+ rd_dassert(rktp->rktp_offsets.fetch_pos.offset >= 0);
+
+ /* Add toppar + op version mapping. */
+ tver = rd_list_add(rkbuf->rkbuf_rktp_vers, NULL);
+ tver->rktp = rd_kafka_toppar_keep(rktp);
+ tver->version = rktp->rktp_fetch_version;
+
+ cnt++;
+ } while ((rktp = CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
+ rktp_activelink)) !=
+ rkb->rkb_active_toppar_next);
+
+ /* Update next toppar to fetch in round-robin list. */
+ rd_kafka_broker_active_toppar_next(
+ rkb, rktp ? CIRCLEQ_LOOP_NEXT(&rkb->rkb_active_toppars, rktp,
+ rktp_activelink)
+ : NULL);
+
+ rd_rkb_dbg(rkb, FETCH, "FETCH", "Fetch %i/%i/%i toppar(s)", cnt,
+ rkb->rkb_active_toppar_cnt, rkb->rkb_toppar_cnt);
+ if (!cnt) {
+ rd_kafka_buf_destroy(rkbuf);
+ return cnt;
+ }
+
+ if (rkt_last != NULL) {
+ /* Update last topic's PartitionArrayCnt */
+ rd_kafka_buf_update_i32(rkbuf, of_PartitionArrayCnt,
+ PartitionArrayCnt);
+ }
+
+ /* Update TopicArrayCnt */
+ rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, TopicArrayCnt);
+
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 7)
+ /* Length of the ForgottenTopics list (KIP-227). Broker
+ * use only - not used by the consumer. */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 11)
+ /* RackId */
+ rd_kafka_buf_write_kstr(rkbuf,
+ rkb->rkb_rk->rk_conf.client_rack);
+
+ /* Consider Fetch requests blocking if fetch.wait.max.ms >= 1s */
+ if (rkb->rkb_rk->rk_conf.fetch_wait_max_ms >= 1000)
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
+
+ /* Use configured timeout */
+ rd_kafka_buf_set_timeout(rkbuf,
+ rkb->rkb_rk->rk_conf.socket_timeout_ms +
+ rkb->rkb_rk->rk_conf.fetch_wait_max_ms,
+ now);
+
+ /* Sort toppar versions for quicker lookups in Fetch response. */
+ rd_list_sort(rkbuf->rkbuf_rktp_vers, rd_kafka_toppar_ver_cmp);
+
+ rkb->rkb_fetching = 1;
+ rd_kafka_broker_buf_enq1(rkb, rkbuf, rd_kafka_broker_fetch_reply, NULL);
+
+ return cnt;
+}
+
+
+
+/**
+ * @brief Decide whether this toppar should be on the fetch list or not.
+ *
+ * Also:
+ * - update toppar's op version (for broker thread's copy)
+ * - finalize statistics (move rktp_offsets to rktp_offsets_fin)
+ *
+ * @returns the partition's Fetch backoff timestamp, or 0 if no backoff.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp,
+ rd_kafka_broker_t *rkb,
+ int force_remove) {
+ int should_fetch = 1;
+ const char *reason = "";
+ int32_t version;
+ rd_ts_t ts_backoff = 0;
+ rd_bool_t lease_expired = rd_false;
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Check for preferred replica lease expiry */
+ lease_expired = rktp->rktp_leader_id != rktp->rktp_broker_id &&
+ rd_interval(&rktp->rktp_lease_intvl,
+ 5 * 60 * 1000 * 1000 /*5 minutes*/, 0) > 0;
+ if (lease_expired) {
+ /* delete_to_leader() requires no locks to be held */
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_toppar_delegate_to_leader(rktp);
+ rd_kafka_toppar_lock(rktp);
+
+ reason = "preferred replica lease expired";
+ should_fetch = 0;
+ goto done;
+ }
+
+ /* Forced removal from fetch list */
+ if (unlikely(force_remove)) {
+ reason = "forced removal";
+ should_fetch = 0;
+ goto done;
+ }
+
+ if (unlikely((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_REMOVE) != 0)) {
+ reason = "partition removed";
+ should_fetch = 0;
+ goto done;
+ }
+
+ /* Skip toppars not in active fetch state */
+ if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE) {
+ reason = "not in active fetch state";
+ should_fetch = 0;
+ goto done;
+ }
+
+ /* Update broker thread's fetch op version */
+ version = rktp->rktp_op_version;
+ if (version > rktp->rktp_fetch_version ||
+ rd_kafka_fetch_pos_cmp(&rktp->rktp_next_fetch_start,
+ &rktp->rktp_last_next_fetch_start) ||
+ rktp->rktp_offsets.fetch_pos.offset == RD_KAFKA_OFFSET_INVALID) {
+ /* New version barrier, something was modified from the
+ * control plane. Reset and start over.
+ * Alternatively only the next_offset changed but not the
+ * barrier, which is the case when automatically triggering
+ * offset.reset (such as on PARTITION_EOF or
+ * OFFSET_OUT_OF_RANGE). */
+
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC, "FETCHDEC",
+ "Topic %s [%" PRId32
+ "]: fetch decide: "
+ "updating to version %d (was %d) at %s "
+ "(was %s)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ version, rktp->rktp_fetch_version,
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
+ rd_kafka_fetch_pos2str(rktp->rktp_offsets.fetch_pos));
+
+ rd_kafka_offset_stats_reset(&rktp->rktp_offsets);
+
+ /* New start offset */
+ rktp->rktp_offsets.fetch_pos = rktp->rktp_next_fetch_start;
+ rktp->rktp_last_next_fetch_start = rktp->rktp_next_fetch_start;
+
+ rktp->rktp_fetch_version = version;
+
+ /* Clear last error to propagate new fetch
+ * errors if encountered. */
+ rktp->rktp_last_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp,
+ version);
+ }
+
+
+ if (RD_KAFKA_TOPPAR_IS_PAUSED(rktp)) {
+ should_fetch = 0;
+ reason = "paused";
+
+ } else if (RD_KAFKA_OFFSET_IS_LOGICAL(
+ rktp->rktp_next_fetch_start.offset)) {
+ should_fetch = 0;
+ reason = "no concrete offset";
+
+ } else if (rd_kafka_q_len(rktp->rktp_fetchq) >=
+ rkb->rkb_rk->rk_conf.queued_min_msgs) {
+ /* Skip toppars who's local message queue is already above
+ * the lower threshold. */
+ reason = "queued.min.messages exceeded";
+ should_fetch = 0;
+
+ } else if ((int64_t)rd_kafka_q_size(rktp->rktp_fetchq) >=
+ rkb->rkb_rk->rk_conf.queued_max_msg_bytes) {
+ reason = "queued.max.messages.kbytes exceeded";
+ should_fetch = 0;
+
+ } else if (rktp->rktp_ts_fetch_backoff > rd_clock()) {
+ reason = "fetch backed off";
+ ts_backoff = rktp->rktp_ts_fetch_backoff;
+ should_fetch = 0;
+ }
+
+done:
+ /* Copy offset stats to finalized place holder. */
+ rktp->rktp_offsets_fin = rktp->rktp_offsets;
+
+ if (rktp->rktp_fetch != should_fetch) {
+ rd_rkb_dbg(
+ rkb, FETCH, "FETCH",
+ "Topic %s [%" PRId32
+ "] in state %s at %s "
+ "(%d/%d msgs, %" PRId64
+ "/%d kb queued, "
+ "opv %" PRId32 ") is %s%s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
+ rd_kafka_q_len(rktp->rktp_fetchq),
+ rkb->rkb_rk->rk_conf.queued_min_msgs,
+ rd_kafka_q_size(rktp->rktp_fetchq) / 1024,
+ rkb->rkb_rk->rk_conf.queued_max_msg_kbytes,
+ rktp->rktp_fetch_version,
+ should_fetch ? "fetchable" : "not fetchable: ", reason);
+
+ if (should_fetch) {
+ rd_dassert(rktp->rktp_fetch_version > 0);
+ rd_kafka_broker_active_toppar_add(
+ rkb, rktp, *reason ? reason : "fetchable");
+ } else {
+ rd_kafka_broker_active_toppar_del(rkb, rktp, reason);
+ }
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+
+ /* Non-fetching partitions will have an
+ * indefinate backoff, unless explicitly specified. */
+ if (!should_fetch && !ts_backoff)
+ ts_backoff = RD_TS_MAX;
+
+ return ts_backoff;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h
new file mode 100644
index 000000000..0e3af82bb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_fetcher.h
@@ -0,0 +1,41 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2022 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDKAFKA_FETCHER_H_
+#define _RDKAFKA_FETCHER_H_
+
+
+int rd_kafka_broker_fetch_toppars(rd_kafka_broker_t *rkb, rd_ts_t now);
+
+rd_ts_t rd_kafka_toppar_fetch_decide(rd_kafka_toppar_t *rktp,
+ rd_kafka_broker_t *rkb,
+ int force_remove);
+
+
+#endif /* _RDKAFKA_FETCHER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c
new file mode 100644
index 000000000..98359b424
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.c
@@ -0,0 +1,220 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_header.h"
+
+
+
+#define rd_kafka_header_destroy rd_free
+
+void rd_kafka_headers_destroy(rd_kafka_headers_t *hdrs) {
+ rd_list_destroy(&hdrs->rkhdrs_list);
+ rd_free(hdrs);
+}
+
+rd_kafka_headers_t *rd_kafka_headers_new(size_t initial_count) {
+ rd_kafka_headers_t *hdrs;
+
+ hdrs = rd_malloc(sizeof(*hdrs));
+ rd_list_init(&hdrs->rkhdrs_list, (int)initial_count,
+ rd_kafka_header_destroy);
+ hdrs->rkhdrs_ser_size = 0;
+
+ return hdrs;
+}
+
+static void *rd_kafka_header_copy(const void *_src, void *opaque) {
+ rd_kafka_headers_t *hdrs = opaque;
+ const rd_kafka_header_t *src = (const rd_kafka_header_t *)_src;
+
+ return (void *)rd_kafka_header_add(
+ hdrs, src->rkhdr_name, src->rkhdr_name_size, src->rkhdr_value,
+ src->rkhdr_value_size);
+}
+
+rd_kafka_headers_t *rd_kafka_headers_copy(const rd_kafka_headers_t *src) {
+ rd_kafka_headers_t *dst;
+
+ dst = rd_malloc(sizeof(*dst));
+ rd_list_init(&dst->rkhdrs_list, rd_list_cnt(&src->rkhdrs_list),
+ rd_kafka_header_destroy);
+ dst->rkhdrs_ser_size = 0; /* Updated by header_copy() */
+ rd_list_copy_to(&dst->rkhdrs_list, &src->rkhdrs_list,
+ rd_kafka_header_copy, dst);
+
+ return dst;
+}
+
+
+
+rd_kafka_resp_err_t rd_kafka_header_add(rd_kafka_headers_t *hdrs,
+ const char *name,
+ ssize_t name_size,
+ const void *value,
+ ssize_t value_size) {
+ rd_kafka_header_t *hdr;
+ char varint_NameLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
+ char varint_ValueLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
+
+ if (name_size == -1)
+ name_size = strlen(name);
+
+ if (value_size == -1)
+ value_size = value ? strlen(value) : 0;
+ else if (!value)
+ value_size = 0;
+
+ hdr = rd_malloc(sizeof(*hdr) + name_size + 1 + value_size + 1);
+ hdr->rkhdr_name_size = name_size;
+ memcpy((void *)hdr->rkhdr_name, name, name_size);
+ hdr->rkhdr_name[name_size] = '\0';
+
+ if (likely(value != NULL)) {
+ hdr->rkhdr_value = hdr->rkhdr_name + name_size + 1;
+ memcpy((void *)hdr->rkhdr_value, value, value_size);
+ hdr->rkhdr_value[value_size] = '\0';
+ hdr->rkhdr_value_size = value_size;
+ } else {
+ hdr->rkhdr_value = NULL;
+ hdr->rkhdr_value_size = 0;
+ }
+
+ rd_list_add(&hdrs->rkhdrs_list, hdr);
+
+ /* Calculate serialized size of header */
+ hdr->rkhdr_ser_size = name_size + value_size;
+ hdr->rkhdr_ser_size += rd_uvarint_enc_i64(
+ varint_NameLen, sizeof(varint_NameLen), name_size);
+ hdr->rkhdr_ser_size += rd_uvarint_enc_i64(
+ varint_ValueLen, sizeof(varint_ValueLen), value_size);
+ hdrs->rkhdrs_ser_size += hdr->rkhdr_ser_size;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief header_t(name) to char * comparator
+ */
+static int rd_kafka_header_cmp_str(void *_a, void *_b) {
+ const rd_kafka_header_t *a = _a;
+ const char *b = _b;
+
+ return strcmp(a->rkhdr_name, b);
+}
+
+rd_kafka_resp_err_t rd_kafka_header_remove(rd_kafka_headers_t *hdrs,
+ const char *name) {
+ size_t ser_size = 0;
+ rd_kafka_header_t *hdr;
+ int i;
+
+ RD_LIST_FOREACH_REVERSE(hdr, &hdrs->rkhdrs_list, i) {
+ if (rd_kafka_header_cmp_str(hdr, (void *)name))
+ continue;
+
+ ser_size += hdr->rkhdr_ser_size;
+ rd_list_remove_elem(&hdrs->rkhdrs_list, i);
+ rd_kafka_header_destroy(hdr);
+ }
+
+ if (ser_size == 0)
+ return RD_KAFKA_RESP_ERR__NOENT;
+
+ rd_dassert(hdrs->rkhdrs_ser_size >= ser_size);
+ hdrs->rkhdrs_ser_size -= ser_size;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+rd_kafka_resp_err_t rd_kafka_header_get_last(const rd_kafka_headers_t *hdrs,
+ const char *name,
+ const void **valuep,
+ size_t *sizep) {
+ const rd_kafka_header_t *hdr;
+ int i;
+ size_t name_size = strlen(name);
+
+ RD_LIST_FOREACH_REVERSE(hdr, &hdrs->rkhdrs_list, i) {
+ if (hdr->rkhdr_name_size == name_size &&
+ !strcmp(hdr->rkhdr_name, name)) {
+ *valuep = hdr->rkhdr_value;
+ *sizep = hdr->rkhdr_value_size;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+ }
+
+ return RD_KAFKA_RESP_ERR__NOENT;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_header_get(const rd_kafka_headers_t *hdrs,
+ size_t idx,
+ const char *name,
+ const void **valuep,
+ size_t *sizep) {
+ const rd_kafka_header_t *hdr;
+ int i;
+ size_t mi = 0; /* index for matching names */
+ size_t name_size = strlen(name);
+
+ RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) {
+ if (hdr->rkhdr_name_size == name_size &&
+ !strcmp(hdr->rkhdr_name, name) && mi++ == idx) {
+ *valuep = hdr->rkhdr_value;
+ *sizep = hdr->rkhdr_value_size;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+ }
+
+ return RD_KAFKA_RESP_ERR__NOENT;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_header_get_all(const rd_kafka_headers_t *hdrs,
+ size_t idx,
+ const char **namep,
+ const void **valuep,
+ size_t *sizep) {
+ const rd_kafka_header_t *hdr;
+
+ hdr = rd_list_elem(&hdrs->rkhdrs_list, (int)idx);
+ if (unlikely(!hdr))
+ return RD_KAFKA_RESP_ERR__NOENT;
+
+ *namep = hdr->rkhdr_name;
+ *valuep = hdr->rkhdr_value;
+ *sizep = hdr->rkhdr_value_size;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+size_t rd_kafka_header_cnt(const rd_kafka_headers_t *hdrs) {
+ return (size_t)rd_list_cnt(&hdrs->rkhdrs_list);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h
new file mode 100644
index 000000000..bd6b0e959
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_header.h
@@ -0,0 +1,76 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_HEADER_H
+#define _RDKAFKA_HEADER_H
+
+
+
+/**
+ * @brief The header list (rd_kafka_headers_t) wraps the generic rd_list_t
+ * with additional fields to keep track of the total on-wire size.
+ */
+struct rd_kafka_headers_s {
+ rd_list_t rkhdrs_list; /**< List of (rd_kafka_header_t *) */
+ size_t rkhdrs_ser_size; /**< Total serialized size of headers */
+};
+
+
+/**
+ * @brief The header item itself is a single-allocation immutable structure
+ * (rd_kafka_header_t) containing the header name, value and value
+ * length.
+ * Both the header name and header value are nul-terminated for
+ * API convenience.
+ * The header value is a tri-state:
+ * - proper value (considered binary) with length > 0
+ * - empty value with length = 0 (pointer is non-NULL and nul-termd)
+ * - null value with length = 0 (pointer is NULL)
+ */
+typedef struct rd_kafka_header_s {
+ size_t rkhdr_ser_size; /**< Serialized size */
+ size_t rkhdr_value_size; /**< Value length (without nul-term) */
+ size_t rkhdr_name_size; /**< Header name size (w/o nul-term) */
+ char *rkhdr_value; /**< Header value (nul-terminated string but
+ * considered binary).
+ * Will be NULL for null values, else
+ * points to rkhdr_name+.. */
+ char rkhdr_name[1]; /**< Header name (nul-terminated string).
+ * Followed by allocation for value+nul */
+} rd_kafka_header_t;
+
+
+/**
+ * @returns the serialized size for the headers
+ */
+static RD_INLINE RD_UNUSED size_t
+rd_kafka_headers_serialized_size(const rd_kafka_headers_t *hdrs) {
+ return hdrs->rkhdrs_ser_size;
+}
+
+#endif /* _RDKAFKA_HEADER_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c
new file mode 100644
index 000000000..3245e856e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.c
@@ -0,0 +1,807 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_txnmgr.h"
+#include "rdkafka_request.h"
+#include "rdunittest.h"
+
+#include <stdarg.h>
+
+/**
+ * @name Idempotent Producer logic
+ *
+ *
+ * Unrecoverable idempotent producer errors that could jeopardize the
+ * idempotency guarantees if the producer was to continue operating
+ * are treated as fatal errors, unless the producer is transactional in which
+ * case the current transaction will fail (also known as an abortable error)
+ * but the producer will not raise a fatal error.
+ *
+ */
+
+static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk,
+ rd_bool_t immediate,
+ const char *reason);
+
+
+/**
+ * @brief Set the producer's idempotence state.
+ * @locks rd_kafka_wrlock() MUST be held
+ */
+void rd_kafka_idemp_set_state(rd_kafka_t *rk,
+ rd_kafka_idemp_state_t new_state) {
+
+ if (rk->rk_eos.idemp_state == new_state)
+ return;
+
+ if (rd_kafka_fatal_error_code(rk) &&
+ new_state != RD_KAFKA_IDEMP_STATE_FATAL_ERROR &&
+ new_state != RD_KAFKA_IDEMP_STATE_TERM &&
+ new_state != RD_KAFKA_IDEMP_STATE_DRAIN_RESET &&
+ new_state != RD_KAFKA_IDEMP_STATE_DRAIN_BUMP) {
+ rd_kafka_dbg(rk, EOS, "IDEMPSTATE",
+ "Denying state change %s -> %s since a "
+ "fatal error has been raised",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
+ rd_kafka_idemp_state2str(new_state));
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR);
+ return;
+ }
+
+ rd_kafka_dbg(rk, EOS, "IDEMPSTATE",
+ "Idempotent producer state change %s -> %s",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
+ rd_kafka_idemp_state2str(new_state));
+
+ rk->rk_eos.idemp_state = new_state;
+ rk->rk_eos.ts_idemp_state = rd_clock();
+
+ /* Inform transaction manager of state change */
+ if (rd_kafka_is_transactional(rk))
+ rd_kafka_txn_idemp_state_change(rk, new_state);
+}
+
+
+
+/**
+ * @brief Find a usable broker suitable for acquiring Pid
+ * or Coordinator query.
+ *
+ * @locks rd_kafka_wrlock() MUST be held
+ *
+ * @returns a broker with increased refcount, or NULL on error.
+ */
+rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk,
+ rd_kafka_resp_err_t *errp,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_broker_t *rkb;
+ int up_cnt;
+
+ rkb = rd_kafka_broker_any_up(rk, &up_cnt,
+ rd_kafka_broker_filter_non_idempotent,
+ NULL, "acquire ProducerID");
+ if (rkb)
+ return rkb;
+
+ if (up_cnt > 0) {
+ *errp = RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ rd_snprintf(errstr, errstr_size,
+ "%s not supported by "
+ "any of the %d connected broker(s): requires "
+ "Apache Kafka broker version >= 0.11.0",
+ rd_kafka_is_transactional(rk)
+ ? "Transactions"
+ : "Idempotent producer",
+ up_cnt);
+ } else {
+ *errp = RD_KAFKA_RESP_ERR__TRANSPORT;
+ rd_snprintf(errstr, errstr_size,
+ "No brokers available for %s (%d broker(s) known)",
+ rd_kafka_is_transactional(rk)
+ ? "Transactions"
+ : "Idempotent producer",
+ rd_atomic32_get(&rk->rk_broker_cnt));
+ }
+
+ rd_kafka_dbg(rk, EOS, "PIDBROKER", "%s", errstr);
+
+ return NULL;
+}
+
+
+
+/**
+ * @brief Check if an error needs special attention, possibly
+ * raising a fatal error.
+ *
+ * @param is_fatal if true, force fatal error regardless of error code.
+ *
+ * @returns rd_true if a fatal error was triggered, else rd_false.
+ *
+ * @locks rd_kafka_wrlock() MUST be held
+ * @locality rdkafka main thread
+ */
+rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *errstr,
+ rd_bool_t is_fatal) {
+ const char *preface = "";
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE:
+ case RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT:
+ case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+ is_fatal = rd_true;
+ break;
+
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+ case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
+ is_fatal = rd_true;
+ /* Normalize error */
+ err = RD_KAFKA_RESP_ERR__FENCED;
+ preface = "Producer fenced by newer instance: ";
+ break;
+
+ default:
+ break;
+ }
+
+ if (!is_fatal)
+ return rd_false;
+
+ if (rd_kafka_is_transactional(rk))
+ rd_kafka_txn_set_fatal_error(rk, RD_DONT_LOCK, err, "%s%s",
+ preface, errstr);
+ else
+ rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, err, "%s%s",
+ preface, errstr);
+
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR);
+
+ return rd_true;
+}
+
+
+
+/**
+ * @brief State machine for PID acquisition for the idempotent
+ * and transactional producers.
+ *
+ * @locality rdkafka main thread
+ * @locks rd_kafka_wrlock() MUST be held.
+ */
+void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_broker_t *rkb;
+ rd_bool_t is_fatal = rd_false;
+
+ /* If a fatal error has been raised we do not
+ * attempt to acquire a PID. */
+ if (unlikely(rd_kafka_fatal_error_code(rk)))
+ return;
+
+redo:
+ switch (rk->rk_eos.idemp_state) {
+ case RD_KAFKA_IDEMP_STATE_INIT:
+ case RD_KAFKA_IDEMP_STATE_TERM:
+ case RD_KAFKA_IDEMP_STATE_FATAL_ERROR:
+ break;
+
+ case RD_KAFKA_IDEMP_STATE_REQ_PID:
+ /* Request (new) PID */
+
+ /* The idempotent producer may ask any broker for a PID,
+ * while the transactional producer needs to ask its
+ * transaction coordinator for a PID. */
+ if (!rd_kafka_is_transactional(rk) ||
+ rk->rk_eos.txn_curr_coord) {
+ rd_kafka_idemp_set_state(
+ rk, RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT);
+ goto redo;
+ }
+
+
+ /*
+ * Look up transaction coordinator.
+ * When the coordinator is known this FSM will be called again.
+ */
+ if (rd_kafka_txn_coord_query(rk, "Acquire PID"))
+ return; /* Fatal error */
+ break;
+
+ case RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT:
+ /* Waiting for broker/coordinator to become available */
+ if (rd_kafka_is_transactional(rk)) {
+ /* Check that a proper coordinator broker has
+ * been assigned by inspecting txn_curr_coord
+ * (the real broker) rather than txn_coord
+ * (the logical broker). */
+ if (!rk->rk_eos.txn_curr_coord) {
+ /*
+ * Can happen if the coordinator wasn't set or
+ * wasn't up initially and has been set to NULL
+ * after a COORDINATOR_NOT_AVAILABLE error in
+ * FindCoordinatorResponse. When the coordinator
+ * is known this FSM will be called again.
+ */
+ rd_kafka_txn_coord_query(
+ rk, "Awaiting coordinator");
+ return;
+ }
+ rkb = rk->rk_eos.txn_coord;
+ rd_kafka_broker_keep(rkb);
+
+ } else {
+ rkb = rd_kafka_idemp_broker_any(rk, &err, errstr,
+ sizeof(errstr));
+
+ if (!rkb && rd_kafka_idemp_check_error(rk, err, errstr,
+ rd_false))
+ return; /* Fatal error */
+ }
+
+ if (!rkb || !rd_kafka_broker_is_up(rkb)) {
+ /* The coordinator broker monitor will re-trigger
+ * the fsm sooner if txn_coord has a state change,
+ * else rely on the timer to retry. */
+ rd_kafka_idemp_pid_timer_restart(
+ rk, rd_false,
+ rkb ? "No broker available" : "Coordinator not up");
+
+ if (rkb)
+ rd_kafka_broker_destroy(rkb);
+ return;
+ }
+
+ if (rd_kafka_is_transactional(rk)) {
+ int err_of = 0;
+
+ /* If this is a transactional producer and the
+ * PID-epoch needs to be bumped we'll require KIP-360
+ * support on the broker, else raise a fatal error. */
+
+ if (rd_kafka_pid_valid(rk->rk_eos.pid)) {
+ rd_rkb_dbg(rkb, EOS, "GETPID",
+ "Requesting ProducerId bump for %s",
+ rd_kafka_pid2str(rk->rk_eos.pid));
+ err_of = rd_snprintf(errstr, sizeof(errstr),
+ "Failed to request "
+ "ProducerId bump: ");
+ rd_assert(err_of < 0 ||
+ err_of < (int)sizeof(errstr));
+ } else {
+ rd_rkb_dbg(rkb, EOS, "GETPID",
+ "Acquiring ProducerId");
+ }
+
+ err = rd_kafka_InitProducerIdRequest(
+ rkb, rk->rk_conf.eos.transactional_id,
+ rk->rk_conf.eos.transaction_timeout_ms,
+ rd_kafka_pid_valid(rk->rk_eos.pid) ? &rk->rk_eos.pid
+ : NULL,
+ errstr + err_of, sizeof(errstr) - err_of,
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_handle_InitProducerId, NULL);
+
+ if (err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE &&
+ rd_kafka_pid_valid(rk->rk_eos.pid))
+ is_fatal = rd_true;
+ } else {
+ rd_rkb_dbg(rkb, EOS, "GETPID", "Acquiring ProducerId");
+
+ err = rd_kafka_InitProducerIdRequest(
+ rkb, NULL, -1, NULL, errstr, sizeof(errstr),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_handle_InitProducerId, NULL);
+ }
+
+ if (err) {
+ rd_rkb_dbg(rkb, EOS, "GETPID",
+ "Can't acquire ProducerId from "
+ "this broker: %s",
+ errstr);
+ }
+
+ rd_kafka_broker_destroy(rkb);
+
+ if (err) {
+ if (rd_kafka_idemp_check_error(rk, err, errstr,
+ is_fatal))
+ return; /* Fatal error */
+
+ /* The coordinator broker monitor will re-trigger
+ * the fsm sooner if txn_coord has a state change,
+ * else rely on the timer to retry. */
+ rd_kafka_idemp_pid_timer_restart(rk, rd_false, errstr);
+ return;
+ }
+
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_WAIT_PID);
+ break;
+
+ case RD_KAFKA_IDEMP_STATE_WAIT_PID:
+ /* PID requested, waiting for reply */
+ break;
+
+ case RD_KAFKA_IDEMP_STATE_ASSIGNED:
+ /* New PID assigned */
+ break;
+
+ case RD_KAFKA_IDEMP_STATE_DRAIN_RESET:
+ /* Wait for outstanding ProduceRequests to finish
+ * before resetting and re-requesting a new PID. */
+ break;
+
+ case RD_KAFKA_IDEMP_STATE_DRAIN_BUMP:
+ /* Wait for outstanding ProduceRequests to finish
+ * before bumping the current epoch. */
+ break;
+
+ case RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT:
+ /* Wait for txnmgr to abort its current transaction
+ * and then trigger a drain & reset or bump. */
+ break;
+ }
+}
+
+
+/**
+ * @brief Timed PID retrieval timer callback.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_idemp_pid_timer_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_t *rk = arg;
+
+ rd_kafka_wrlock(rk);
+ rd_kafka_idemp_pid_fsm(rk);
+ rd_kafka_wrunlock(rk);
+}
+
+
+/**
+ * @brief Restart the pid retrieval timer.
+ *
+ * @param immediate If true, request a pid as soon as possible,
+ * else use the default interval (500ms).
+ * @locality any
+ * @locks none
+ */
+static void rd_kafka_idemp_pid_timer_restart(rd_kafka_t *rk,
+ rd_bool_t immediate,
+ const char *reason) {
+ rd_kafka_dbg(rk, EOS, "TXN", "Starting PID FSM timer%s: %s",
+ immediate ? " (fire immediately)" : "", reason);
+ rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.pid_tmr,
+ rd_true,
+ 1000 * (immediate ? 1 : 500 /*500ms*/),
+ rd_kafka_idemp_pid_timer_cb, rk);
+}
+
+
+/**
+ * @brief Handle failure to acquire a PID from broker.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ char errstr[512];
+
+ rd_rkb_dbg(rkb, EOS, "GETPID", "Failed to acquire PID: %s",
+ rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ return; /* Ignore */
+
+ rd_assert(thrd_is_current(rk->rk_thread));
+
+ rd_snprintf(errstr, sizeof(errstr),
+ "Failed to acquire %s PID from broker %s: %s",
+ rd_kafka_is_transactional(rk) ? "transactional"
+ : "idempotence",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err));
+
+ rd_kafka_wrlock(rk);
+
+ if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false)) {
+ rd_kafka_wrunlock(rk);
+ return; /* Fatal error */
+ }
+
+ RD_UT_COVERAGE(0);
+
+ if (rd_kafka_is_transactional(rk) &&
+ (err == RD_KAFKA_RESP_ERR_NOT_COORDINATOR ||
+ err == RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE))
+ rd_kafka_txn_coord_set(rk, NULL, "%s", errstr);
+
+ /* This error code is read by init_transactions() for propagation
+ * to the application. */
+ rk->rk_eos.txn_init_err = err;
+
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID);
+
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_log(rk, LOG_WARNING, "GETPID", "%s: retrying", errstr);
+
+ /* Restart acquisition after a short wait */
+ rd_kafka_idemp_pid_timer_restart(rk, rd_false, errstr);
+}
+
+
+/**
+ * @brief Update Producer ID from InitProducerId response.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb,
+ const rd_kafka_pid_t pid) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+
+ rd_kafka_wrlock(rk);
+ if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_PID) {
+ rd_rkb_dbg(rkb, EOS, "GETPID",
+ "Ignoring InitProduceId response (%s) "
+ "in state %s",
+ rd_kafka_pid2str(pid),
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
+ rd_kafka_wrunlock(rk);
+ return;
+ }
+
+ if (!rd_kafka_pid_valid(pid)) {
+ rd_kafka_wrunlock(rk);
+ rd_rkb_log(rkb, LOG_WARNING, "GETPID",
+ "Acquired invalid PID{%" PRId64 ",%hd}: ignoring",
+ pid.id, pid.epoch);
+ rd_kafka_idemp_request_pid_failed(rkb,
+ RD_KAFKA_RESP_ERR__BAD_MSG);
+ return;
+ }
+
+ if (rd_kafka_pid_valid(rk->rk_eos.pid))
+ rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s (previous %s)",
+ rd_kafka_pid2str(pid),
+ rd_kafka_pid2str(rk->rk_eos.pid));
+ else
+ rd_kafka_dbg(rk, EOS, "GETPID", "Acquired %s",
+ rd_kafka_pid2str(pid));
+ rk->rk_eos.pid = pid;
+ rk->rk_eos.epoch_cnt++;
+
+ /* The idempotence state change will trigger the transaction manager,
+ * see rd_kafka_txn_idemp_state_change(). */
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_ASSIGNED);
+
+ rd_kafka_wrunlock(rk);
+
+ /* Wake up all broker threads (that may have messages to send
+ * that were waiting for a Producer ID). */
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "PID updated");
+}
+
+
+/**
+ * @brief Call when all partition request queues
+ * are drained to reset and re-request a new PID.
+ *
+ * @locality any
+ * @locks none
+ */
+static void rd_kafka_idemp_drain_done(rd_kafka_t *rk) {
+ rd_bool_t restart_tmr = rd_false;
+ rd_bool_t wakeup_brokers = rd_false;
+
+ rd_kafka_wrlock(rk);
+ if (rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_DRAIN_RESET) {
+ rd_kafka_dbg(rk, EOS, "DRAIN", "All partitions drained");
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID);
+ restart_tmr = rd_true;
+
+ } else if (rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_DRAIN_BUMP &&
+ rd_kafka_pid_valid(rk->rk_eos.pid)) {
+
+ if (rd_kafka_is_transactional(rk)) {
+ /* The epoch bump needs to be performed by the
+ * coordinator by sending it an InitPid request. */
+ rd_kafka_dbg(rk, EOS, "DRAIN",
+ "All partitions drained, asking "
+ "coordinator to bump epoch (currently %s)",
+ rd_kafka_pid2str(rk->rk_eos.pid));
+ rd_kafka_idemp_set_state(rk,
+ RD_KAFKA_IDEMP_STATE_REQ_PID);
+ restart_tmr = rd_true;
+
+ } else {
+ /* The idempotent producer can bump its own epoch */
+ rk->rk_eos.pid = rd_kafka_pid_bump(rk->rk_eos.pid);
+ rd_kafka_dbg(rk, EOS, "DRAIN",
+ "All partitions drained, bumped "
+ "epoch to %s",
+ rd_kafka_pid2str(rk->rk_eos.pid));
+ rd_kafka_idemp_set_state(rk,
+ RD_KAFKA_IDEMP_STATE_ASSIGNED);
+ wakeup_brokers = rd_true;
+ }
+ }
+ rd_kafka_wrunlock(rk);
+
+ /* Restart timer to eventually trigger a re-request */
+ if (restart_tmr)
+ rd_kafka_idemp_pid_timer_restart(rk, rd_true, "Drain done");
+
+ /* Wake up all broker threads (that may have messages to send
+ * that were waiting for a Producer ID). */
+ if (wakeup_brokers)
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "message drain done");
+}
+
+/**
+ * @brief Check if in-flight toppars drain is done, if so transition to
+ * next state.
+ *
+ * @locality any
+ * @locks none
+ */
+static RD_INLINE void rd_kafka_idemp_check_drain_done(rd_kafka_t *rk) {
+ if (rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt) == 0)
+ rd_kafka_idemp_drain_done(rk);
+}
+
+
+/**
+ * @brief Schedule a reset and re-request of PID when the
+ * local ProduceRequest queues have been fully drained.
+ *
+ * The PID is not reset until the queues are fully drained.
+ *
+ * @locality any
+ * @locks none
+ */
+void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason) {
+ rd_kafka_wrlock(rk);
+ rd_kafka_dbg(rk, EOS, "DRAIN",
+ "Beginning partition drain for %s reset "
+ "for %d partition(s) with in-flight requests: %s",
+ rd_kafka_pid2str(rk->rk_eos.pid),
+ rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt), reason);
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_RESET);
+ rd_kafka_wrunlock(rk);
+
+ /* Check right away if the drain could be done. */
+ rd_kafka_idemp_check_drain_done(rk);
+}
+
+
+/**
+ * @brief Schedule an epoch bump when the local ProduceRequest queues
+ * have been fully drained.
+ *
+ * The PID is not bumped until the queues are fully drained and the current
+ * transaction is aborted (if any).
+ *
+ * @param allow_txn_abort If this is a transactional producer and this flag is
+ * true then we trigger an abortable txn error to abort
+ * the current transaction first. The txnmgr will later
+ * call us back with this flag set to false to go ahead
+ * with the epoch bump.
+ * @param fmt is a human-readable reason for the bump
+ *
+ *
+ * @locality any
+ * @locks none
+ */
+void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk,
+ rd_bool_t allow_txn_abort,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ va_list ap;
+ char buf[256];
+ rd_bool_t requires_txn_abort =
+ allow_txn_abort && rd_kafka_is_transactional(rk);
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ rd_kafka_wrlock(rk);
+
+
+ if (requires_txn_abort) {
+ rd_kafka_dbg(rk, EOS, "DRAIN",
+ "Need transaction abort before beginning "
+ "partition drain in state %s for %s epoch bump "
+ "for %d partition(s) with in-flight requests: %s",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
+ rd_kafka_pid2str(rk->rk_eos.pid),
+ rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt),
+ buf);
+ rd_kafka_idemp_set_state(rk,
+ RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT);
+
+ } else {
+ rd_kafka_dbg(rk, EOS, "DRAIN",
+ "Beginning partition drain in state %s "
+ "for %s epoch bump "
+ "for %d partition(s) with in-flight requests: %s",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
+ rd_kafka_pid2str(rk->rk_eos.pid),
+ rd_atomic32_get(&rk->rk_eos.inflight_toppar_cnt),
+ buf);
+
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_DRAIN_BUMP);
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ if (requires_txn_abort) {
+ /* Transactions: bumping the epoch requires the current
+ * transaction to be aborted first. */
+ rd_kafka_txn_set_abortable_error_with_bump(rk, err, "%s", buf);
+
+ } else {
+ /* Idempotent producer: check right away if the drain could
+ * be done. */
+ rd_kafka_idemp_check_drain_done(rk);
+ }
+}
+
+/**
+ * @brief Mark partition as waiting-to-drain.
+ *
+ * @locks toppar_lock MUST be held
+ * @locality broker thread (leader or not)
+ */
+void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason) {
+ if (rktp->rktp_eos.wait_drain)
+ return;
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "DRAIN",
+ "%.*s [%" PRId32 "] beginning partition drain: %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, reason);
+ rktp->rktp_eos.wait_drain = rd_true;
+}
+
+
+/**
+ * @brief Mark partition as no longer having a ProduceRequest in-flight.
+ *
+ * @locality any
+ * @locks none
+ */
+void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp) {
+ int r = rd_atomic32_sub(&rk->rk_eos.inflight_toppar_cnt, 1);
+
+ if (r == 0) {
+ /* Check if we're waiting for the partitions to drain
+ * before resetting the PID, and if so trigger a reset
+ * since this was the last drained one. */
+ rd_kafka_idemp_drain_done(rk);
+ } else {
+ rd_assert(r >= 0);
+ }
+}
+
+
+/**
+ * @brief Mark partition as having a ProduceRequest in-flight.
+ *
+ * @locality toppar handler thread
+ * @locks none
+ */
+void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp) {
+ rd_atomic32_add(&rk->rk_eos.inflight_toppar_cnt, 1);
+}
+
+
+
+/**
+ * @brief Start idempotent producer (asynchronously).
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate) {
+
+ if (rd_kafka_terminating(rk))
+ return;
+
+ rd_kafka_wrlock(rk);
+ /* Don't restart PID acquisition if there's already an outstanding
+ * request. */
+ if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_PID)
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_REQ_PID);
+ rd_kafka_wrunlock(rk);
+
+ /* Schedule request timer */
+ rd_kafka_idemp_pid_timer_restart(rk, immediate,
+ "Starting idempotent producer");
+}
+
+
+/**
+ * @brief Initialize the idempotent producer.
+ *
+ * @remark Must be called from rd_kafka_new() and only once.
+ * @locality rdkafka main thread
+ * @locks none / not needed from rd_kafka_new()
+ */
+void rd_kafka_idemp_init(rd_kafka_t *rk) {
+ rd_assert(thrd_is_current(rk->rk_thread));
+
+ rd_atomic32_init(&rk->rk_eos.inflight_toppar_cnt, 0);
+ rd_kafka_pid_reset(&rk->rk_eos.pid);
+
+ /* The transactional producer acquires the PID
+ * from init_transactions(), for non-transactional producers
+ * the PID can be acquired right away. */
+ if (rd_kafka_is_transactional(rk))
+ rd_kafka_txns_init(rk);
+ else
+ /* There are no available brokers this early,
+ * so just set the state to indicate that we want to
+ * acquire a PID as soon as possible and start
+ * the timer. */
+ rd_kafka_idemp_start(rk, rd_false /*non-immediate*/);
+}
+
+
+/**
+ * @brief Terminate and clean up idempotent producer
+ *
+ * @locality rdkafka main thread
+ * @locks rd_kafka_wrlock() MUST be held
+ */
+void rd_kafka_idemp_term(rd_kafka_t *rk) {
+ rd_assert(thrd_is_current(rk->rk_thread));
+
+ rd_kafka_wrlock(rk);
+ if (rd_kafka_is_transactional(rk))
+ rd_kafka_txns_term(rk);
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_TERM);
+ rd_kafka_wrunlock(rk);
+ rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.pid_tmr, 1);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h
new file mode 100644
index 000000000..5be8d606d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_idempotence.h
@@ -0,0 +1,144 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RD_KAFKA_IDEMPOTENCE_H_
+#define _RD_KAFKA_IDEMPOTENCE_H_
+
+
+/**
+ * @define The broker maintains a window of the 5 last Produce requests
+ * for a partition to be able to de-deduplicate resends.
+ */
+#define RD_KAFKA_IDEMP_MAX_INFLIGHT 5
+#define RD_KAFKA_IDEMP_MAX_INFLIGHT_STR "5" /* For printouts */
+
+/**
+ * @brief Get the current PID if state permits.
+ *
+ * @param bumpable If true, return PID even if it may only be used for
+ * bumping the Epoch.
+ *
+ * @returns If there is no valid PID or the state
+ * does not permit further PID usage (such as when draining)
+ * then an invalid PID is returned.
+ *
+ * @locality any
+ * @locks none
+ */
+static RD_UNUSED RD_INLINE rd_kafka_pid_t
+rd_kafka_idemp_get_pid0(rd_kafka_t *rk,
+ rd_dolock_t do_lock,
+ rd_bool_t bumpable) {
+ rd_kafka_pid_t pid;
+
+ if (do_lock)
+ rd_kafka_rdlock(rk);
+ if (likely(rk->rk_eos.idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED))
+ pid = rk->rk_eos.pid;
+ else if (unlikely(bumpable && rk->rk_eos.idemp_state ==
+ RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT))
+ pid = rk->rk_eos.pid;
+ else
+ rd_kafka_pid_reset(&pid);
+ if (do_lock)
+ rd_kafka_rdunlock(rk);
+
+ return pid;
+}
+
+#define rd_kafka_idemp_get_pid(rk) \
+ rd_kafka_idemp_get_pid0(rk, RD_DO_LOCK, rd_false)
+
+void rd_kafka_idemp_set_state(rd_kafka_t *rk, rd_kafka_idemp_state_t new_state);
+void rd_kafka_idemp_request_pid_failed(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err);
+void rd_kafka_idemp_pid_update(rd_kafka_broker_t *rkb,
+ const rd_kafka_pid_t pid);
+void rd_kafka_idemp_pid_fsm(rd_kafka_t *rk);
+void rd_kafka_idemp_drain_reset(rd_kafka_t *rk, const char *reason);
+void rd_kafka_idemp_drain_epoch_bump0(rd_kafka_t *rk,
+ rd_bool_t allow_txn_abort,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 4, 5);
+#define rd_kafka_idemp_drain_epoch_bump(rk, err, ...) \
+ rd_kafka_idemp_drain_epoch_bump0(rk, rd_true, err, __VA_ARGS__)
+
+void rd_kafka_idemp_drain_toppar(rd_kafka_toppar_t *rktp, const char *reason);
+void rd_kafka_idemp_inflight_toppar_sub(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp);
+void rd_kafka_idemp_inflight_toppar_add(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp);
+
+rd_kafka_broker_t *rd_kafka_idemp_broker_any(rd_kafka_t *rk,
+ rd_kafka_resp_err_t *errp,
+ char *errstr,
+ size_t errstr_size);
+
+rd_bool_t rd_kafka_idemp_check_error(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *errstr,
+ rd_bool_t is_fatal);
+
+
+/**
+ * @brief Call when a fatal idempotence error has occurred, when the producer
+ * can't continue without risking the idempotency guarantees.
+ *
+ * If the producer is transactional this error is non-fatal and will just
+ * cause the current transaction to transition into the ABORTABLE_ERROR state.
+ * If the producer is not transactional the client instance fatal error
+ * is set and the producer instance is no longer usable.
+ *
+ * @Warning Until KIP-360 has been fully implemented any fatal idempotent
+ * producer error will also raise a fatal transactional producer error.
+ * This is to guarantee that there is no silent data loss.
+ *
+ * @param RK rd_kafka_t instance
+ * @param ERR error to raise
+ * @param ... format string with error message
+ *
+ * @locality any thread
+ * @locks none
+ */
+#define rd_kafka_idemp_set_fatal_error(RK, ERR, ...) \
+ do { \
+ if (rd_kafka_is_transactional(RK)) \
+ rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, ERR, \
+ __VA_ARGS__); \
+ else \
+ rd_kafka_set_fatal_error(RK, ERR, __VA_ARGS__); \
+ } while (0)
+
+void rd_kafka_idemp_start(rd_kafka_t *rk, rd_bool_t immediate);
+void rd_kafka_idemp_init(rd_kafka_t *rk);
+void rd_kafka_idemp_term(rd_kafka_t *rk);
+
+
+#endif /* _RD_KAFKA_IDEMPOTENCE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h
new file mode 100644
index 000000000..584ff3c96
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_int.h
@@ -0,0 +1,1054 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_INT_H_
+#define _RDKAFKA_INT_H_
+
+#ifndef _WIN32
+#define _GNU_SOURCE /* for strndup() */
+#endif
+
+#ifdef _MSC_VER
+typedef int mode_t;
+#endif
+
+#include <fcntl.h>
+
+
+#include "rdsysqueue.h"
+
+#include "rdkafka.h"
+#include "rd.h"
+#include "rdlog.h"
+#include "rdtime.h"
+#include "rdaddr.h"
+#include "rdinterval.h"
+#include "rdavg.h"
+#include "rdlist.h"
+
+#if WITH_SSL
+#include <openssl/ssl.h>
+#endif
+
+
+
+#define rd_kafka_assert(rk, cond) \
+ do { \
+ if (unlikely(!(cond))) \
+ rd_kafka_crash(__FILE__, __LINE__, __FUNCTION__, (rk), \
+ "assert: " #cond); \
+ } while (0)
+
+
+void RD_NORETURN rd_kafka_crash(const char *file,
+ int line,
+ const char *function,
+ rd_kafka_t *rk,
+ const char *reason);
+
+
+/* Forward declarations */
+struct rd_kafka_s;
+struct rd_kafka_topic_s;
+struct rd_kafka_msg_s;
+struct rd_kafka_broker_s;
+struct rd_kafka_toppar_s;
+
+typedef struct rd_kafka_lwtopic_s rd_kafka_lwtopic_t;
+
+
+/**
+ * Protocol level sanity
+ */
+#define RD_KAFKAP_BROKERS_MAX 10000
+#define RD_KAFKAP_TOPICS_MAX 1000000
+#define RD_KAFKAP_PARTITIONS_MAX 100000
+
+
+#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0)
+
+
+/**
+ * @struct Represents a fetch position:
+ * an offset and an partition leader epoch (if known, else -1).
+ */
+typedef struct rd_kafka_fetch_pos_s {
+ int64_t offset;
+ int32_t leader_epoch;
+ rd_bool_t validated;
+} rd_kafka_fetch_pos_t;
+
+
+
+#include "rdkafka_op.h"
+#include "rdkafka_queue.h"
+#include "rdkafka_msg.h"
+#include "rdkafka_proto.h"
+#include "rdkafka_buf.h"
+#include "rdkafka_pattern.h"
+#include "rdkafka_conf.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_timer.h"
+#include "rdkafka_assignor.h"
+#include "rdkafka_metadata.h"
+#include "rdkafka_mock.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_assignment.h"
+#include "rdkafka_coord.h"
+#include "rdkafka_mock.h"
+
+/**
+ * Protocol level sanity
+ */
+#define RD_KAFKAP_BROKERS_MAX 10000
+#define RD_KAFKAP_TOPICS_MAX 1000000
+#define RD_KAFKAP_PARTITIONS_MAX 100000
+#define RD_KAFKAP_GROUPS_MAX 100000
+
+
+#define RD_KAFKA_OFFSET_IS_LOGICAL(OFF) ((OFF) < 0)
+
+
+
+/**
+ * @enum Idempotent Producer state
+ */
+typedef enum {
+ RD_KAFKA_IDEMP_STATE_INIT, /**< Initial state */
+ RD_KAFKA_IDEMP_STATE_TERM, /**< Instance is terminating */
+ RD_KAFKA_IDEMP_STATE_FATAL_ERROR, /**< A fatal error has been raised */
+ RD_KAFKA_IDEMP_STATE_REQ_PID, /**< Request new PID */
+ RD_KAFKA_IDEMP_STATE_WAIT_TRANSPORT, /**< Waiting for coordinator to
+ * become available. */
+ RD_KAFKA_IDEMP_STATE_WAIT_PID, /**< PID requested, waiting for reply */
+ RD_KAFKA_IDEMP_STATE_ASSIGNED, /**< New PID assigned */
+ RD_KAFKA_IDEMP_STATE_DRAIN_RESET, /**< Wait for outstanding
+ * ProduceRequests to finish
+ * before resetting and
+ * re-requesting a new PID. */
+ RD_KAFKA_IDEMP_STATE_DRAIN_BUMP, /**< Wait for outstanding
+ * ProduceRequests to finish
+ * before bumping the current
+ * epoch. */
+ RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT, /**< Wait for transaction abort
+ * to finish and trigger a
+ * drain and reset or bump. */
+} rd_kafka_idemp_state_t;
+
+/**
+ * @returns the idemp_state_t string representation
+ */
+static RD_UNUSED const char *
+rd_kafka_idemp_state2str(rd_kafka_idemp_state_t state) {
+ static const char *names[] = {
+ "Init", "Terminate", "FatalError", "RequestPID", "WaitTransport",
+ "WaitPID", "Assigned", "DrainReset", "DrainBump", "WaitTxnAbort"};
+ return names[state];
+}
+
+
+
+/**
+ * @enum Transactional Producer state
+ */
+typedef enum {
+ /**< Initial state */
+ RD_KAFKA_TXN_STATE_INIT,
+ /**< Awaiting PID to be acquired by rdkafka_idempotence.c */
+ RD_KAFKA_TXN_STATE_WAIT_PID,
+ /**< PID acquired, but application has not made a successful
+ * init_transactions() call. */
+ RD_KAFKA_TXN_STATE_READY_NOT_ACKED,
+ /**< PID acquired, no active transaction. */
+ RD_KAFKA_TXN_STATE_READY,
+ /**< begin_transaction() has been called. */
+ RD_KAFKA_TXN_STATE_IN_TRANSACTION,
+ /**< commit_transaction() has been called. */
+ RD_KAFKA_TXN_STATE_BEGIN_COMMIT,
+ /**< commit_transaction() has been called and all outstanding
+ * messages, partitions, and offsets have been sent. */
+ RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION,
+ /**< Transaction successfully committed but application has not made
+ * a successful commit_transaction() call yet. */
+ RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED,
+ /**< begin_transaction() has been called. */
+ RD_KAFKA_TXN_STATE_BEGIN_ABORT,
+ /**< abort_transaction() has been called. */
+ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION,
+ /**< Transaction successfully aborted but application has not made
+ * a successful abort_transaction() call yet. */
+ RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED,
+ /**< An abortable error has occurred. */
+ RD_KAFKA_TXN_STATE_ABORTABLE_ERROR,
+ /* A fatal error has occured. */
+ RD_KAFKA_TXN_STATE_FATAL_ERROR
+} rd_kafka_txn_state_t;
+
+
+/**
+ * @returns the txn_state_t string representation
+ */
+static RD_UNUSED const char *
+rd_kafka_txn_state2str(rd_kafka_txn_state_t state) {
+ static const char *names[] = {"Init",
+ "WaitPID",
+ "ReadyNotAcked",
+ "Ready",
+ "InTransaction",
+ "BeginCommit",
+ "CommittingTransaction",
+ "CommitNotAcked",
+ "BeginAbort",
+ "AbortingTransaction",
+ "AbortedNotAcked",
+ "AbortableError",
+ "FatalError"};
+ return names[state];
+}
+
+
+
+/**
+ * Kafka handle, internal representation of the application's rd_kafka_t.
+ */
+
+struct rd_kafka_s {
+ rd_kafka_q_t *rk_rep; /* kafka -> application reply queue */
+ rd_kafka_q_t *rk_ops; /* any -> rdkafka main thread ops */
+
+ TAILQ_HEAD(, rd_kafka_broker_s) rk_brokers;
+ rd_list_t rk_broker_by_id; /* Fast id lookups. */
+ rd_atomic32_t rk_broker_cnt;
+ /**< Number of brokers in state >= UP */
+ rd_atomic32_t rk_broker_up_cnt;
+ /**< Number of logical brokers in state >= UP, this is a sub-set
+ * of rk_broker_up_cnt. */
+ rd_atomic32_t rk_logical_broker_up_cnt;
+ /**< Number of brokers that are down, only includes brokers
+ * that have had at least one connection attempt. */
+ rd_atomic32_t rk_broker_down_cnt;
+ /**< Logical brokers currently without an address.
+ * Used for calculating ERR__ALL_BROKERS_DOWN. */
+ rd_atomic32_t rk_broker_addrless_cnt;
+
+ mtx_t rk_internal_rkb_lock;
+ rd_kafka_broker_t *rk_internal_rkb;
+
+ /* Broadcasting of broker state changes to wake up
+ * functions waiting for a state change. */
+ cnd_t rk_broker_state_change_cnd;
+ mtx_t rk_broker_state_change_lock;
+ int rk_broker_state_change_version;
+ /* List of (rd_kafka_enq_once_t*) objects waiting for broker
+ * state changes. Protected by rk_broker_state_change_lock. */
+ rd_list_t rk_broker_state_change_waiters; /**< (rd_kafka_enq_once_t*) */
+
+ TAILQ_HEAD(, rd_kafka_topic_s) rk_topics;
+ int rk_topic_cnt;
+
+ struct rd_kafka_cgrp_s *rk_cgrp;
+
+ rd_kafka_conf_t rk_conf;
+ rd_kafka_q_t *rk_logq; /* Log queue if `log.queue` set */
+ char rk_name[128];
+ rd_kafkap_str_t *rk_client_id;
+ rd_kafkap_str_t *rk_group_id; /* Consumer group id */
+
+ rd_atomic32_t rk_terminate; /**< Set to RD_KAFKA_DESTROY_F_..
+ * flags instance
+ * is being destroyed.
+ * The value set is the
+ * destroy flags from
+ * rd_kafka_destroy*() and
+ * the two internal flags shown
+ * below.
+ *
+ * Order:
+ * 1. user_flags | .._F_DESTROY_CALLED
+ * is set in rd_kafka_destroy*().
+ * 2. consumer_close() is called
+ * for consumers.
+ * 3. .._F_TERMINATE is set to
+ * signal all background threads
+ * to terminate.
+ */
+
+#define RD_KAFKA_DESTROY_F_TERMINATE \
+ 0x1 /**< Internal flag to make sure \
+ * rk_terminate is set to non-zero \
+ * value even if user passed \
+ * no destroy flags. */
+#define RD_KAFKA_DESTROY_F_DESTROY_CALLED \
+ 0x2 /**< Application has called \
+ * ..destroy*() and we've \
+ * begun the termination \
+ * process. \
+ * This flag is needed to avoid \
+ * rk_terminate from being \
+ * 0 when destroy_flags() \
+ * is called with flags=0 \
+ * and prior to _F_TERMINATE \
+ * has been set. */
+#define RD_KAFKA_DESTROY_F_IMMEDIATE \
+ 0x4 /**< Immediate non-blocking \
+ * destruction without waiting \
+ * for all resources \
+ * to be cleaned up. \
+ * WARNING: Memory and resource \
+ * leaks possible. \
+ * This flag automatically sets \
+ * .._NO_CONSUMER_CLOSE. */
+
+
+ rwlock_t rk_lock;
+ rd_kafka_type_t rk_type;
+ struct timeval rk_tv_state_change;
+
+ rd_atomic64_t rk_ts_last_poll; /**< Timestamp of last application
+ * consumer_poll() call
+ * (or equivalent).
+ * Used to enforce
+ * max.poll.interval.ms.
+ * Only relevant for consumer. */
+ /* First fatal error. */
+ struct {
+ rd_atomic32_t err; /**< rd_kafka_resp_err_t */
+ char *errstr; /**< Protected by rk_lock */
+ int cnt; /**< Number of errors raised, only
+ * the first one is stored. */
+ } rk_fatal;
+
+ rd_atomic32_t rk_last_throttle; /* Last throttle_time_ms value
+ * from broker. */
+
+ /* Locks: rd_kafka_*lock() */
+ rd_ts_t rk_ts_metadata; /* Timestamp of most recent
+ * metadata. */
+
+ struct rd_kafka_metadata *rk_full_metadata; /* Last full metadata. */
+ rd_ts_t rk_ts_full_metadata; /* Timesstamp of .. */
+ struct rd_kafka_metadata_cache rk_metadata_cache; /* Metadata cache */
+
+ char *rk_clusterid; /* ClusterId from metadata */
+ int32_t rk_controllerid; /* ControllerId from metadata */
+
+ /**< Producer: Delivery report mode */
+ enum { RD_KAFKA_DR_MODE_NONE, /**< No delivery reports */
+ RD_KAFKA_DR_MODE_CB, /**< Delivery reports through callback */
+ RD_KAFKA_DR_MODE_EVENT, /**< Delivery reports through event API*/
+ } rk_drmode;
+
+ /* Simple consumer count:
+ * >0: Running in legacy / Simple Consumer mode,
+ * 0: No consumers running
+ * <0: Running in High level consumer mode */
+ rd_atomic32_t rk_simple_cnt;
+
+ /**
+ * Exactly Once Semantics and Idempotent Producer
+ *
+ * @locks rk_lock
+ */
+ struct {
+ /*
+ * Idempotence
+ */
+ rd_kafka_idemp_state_t idemp_state; /**< Idempotent Producer
+ * state */
+ rd_ts_t ts_idemp_state; /**< Last state change */
+ rd_kafka_pid_t pid; /**< Current Producer ID and Epoch */
+ int epoch_cnt; /**< Number of times pid/epoch changed */
+ rd_atomic32_t inflight_toppar_cnt; /**< Current number of
+ * toppars with inflight
+ * requests. */
+ rd_kafka_timer_t pid_tmr; /**< PID FSM timer */
+
+ /*
+ * Transactions
+ *
+ * All field access is from the rdkafka main thread,
+ * unless a specific lock is mentioned in the doc string.
+ *
+ */
+ rd_atomic32_t txn_may_enq; /**< Transaction state allows
+ * application to enqueue
+ * (produce) messages. */
+
+ rd_kafkap_str_t *transactional_id; /**< transactional.id */
+ rd_kafka_txn_state_t txn_state; /**< Transactional state.
+ * @locks rk_lock */
+ rd_ts_t ts_txn_state; /**< Last state change.
+ * @locks rk_lock */
+ rd_kafka_broker_t *txn_coord; /**< Transaction coordinator,
+ * this is a logical broker.*/
+ rd_kafka_broker_t *txn_curr_coord; /**< Current actual coord
+ * broker.
+ * This is only used to
+ * check if the coord
+ * changes. */
+ rd_kafka_broker_monitor_t txn_coord_mon; /**< Monitor for
+ * coordinator to
+ * take action when
+ * the broker state
+ * changes. */
+ rd_bool_t txn_requires_epoch_bump; /**< Coordinator epoch bump
+ * required to recover from
+ * idempotent producer
+ * fatal error. */
+
+ /**< Blocking transactional API application call
+ * currently being handled, its state, reply queue and how
+ * to handle timeout.
+ * Only one transactional API call is allowed at any time.
+ * Protected by the rk_lock. */
+ struct {
+ char name[64]; /**< API name, e.g.,
+ * send_offsets_to_transaction.
+ * This is used to make sure
+ * conflicting APIs are not
+ * called simultaneously. */
+ rd_bool_t calling; /**< API is being actively called.
+ * I.e., application is blocking
+ * on a txn API call.
+ * This is used to make sure
+ * no concurrent API calls are
+ * being made. */
+ rd_kafka_error_t *error; /**< Last error from background
+ * processing. This is only
+ * set if the application's
+ * API call timed out.
+ * It will be returned on
+ * the next call. */
+ rd_bool_t has_result; /**< Indicates whether an API
+ * result (possibly
+ * intermediate) has been set.
+ */
+ cnd_t cnd; /**< Application thread will
+ * block on this cnd waiting
+ * for a result to be set. */
+ mtx_t lock; /**< Protects all fields of
+ * txn_curr_api. */
+ } txn_curr_api;
+
+
+ int txn_req_cnt; /**< Number of transaction
+ * requests sent.
+ * This is incremented when a
+ * AddPartitionsToTxn or
+ * AddOffsetsToTxn request
+ * has been sent for the
+ * current transaction,
+ * to keep track of
+ * whether the broker is
+ * aware of the current
+ * transaction and thus
+ * requires an EndTxn request
+ * on abort or not. */
+
+ /**< Timer to trigger registration of pending partitions */
+ rd_kafka_timer_t txn_register_parts_tmr;
+
+ /**< Lock for txn_pending_rktps and txn_waitresp_rktps */
+ mtx_t txn_pending_lock;
+
+ /**< Partitions pending being added to transaction. */
+ rd_kafka_toppar_tqhead_t txn_pending_rktps;
+
+ /**< Partitions in-flight added to transaction. */
+ rd_kafka_toppar_tqhead_t txn_waitresp_rktps;
+
+ /**< Partitions added and registered to transaction. */
+ rd_kafka_toppar_tqhead_t txn_rktps;
+
+ /**< Number of messages that failed delivery.
+ * If this number is >0 on transaction_commit then an
+ * abortable transaction error will be raised.
+ * Is reset to zero on each begin_transaction(). */
+ rd_atomic64_t txn_dr_fails;
+
+ /**< Current transaction error. */
+ rd_kafka_resp_err_t txn_err;
+
+ /**< Current transaction error string, if any. */
+ char *txn_errstr;
+
+ /**< Last InitProducerIdRequest error. */
+ rd_kafka_resp_err_t txn_init_err;
+
+ /**< Waiting for transaction coordinator query response */
+ rd_bool_t txn_wait_coord;
+
+ /**< Transaction coordinator query timer */
+ rd_kafka_timer_t txn_coord_tmr;
+ } rk_eos;
+
+ rd_atomic32_t rk_flushing; /**< Application is calling flush(). */
+
+ /**
+ * Consumer state
+ *
+ * @locality rdkafka main thread
+ * @locks_required none
+ */
+ struct {
+ /** Application consumer queue for messages, events and errors.
+ * (typically points to rkcg_q) */
+ rd_kafka_q_t *q;
+ /** Current assigned partitions through assign() et.al. */
+ rd_kafka_assignment_t assignment;
+ /** Waiting for this number of commits to finish. */
+ int wait_commit_cnt;
+ } rk_consumer;
+
+ /**<
+ * Coordinator cache.
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+ rd_kafka_coord_cache_t rk_coord_cache; /**< Coordinator cache */
+
+ TAILQ_HEAD(, rd_kafka_coord_req_s)
+ rk_coord_reqs; /**< Coordinator
+ * requests */
+
+
+ struct {
+ mtx_t lock; /* Protects acces to this struct */
+ cnd_t cnd; /* For waking up blocking injectors */
+ unsigned int cnt; /* Current message count */
+ size_t size; /* Current message size sum */
+ unsigned int max_cnt; /* Max limit */
+ size_t max_size; /* Max limit */
+ } rk_curr_msgs;
+
+ rd_kafka_timers_t rk_timers;
+ thrd_t rk_thread;
+
+ int rk_initialized; /**< Will be > 0 when the rd_kafka_t
+ * instance has been fully initialized. */
+
+ int rk_init_wait_cnt; /**< Number of background threads that
+ * need to finish initialization. */
+ cnd_t rk_init_cnd; /**< Cond-var used to wait for main thread
+ * to finish its initialization before
+ * before rd_kafka_new() returns. */
+ mtx_t rk_init_lock; /**< Lock for rk_init_wait and _cmd */
+
+ rd_ts_t rk_ts_created; /**< Timestamp (monotonic clock) of
+ * rd_kafka_t creation. */
+
+ /**
+ * Background thread and queue,
+ * enabled by setting `background_event_cb()`.
+ */
+ struct {
+ rd_kafka_q_t *q; /**< Queue served by background thread. */
+ thrd_t thread; /**< Background thread. */
+ int calling; /**< Indicates whether the event callback
+ * is being called, reset back to 0
+ * when the callback returns.
+ * This can be used for troubleshooting
+ * purposes. */
+ } rk_background;
+
+
+ /*
+ * Logs, events or actions to rate limit / suppress
+ */
+ struct {
+ /**< Log: No brokers support Idempotent Producer */
+ rd_interval_t no_idemp_brokers;
+
+ /**< Sparse connections: randomly select broker
+ * to bring up. This interval should allow
+ * for a previous connection to be established,
+ * which varies between different environments:
+ * Use 10 < reconnect.backoff.jitter.ms / 2 < 1000.
+ */
+ rd_interval_t sparse_connect_random;
+ /**< Lock for sparse_connect_random */
+ mtx_t sparse_connect_lock;
+
+ /**< Broker metadata refresh interval:
+ * this is rate-limiting the number of topic-less
+ * broker/cluster metadata refreshes when there are no
+ * topics to refresh.
+ * Will be refreshed every topic.metadata.refresh.interval.ms
+ * but no more often than every 10s.
+ * No locks: only accessed by rdkafka main thread. */
+ rd_interval_t broker_metadata_refresh;
+
+ /**< Suppression for allow.auto.create.topics=false not being
+ * supported by the broker. */
+ rd_interval_t allow_auto_create_topics;
+ } rk_suppress;
+
+ struct {
+ void *handle; /**< Provider-specific handle struct pointer.
+ * Typically assigned in provider's .init() */
+ rd_kafka_q_t *callback_q; /**< SASL callback queue, if any. */
+ } rk_sasl;
+
+ /* Test mocks */
+ struct {
+ rd_kafka_mock_cluster_t *cluster; /**< Mock cluster, created
+ * by test.mock.num.brokers
+ */
+ rd_atomic32_t cluster_cnt; /**< Total number of mock
+ * clusters, created either
+ * through
+ * test.mock.num.brokers
+ * or mock_cluster_new().
+ */
+
+ } rk_mock;
+};
+
+#define rd_kafka_wrlock(rk) rwlock_wrlock(&(rk)->rk_lock)
+#define rd_kafka_rdlock(rk) rwlock_rdlock(&(rk)->rk_lock)
+#define rd_kafka_rdunlock(rk) rwlock_rdunlock(&(rk)->rk_lock)
+#define rd_kafka_wrunlock(rk) rwlock_wrunlock(&(rk)->rk_lock)
+
+
+/**
+ * @brief Add \p cnt messages and of total size \p size bytes to the
+ * internal bookkeeping of current message counts.
+ * If the total message count or size after add would exceed the
+ * configured limits \c queue.buffering.max.messages and
+ * \c queue.buffering.max.kbytes then depending on the value of
+ * \p block the function either blocks until enough space is available
+ * if \p block is 1, else immediately returns
+ * RD_KAFKA_RESP_ERR__QUEUE_FULL.
+ *
+ * @param rdmtx If non-null and \p block is set and blocking is to ensue,
+ * then unlock this mutex for the duration of the blocking
+ * and then reacquire with a read-lock.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
+rd_kafka_curr_msgs_add(rd_kafka_t *rk,
+ unsigned int cnt,
+ size_t size,
+ int block,
+ rwlock_t *rdlock) {
+
+ if (rk->rk_type != RD_KAFKA_PRODUCER)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ mtx_lock(&rk->rk_curr_msgs.lock);
+ while (
+ unlikely((rk->rk_curr_msgs.max_cnt > 0 &&
+ rk->rk_curr_msgs.cnt + cnt > rk->rk_curr_msgs.max_cnt) ||
+ (unsigned long long)(rk->rk_curr_msgs.size + size) >
+ (unsigned long long)rk->rk_curr_msgs.max_size)) {
+ if (!block) {
+ mtx_unlock(&rk->rk_curr_msgs.lock);
+ return RD_KAFKA_RESP_ERR__QUEUE_FULL;
+ }
+
+ if (rdlock)
+ rwlock_rdunlock(rdlock);
+
+ cnd_wait(&rk->rk_curr_msgs.cnd, &rk->rk_curr_msgs.lock);
+
+ if (rdlock)
+ rwlock_rdlock(rdlock);
+ }
+
+ rk->rk_curr_msgs.cnt += cnt;
+ rk->rk_curr_msgs.size += size;
+ mtx_unlock(&rk->rk_curr_msgs.lock);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Subtract \p cnt messages of total size \p size from the
+ * current bookkeeping and broadcast a wakeup on the condvar
+ * for any waiting & blocking threads.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_curr_msgs_sub(rd_kafka_t *rk, unsigned int cnt, size_t size) {
+ int broadcast = 0;
+
+ if (rk->rk_type != RD_KAFKA_PRODUCER)
+ return;
+
+ mtx_lock(&rk->rk_curr_msgs.lock);
+ rd_kafka_assert(NULL, rk->rk_curr_msgs.cnt >= cnt &&
+ rk->rk_curr_msgs.size >= size);
+
+ /* If the subtraction would pass one of the thresholds
+ * broadcast a wake-up to any waiting listeners. */
+ if ((rk->rk_curr_msgs.cnt - cnt == 0) ||
+ (rk->rk_curr_msgs.cnt >= rk->rk_curr_msgs.max_cnt &&
+ rk->rk_curr_msgs.cnt - cnt < rk->rk_curr_msgs.max_cnt) ||
+ (rk->rk_curr_msgs.size >= rk->rk_curr_msgs.max_size &&
+ rk->rk_curr_msgs.size - size < rk->rk_curr_msgs.max_size))
+ broadcast = 1;
+
+ rk->rk_curr_msgs.cnt -= cnt;
+ rk->rk_curr_msgs.size -= size;
+
+ if (unlikely(broadcast))
+ cnd_broadcast(&rk->rk_curr_msgs.cnd);
+
+ mtx_unlock(&rk->rk_curr_msgs.lock);
+}
+
+static RD_INLINE RD_UNUSED void
+rd_kafka_curr_msgs_get(rd_kafka_t *rk, unsigned int *cntp, size_t *sizep) {
+ if (rk->rk_type != RD_KAFKA_PRODUCER) {
+ *cntp = 0;
+ *sizep = 0;
+ return;
+ }
+
+ mtx_lock(&rk->rk_curr_msgs.lock);
+ *cntp = rk->rk_curr_msgs.cnt;
+ *sizep = rk->rk_curr_msgs.size;
+ mtx_unlock(&rk->rk_curr_msgs.lock);
+}
+
+static RD_INLINE RD_UNUSED int rd_kafka_curr_msgs_cnt(rd_kafka_t *rk) {
+ int cnt;
+ if (rk->rk_type != RD_KAFKA_PRODUCER)
+ return 0;
+
+ mtx_lock(&rk->rk_curr_msgs.lock);
+ cnt = rk->rk_curr_msgs.cnt;
+ mtx_unlock(&rk->rk_curr_msgs.lock);
+
+ return cnt;
+}
+
+/**
+ * @brief Wait until \p tspec for curr_msgs to reach 0.
+ *
+ * @returns rd_true if zero is reached, or rd_false on timeout.
+ * The remaining messages are returned in \p *curr_msgsp
+ */
+static RD_INLINE RD_UNUSED rd_bool_t
+rd_kafka_curr_msgs_wait_zero(rd_kafka_t *rk,
+ int timeout_ms,
+ unsigned int *curr_msgsp) {
+ unsigned int cnt;
+ struct timespec tspec;
+
+ rd_timeout_init_timespec(&tspec, timeout_ms);
+
+ mtx_lock(&rk->rk_curr_msgs.lock);
+ while ((cnt = rk->rk_curr_msgs.cnt) > 0) {
+ if (cnd_timedwait_abs(&rk->rk_curr_msgs.cnd,
+ &rk->rk_curr_msgs.lock,
+ &tspec) == thrd_timedout)
+ break;
+ }
+ mtx_unlock(&rk->rk_curr_msgs.lock);
+
+ *curr_msgsp = cnt;
+ return cnt == 0;
+}
+
+void rd_kafka_destroy_final(rd_kafka_t *rk);
+
+void rd_kafka_global_init(void);
+
+/**
+ * @returns true if \p rk handle is terminating.
+ *
+ * @remark If consumer_close() is called from destroy*() it will be
+ * called prior to _F_TERMINATE being set and will thus not
+ * be able to use rd_kafka_terminating() to know it is shutting down.
+ * That code should instead just check that rk_terminate is non-zero
+ * (the _F_DESTROY_CALLED flag will be set).
+ */
+#define rd_kafka_terminating(rk) \
+ (rd_atomic32_get(&(rk)->rk_terminate) & RD_KAFKA_DESTROY_F_TERMINATE)
+
+/**
+ * @returns the destroy flags set matching \p flags, which might be
+ * a subset of the flags.
+ */
+#define rd_kafka_destroy_flags_check(rk, flags) \
+ (rd_atomic32_get(&(rk)->rk_terminate) & (flags))
+
+/**
+ * @returns true if no consumer callbacks, or standard consumer_close
+ * behaviour, should be triggered. */
+#define rd_kafka_destroy_flags_no_consumer_close(rk) \
+ rd_kafka_destroy_flags_check(rk, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE)
+
+#define rd_kafka_is_simple_consumer(rk) \
+ (rd_atomic32_get(&(rk)->rk_simple_cnt) > 0)
+int rd_kafka_simple_consumer_add(rd_kafka_t *rk);
+
+
+/**
+ * @returns true if idempotency is enabled (producer only).
+ */
+#define rd_kafka_is_idempotent(rk) ((rk)->rk_conf.eos.idempotence)
+
+/**
+ * @returns true if the producer is transactional (producer only).
+ */
+#define rd_kafka_is_transactional(rk) \
+ ((rk)->rk_conf.eos.transactional_id != NULL)
+
+
+#define RD_KAFKA_PURGE_F_ABORT_TXN \
+ 0x100 /**< Internal flag used when \
+ * aborting transaction */
+#define RD_KAFKA_PURGE_F_MASK 0x107
+const char *rd_kafka_purge_flags2str(int flags);
+
+
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+
+
+
+/**
+ * Debug contexts
+ */
+#define RD_KAFKA_DBG_GENERIC 0x1
+#define RD_KAFKA_DBG_BROKER 0x2
+#define RD_KAFKA_DBG_TOPIC 0x4
+#define RD_KAFKA_DBG_METADATA 0x8
+#define RD_KAFKA_DBG_FEATURE 0x10
+#define RD_KAFKA_DBG_QUEUE 0x20
+#define RD_KAFKA_DBG_MSG 0x40
+#define RD_KAFKA_DBG_PROTOCOL 0x80
+#define RD_KAFKA_DBG_CGRP 0x100
+#define RD_KAFKA_DBG_SECURITY 0x200
+#define RD_KAFKA_DBG_FETCH 0x400
+#define RD_KAFKA_DBG_INTERCEPTOR 0x800
+#define RD_KAFKA_DBG_PLUGIN 0x1000
+#define RD_KAFKA_DBG_CONSUMER 0x2000
+#define RD_KAFKA_DBG_ADMIN 0x4000
+#define RD_KAFKA_DBG_EOS 0x8000
+#define RD_KAFKA_DBG_MOCK 0x10000
+#define RD_KAFKA_DBG_ASSIGNOR 0x20000
+#define RD_KAFKA_DBG_CONF 0x40000
+#define RD_KAFKA_DBG_ALL 0xfffff
+#define RD_KAFKA_DBG_NONE 0x0
+
+
+void rd_kafka_log0(const rd_kafka_conf_t *conf,
+ const rd_kafka_t *rk,
+ const char *extra,
+ int level,
+ int ctx,
+ const char *fac,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 7, 8);
+
+#define rd_kafka_log(rk, level, fac, ...) \
+ rd_kafka_log0(&rk->rk_conf, rk, NULL, level, RD_KAFKA_DBG_NONE, fac, \
+ __VA_ARGS__)
+
+#define rd_kafka_dbg(rk, ctx, fac, ...) \
+ do { \
+ if (unlikely((rk)->rk_conf.debug & (RD_KAFKA_DBG_##ctx))) \
+ rd_kafka_log0(&rk->rk_conf, rk, NULL, LOG_DEBUG, \
+ (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \
+ } while (0)
+
+/* dbg() not requiring an rk, just the conf object, for early logging */
+#define rd_kafka_dbg0(conf, ctx, fac, ...) \
+ do { \
+ if (unlikely((conf)->debug & (RD_KAFKA_DBG_##ctx))) \
+ rd_kafka_log0(conf, NULL, NULL, LOG_DEBUG, \
+ (RD_KAFKA_DBG_##ctx), fac, __VA_ARGS__); \
+ } while (0)
+
+/* NOTE: The local copy of _logname is needed due rkb_logname_lock lock-ordering
+ * when logging another broker's name in the message. */
+#define rd_rkb_log0(rkb, level, ctx, fac, ...) \
+ do { \
+ char _logname[RD_KAFKA_NODENAME_SIZE]; \
+ mtx_lock(&(rkb)->rkb_logname_lock); \
+ rd_strlcpy(_logname, rkb->rkb_logname, sizeof(_logname)); \
+ mtx_unlock(&(rkb)->rkb_logname_lock); \
+ rd_kafka_log0(&(rkb)->rkb_rk->rk_conf, (rkb)->rkb_rk, \
+ _logname, level, ctx, fac, __VA_ARGS__); \
+ } while (0)
+
+#define rd_rkb_log(rkb, level, fac, ...) \
+ rd_rkb_log0(rkb, level, RD_KAFKA_DBG_NONE, fac, __VA_ARGS__)
+
+#define rd_rkb_dbg(rkb, ctx, fac, ...) \
+ do { \
+ if (unlikely((rkb)->rkb_rk->rk_conf.debug & \
+ (RD_KAFKA_DBG_##ctx))) { \
+ rd_rkb_log0(rkb, LOG_DEBUG, (RD_KAFKA_DBG_##ctx), fac, \
+ __VA_ARGS__); \
+ } \
+ } while (0)
+
+
+
+extern rd_kafka_resp_err_t RD_TLS rd_kafka_last_error_code;
+
+static RD_UNUSED RD_INLINE rd_kafka_resp_err_t
+rd_kafka_set_last_error(rd_kafka_resp_err_t err, int errnox) {
+ if (errnox) {
+ /* MSVC:
+ * This is the correct way to set errno on Windows,
+ * but it is still pointless due to different errnos in
+ * in different runtimes:
+ * https://social.msdn.microsoft.com/Forums/vstudio/en-US/b4500c0d-1b69-40c7-9ef5-08da1025b5bf/setting-errno-from-within-a-dll?forum=vclanguage/
+ * errno is thus highly deprecated, and buggy, on Windows
+ * when using librdkafka as a dynamically loaded DLL. */
+ rd_set_errno(errnox);
+ }
+ rd_kafka_last_error_code = err;
+ return err;
+}
+
+
+int rd_kafka_set_fatal_error0(rd_kafka_t *rk,
+ rd_dolock_t do_lock,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 4, 5);
+#define rd_kafka_set_fatal_error(rk, err, fmt, ...) \
+ rd_kafka_set_fatal_error0(rk, RD_DO_LOCK, err, fmt, __VA_ARGS__)
+
+rd_kafka_error_t *rd_kafka_get_fatal_error(rd_kafka_t *rk);
+
+static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
+rd_kafka_fatal_error_code(rd_kafka_t *rk) {
+ /* This is an optimization to avoid an atomic read which are costly
+ * on some platforms:
+ * Fatal errors are currently only raised by the idempotent producer
+ * and static consumers (group.instance.id). */
+ if ((rk->rk_type == RD_KAFKA_PRODUCER && rk->rk_conf.eos.idempotence) ||
+ (rk->rk_type == RD_KAFKA_CONSUMER && rk->rk_conf.group_instance_id))
+ return rd_atomic32_get(&rk->rk_fatal.err);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+extern rd_atomic32_t rd_kafka_thread_cnt_curr;
+extern char RD_TLS rd_kafka_thread_name[64];
+
+void rd_kafka_set_thread_name(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
+void rd_kafka_set_thread_sysname(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
+
+int rd_kafka_path_is_dir(const char *path);
+rd_bool_t rd_kafka_dir_is_empty(const char *path);
+
+rd_kafka_op_res_t rd_kafka_poll_cb(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque);
+
+rd_kafka_resp_err_t rd_kafka_subscribe_rkt(rd_kafka_topic_t *rkt);
+
+
+/**
+ * @returns the number of milliseconds the maximum poll interval
+ * was exceeded, or 0 if not exceeded.
+ *
+ * @remark Only relevant for high-level consumer.
+ *
+ * @locality any
+ * @locks none
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_max_poll_exceeded(rd_kafka_t *rk) {
+ rd_ts_t last_poll;
+ int exceeded;
+
+ if (rk->rk_type != RD_KAFKA_CONSUMER)
+ return 0;
+
+ last_poll = rd_atomic64_get(&rk->rk_ts_last_poll);
+
+ /* Application is blocked in librdkafka function, see
+ * rd_kafka_app_poll_blocking(). */
+ if (last_poll == INT64_MAX)
+ return 0;
+
+ exceeded = (int)((rd_clock() - last_poll) / 1000ll) -
+ rk->rk_conf.max_poll_interval_ms;
+
+ if (unlikely(exceeded > 0))
+ return exceeded;
+
+ return 0;
+}
+
+/**
+ * @brief Call on entry to blocking polling function to indicate
+ * that the application is blocked waiting for librdkafka
+ * and that max.poll.interval.ms should not be enforced.
+ *
+ * Call app_polled() Upon return from the function calling
+ * this function to register the application's last time of poll.
+ *
+ * @remark Only relevant for high-level consumer.
+ *
+ * @locality any
+ * @locks none
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_app_poll_blocking(rd_kafka_t *rk) {
+ if (rk->rk_type == RD_KAFKA_CONSUMER)
+ rd_atomic64_set(&rk->rk_ts_last_poll, INT64_MAX);
+}
+
+/**
+ * @brief Set the last application poll time to now.
+ *
+ * @remark Only relevant for high-level consumer.
+ *
+ * @locality any
+ * @locks none
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_app_polled(rd_kafka_t *rk) {
+ if (rk->rk_type == RD_KAFKA_CONSUMER)
+ rd_atomic64_set(&rk->rk_ts_last_poll, rd_clock());
+}
+
+
+
+void rd_kafka_term_sig_handler(int sig);
+
+/**
+ * rdkafka_background.c
+ */
+int rd_kafka_background_thread_main(void *arg);
+rd_kafka_resp_err_t rd_kafka_background_thread_create(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size);
+
+
+#endif /* _RDKAFKA_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c
new file mode 100644
index 000000000..c962d2d99
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.c
@@ -0,0 +1,819 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_interceptor.h"
+#include "rdstring.h"
+
+/**
+ * @brief Interceptor methodtion/method reference
+ */
+typedef struct rd_kafka_interceptor_method_s {
+ union {
+ rd_kafka_interceptor_f_on_conf_set_t *on_conf_set;
+ rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup;
+ rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy;
+ rd_kafka_interceptor_f_on_new_t *on_new;
+ rd_kafka_interceptor_f_on_destroy_t *on_destroy;
+ rd_kafka_interceptor_f_on_send_t *on_send;
+ rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement;
+ rd_kafka_interceptor_f_on_consume_t *on_consume;
+ rd_kafka_interceptor_f_on_commit_t *on_commit;
+ rd_kafka_interceptor_f_on_request_sent_t *on_request_sent;
+ rd_kafka_interceptor_f_on_response_received_t
+ *on_response_received;
+ rd_kafka_interceptor_f_on_thread_start_t *on_thread_start;
+ rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit;
+ rd_kafka_interceptor_f_on_broker_state_change_t
+ *on_broker_state_change;
+ void *generic; /* For easy assignment */
+
+ } u;
+ char *ic_name;
+ void *ic_opaque;
+} rd_kafka_interceptor_method_t;
+
+/**
+ * @brief Destroy interceptor methodtion reference
+ */
+static void rd_kafka_interceptor_method_destroy(void *ptr) {
+ rd_kafka_interceptor_method_t *method = ptr;
+ rd_free(method->ic_name);
+ rd_free(method);
+}
+
+
+
+/**
+ * @brief Handle an interceptor on_... methodtion call failures.
+ */
+static RD_INLINE void
+rd_kafka_interceptor_failed(rd_kafka_t *rk,
+ const rd_kafka_interceptor_method_t *method,
+ const char *method_name,
+ rd_kafka_resp_err_t err,
+ const rd_kafka_message_t *rkmessage,
+ const char *errstr) {
+
+ /* FIXME: Suppress log messages, eventually */
+ if (rkmessage)
+ rd_kafka_log(
+ rk, LOG_WARNING, "ICFAIL",
+ "Interceptor %s failed %s for "
+ "message on %s [%" PRId32 "] @ %" PRId64 ": %s%s%s",
+ method->ic_name, method_name,
+ rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition,
+ rkmessage->offset, rd_kafka_err2str(err),
+ errstr ? ": " : "", errstr ? errstr : "");
+ else
+ rd_kafka_log(rk, LOG_WARNING, "ICFAIL",
+ "Interceptor %s failed %s: %s%s%s",
+ method->ic_name, method_name,
+ rd_kafka_err2str(err), errstr ? ": " : "",
+ errstr ? errstr : "");
+}
+
+
+
+/**
+ * @brief Create interceptor method reference.
+ * Duplicates are rejected
+ */
+static rd_kafka_interceptor_method_t *
+rd_kafka_interceptor_method_new(const char *ic_name,
+ void *func,
+ void *ic_opaque) {
+ rd_kafka_interceptor_method_t *method;
+
+ method = rd_calloc(1, sizeof(*method));
+ method->ic_name = rd_strdup(ic_name);
+ method->ic_opaque = ic_opaque;
+ method->u.generic = func;
+
+ return method;
+}
+
+
+/**
+ * @brief Method comparator to be used for finding, not sorting.
+ */
+static int rd_kafka_interceptor_method_cmp(const void *_a, const void *_b) {
+ const rd_kafka_interceptor_method_t *a = _a, *b = _b;
+
+ if (a->u.generic != b->u.generic)
+ return -1;
+
+ return strcmp(a->ic_name, b->ic_name);
+}
+
+/**
+ * @brief Add interceptor method reference
+ */
+static rd_kafka_resp_err_t rd_kafka_interceptor_method_add(rd_list_t *list,
+ const char *ic_name,
+ void *func,
+ void *ic_opaque) {
+ rd_kafka_interceptor_method_t *method;
+ const rd_kafka_interceptor_method_t skel = {.ic_name = (char *)ic_name,
+ .u = {.generic = func}};
+
+ /* Reject same method from same interceptor.
+ * This is needed to avoid duplicate interceptors when configuration
+ * objects are duplicated.
+ * An exception is made for lists with _F_UNIQUE, which is currently
+ * only on_conf_destroy() to allow interceptor cleanup. */
+ if ((list->rl_flags & RD_LIST_F_UNIQUE) &&
+ rd_list_find(list, &skel, rd_kafka_interceptor_method_cmp))
+ return RD_KAFKA_RESP_ERR__CONFLICT;
+
+ method = rd_kafka_interceptor_method_new(ic_name, func, ic_opaque);
+ rd_list_add(list, method);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Destroy all interceptors
+ * @locality application thread calling rd_kafka_conf_destroy() or
+ * rd_kafka_destroy()
+ */
+void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf) {
+ rd_list_destroy(&conf->interceptors.on_conf_set);
+ rd_list_destroy(&conf->interceptors.on_conf_dup);
+ rd_list_destroy(&conf->interceptors.on_conf_destroy);
+ rd_list_destroy(&conf->interceptors.on_new);
+ rd_list_destroy(&conf->interceptors.on_destroy);
+ rd_list_destroy(&conf->interceptors.on_send);
+ rd_list_destroy(&conf->interceptors.on_acknowledgement);
+ rd_list_destroy(&conf->interceptors.on_consume);
+ rd_list_destroy(&conf->interceptors.on_commit);
+ rd_list_destroy(&conf->interceptors.on_request_sent);
+ rd_list_destroy(&conf->interceptors.on_response_received);
+ rd_list_destroy(&conf->interceptors.on_thread_start);
+ rd_list_destroy(&conf->interceptors.on_thread_exit);
+ rd_list_destroy(&conf->interceptors.on_broker_state_change);
+
+ /* Interceptor config */
+ rd_list_destroy(&conf->interceptors.config);
+}
+
+
+/**
+ * @brief Initialize interceptor sub-system for config object.
+ * @locality application thread
+ */
+static void rd_kafka_interceptors_init(rd_kafka_conf_t *conf) {
+ rd_list_init(&conf->interceptors.on_conf_set, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_conf_dup, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ /* conf_destroy() allows duplicates entries. */
+ rd_list_init(&conf->interceptors.on_conf_destroy, 0,
+ rd_kafka_interceptor_method_destroy);
+ rd_list_init(&conf->interceptors.on_new, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_destroy, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_send, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_acknowledgement, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_consume, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_commit, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_request_sent, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_response_received, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_thread_start, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_thread_exit, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+ rd_list_init(&conf->interceptors.on_broker_state_change, 0,
+ rd_kafka_interceptor_method_destroy)
+ ->rl_flags |= RD_LIST_F_UNIQUE;
+
+ /* Interceptor config */
+ rd_list_init(&conf->interceptors.config, 0,
+ (void (*)(void *))rd_strtup_destroy);
+}
+
+
+
+/**
+ * @name Configuration backend
+ */
+
+
+/**
+ * @brief Constructor called when configuration object is created.
+ */
+void rd_kafka_conf_interceptor_ctor(int scope, void *pconf) {
+ rd_kafka_conf_t *conf = pconf;
+ assert(scope == _RK_GLOBAL);
+ rd_kafka_interceptors_init(conf);
+}
+
+/**
+ * @brief Destructor called when configuration object is destroyed.
+ */
+void rd_kafka_conf_interceptor_dtor(int scope, void *pconf) {
+ rd_kafka_conf_t *conf = pconf;
+ assert(scope == _RK_GLOBAL);
+ rd_kafka_interceptors_destroy(conf);
+}
+
+/**
+ * @brief Copy-constructor called when configuration object \p psrcp is
+ * duplicated to \p dstp.
+ * @remark Interceptors are NOT copied, but interceptor config is.
+ *
+ */
+void rd_kafka_conf_interceptor_copy(int scope,
+ void *pdst,
+ const void *psrc,
+ void *dstptr,
+ const void *srcptr,
+ size_t filter_cnt,
+ const char **filter) {
+ rd_kafka_conf_t *dconf = pdst;
+ const rd_kafka_conf_t *sconf = psrc;
+ int i;
+ const rd_strtup_t *confval;
+
+ assert(scope == _RK_GLOBAL);
+
+ /* Apply interceptor configuration values.
+ * on_conf_dup() has already been called for dconf so
+ * on_conf_set() interceptors are already in place and we can
+ * apply the configuration through the standard conf_set() API. */
+ RD_LIST_FOREACH(confval, &sconf->interceptors.config, i) {
+ size_t fi;
+ size_t nlen = strlen(confval->name);
+
+ /* Apply filter */
+ for (fi = 0; fi < filter_cnt; fi++) {
+ size_t flen = strlen(filter[fi]);
+ if (nlen >= flen &&
+ !strncmp(filter[fi], confval->name, flen))
+ break;
+ }
+
+ if (fi < filter_cnt)
+ continue; /* Filter matched: ignore property. */
+
+ /* Ignore errors for now */
+ rd_kafka_conf_set(dconf, confval->name, confval->value, NULL,
+ 0);
+ }
+}
+
+
+
+/**
+ * @brief Call interceptor on_conf_set methods.
+ * @locality application thread calling rd_kafka_conf_set() and
+ * rd_kafka_conf_dup()
+ */
+rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf,
+ const char *name,
+ const char *val,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &conf->interceptors.on_conf_set, i) {
+ rd_kafka_conf_res_t res;
+
+ res = method->u.on_conf_set(conf, name, val, errstr,
+ errstr_size, method->ic_opaque);
+ if (res == RD_KAFKA_CONF_UNKNOWN)
+ continue;
+
+ /* Add successfully handled properties to list of
+ * interceptor config properties so conf_t objects
+ * can be copied. */
+ if (res == RD_KAFKA_CONF_OK)
+ rd_list_add(&conf->interceptors.config,
+ rd_strtup_new(name, val));
+ return res;
+ }
+
+ return RD_KAFKA_CONF_UNKNOWN;
+}
+
+/**
+ * @brief Call interceptor on_conf_dup methods.
+ * @locality application thread calling rd_kafka_conf_dup()
+ */
+void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf,
+ const rd_kafka_conf_t *old_conf,
+ size_t filter_cnt,
+ const char **filter) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &old_conf->interceptors.on_conf_dup, i) {
+ /* FIXME: Ignore error for now */
+ method->u.on_conf_dup(new_conf, old_conf, filter_cnt, filter,
+ method->ic_opaque);
+ }
+}
+
+
+/**
+ * @brief Call interceptor on_conf_destroy methods.
+ * @locality application thread calling rd_kafka_conf_destroy(), rd_kafka_new(),
+ * rd_kafka_destroy()
+ */
+void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &conf->interceptors.on_conf_destroy, i) {
+ /* FIXME: Ignore error for now */
+ method->u.on_conf_destroy(method->ic_opaque);
+ }
+}
+
+
+/**
+ * @brief Call interceptor on_new methods.
+ * @locality application thread calling rd_kafka_new()
+ */
+void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+ char errstr[512];
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_new, i) {
+ rd_kafka_resp_err_t err;
+
+ err = method->u.on_new(rk, conf, method->ic_opaque, errstr,
+ sizeof(errstr));
+ if (unlikely(err))
+ rd_kafka_interceptor_failed(rk, method, "on_new", err,
+ NULL, errstr);
+ }
+}
+
+
+
+/**
+ * @brief Call interceptor on_destroy methods.
+ * @locality application thread calling rd_kafka_new() or rd_kafka_destroy()
+ */
+void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_destroy, i) {
+ rd_kafka_resp_err_t err;
+
+ err = method->u.on_destroy(rk, method->ic_opaque);
+ if (unlikely(err))
+ rd_kafka_interceptor_failed(rk, method, "on_destroy",
+ err, NULL, NULL);
+ }
+}
+
+
+
+/**
+ * @brief Call interceptor on_send methods.
+ * @locality application thread calling produce()
+ */
+void rd_kafka_interceptors_on_send(rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_send, i) {
+ rd_kafka_resp_err_t err;
+
+ err = method->u.on_send(rk, rkmessage, method->ic_opaque);
+ if (unlikely(err))
+ rd_kafka_interceptor_failed(rk, method, "on_send", err,
+ rkmessage, NULL);
+ }
+}
+
+
+
+/**
+ * @brief Call interceptor on_acknowledgement methods.
+ * @locality application thread calling poll(), or the broker thread if
+ * if dr callback has been set.
+ */
+void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_acknowledgement,
+ i) {
+ rd_kafka_resp_err_t err;
+
+ err = method->u.on_acknowledgement(rk, rkmessage,
+ method->ic_opaque);
+ if (unlikely(err))
+ rd_kafka_interceptor_failed(rk, method,
+ "on_acknowledgement", err,
+ rkmessage, NULL);
+ }
+}
+
+
+/**
+ * @brief Call on_acknowledgement methods for all messages in queue.
+ *
+ * @param force_err If non-zero, sets this error on each message.
+ *
+ * @locality broker thread
+ */
+void rd_kafka_interceptors_on_acknowledgement_queue(
+ rd_kafka_t *rk,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_resp_err_t force_err) {
+ rd_kafka_msg_t *rkm;
+
+ RD_KAFKA_MSGQ_FOREACH(rkm, rkmq) {
+ if (force_err)
+ rkm->rkm_err = force_err;
+ rd_kafka_interceptors_on_acknowledgement(rk,
+ &rkm->rkm_rkmessage);
+ }
+}
+
+
+/**
+ * @brief Call interceptor on_consume methods.
+ * @locality application thread calling poll(), consume() or similar prior to
+ * passing the message to the application.
+ */
+void rd_kafka_interceptors_on_consume(rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_consume, i) {
+ rd_kafka_resp_err_t err;
+
+ err = method->u.on_consume(rk, rkmessage, method->ic_opaque);
+ if (unlikely(err))
+ rd_kafka_interceptor_failed(rk, method, "on_consume",
+ err, rkmessage, NULL);
+ }
+}
+
+
+/**
+ * @brief Call interceptor on_commit methods.
+ * @locality application thread calling poll(), consume() or similar,
+ * or rdkafka main thread if no commit_cb or handler registered.
+ */
+void rd_kafka_interceptors_on_commit(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_commit, i) {
+ rd_kafka_resp_err_t ic_err;
+
+ ic_err =
+ method->u.on_commit(rk, offsets, err, method->ic_opaque);
+ if (unlikely(ic_err))
+ rd_kafka_interceptor_failed(rk, method, "on_commit",
+ ic_err, NULL, NULL);
+ }
+}
+
+
+/**
+ * @brief Call interceptor on_request_sent methods
+ * @locality internal broker thread
+ */
+void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_request_sent, i) {
+ rd_kafka_resp_err_t ic_err;
+
+ ic_err = method->u.on_request_sent(
+ rk, sockfd, brokername, brokerid, ApiKey, ApiVersion,
+ CorrId, size, method->ic_opaque);
+ if (unlikely(ic_err))
+ rd_kafka_interceptor_failed(
+ rk, method, "on_request_sent", ic_err, NULL, NULL);
+ }
+}
+
+
+/**
+ * @brief Call interceptor on_response_received methods
+ * @locality internal broker thread
+ */
+void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_response_received,
+ i) {
+ rd_kafka_resp_err_t ic_err;
+
+ ic_err = method->u.on_response_received(
+ rk, sockfd, brokername, brokerid, ApiKey, ApiVersion,
+ CorrId, size, rtt, err, method->ic_opaque);
+ if (unlikely(ic_err))
+ rd_kafka_interceptor_failed(rk, method,
+ "on_response_received",
+ ic_err, NULL, NULL);
+ }
+}
+
+
+void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_start, i) {
+ rd_kafka_resp_err_t ic_err;
+
+ ic_err = method->u.on_thread_start(
+ rk, thread_type, rd_kafka_thread_name, method->ic_opaque);
+ if (unlikely(ic_err))
+ rd_kafka_interceptor_failed(
+ rk, method, "on_thread_start", ic_err, NULL, NULL);
+ }
+}
+
+
+void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method, &rk->rk_conf.interceptors.on_thread_exit, i) {
+ rd_kafka_resp_err_t ic_err;
+
+ ic_err = method->u.on_thread_exit(
+ rk, thread_type, rd_kafka_thread_name, method->ic_opaque);
+ if (unlikely(ic_err))
+ rd_kafka_interceptor_failed(
+ rk, method, "on_thread_exit", ic_err, NULL, NULL);
+ }
+}
+
+
+/**
+ * @brief Call interceptor on_broker_state_change methods.
+ * @locality any.
+ */
+void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk,
+ int32_t broker_id,
+ const char *secproto,
+ const char *name,
+ int port,
+ const char *state) {
+ rd_kafka_interceptor_method_t *method;
+ int i;
+
+ RD_LIST_FOREACH(method,
+ &rk->rk_conf.interceptors.on_broker_state_change, i) {
+ rd_kafka_resp_err_t ic_err;
+
+ ic_err = method->u.on_broker_state_change(
+ rk, broker_id, secproto, name, port, state,
+ method->ic_opaque);
+ if (unlikely(ic_err))
+ rd_kafka_interceptor_failed(rk, method,
+ "on_broker_state_change",
+ ic_err, NULL, NULL);
+ }
+}
+
+
+
+/**
+ * @name Public API (backend)
+ * @{
+ */
+
+
+rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_set(
+ rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_conf_set_t *on_conf_set,
+ void *ic_opaque) {
+ return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_set,
+ ic_name, (void *)on_conf_set,
+ ic_opaque);
+}
+
+rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_dup(
+ rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_conf_dup_t *on_conf_dup,
+ void *ic_opaque) {
+ return rd_kafka_interceptor_method_add(&conf->interceptors.on_conf_dup,
+ ic_name, (void *)on_conf_dup,
+ ic_opaque);
+}
+
+rd_kafka_resp_err_t rd_kafka_conf_interceptor_add_on_conf_destroy(
+ rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_conf_destroy_t *on_conf_destroy,
+ void *ic_opaque) {
+ return rd_kafka_interceptor_method_add(
+ &conf->interceptors.on_conf_destroy, ic_name,
+ (void *)on_conf_destroy, ic_opaque);
+}
+
+
+
+rd_kafka_resp_err_t
+rd_kafka_conf_interceptor_add_on_new(rd_kafka_conf_t *conf,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_new_t *on_new,
+ void *ic_opaque) {
+ return rd_kafka_interceptor_method_add(
+ &conf->interceptors.on_new, ic_name, (void *)on_new, ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_destroy(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_destroy_t *on_destroy,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_destroy, ic_name, (void *)on_destroy,
+ ic_opaque);
+}
+
+rd_kafka_resp_err_t
+rd_kafka_interceptor_add_on_send(rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_send_t *on_send,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_send, ic_name, (void *)on_send,
+ ic_opaque);
+}
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_acknowledgement(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_acknowledgement_t *on_acknowledgement,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_acknowledgement, ic_name,
+ (void *)on_acknowledgement, ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_consume(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_consume_t *on_consume,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_consume, ic_name, (void *)on_consume,
+ ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_commit(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_commit_t *on_commit,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_commit, ic_name, (void *)on_commit,
+ ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_request_sent(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_request_sent_t *on_request_sent,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_request_sent, ic_name,
+ (void *)on_request_sent, ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_response_received(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_response_received_t *on_response_received,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_response_received, ic_name,
+ (void *)on_response_received, ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_start(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_thread_start_t *on_thread_start,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_thread_start, ic_name,
+ (void *)on_thread_start, ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_thread_exit(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_thread_exit_t *on_thread_exit,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_thread_exit, ic_name,
+ (void *)on_thread_exit, ic_opaque);
+}
+
+
+rd_kafka_resp_err_t rd_kafka_interceptor_add_on_broker_state_change(
+ rd_kafka_t *rk,
+ const char *ic_name,
+ rd_kafka_interceptor_f_on_broker_state_change_t *on_broker_state_change,
+ void *ic_opaque) {
+ assert(!rk->rk_initialized);
+ return rd_kafka_interceptor_method_add(
+ &rk->rk_conf.interceptors.on_broker_state_change, ic_name,
+ (void *)on_broker_state_change, ic_opaque);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h
new file mode 100644
index 000000000..85f061ba9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_interceptor.h
@@ -0,0 +1,104 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_INTERCEPTOR_H
+#define _RDKAFKA_INTERCEPTOR_H
+
+rd_kafka_conf_res_t rd_kafka_interceptors_on_conf_set(rd_kafka_conf_t *conf,
+ const char *name,
+ const char *val,
+ char *errstr,
+ size_t errstr_size);
+void rd_kafka_interceptors_on_conf_dup(rd_kafka_conf_t *new_conf,
+ const rd_kafka_conf_t *old_conf,
+ size_t filter_cnt,
+ const char **filter);
+void rd_kafka_interceptors_on_conf_destroy(rd_kafka_conf_t *conf);
+void rd_kafka_interceptors_on_new(rd_kafka_t *rk, const rd_kafka_conf_t *conf);
+void rd_kafka_interceptors_on_destroy(rd_kafka_t *rk);
+void rd_kafka_interceptors_on_send(rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage);
+void rd_kafka_interceptors_on_acknowledgement(rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage);
+void rd_kafka_interceptors_on_acknowledgement_queue(
+ rd_kafka_t *rk,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_resp_err_t force_err);
+
+void rd_kafka_interceptors_on_consume(rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage);
+void rd_kafka_interceptors_on_commit(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_resp_err_t err);
+
+void rd_kafka_interceptors_on_request_sent(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size);
+
+void rd_kafka_interceptors_on_response_received(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err);
+
+void rd_kafka_interceptors_on_thread_start(rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type);
+void rd_kafka_interceptors_on_thread_exit(rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type);
+
+void rd_kafka_interceptors_on_broker_state_change(rd_kafka_t *rk,
+ int32_t broker_id,
+ const char *secproto,
+ const char *name,
+ int port,
+ const char *state);
+
+void rd_kafka_conf_interceptor_ctor(int scope, void *pconf);
+void rd_kafka_conf_interceptor_dtor(int scope, void *pconf);
+void rd_kafka_conf_interceptor_copy(int scope,
+ void *pdst,
+ const void *psrc,
+ void *dstptr,
+ const void *srcptr,
+ size_t filter_cnt,
+ const char **filter);
+
+void rd_kafka_interceptors_destroy(rd_kafka_conf_t *conf);
+
+#endif /* _RDKAFKA_INTERCEPTOR_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c
new file mode 100644
index 000000000..b52108bb1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.c
@@ -0,0 +1,450 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_lz4.h"
+
+#if WITH_LZ4_EXT
+#include <lz4frame.h>
+#else
+#include "lz4frame.h"
+#endif
+#include "rdxxhash.h"
+
+#include "rdbuf.h"
+
+/**
+ * Fix-up bad LZ4 framing caused by buggy Kafka client / broker.
+ * The LZ4F framing format is described in detail here:
+ * https://github.com/lz4/lz4/blob/master/doc/lz4_Frame_format.md
+ *
+ * NOTE: This modifies 'inbuf'.
+ *
+ * Returns an error on failure to fix (nothing modified), else NO_ERROR.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_lz4_decompress_fixup_bad_framing(rd_kafka_broker_t *rkb,
+ char *inbuf,
+ size_t inlen) {
+ static const char magic[4] = {0x04, 0x22, 0x4d, 0x18};
+ uint8_t FLG, HC, correct_HC;
+ size_t of = 4;
+
+ /* Format is:
+ * int32_t magic;
+ * int8_t_ FLG;
+ * int8_t BD;
+ * [ int64_t contentSize; ]
+ * int8_t HC;
+ */
+ if (inlen < 4 + 3 || memcmp(inbuf, magic, 4)) {
+ rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP",
+ "Unable to fix-up legacy LZ4 framing "
+ "(%" PRIusz " bytes): invalid length or magic value",
+ inlen);
+ return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ }
+
+ of = 4; /* past magic */
+ FLG = inbuf[of++];
+ of++; /* BD */
+
+ if ((FLG >> 3) & 1) /* contentSize */
+ of += 8;
+
+ if (of >= inlen) {
+ rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP",
+ "Unable to fix-up legacy LZ4 framing "
+ "(%" PRIusz " bytes): requires %" PRIusz " bytes",
+ inlen, of);
+ return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ }
+
+ /* Header hash code */
+ HC = inbuf[of];
+
+ /* Calculate correct header hash code */
+ correct_HC = (XXH32(inbuf + 4, of - 4, 0) >> 8) & 0xff;
+
+ if (HC != correct_HC)
+ inbuf[of] = correct_HC;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * Reverse of fix-up: break LZ4 framing caused to be compatbile with with
+ * buggy Kafka client / broker.
+ *
+ * NOTE: This modifies 'outbuf'.
+ *
+ * Returns an error on failure to recognize format (nothing modified),
+ * else NO_ERROR.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_lz4_compress_break_framing(rd_kafka_broker_t *rkb,
+ char *outbuf,
+ size_t outlen) {
+ static const char magic[4] = {0x04, 0x22, 0x4d, 0x18};
+ uint8_t FLG, HC, bad_HC;
+ size_t of = 4;
+
+ /* Format is:
+ * int32_t magic;
+ * int8_t_ FLG;
+ * int8_t BD;
+ * [ int64_t contentSize; ]
+ * int8_t HC;
+ */
+ if (outlen < 4 + 3 || memcmp(outbuf, magic, 4)) {
+ rd_rkb_dbg(rkb, BROKER, "LZ4FIXDOWN",
+ "Unable to break legacy LZ4 framing "
+ "(%" PRIusz " bytes): invalid length or magic value",
+ outlen);
+ return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ }
+
+ of = 4; /* past magic */
+ FLG = outbuf[of++];
+ of++; /* BD */
+
+ if ((FLG >> 3) & 1) /* contentSize */
+ of += 8;
+
+ if (of >= outlen) {
+ rd_rkb_dbg(rkb, BROKER, "LZ4FIXUP",
+ "Unable to break legacy LZ4 framing "
+ "(%" PRIusz " bytes): requires %" PRIusz " bytes",
+ outlen, of);
+ return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ }
+
+ /* Header hash code */
+ HC = outbuf[of];
+
+ /* Calculate bad header hash code (include magic) */
+ bad_HC = (XXH32(outbuf, of, 0) >> 8) & 0xff;
+
+ if (HC != bad_HC)
+ outbuf[of] = bad_HC;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Decompress LZ4F (framed) data.
+ * Kafka broker versions <0.10.0.0 (MsgVersion 0) breaks LZ4 framing
+ * checksum, if \p proper_hc we assume the checksum is okay
+ * (broker version >=0.10.0, MsgVersion >= 1) else we fix it up.
+ *
+ * @remark May modify \p inbuf (if not \p proper_hc)
+ */
+rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb,
+ int proper_hc,
+ int64_t Offset,
+ char *inbuf,
+ size_t inlen,
+ void **outbuf,
+ size_t *outlenp) {
+ LZ4F_errorCode_t code;
+ LZ4F_decompressionContext_t dctx;
+ LZ4F_frameInfo_t fi;
+ size_t in_sz, out_sz;
+ size_t in_of, out_of;
+ size_t r;
+ size_t estimated_uncompressed_size;
+ size_t outlen;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ char *out = NULL;
+
+ *outbuf = NULL;
+
+ code = LZ4F_createDecompressionContext(&dctx, LZ4F_VERSION);
+ if (LZ4F_isError(code)) {
+ rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
+ "Unable to create LZ4 decompression context: %s",
+ LZ4F_getErrorName(code));
+ return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ }
+
+ if (!proper_hc) {
+ /* The original/legacy LZ4 framing in Kafka was buggy and
+ * calculated the LZ4 framing header hash code (HC) incorrectly.
+ * We do a fix-up of it here. */
+ if ((err = rd_kafka_lz4_decompress_fixup_bad_framing(rkb, inbuf,
+ inlen)))
+ goto done;
+ }
+
+ in_sz = inlen;
+ r = LZ4F_getFrameInfo(dctx, &fi, (const void *)inbuf, &in_sz);
+ if (LZ4F_isError(r)) {
+ rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
+ "Failed to gather LZ4 frame info: %s",
+ LZ4F_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ /* If uncompressed size is unknown or out of bounds, use a sane
+ * default (4x compression) and reallocate if needed
+ * More info on max size: http://stackoverflow.com/a/25751871/1821055
+ * More info on lz4 compression ratios seen for different data sets:
+ * http://dev.ti.com/tirex/content/simplelink_msp432p4_sdk_1_50_00_12/docs/lz4/users_guide/docguide.llQpgm/benchmarking.html
+ */
+ if (fi.contentSize == 0 || fi.contentSize > inlen * 255) {
+ estimated_uncompressed_size = RD_MIN(
+ inlen * 4, (size_t)(rkb->rkb_rk->rk_conf.max_msg_size));
+ } else {
+ estimated_uncompressed_size = (size_t)fi.contentSize;
+ }
+
+ /* Allocate output buffer, we increase this later if needed,
+ * but hopefully not. */
+ out = rd_malloc(estimated_uncompressed_size);
+ if (!out) {
+ rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC",
+ "Unable to allocate decompression "
+ "buffer of %" PRIusz " bytes: %s",
+ estimated_uncompressed_size, rd_strerror(errno));
+ err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ goto done;
+ }
+
+
+ /* Decompress input buffer to output buffer until input is exhausted. */
+ outlen = estimated_uncompressed_size;
+ in_of = in_sz;
+ out_of = 0;
+ while (in_of < inlen) {
+ out_sz = outlen - out_of;
+ in_sz = inlen - in_of;
+ r = LZ4F_decompress(dctx, out + out_of, &out_sz, inbuf + in_of,
+ &in_sz, NULL);
+ if (unlikely(LZ4F_isError(r))) {
+ rd_rkb_dbg(rkb, MSG, "LZ4DEC",
+ "Failed to LZ4 (%s HC) decompress message "
+ "(offset %" PRId64
+ ") at "
+ "payload offset %" PRIusz "/%" PRIusz ": %s",
+ proper_hc ? "proper" : "legacy", Offset,
+ in_of, inlen, LZ4F_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ rd_kafka_assert(NULL, out_of + out_sz <= outlen &&
+ in_of + in_sz <= inlen);
+ out_of += out_sz;
+ in_of += in_sz;
+ if (r == 0)
+ break;
+
+ /* Need to grow output buffer, this shouldn't happen if
+ * contentSize was properly set. */
+ if (unlikely(out_of == outlen)) {
+ char *tmp;
+ /* Grow exponentially with some factor > 1 (using 1.75)
+ * for amortized O(1) copying */
+ size_t extra = RD_MAX(outlen * 3 / 4, 1024);
+
+ rd_atomic64_add(&rkb->rkb_c.zbuf_grow, 1);
+
+ if (!(tmp = rd_realloc(out, outlen + extra))) {
+ rd_rkb_log(rkb, LOG_WARNING, "LZ4DEC",
+ "Unable to grow decompression "
+ "buffer to %" PRIusz "+%" PRIusz
+ " bytes: %s",
+ outlen, extra, rd_strerror(errno));
+ err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ goto done;
+ }
+ out = tmp;
+ outlen += extra;
+ }
+ }
+
+
+ if (in_of < inlen) {
+ rd_rkb_dbg(rkb, MSG, "LZ4DEC",
+ "Failed to LZ4 (%s HC) decompress message "
+ "(offset %" PRId64
+ "): "
+ "%" PRIusz " (out of %" PRIusz ") bytes remaining",
+ proper_hc ? "proper" : "legacy", Offset,
+ inlen - in_of, inlen);
+ err = RD_KAFKA_RESP_ERR__BAD_MSG;
+ goto done;
+ }
+
+ *outbuf = out;
+ *outlenp = out_of;
+
+done:
+ code = LZ4F_freeDecompressionContext(dctx);
+ if (LZ4F_isError(code)) {
+ rd_rkb_dbg(rkb, BROKER, "LZ4DECOMPR",
+ "Failed to close LZ4 compression context: %s",
+ LZ4F_getErrorName(code));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ }
+
+ if (err && out)
+ rd_free(out);
+
+ return err;
+}
+
+
+/**
+ * Allocate space for \p *outbuf and compress all \p iovlen buffers in \p iov.
+ * @param proper_hc generate a proper HC (checksum) (kafka >=0.10.0.0,
+ * MsgVersion >= 1)
+ * @param MessageSetSize indicates (at least) full uncompressed data size,
+ * possibly including MessageSet fields that will not
+ * be compressed.
+ *
+ * @returns allocated buffer in \p *outbuf, length in \p *outlenp.
+ */
+rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb,
+ int proper_hc,
+ int comp_level,
+ rd_slice_t *slice,
+ void **outbuf,
+ size_t *outlenp) {
+ LZ4F_compressionContext_t cctx;
+ LZ4F_errorCode_t r;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ size_t len = rd_slice_remains(slice);
+ size_t out_sz;
+ size_t out_of = 0;
+ char *out;
+ const void *p;
+ size_t rlen;
+
+ /* Required by Kafka */
+ const LZ4F_preferences_t prefs = {
+ .frameInfo = {.blockMode = LZ4F_blockIndependent},
+ .compressionLevel = comp_level};
+
+ *outbuf = NULL;
+
+ out_sz = LZ4F_compressBound(len, NULL) + 1000;
+ if (LZ4F_isError(out_sz)) {
+ rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
+ "Unable to query LZ4 compressed size "
+ "(for %" PRIusz " uncompressed bytes): %s",
+ len, LZ4F_getErrorName(out_sz));
+ return RD_KAFKA_RESP_ERR__BAD_MSG;
+ }
+
+ out = rd_malloc(out_sz);
+ if (!out) {
+ rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
+ "Unable to allocate output buffer "
+ "(%" PRIusz " bytes): %s",
+ out_sz, rd_strerror(errno));
+ return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ }
+
+ r = LZ4F_createCompressionContext(&cctx, LZ4F_VERSION);
+ if (LZ4F_isError(r)) {
+ rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
+ "Unable to create LZ4 compression context: %s",
+ LZ4F_getErrorName(r));
+ rd_free(out);
+ return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ }
+
+ r = LZ4F_compressBegin(cctx, out, out_sz, &prefs);
+ if (LZ4F_isError(r)) {
+ rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
+ "Unable to begin LZ4 compression "
+ "(out buffer is %" PRIusz " bytes): %s",
+ out_sz, LZ4F_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ out_of += r;
+
+ while ((rlen = rd_slice_reader(slice, &p))) {
+ rd_assert(out_of < out_sz);
+ r = LZ4F_compressUpdate(cctx, out + out_of, out_sz - out_of, p,
+ rlen, NULL);
+ if (unlikely(LZ4F_isError(r))) {
+ rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
+ "LZ4 compression failed "
+ "(at of %" PRIusz
+ " bytes, with "
+ "%" PRIusz
+ " bytes remaining in out buffer): "
+ "%s",
+ rlen, out_sz - out_of, LZ4F_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ out_of += r;
+ }
+
+ rd_assert(rd_slice_remains(slice) == 0);
+
+ r = LZ4F_compressEnd(cctx, out + out_of, out_sz - out_of, NULL);
+ if (unlikely(LZ4F_isError(r))) {
+ rd_rkb_dbg(rkb, MSG, "LZ4COMPR",
+ "Failed to finalize LZ4 compression "
+ "of %" PRIusz " bytes: %s",
+ len, LZ4F_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ out_of += r;
+
+ /* For the broken legacy framing we need to mess up the header checksum
+ * so that the Kafka client / broker code accepts it. */
+ if (!proper_hc)
+ if ((err =
+ rd_kafka_lz4_compress_break_framing(rkb, out, out_of)))
+ goto done;
+
+
+ *outbuf = out;
+ *outlenp = out_of;
+
+done:
+ LZ4F_freeCompressionContext(cctx);
+
+ if (err)
+ rd_free(out);
+
+ return err;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h
new file mode 100644
index 000000000..eb0ef9883
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_lz4.h
@@ -0,0 +1,49 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDKAFKA_LZ4_H_
+#define _RDKAFKA_LZ4_H_
+
+
+rd_kafka_resp_err_t rd_kafka_lz4_decompress(rd_kafka_broker_t *rkb,
+ int proper_hc,
+ int64_t Offset,
+ char *inbuf,
+ size_t inlen,
+ void **outbuf,
+ size_t *outlenp);
+
+rd_kafka_resp_err_t rd_kafka_lz4_compress(rd_kafka_broker_t *rkb,
+ int proper_hc,
+ int comp_level,
+ rd_slice_t *slice,
+ void **outbuf,
+ size_t *outlenp);
+
+#endif /* _RDKAFKA_LZ4_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c
new file mode 100644
index 000000000..4e32e5d58
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.c
@@ -0,0 +1,1468 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_request.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_metadata.h"
+
+#include <string.h>
+#include <stdarg.h>
+
+
+rd_kafka_resp_err_t
+rd_kafka_metadata(rd_kafka_t *rk,
+ int all_topics,
+ rd_kafka_topic_t *only_rkt,
+ const struct rd_kafka_metadata **metadatap,
+ int timeout_ms) {
+ rd_kafka_q_t *rkq;
+ rd_kafka_broker_t *rkb;
+ rd_kafka_op_t *rko;
+ rd_ts_t ts_end = rd_timeout_init(timeout_ms);
+ rd_list_t topics;
+ rd_bool_t allow_auto_create_topics =
+ rk->rk_conf.allow_auto_create_topics;
+
+ /* Query any broker that is up, and if none are up pick the first one,
+ * if we're lucky it will be up before the timeout */
+ rkb = rd_kafka_broker_any_usable(rk, timeout_ms, RD_DO_LOCK, 0,
+ "application metadata request");
+ if (!rkb)
+ return RD_KAFKA_RESP_ERR__TRANSPORT;
+
+ rkq = rd_kafka_q_new(rk);
+
+ rd_list_init(&topics, 0, rd_free);
+ if (!all_topics) {
+ if (only_rkt)
+ rd_list_add(&topics,
+ rd_strdup(rd_kafka_topic_name(only_rkt)));
+ else {
+ int cache_cnt;
+ rd_kafka_local_topics_to_list(rkb->rkb_rk, &topics,
+ &cache_cnt);
+ /* Don't trigger auto-create for cached topics */
+ if (rd_list_cnt(&topics) == cache_cnt)
+ allow_auto_create_topics = rd_true;
+ }
+ }
+
+ /* Async: request metadata */
+ rko = rd_kafka_op_new(RD_KAFKA_OP_METADATA);
+ rd_kafka_op_set_replyq(rko, rkq, 0);
+ rko->rko_u.metadata.force = 1; /* Force metadata request regardless
+ * of outstanding metadata requests. */
+ rd_kafka_MetadataRequest(rkb, &topics, "application requested",
+ allow_auto_create_topics,
+ /* cgrp_update:
+ * Only update consumer group state
+ * on response if this lists all
+ * topics in the cluster, since a
+ * partial request may make it seem
+ * like some subscribed topics are missing. */
+ all_topics ? rd_true : rd_false, rko);
+
+ rd_list_destroy(&topics);
+ rd_kafka_broker_destroy(rkb);
+
+ /* Wait for reply (or timeout) */
+ rko = rd_kafka_q_pop(rkq, rd_timeout_remains_us(ts_end), 0);
+
+ rd_kafka_q_destroy_owner(rkq);
+
+ /* Timeout */
+ if (!rko)
+ return RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ /* Error */
+ if (rko->rko_err) {
+ rd_kafka_resp_err_t err = rko->rko_err;
+ rd_kafka_op_destroy(rko);
+ return err;
+ }
+
+ /* Reply: pass metadata pointer to application who now owns it*/
+ rd_kafka_assert(rk, rko->rko_u.metadata.md);
+ *metadatap = rko->rko_u.metadata.md;
+ rko->rko_u.metadata.md = NULL;
+ rd_kafka_op_destroy(rko);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+void rd_kafka_metadata_destroy(const struct rd_kafka_metadata *metadata) {
+ rd_free((void *)metadata);
+}
+
+
+/**
+ * @returns a newly allocated copy of metadata \p src of size \p size
+ */
+struct rd_kafka_metadata *
+rd_kafka_metadata_copy(const struct rd_kafka_metadata *src, size_t size) {
+ struct rd_kafka_metadata *md;
+ rd_tmpabuf_t tbuf;
+ int i;
+
+ /* metadata is stored in one contigious buffer where structs and
+ * and pointed-to fields are layed out in a memory aligned fashion.
+ * rd_tmpabuf_t provides the infrastructure to do this.
+ * Because of this we copy all the structs verbatim but
+ * any pointer fields needs to be copied explicitly to update
+ * the pointer address. */
+ rd_tmpabuf_new(&tbuf, size, 1 /*assert on fail*/);
+ md = rd_tmpabuf_write(&tbuf, src, sizeof(*md));
+
+ rd_tmpabuf_write_str(&tbuf, src->orig_broker_name);
+
+
+ /* Copy Brokers */
+ md->brokers = rd_tmpabuf_write(&tbuf, src->brokers,
+ md->broker_cnt * sizeof(*md->brokers));
+
+ for (i = 0; i < md->broker_cnt; i++)
+ md->brokers[i].host =
+ rd_tmpabuf_write_str(&tbuf, src->brokers[i].host);
+
+
+ /* Copy TopicMetadata */
+ md->topics = rd_tmpabuf_write(&tbuf, src->topics,
+ md->topic_cnt * sizeof(*md->topics));
+
+ for (i = 0; i < md->topic_cnt; i++) {
+ int j;
+
+ md->topics[i].topic =
+ rd_tmpabuf_write_str(&tbuf, src->topics[i].topic);
+
+
+ /* Copy partitions */
+ md->topics[i].partitions =
+ rd_tmpabuf_write(&tbuf, src->topics[i].partitions,
+ md->topics[i].partition_cnt *
+ sizeof(*md->topics[i].partitions));
+
+ for (j = 0; j < md->topics[i].partition_cnt; j++) {
+ /* Copy replicas and ISRs */
+ md->topics[i].partitions[j].replicas = rd_tmpabuf_write(
+ &tbuf, src->topics[i].partitions[j].replicas,
+ md->topics[i].partitions[j].replica_cnt *
+ sizeof(*md->topics[i].partitions[j].replicas));
+
+ md->topics[i].partitions[j].isrs = rd_tmpabuf_write(
+ &tbuf, src->topics[i].partitions[j].isrs,
+ md->topics[i].partitions[j].isr_cnt *
+ sizeof(*md->topics[i].partitions[j].isrs));
+ }
+ }
+
+ /* Check for tmpabuf errors */
+ if (rd_tmpabuf_failed(&tbuf))
+ rd_kafka_assert(NULL, !*"metadata copy failed");
+
+ /* Delibarely not destroying the tmpabuf since we return
+ * its allocated memory. */
+
+ return md;
+}
+
+
+
+/**
+ * @brief Partition (id) comparator for partition_id_leader_epoch struct.
+ */
+static int rd_kafka_metadata_partition_leader_epoch_cmp(const void *_a,
+ const void *_b) {
+ const rd_kafka_partition_leader_epoch_t *a = _a, *b = _b;
+ return RD_CMP(a->partition_id, b->partition_id);
+}
+
+
+
+/**
+ * @brief Update topic state and information based on topic metadata.
+ *
+ * @param mdt Topic metadata.
+ * @param leader_epochs Per-partition leader epoch array, or NULL if not known.
+ *
+ * @locality rdkafka main thread
+ * @locks_acquired rd_kafka_wrlock(rk)
+ */
+static void rd_kafka_parse_Metadata_update_topic(
+ rd_kafka_broker_t *rkb,
+ const rd_kafka_metadata_topic_t *mdt,
+ const rd_kafka_partition_leader_epoch_t *leader_epochs) {
+
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ /* The indent below is intentional */
+ " Topic %s with %i partitions%s%s", mdt->topic,
+ mdt->partition_cnt, mdt->err ? ": " : "",
+ mdt->err ? rd_kafka_err2str(mdt->err) : "");
+
+ /* Ignore metadata completely for temporary errors. (issue #513)
+ * LEADER_NOT_AVAILABLE: Broker is rebalancing
+ */
+ if (mdt->err == RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE &&
+ mdt->partition_cnt == 0) {
+ rd_rkb_dbg(rkb, TOPIC, "METADATA",
+ "Temporary error in metadata reply for "
+ "topic %s (PartCnt %i): %s: ignoring",
+ mdt->topic, mdt->partition_cnt,
+ rd_kafka_err2str(mdt->err));
+ } else {
+ /* Update local topic & partition state based
+ * on metadata */
+ rd_kafka_topic_metadata_update2(rkb, mdt, leader_epochs);
+ }
+}
+
+/**
+ * @brief Only brokers with Metadata version >= 9 have reliable leader
+ * epochs. Before that version, leader epoch must be treated
+ * as missing (-1).
+ *
+ * @param rkb The broker
+ * @return Is this a broker version with reliable leader epochs?
+ *
+ * @locality rdkafka main thread
+ */
+rd_bool_t rd_kafka_has_reliable_leader_epochs(rd_kafka_broker_t *rkb) {
+ int features;
+ int16_t ApiVersion = 0;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_Metadata, 0, 9, &features);
+
+ return ApiVersion >= 9;
+}
+
+
+/**
+ * @brief Handle a Metadata response message.
+ *
+ * @param topics are the requested topics (may be NULL)
+ *
+ * The metadata will be marshalled into 'struct rd_kafka_metadata*' structs.
+ *
+ * The marshalled metadata is returned in \p *mdp, (NULL on error).
+
+ * @returns an error code on parse failure, else NO_ERRRO.
+ *
+ * @locality rdkafka main thread
+ */
+rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *request,
+ rd_kafka_buf_t *rkbuf,
+ struct rd_kafka_metadata **mdp) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ int i, j, k;
+ rd_tmpabuf_t tbuf;
+ struct rd_kafka_metadata *md = NULL;
+ size_t rkb_namelen;
+ const int log_decode_errors = LOG_ERR;
+ rd_list_t *missing_topics = NULL;
+ const rd_list_t *requested_topics = request->rkbuf_u.Metadata.topics;
+ rd_bool_t all_topics = request->rkbuf_u.Metadata.all_topics;
+ rd_bool_t cgrp_update =
+ request->rkbuf_u.Metadata.cgrp_update && rk->rk_cgrp;
+ const char *reason = request->rkbuf_u.Metadata.reason
+ ? request->rkbuf_u.Metadata.reason
+ : "(no reason)";
+ int ApiVersion = request->rkbuf_reqhdr.ApiVersion;
+ rd_kafkap_str_t cluster_id = RD_ZERO_INIT;
+ int32_t controller_id = -1;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int broker_changes = 0;
+ int cache_changes = 0;
+ /** This array is reused and resized as necessary to hold per-partition
+ * leader epochs (ApiVersion >= 7). */
+ rd_kafka_partition_leader_epoch_t *leader_epochs = NULL;
+ /** Number of allocated elements in leader_epochs. */
+ size_t leader_epochs_size = 0;
+ rd_ts_t ts_start = rd_clock();
+
+ /* Ignore metadata updates when terminating */
+ if (rd_kafka_terminating(rkb->rkb_rk)) {
+ err = RD_KAFKA_RESP_ERR__DESTROY;
+ goto done;
+ }
+
+ rd_kafka_assert(NULL, thrd_is_current(rk->rk_thread));
+
+ /* Remove topics from missing_topics as they are seen in Metadata. */
+ if (requested_topics)
+ missing_topics =
+ rd_list_copy(requested_topics, rd_list_string_copy, NULL);
+
+ rd_kafka_broker_lock(rkb);
+ rkb_namelen = strlen(rkb->rkb_name) + 1;
+ /* We assume that the marshalled representation is
+ * no more than 4 times larger than the wire representation. */
+ rd_tmpabuf_new(&tbuf,
+ sizeof(*md) + rkb_namelen + (rkbuf->rkbuf_totlen * 4),
+ 0 /*dont assert on fail*/);
+
+ if (!(md = rd_tmpabuf_alloc(&tbuf, sizeof(*md)))) {
+ rd_kafka_broker_unlock(rkb);
+ err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ goto err;
+ }
+
+ md->orig_broker_id = rkb->rkb_nodeid;
+ md->orig_broker_name =
+ rd_tmpabuf_write(&tbuf, rkb->rkb_name, rkb_namelen);
+ rd_kafka_broker_unlock(rkb);
+
+ if (ApiVersion >= 3)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ /* Read Brokers */
+ rd_kafka_buf_read_arraycnt(rkbuf, &md->broker_cnt,
+ RD_KAFKAP_BROKERS_MAX);
+
+ if (!(md->brokers = rd_tmpabuf_alloc(&tbuf, md->broker_cnt *
+ sizeof(*md->brokers))))
+ rd_kafka_buf_parse_fail(rkbuf,
+ "%d brokers: tmpabuf memory shortage",
+ md->broker_cnt);
+
+ for (i = 0; i < md->broker_cnt; i++) {
+ rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].id);
+ rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf,
+ md->brokers[i].host);
+ rd_kafka_buf_read_i32a(rkbuf, md->brokers[i].port);
+
+ if (ApiVersion >= 1) {
+ rd_kafkap_str_t rack;
+ rd_kafka_buf_read_str(rkbuf, &rack);
+ }
+
+ rd_kafka_buf_skip_tags(rkbuf);
+ }
+
+ if (ApiVersion >= 2)
+ rd_kafka_buf_read_str(rkbuf, &cluster_id);
+
+ if (ApiVersion >= 1) {
+ rd_kafka_buf_read_i32(rkbuf, &controller_id);
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "ClusterId: %.*s, ControllerId: %" PRId32,
+ RD_KAFKAP_STR_PR(&cluster_id), controller_id);
+ }
+
+
+
+ /* Read TopicMetadata */
+ rd_kafka_buf_read_arraycnt(rkbuf, &md->topic_cnt, RD_KAFKAP_TOPICS_MAX);
+ rd_rkb_dbg(rkb, METADATA, "METADATA", "%i brokers, %i topics",
+ md->broker_cnt, md->topic_cnt);
+
+ if (!(md->topics =
+ rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics))))
+ rd_kafka_buf_parse_fail(
+ rkbuf, "%d topics: tmpabuf memory shortage", md->topic_cnt);
+
+ for (i = 0; i < md->topic_cnt; i++) {
+ rd_kafka_buf_read_i16a(rkbuf, md->topics[i].err);
+ rd_kafka_buf_read_str_tmpabuf(rkbuf, &tbuf,
+ md->topics[i].topic);
+ if (ApiVersion >= 1) {
+ int8_t is_internal;
+ rd_kafka_buf_read_i8(rkbuf, &is_internal);
+ }
+
+ /* PartitionMetadata */
+ rd_kafka_buf_read_arraycnt(rkbuf, &md->topics[i].partition_cnt,
+ RD_KAFKAP_PARTITIONS_MAX);
+
+ if (!(md->topics[i].partitions = rd_tmpabuf_alloc(
+ &tbuf, md->topics[i].partition_cnt *
+ sizeof(*md->topics[i].partitions))))
+ rd_kafka_buf_parse_fail(rkbuf,
+ "%s: %d partitions: "
+ "tmpabuf memory shortage",
+ md->topics[i].topic,
+ md->topics[i].partition_cnt);
+
+ /* Resize reused leader_epochs array to fit this partition's
+ * leader epochs. */
+ if (ApiVersion >= 7 && md->topics[i].partition_cnt > 0 &&
+ (size_t)md->topics[i].partition_cnt > leader_epochs_size) {
+ leader_epochs_size =
+ RD_MAX(32, md->topics[i].partition_cnt);
+ leader_epochs =
+ rd_realloc(leader_epochs, sizeof(*leader_epochs) *
+ leader_epochs_size);
+ }
+
+ for (j = 0; j < md->topics[i].partition_cnt; j++) {
+ rd_kafka_buf_read_i16a(rkbuf,
+ md->topics[i].partitions[j].err);
+ rd_kafka_buf_read_i32a(rkbuf,
+ md->topics[i].partitions[j].id);
+ rd_kafka_buf_read_i32a(
+ rkbuf, md->topics[i].partitions[j].leader);
+ if (ApiVersion >= 7) {
+ leader_epochs[j].partition_id =
+ md->topics[i].partitions[j].id;
+ rd_kafka_buf_read_i32(
+ rkbuf, &leader_epochs[j].leader_epoch);
+ }
+
+ /* Replicas */
+ rd_kafka_buf_read_arraycnt(
+ rkbuf, &md->topics[i].partitions[j].replica_cnt,
+ RD_KAFKAP_BROKERS_MAX);
+
+ if (!(md->topics[i].partitions[j].replicas =
+ rd_tmpabuf_alloc(
+ &tbuf,
+ md->topics[i].partitions[j].replica_cnt *
+ sizeof(*md->topics[i]
+ .partitions[j]
+ .replicas))))
+ rd_kafka_buf_parse_fail(
+ rkbuf,
+ "%s [%" PRId32
+ "]: %d replicas: "
+ "tmpabuf memory shortage",
+ md->topics[i].topic,
+ md->topics[i].partitions[j].id,
+ md->topics[i].partitions[j].replica_cnt);
+
+
+ for (k = 0; k < md->topics[i].partitions[j].replica_cnt;
+ k++)
+ rd_kafka_buf_read_i32a(
+ rkbuf,
+ md->topics[i].partitions[j].replicas[k]);
+
+ /* Isrs */
+ rd_kafka_buf_read_arraycnt(
+ rkbuf, &md->topics[i].partitions[j].isr_cnt,
+ RD_KAFKAP_BROKERS_MAX);
+
+ if (!(md->topics[i]
+ .partitions[j]
+ .isrs = rd_tmpabuf_alloc(
+ &tbuf,
+ md->topics[i].partitions[j].isr_cnt *
+ sizeof(
+ *md->topics[i].partitions[j].isrs))))
+ rd_kafka_buf_parse_fail(
+ rkbuf,
+ "%s [%" PRId32
+ "]: %d isrs: "
+ "tmpabuf memory shortage",
+ md->topics[i].topic,
+ md->topics[i].partitions[j].id,
+ md->topics[i].partitions[j].isr_cnt);
+
+
+ for (k = 0; k < md->topics[i].partitions[j].isr_cnt;
+ k++)
+ rd_kafka_buf_read_i32a(
+ rkbuf, md->topics[i].partitions[j].isrs[k]);
+
+ if (ApiVersion >= 5) {
+ /* OfflineReplicas int32 array (ignored) */
+ int32_t offline_replicas_cnt;
+
+ /* #OfflineReplicas */
+ rd_kafka_buf_read_arraycnt(
+ rkbuf, &offline_replicas_cnt,
+ RD_KAFKAP_BROKERS_MAX);
+ rd_kafka_buf_skip(rkbuf, offline_replicas_cnt *
+ sizeof(int32_t));
+ }
+
+ rd_kafka_buf_skip_tags(rkbuf);
+ }
+
+ if (ApiVersion >= 8) {
+ int32_t TopicAuthorizedOperations;
+ /* TopicAuthorizedOperations */
+ rd_kafka_buf_read_i32(rkbuf,
+ &TopicAuthorizedOperations);
+ }
+
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ /* Ignore topics in blacklist */
+ if (rkb->rkb_rk->rk_conf.topic_blacklist &&
+ rd_kafka_pattern_match(rkb->rkb_rk->rk_conf.topic_blacklist,
+ md->topics[i].topic)) {
+ rd_rkb_dbg(rkb, TOPIC | RD_KAFKA_DBG_METADATA,
+ "BLACKLIST",
+ "Ignoring blacklisted topic \"%s\" "
+ "in metadata",
+ md->topics[i].topic);
+ continue;
+ }
+
+ if (leader_epochs_size > 0 &&
+ !rd_kafka_has_reliable_leader_epochs(rkb)) {
+ /* Prior to Kafka version 2.4 (which coincides with
+ * Metadata version 9), the broker does not propagate
+ * leader epoch information accurately while a
+ * reassignment is in progress. Relying on a stale
+ * epoch can lead to FENCED_LEADER_EPOCH errors which
+ * can prevent consumption throughout the course of
+ * a reassignment. It is safer in this case to revert
+ * to the behavior in previous protocol versions
+ * which checks leader status only. */
+ leader_epochs_size = 0;
+ rd_free(leader_epochs);
+ leader_epochs = NULL;
+ }
+
+
+ /* Sort partitions by partition id */
+ qsort(md->topics[i].partitions, md->topics[i].partition_cnt,
+ sizeof(*md->topics[i].partitions),
+ rd_kafka_metadata_partition_id_cmp);
+ if (leader_epochs_size > 0) {
+ /* And sort leader_epochs by partition id */
+ qsort(leader_epochs, md->topics[i].partition_cnt,
+ sizeof(*leader_epochs),
+ rd_kafka_metadata_partition_leader_epoch_cmp);
+ }
+
+ /* Update topic state based on the topic metadata */
+ rd_kafka_parse_Metadata_update_topic(rkb, &md->topics[i],
+ leader_epochs);
+
+
+ if (requested_topics) {
+ rd_list_free_cb(missing_topics,
+ rd_list_remove_cmp(missing_topics,
+ md->topics[i].topic,
+ (void *)strcmp));
+ if (!all_topics) {
+ /* Only update cache when not asking
+ * for all topics. */
+
+ rd_kafka_wrlock(rk);
+ rd_kafka_metadata_cache_topic_update(
+ rk, &md->topics[i],
+ rd_false /*propagate later*/);
+ cache_changes++;
+ rd_kafka_wrunlock(rk);
+ }
+ }
+ }
+
+ if (ApiVersion >= 8 && ApiVersion <= 10) {
+ int32_t ClusterAuthorizedOperations;
+ /* ClusterAuthorizedOperations */
+ rd_kafka_buf_read_i32(rkbuf, &ClusterAuthorizedOperations);
+ }
+
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ /* Entire Metadata response now parsed without errors:
+ * update our internal state according to the response. */
+
+ if (md->broker_cnt == 0 && md->topic_cnt == 0) {
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "No brokers or topics in metadata: should retry");
+ err = RD_KAFKA_RESP_ERR__PARTIAL;
+ goto err;
+ }
+
+ /* Update our list of brokers. */
+ for (i = 0; i < md->broker_cnt; i++) {
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ " Broker #%i/%i: %s:%i NodeId %" PRId32, i,
+ md->broker_cnt, md->brokers[i].host,
+ md->brokers[i].port, md->brokers[i].id);
+ rd_kafka_broker_update(rkb->rkb_rk, rkb->rkb_proto,
+ &md->brokers[i], NULL);
+ }
+
+ /* Requested topics not seen in metadata? Propogate to topic code. */
+ if (missing_topics) {
+ char *topic;
+ rd_rkb_dbg(rkb, TOPIC, "METADATA",
+ "%d/%d requested topic(s) seen in metadata",
+ rd_list_cnt(requested_topics) -
+ rd_list_cnt(missing_topics),
+ rd_list_cnt(requested_topics));
+ for (i = 0; i < rd_list_cnt(missing_topics); i++)
+ rd_rkb_dbg(rkb, TOPIC, "METADATA", "wanted %s",
+ (char *)(missing_topics->rl_elems[i]));
+ RD_LIST_FOREACH(topic, missing_topics, i) {
+ rd_kafka_topic_t *rkt;
+
+ rkt =
+ rd_kafka_topic_find(rkb->rkb_rk, topic, 1 /*lock*/);
+ if (rkt) {
+ /* Received metadata response contained no
+ * information about topic 'rkt' and thus
+ * indicates the topic is not available in the
+ * cluster.
+ * Mark the topic as non-existent */
+ rd_kafka_topic_wrlock(rkt);
+ rd_kafka_topic_set_notexists(
+ rkt, RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
+ rd_kafka_topic_wrunlock(rkt);
+
+ rd_kafka_topic_destroy0(rkt);
+ }
+ }
+ }
+
+
+ rd_kafka_wrlock(rkb->rkb_rk);
+
+ rkb->rkb_rk->rk_ts_metadata = rd_clock();
+
+ /* Update cached cluster id. */
+ if (RD_KAFKAP_STR_LEN(&cluster_id) > 0 &&
+ (!rk->rk_clusterid ||
+ rd_kafkap_str_cmp_str(&cluster_id, rk->rk_clusterid))) {
+ rd_rkb_dbg(rkb, BROKER | RD_KAFKA_DBG_GENERIC, "CLUSTERID",
+ "ClusterId update \"%s\" -> \"%.*s\"",
+ rk->rk_clusterid ? rk->rk_clusterid : "",
+ RD_KAFKAP_STR_PR(&cluster_id));
+ if (rk->rk_clusterid) {
+ rd_kafka_log(rk, LOG_WARNING, "CLUSTERID",
+ "Broker %s reports different ClusterId "
+ "\"%.*s\" than previously known \"%s\": "
+ "a client must not be simultaneously "
+ "connected to multiple clusters",
+ rd_kafka_broker_name(rkb),
+ RD_KAFKAP_STR_PR(&cluster_id),
+ rk->rk_clusterid);
+ rd_free(rk->rk_clusterid);
+ }
+
+ rk->rk_clusterid = RD_KAFKAP_STR_DUP(&cluster_id);
+ /* rd_kafka_clusterid() waits for a cache update even though
+ * the clusterid is not in the cache itself. (#3620) */
+ cache_changes++;
+ }
+
+ /* Update controller id. */
+ if (rkb->rkb_rk->rk_controllerid != controller_id) {
+ rd_rkb_dbg(rkb, BROKER, "CONTROLLERID",
+ "ControllerId update %" PRId32 " -> %" PRId32,
+ rkb->rkb_rk->rk_controllerid, controller_id);
+ rkb->rkb_rk->rk_controllerid = controller_id;
+ broker_changes++;
+ }
+
+ if (all_topics) {
+ /* Expire all cache entries that were not updated. */
+ rd_kafka_metadata_cache_evict_by_age(rkb->rkb_rk, ts_start);
+
+ if (rkb->rkb_rk->rk_full_metadata)
+ rd_kafka_metadata_destroy(
+ rkb->rkb_rk->rk_full_metadata);
+ rkb->rkb_rk->rk_full_metadata =
+ rd_kafka_metadata_copy(md, tbuf.of);
+ rkb->rkb_rk->rk_ts_full_metadata = rkb->rkb_rk->rk_ts_metadata;
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "Caching full metadata with "
+ "%d broker(s) and %d topic(s): %s",
+ md->broker_cnt, md->topic_cnt, reason);
+ } else {
+ if (cache_changes)
+ rd_kafka_metadata_cache_propagate_changes(rk);
+ rd_kafka_metadata_cache_expiry_start(rk);
+ }
+
+ /* Remove cache hints for the originally requested topics. */
+ if (requested_topics)
+ rd_kafka_metadata_cache_purge_hints(rk, requested_topics);
+
+ rd_kafka_wrunlock(rkb->rkb_rk);
+
+ if (broker_changes) {
+ /* Broadcast broker metadata changes to listeners. */
+ rd_kafka_brokers_broadcast_state_change(rkb->rkb_rk);
+ }
+
+ /* Check if cgrp effective subscription is affected by
+ * new topic metadata.
+ * Ignore if this was a broker-only refresh (no topics), or
+ * the request was from the partition assignor (!cgrp_update)
+ * which may contain only a sub-set of the subscribed topics (namely
+ * the effective subscription of available topics) as to not
+ * propagate non-included topics as non-existent. */
+ if (cgrp_update && (requested_topics || all_topics))
+ rd_kafka_cgrp_metadata_update_check(rkb->rkb_rk->rk_cgrp,
+ rd_true /*do join*/);
+
+ /* Try to acquire a Producer ID from this broker if we
+ * don't have one. */
+ if (rd_kafka_is_idempotent(rkb->rkb_rk)) {
+ rd_kafka_wrlock(rkb->rkb_rk);
+ rd_kafka_idemp_pid_fsm(rkb->rkb_rk);
+ rd_kafka_wrunlock(rkb->rkb_rk);
+ }
+
+done:
+ if (missing_topics)
+ rd_list_destroy(missing_topics);
+
+ if (leader_epochs)
+ rd_free(leader_epochs);
+
+ /* This metadata request was triggered by someone wanting
+ * the metadata information back as a reply, so send that reply now.
+ * In this case we must not rd_free the metadata memory here,
+ * the requestee will do.
+ * The tbuf is explicitly not destroyed as we return its memory
+ * to the caller. */
+ *mdp = md;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+ if (requested_topics) {
+ /* Failed requests shall purge cache hints for
+ * the requested topics. */
+ rd_kafka_wrlock(rkb->rkb_rk);
+ rd_kafka_metadata_cache_purge_hints(rk, requested_topics);
+ rd_kafka_wrunlock(rkb->rkb_rk);
+ }
+
+ if (missing_topics)
+ rd_list_destroy(missing_topics);
+
+ if (leader_epochs)
+ rd_free(leader_epochs);
+
+ rd_tmpabuf_destroy(&tbuf);
+
+ return err;
+}
+
+
+/**
+ * @brief Add all topics in current cached full metadata
+ * that matches the topics in \p match
+ * to \p tinfos (rd_kafka_topic_info_t *).
+ *
+ * @param errored Any topic or wildcard pattern that did not match
+ * an available topic will be added to this list with
+ * the appropriate error set.
+ *
+ * @returns the number of topics matched and added to \p list
+ *
+ * @locks none
+ * @locality any
+ */
+size_t
+rd_kafka_metadata_topic_match(rd_kafka_t *rk,
+ rd_list_t *tinfos,
+ const rd_kafka_topic_partition_list_t *match,
+ rd_kafka_topic_partition_list_t *errored) {
+ int ti, i;
+ size_t cnt = 0;
+ const struct rd_kafka_metadata *metadata;
+ rd_kafka_topic_partition_list_t *unmatched;
+
+ rd_kafka_rdlock(rk);
+ metadata = rk->rk_full_metadata;
+ if (!metadata) {
+ rd_kafka_rdunlock(rk);
+ return 0;
+ }
+
+ /* To keep track of which patterns and topics in `match` that
+ * did not match any topic (or matched an errored topic), we
+ * create a set of all topics to match in `unmatched` and then
+ * remove from this set as a match is found.
+ * Whatever remains in `unmatched` after all matching is performed
+ * are the topics and patterns that did not match a topic. */
+ unmatched = rd_kafka_topic_partition_list_copy(match);
+
+ /* For each topic in the cluster, scan through the match list
+ * to find matching topic. */
+ for (ti = 0; ti < metadata->topic_cnt; ti++) {
+ const char *topic = metadata->topics[ti].topic;
+
+ /* Ignore topics in blacklist */
+ if (rk->rk_conf.topic_blacklist &&
+ rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic))
+ continue;
+
+ /* Scan for matches */
+ for (i = 0; i < match->cnt; i++) {
+ if (!rd_kafka_topic_match(rk, match->elems[i].topic,
+ topic))
+ continue;
+
+ /* Remove from unmatched */
+ rd_kafka_topic_partition_list_del(
+ unmatched, match->elems[i].topic,
+ RD_KAFKA_PARTITION_UA);
+
+ if (metadata->topics[ti].err) {
+ rd_kafka_topic_partition_list_add(
+ errored, topic, RD_KAFKA_PARTITION_UA)
+ ->err = metadata->topics[ti].err;
+ continue; /* Skip errored topics */
+ }
+
+ rd_list_add(
+ tinfos,
+ rd_kafka_topic_info_new(
+ topic, metadata->topics[ti].partition_cnt));
+
+ cnt++;
+ }
+ }
+ rd_kafka_rdunlock(rk);
+
+ /* Any topics/patterns still in unmatched did not match any
+ * existing topics, add them to `errored`. */
+ for (i = 0; i < unmatched->cnt; i++) {
+ rd_kafka_topic_partition_t *elem = &unmatched->elems[i];
+
+ rd_kafka_topic_partition_list_add(errored, elem->topic,
+ RD_KAFKA_PARTITION_UA)
+ ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ }
+
+ rd_kafka_topic_partition_list_destroy(unmatched);
+
+ return cnt;
+}
+
+
+/**
+ * @brief Add all topics in \p match that matches cached metadata.
+ * @remark MUST NOT be used with wildcard topics,
+ * see rd_kafka_metadata_topic_match() for that.
+ *
+ * @param errored Non-existent and unauthorized topics are added to this
+ * list with the appropriate error code.
+ *
+ * @returns the number of topics matched and added to \p tinfos
+ * @locks none
+ */
+size_t
+rd_kafka_metadata_topic_filter(rd_kafka_t *rk,
+ rd_list_t *tinfos,
+ const rd_kafka_topic_partition_list_t *match,
+ rd_kafka_topic_partition_list_t *errored) {
+ int i;
+ size_t cnt = 0;
+
+ rd_kafka_rdlock(rk);
+ /* For each topic in match, look up the topic in the cache. */
+ for (i = 0; i < match->cnt; i++) {
+ const char *topic = match->elems[i].topic;
+ const rd_kafka_metadata_topic_t *mtopic;
+
+ /* Ignore topics in blacklist */
+ if (rk->rk_conf.topic_blacklist &&
+ rd_kafka_pattern_match(rk->rk_conf.topic_blacklist, topic))
+ continue;
+
+ mtopic =
+ rd_kafka_metadata_cache_topic_get(rk, topic, 1 /*valid*/);
+
+ if (!mtopic)
+ rd_kafka_topic_partition_list_add(errored, topic,
+ RD_KAFKA_PARTITION_UA)
+ ->err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ else if (mtopic->err)
+ rd_kafka_topic_partition_list_add(errored, topic,
+ RD_KAFKA_PARTITION_UA)
+ ->err = mtopic->err;
+ else {
+ rd_list_add(tinfos, rd_kafka_topic_info_new(
+ topic, mtopic->partition_cnt));
+
+ cnt++;
+ }
+ }
+ rd_kafka_rdunlock(rk);
+
+ return cnt;
+}
+
+
+void rd_kafka_metadata_log(rd_kafka_t *rk,
+ const char *fac,
+ const struct rd_kafka_metadata *md) {
+ int i;
+
+ rd_kafka_dbg(rk, METADATA, fac,
+ "Metadata with %d broker(s) and %d topic(s):",
+ md->broker_cnt, md->topic_cnt);
+
+ for (i = 0; i < md->broker_cnt; i++) {
+ rd_kafka_dbg(rk, METADATA, fac,
+ " Broker #%i/%i: %s:%i NodeId %" PRId32, i,
+ md->broker_cnt, md->brokers[i].host,
+ md->brokers[i].port, md->brokers[i].id);
+ }
+
+ for (i = 0; i < md->topic_cnt; i++) {
+ rd_kafka_dbg(
+ rk, METADATA, fac,
+ " Topic #%i/%i: %s with %i partitions%s%s", i,
+ md->topic_cnt, md->topics[i].topic,
+ md->topics[i].partition_cnt, md->topics[i].err ? ": " : "",
+ md->topics[i].err ? rd_kafka_err2str(md->topics[i].err)
+ : "");
+ }
+}
+
+
+
+/**
+ * @brief Refresh metadata for \p topics
+ *
+ * @param rk: used to look up usable broker if \p rkb is NULL.
+ * @param rkb: use this broker, unless NULL then any usable broker from \p rk
+ * @param force: force refresh even if topics are up-to-date in cache
+ * @param allow_auto_create: Enable/disable auto creation of topics
+ * (through MetadataRequest). Requires a modern
+ * broker version.
+ * Takes precedence over allow.auto.create.topics.
+ * @param cgrp_update: Allow consumer group state update on response.
+ *
+ * @returns an error code
+ *
+ * @locality any
+ * @locks none
+ */
+rd_kafka_resp_err_t
+rd_kafka_metadata_refresh_topics(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *topics,
+ rd_bool_t force,
+ rd_bool_t allow_auto_create,
+ rd_bool_t cgrp_update,
+ const char *reason) {
+ rd_list_t q_topics;
+ int destroy_rkb = 0;
+
+ if (!rk) {
+ rd_assert(rkb);
+ rk = rkb->rkb_rk;
+ }
+
+ rd_kafka_wrlock(rk);
+
+ if (!rkb) {
+ if (!(rkb = rd_kafka_broker_any_usable(
+ rk, RD_POLL_NOWAIT, RD_DONT_LOCK, 0, reason))) {
+ /* Hint cache that something is interested in
+ * these topics so that they will be included in
+ * a future all known_topics query. */
+ rd_kafka_metadata_cache_hint(rk, topics, NULL,
+ RD_KAFKA_RESP_ERR__NOENT,
+ 0 /*dont replace*/);
+
+ rd_kafka_wrunlock(rk);
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "Skipping metadata refresh of %d topic(s):"
+ " %s: no usable brokers",
+ rd_list_cnt(topics), reason);
+
+ return RD_KAFKA_RESP_ERR__TRANSPORT;
+ }
+ destroy_rkb = 1;
+ }
+
+ rd_list_init(&q_topics, rd_list_cnt(topics), rd_free);
+
+ if (!force) {
+
+ /* Hint cache of upcoming MetadataRequest and filter
+ * out any topics that are already being requested.
+ * q_topics will contain remaining topics to query. */
+ rd_kafka_metadata_cache_hint(rk, topics, &q_topics,
+ RD_KAFKA_RESP_ERR__WAIT_CACHE,
+ rd_false /*dont replace*/);
+ rd_kafka_wrunlock(rk);
+
+ if (rd_list_cnt(&q_topics) == 0) {
+ /* No topics need new query. */
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "Skipping metadata refresh of "
+ "%d topic(s): %s: "
+ "already being requested",
+ rd_list_cnt(topics), reason);
+ rd_list_destroy(&q_topics);
+ if (destroy_rkb)
+ rd_kafka_broker_destroy(rkb);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ } else {
+ rd_kafka_wrunlock(rk);
+ rd_list_copy_to(&q_topics, topics, rd_list_string_copy, NULL);
+ }
+
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "Requesting metadata for %d/%d topics: %s",
+ rd_list_cnt(&q_topics), rd_list_cnt(topics), reason);
+
+ rd_kafka_MetadataRequest(rkb, &q_topics, reason, allow_auto_create,
+ cgrp_update, NULL);
+
+ rd_list_destroy(&q_topics);
+
+ if (destroy_rkb)
+ rd_kafka_broker_destroy(rkb);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Refresh metadata for known topics
+ *
+ * @param rk: used to look up usable broker if \p rkb is NULL.
+ * @param rkb: use this broker, unless NULL then any usable broker from \p rk
+ * @param force: refresh even if cache is up-to-date
+ *
+ * @returns an error code (__UNKNOWN_TOPIC if there are no local topics)
+ *
+ * @locality any
+ * @locks none
+ */
+rd_kafka_resp_err_t
+rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_bool_t force,
+ const char *reason) {
+ rd_list_t topics;
+ rd_kafka_resp_err_t err;
+ int cache_cnt = 0;
+ rd_bool_t allow_auto_create_topics;
+
+ if (!rk)
+ rk = rkb->rkb_rk;
+
+ rd_list_init(&topics, 8, rd_free);
+ rd_kafka_local_topics_to_list(rk, &topics, &cache_cnt);
+
+ /* Allow topic auto creation if there are locally known topics (rkt)
+ * and not just cached (to be queried) topics. */
+ allow_auto_create_topics = rk->rk_conf.allow_auto_create_topics &&
+ rd_list_cnt(&topics) > cache_cnt;
+
+ if (rd_list_cnt(&topics) == 0)
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ else
+ err = rd_kafka_metadata_refresh_topics(
+ rk, rkb, &topics, force, allow_auto_create_topics,
+ rd_false /*!cgrp_update*/, reason);
+
+ rd_list_destroy(&topics);
+
+ return err;
+}
+
+
+/**
+ * @brief Refresh metadata for known and subscribed topics.
+ *
+ * @param rk used to look up usable broker if \p rkb is NULL..
+ * @param rkb use this broker, unless NULL then any usable broker from \p rk.
+ * @param reason reason of refresh, used in debug logs.
+ *
+ * @returns an error code (ERR__UNKNOWN_TOPIC if no topics are desired).
+ *
+ * @locality rdkafka main thread
+ * @locks_required none
+ * @locks_acquired rk(read)
+ */
+rd_kafka_resp_err_t
+rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *reason) {
+ rd_list_t topics;
+ rd_kafka_resp_err_t err;
+ rd_kafka_cgrp_t *rkcg;
+ rd_bool_t allow_auto_create_topics =
+ rk->rk_conf.allow_auto_create_topics;
+ int cache_cnt = 0;
+
+ if (!rk) {
+ rd_assert(rkb);
+ rk = rkb->rkb_rk;
+ }
+
+ rkcg = rk->rk_cgrp;
+ rd_assert(rkcg != NULL);
+
+ if (rkcg->rkcg_flags & RD_KAFKA_CGRP_F_WILDCARD_SUBSCRIPTION) {
+ /* If there is a wildcard subscription we need to request
+ * all topics in the cluster so that we can perform
+ * regexp matching. */
+ return rd_kafka_metadata_refresh_all(rk, rkb, reason);
+ }
+
+ rd_list_init(&topics, 8, rd_free);
+
+ /* Add locally known topics, i.e., those that are currently
+ * being consumed or otherwise referenced through topic_t objects. */
+ rd_kafka_local_topics_to_list(rk, &topics, &cache_cnt);
+ if (rd_list_cnt(&topics) == cache_cnt)
+ allow_auto_create_topics = rd_false;
+
+ /* Add subscribed (non-wildcard) topics, if any. */
+ if (rkcg->rkcg_subscription)
+ rd_kafka_topic_partition_list_get_topic_names(
+ rkcg->rkcg_subscription, &topics,
+ rd_false /*no wildcards*/);
+
+ if (rd_list_cnt(&topics) == 0)
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ else
+ err = rd_kafka_metadata_refresh_topics(
+ rk, rkb, &topics, rd_true /*force*/,
+ allow_auto_create_topics, rd_true /*cgrp_update*/, reason);
+
+ rd_list_destroy(&topics);
+
+ return err;
+}
+
+
+/**
+ * @brief Refresh broker list by metadata.
+ *
+ * Attempts to use sparse metadata request if possible, else falls back
+ * on a full metadata request. (NOTE: sparse not implemented, KIP-4)
+ *
+ * @param rk: used to look up usable broker if \p rkb is NULL.
+ * @param rkb: use this broker, unless NULL then any usable broker from \p rk
+ *
+ * @returns an error code
+ *
+ * @locality any
+ * @locks none
+ */
+rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *reason) {
+ return rd_kafka_metadata_request(rk, rkb, NULL /*brokers only*/,
+ rd_false /*!allow auto create topics*/,
+ rd_false /*no cgrp update */, reason,
+ NULL);
+}
+
+
+
+/**
+ * @brief Refresh metadata for all topics in cluster.
+ * This is a full metadata request which might be taxing on the
+ * broker if the cluster has many topics.
+ *
+ * @locality any
+ * @locks none
+ */
+rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *reason) {
+ int destroy_rkb = 0;
+ rd_list_t topics;
+
+ if (!rk) {
+ rd_assert(rkb);
+ rk = rkb->rkb_rk;
+ }
+
+ if (!rkb) {
+ if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT,
+ RD_DO_LOCK, 0, reason)))
+ return RD_KAFKA_RESP_ERR__TRANSPORT;
+ destroy_rkb = 1;
+ }
+
+ rd_list_init(&topics, 0, NULL); /* empty list = all topics */
+ rd_kafka_MetadataRequest(rkb, &topics, reason,
+ rd_false /*no auto create*/,
+ rd_true /*cgrp update*/, NULL);
+ rd_list_destroy(&topics);
+
+ if (destroy_rkb)
+ rd_kafka_broker_destroy(rkb);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+
+ * @brief Lower-level Metadata request that takes a callback (with replyq set)
+ * which will be triggered after parsing is complete.
+ *
+ * @param cgrp_update Allow consumer group updates from the response.
+ *
+ * @locks none
+ * @locality any
+ */
+rd_kafka_resp_err_t
+rd_kafka_metadata_request(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *topics,
+ rd_bool_t allow_auto_create_topics,
+ rd_bool_t cgrp_update,
+ const char *reason,
+ rd_kafka_op_t *rko) {
+ int destroy_rkb = 0;
+
+ if (!rkb) {
+ if (!(rkb = rd_kafka_broker_any_usable(rk, RD_POLL_NOWAIT,
+ RD_DO_LOCK, 0, reason)))
+ return RD_KAFKA_RESP_ERR__TRANSPORT;
+ destroy_rkb = 1;
+ }
+
+ rd_kafka_MetadataRequest(rkb, topics, reason, allow_auto_create_topics,
+ cgrp_update, rko);
+
+ if (destroy_rkb)
+ rd_kafka_broker_destroy(rkb);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Query timer callback to trigger refresh for topics
+ * that have partitions missing their leaders.
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static void rd_kafka_metadata_leader_query_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_t *rk = rkts->rkts_rk;
+ rd_kafka_timer_t *rtmr = &rk->rk_metadata_cache.rkmc_query_tmr;
+ rd_kafka_topic_t *rkt;
+ rd_list_t topics;
+
+ rd_kafka_wrlock(rk);
+ rd_list_init(&topics, rk->rk_topic_cnt, rd_free);
+
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ int i, require_metadata;
+ rd_kafka_topic_rdlock(rkt);
+
+ if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) {
+ /* Skip topics that are known to not exist. */
+ rd_kafka_topic_rdunlock(rkt);
+ continue;
+ }
+
+ require_metadata =
+ rkt->rkt_flags & RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
+
+ /* Check if any partitions are missing brokers. */
+ for (i = 0; !require_metadata && i < rkt->rkt_partition_cnt;
+ i++) {
+ rd_kafka_toppar_t *rktp = rkt->rkt_p[i];
+ rd_kafka_toppar_lock(rktp);
+ require_metadata =
+ !rktp->rktp_broker && !rktp->rktp_next_broker;
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ if (require_metadata || rkt->rkt_partition_cnt == 0)
+ rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str));
+
+ rd_kafka_topic_rdunlock(rkt);
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ if (rd_list_cnt(&topics) == 0) {
+ /* No leader-less topics+partitions, stop the timer. */
+ rd_kafka_timer_stop(rkts, rtmr, 1 /*lock*/);
+ } else {
+ rd_kafka_metadata_refresh_topics(
+ rk, NULL, &topics, rd_true /*force*/,
+ rk->rk_conf.allow_auto_create_topics,
+ rd_false /*!cgrp_update*/, "partition leader query");
+ /* Back off next query exponentially until we reach
+ * the standard query interval - then stop the timer
+ * since the intervalled querier will do the job for us. */
+ if (rk->rk_conf.metadata_refresh_interval_ms > 0 &&
+ rtmr->rtmr_interval * 2 / 1000 >=
+ rk->rk_conf.metadata_refresh_interval_ms)
+ rd_kafka_timer_stop(rkts, rtmr, 1 /*lock*/);
+ else
+ rd_kafka_timer_exp_backoff(rkts, rtmr);
+ }
+
+ rd_list_destroy(&topics);
+}
+
+
+
+/**
+ * @brief Trigger fast leader query to quickly pick up on leader changes.
+ * The fast leader query is a quick query followed by later queries at
+ * exponentially increased intervals until no topics are missing
+ * leaders.
+ *
+ * @locks none
+ * @locality any
+ */
+void rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk) {
+ rd_ts_t next;
+
+ /* Restart the timer if it will speed things up. */
+ next = rd_kafka_timer_next(
+ &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/);
+ if (next == -1 /* not started */ ||
+ next >
+ (rd_ts_t)rk->rk_conf.metadata_refresh_fast_interval_ms * 1000) {
+ rd_kafka_dbg(rk, METADATA | RD_KAFKA_DBG_TOPIC, "FASTQUERY",
+ "Starting fast leader query");
+ rd_kafka_timer_start(
+ &rk->rk_timers, &rk->rk_metadata_cache.rkmc_query_tmr,
+ rk->rk_conf.metadata_refresh_fast_interval_ms * 1000,
+ rd_kafka_metadata_leader_query_tmr_cb, NULL);
+ }
+}
+
+
+
+/**
+ * @brief Create mock Metadata (for testing) based on the provided topics.
+ *
+ * @param topics elements are checked for .topic and .partition_cnt
+ * @param topic_cnt is the number of topic elements in \p topics.
+ *
+ * @returns a newly allocated metadata object that must be freed with
+ * rd_kafka_metadata_destroy().
+ *
+ * @sa rd_kafka_metadata_copy()
+ */
+rd_kafka_metadata_t *
+rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics,
+ size_t topic_cnt) {
+ rd_kafka_metadata_t *md;
+ rd_tmpabuf_t tbuf;
+ size_t topic_names_size = 0;
+ int total_partition_cnt = 0;
+ size_t i;
+
+ /* Calculate total partition count and topic names size before
+ * allocating memory. */
+ for (i = 0; i < topic_cnt; i++) {
+ topic_names_size += 1 + strlen(topics[i].topic);
+ total_partition_cnt += topics[i].partition_cnt;
+ }
+
+
+ /* Allocate contiguous buffer which will back all the memory
+ * needed by the final metadata_t object */
+ rd_tmpabuf_new(
+ &tbuf,
+ sizeof(*md) + (sizeof(*md->topics) * topic_cnt) + topic_names_size +
+ (64 /*topic name size..*/ * topic_cnt) +
+ (sizeof(*md->topics[0].partitions) * total_partition_cnt),
+ 1 /*assert on fail*/);
+
+ md = rd_tmpabuf_alloc(&tbuf, sizeof(*md));
+ memset(md, 0, sizeof(*md));
+
+ md->topic_cnt = (int)topic_cnt;
+ md->topics =
+ rd_tmpabuf_alloc(&tbuf, md->topic_cnt * sizeof(*md->topics));
+
+ for (i = 0; i < (size_t)md->topic_cnt; i++) {
+ int j;
+
+ md->topics[i].topic =
+ rd_tmpabuf_write_str(&tbuf, topics[i].topic);
+ md->topics[i].partition_cnt = topics[i].partition_cnt;
+ md->topics[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ md->topics[i].partitions = rd_tmpabuf_alloc(
+ &tbuf, md->topics[i].partition_cnt *
+ sizeof(*md->topics[i].partitions));
+
+ for (j = 0; j < md->topics[i].partition_cnt; j++) {
+ memset(&md->topics[i].partitions[j], 0,
+ sizeof(md->topics[i].partitions[j]));
+ md->topics[i].partitions[j].id = j;
+ }
+ }
+
+ /* Check for tmpabuf errors */
+ if (rd_tmpabuf_failed(&tbuf))
+ rd_assert(!*"metadata mock failed");
+
+ /* Not destroying the tmpabuf since we return
+ * its allocated memory. */
+ return md;
+}
+
+
+/**
+ * @brief Create mock Metadata (for testing) based on the
+ * var-arg tuples of (const char *topic, int partition_cnt).
+ *
+ * @param topic_cnt is the number of topic,partition_cnt tuples.
+ *
+ * @returns a newly allocated metadata object that must be freed with
+ * rd_kafka_metadata_destroy().
+ *
+ * @sa rd_kafka_metadata_new_topic_mock()
+ */
+rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...) {
+ rd_kafka_metadata_topic_t *topics;
+ va_list ap;
+ size_t i;
+
+ topics = rd_alloca(sizeof(*topics) * topic_cnt);
+
+ va_start(ap, topic_cnt);
+ for (i = 0; i < topic_cnt; i++) {
+ topics[i].topic = va_arg(ap, char *);
+ topics[i].partition_cnt = va_arg(ap, int);
+ }
+ va_end(ap);
+
+ return rd_kafka_metadata_new_topic_mock(topics, topic_cnt);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h
new file mode 100644
index 000000000..53a959b8e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata.h
@@ -0,0 +1,212 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_METADATA_H_
+#define _RDKAFKA_METADATA_H_
+
+#include "rdavl.h"
+
+rd_bool_t rd_kafka_has_reliable_leader_epochs(rd_kafka_broker_t *rkb);
+
+rd_kafka_resp_err_t rd_kafka_parse_Metadata(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *request,
+ rd_kafka_buf_t *rkbuf,
+ struct rd_kafka_metadata **mdp);
+
+struct rd_kafka_metadata *
+rd_kafka_metadata_copy(const struct rd_kafka_metadata *md, size_t size);
+
+size_t
+rd_kafka_metadata_topic_match(rd_kafka_t *rk,
+ rd_list_t *tinfos,
+ const rd_kafka_topic_partition_list_t *match,
+ rd_kafka_topic_partition_list_t *errored);
+size_t
+rd_kafka_metadata_topic_filter(rd_kafka_t *rk,
+ rd_list_t *tinfos,
+ const rd_kafka_topic_partition_list_t *match,
+ rd_kafka_topic_partition_list_t *errored);
+
+void rd_kafka_metadata_log(rd_kafka_t *rk,
+ const char *fac,
+ const struct rd_kafka_metadata *md);
+
+
+
+rd_kafka_resp_err_t
+rd_kafka_metadata_refresh_topics(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *topics,
+ rd_bool_t force,
+ rd_bool_t allow_auto_create,
+ rd_bool_t cgrp_update,
+ const char *reason);
+rd_kafka_resp_err_t
+rd_kafka_metadata_refresh_known_topics(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_bool_t force,
+ const char *reason);
+rd_kafka_resp_err_t
+rd_kafka_metadata_refresh_consumer_topics(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *reason);
+rd_kafka_resp_err_t rd_kafka_metadata_refresh_brokers(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *reason);
+rd_kafka_resp_err_t rd_kafka_metadata_refresh_all(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *reason);
+
+rd_kafka_resp_err_t
+rd_kafka_metadata_request(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *topics,
+ rd_bool_t allow_auto_create_topics,
+ rd_bool_t cgrp_update,
+ const char *reason,
+ rd_kafka_op_t *rko);
+
+
+
+int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b);
+
+rd_kafka_metadata_t *
+rd_kafka_metadata_new_topic_mock(const rd_kafka_metadata_topic_t *topics,
+ size_t topic_cnt);
+rd_kafka_metadata_t *rd_kafka_metadata_new_topic_mockv(size_t topic_cnt, ...);
+
+
+/**
+ * @{
+ *
+ * @brief Metadata cache
+ */
+
+struct rd_kafka_metadata_cache_entry {
+ rd_avl_node_t rkmce_avlnode; /* rkmc_avl */
+ TAILQ_ENTRY(rd_kafka_metadata_cache_entry) rkmce_link; /* rkmc_expiry */
+ rd_ts_t rkmce_ts_expires; /* Expire time */
+ rd_ts_t rkmce_ts_insert; /* Insert time */
+ /** Last known leader epochs array (same size as the partition count),
+ * or NULL if not known. */
+ rd_kafka_metadata_topic_t rkmce_mtopic; /* Cached topic metadata */
+ /* rkmce_topics.partitions memory points here. */
+};
+
+
+#define RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(ERR) \
+ ((ERR) == RD_KAFKA_RESP_ERR__WAIT_CACHE || \
+ (ERR) == RD_KAFKA_RESP_ERR__NOENT)
+
+#define RD_KAFKA_METADATA_CACHE_VALID(rkmce) \
+ !RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY((rkmce)->rkmce_mtopic.err)
+
+
+
+struct rd_kafka_metadata_cache {
+ rd_avl_t rkmc_avl;
+ TAILQ_HEAD(, rd_kafka_metadata_cache_entry) rkmc_expiry;
+ rd_kafka_timer_t rkmc_expiry_tmr;
+ int rkmc_cnt;
+
+ /* Protected by rk_lock */
+ rd_list_t rkmc_observers; /**< (rd_kafka_enq_once_t*) */
+
+ /* Protected by full_lock: */
+ mtx_t rkmc_full_lock;
+ int rkmc_full_topics_sent; /* Full MetadataRequest for
+ * all topics has been sent,
+ * awaiting response. */
+ int rkmc_full_brokers_sent; /* Full MetadataRequest for
+ * all brokers (but not topics)
+ * has been sent,
+ * awaiting response. */
+
+ rd_kafka_timer_t rkmc_query_tmr; /* Query timer for topic's without
+ * leaders. */
+ cnd_t rkmc_cnd; /* cache_wait_change() cond. */
+ mtx_t rkmc_cnd_lock; /* lock for rkmc_cnd */
+};
+
+
+
+void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk);
+int rd_kafka_metadata_cache_evict_by_age(rd_kafka_t *rk, rd_ts_t ts);
+void rd_kafka_metadata_cache_topic_update(rd_kafka_t *rk,
+ const rd_kafka_metadata_topic_t *mdt,
+ rd_bool_t propagate);
+void rd_kafka_metadata_cache_update(rd_kafka_t *rk,
+ const rd_kafka_metadata_t *md,
+ int abs_update);
+void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk);
+struct rd_kafka_metadata_cache_entry *
+rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid);
+void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk,
+ const rd_list_t *topics);
+int rd_kafka_metadata_cache_hint(rd_kafka_t *rk,
+ const rd_list_t *topics,
+ rd_list_t *dst,
+ rd_kafka_resp_err_t err,
+ rd_bool_t replace);
+
+int rd_kafka_metadata_cache_hint_rktparlist(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *dst,
+ int replace);
+
+const rd_kafka_metadata_topic_t *
+rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk, const char *topic, int valid);
+int rd_kafka_metadata_cache_topic_partition_get(
+ rd_kafka_t *rk,
+ const rd_kafka_metadata_topic_t **mtopicp,
+ const rd_kafka_metadata_partition_t **mpartp,
+ const char *topic,
+ int32_t partition,
+ int valid);
+
+int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk,
+ const rd_list_t *topics,
+ int *metadata_agep);
+
+void rd_kafka_metadata_fast_leader_query(rd_kafka_t *rk);
+
+void rd_kafka_metadata_cache_init(rd_kafka_t *rk);
+void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk);
+void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers);
+int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms);
+void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk);
+
+int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics);
+
+void rd_kafka_metadata_cache_wait_state_change_async(
+ rd_kafka_t *rk,
+ rd_kafka_enq_once_t *eonce);
+
+/**@}*/
+#endif /* _RDKAFKA_METADATA_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c
new file mode 100644
index 000000000..514d391a8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_metadata_cache.c
@@ -0,0 +1,836 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_request.h"
+#include "rdkafka_metadata.h"
+
+#include <string.h>
+/**
+ * @{
+ *
+ * @brief Metadata cache
+ *
+ * The metadata cache consists of cached topic metadata as
+ * retrieved from the cluster using MetadataRequest.
+ *
+ * The topic cache entries are made up \c struct rd_kafka_metadata_cache_entry
+ * each containing the topic name, a copy of the topic's metadata
+ * and a cache expiry time.
+ *
+ * On update any previous entry for the topic are removed and replaced
+ * with a new entry.
+ *
+ * The cache is also populated when the topic metadata is being requested
+ * for specific topics, this will not interfere with existing cache entries
+ * for topics, but for any topics not currently in the cache a new
+ * entry will be added with a flag (RD_KAFKA_METADATA_CACHE_VALID(rkmce))
+ * indicating that the entry is waiting to be populated by the MetadataResponse.
+ * Two special error codes are used for this purpose:
+ * RD_KAFKA_RESP_ERR__NOENT - to indicate that a topic needs to be queried,
+ * RD_KAFKA_RESP_ERR__WAIT_CACHE - to indicate that a topic is being queried
+ * and there is no need to re-query it prior
+ * to the current query finishing.
+ *
+ * The cache is locked in its entirety with rd_kafka_wr/rdlock() by the caller
+ * and the returned cache entry must only be accessed during the duration
+ * of the lock.
+ *
+ */
+
+
+
+/**
+ * @brief Remove and free cache entry.
+ *
+ * @remark The expiry timer is not updated, for simplicity.
+ * @locks rd_kafka_wrlock()
+ */
+static RD_INLINE void
+rd_kafka_metadata_cache_delete(rd_kafka_t *rk,
+ struct rd_kafka_metadata_cache_entry *rkmce,
+ int unlink_avl) {
+ if (unlink_avl)
+ RD_AVL_REMOVE_ELM(&rk->rk_metadata_cache.rkmc_avl, rkmce);
+ TAILQ_REMOVE(&rk->rk_metadata_cache.rkmc_expiry, rkmce, rkmce_link);
+ rd_kafka_assert(NULL, rk->rk_metadata_cache.rkmc_cnt > 0);
+ rk->rk_metadata_cache.rkmc_cnt--;
+
+ rd_free(rkmce);
+}
+
+/**
+ * @brief Delete cache entry by topic name
+ * @locks rd_kafka_wrlock()
+ * @returns 1 if entry was found and removed, else 0.
+ */
+static int rd_kafka_metadata_cache_delete_by_name(rd_kafka_t *rk,
+ const char *topic) {
+ struct rd_kafka_metadata_cache_entry *rkmce;
+
+ rkmce = rd_kafka_metadata_cache_find(rk, topic, 1);
+ if (rkmce)
+ rd_kafka_metadata_cache_delete(rk, rkmce, 1);
+ return rkmce ? 1 : 0;
+}
+
+static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk);
+
+/**
+ * @brief Cache eviction timer callback.
+ * @locality rdkafka main thread
+ * @locks NOT rd_kafka_*lock()
+ */
+static void rd_kafka_metadata_cache_evict_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_t *rk = arg;
+
+ rd_kafka_wrlock(rk);
+ rd_kafka_metadata_cache_evict(rk);
+ rd_kafka_wrunlock(rk);
+}
+
+
+/**
+ * @brief Evict timed out entries from cache and rearm timer for
+ * next expiry.
+ *
+ * @returns the number of entries evicted.
+ *
+ * @locks_required rd_kafka_wrlock()
+ */
+static int rd_kafka_metadata_cache_evict(rd_kafka_t *rk) {
+ int cnt = 0;
+ rd_ts_t now = rd_clock();
+ struct rd_kafka_metadata_cache_entry *rkmce;
+
+ while ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)) &&
+ rkmce->rkmce_ts_expires <= now) {
+ rd_kafka_metadata_cache_delete(rk, rkmce, 1);
+ cnt++;
+ }
+
+ if (rkmce)
+ rd_kafka_timer_start(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_expiry_tmr,
+ rkmce->rkmce_ts_expires - now,
+ rd_kafka_metadata_cache_evict_tmr_cb, rk);
+ else
+ rd_kafka_timer_stop(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
+
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "Expired %d entries from metadata cache "
+ "(%d entries remain)",
+ cnt, rk->rk_metadata_cache.rkmc_cnt);
+
+ if (cnt)
+ rd_kafka_metadata_cache_propagate_changes(rk);
+
+ return cnt;
+}
+
+
+/**
+ * @brief Evict timed out entries from cache based on their insert/update time
+ * rather than expiry time. Any entries older than \p ts will be evicted.
+ *
+ * @returns the number of entries evicted.
+ *
+ * @locks_required rd_kafka_wrlock()
+ */
+int rd_kafka_metadata_cache_evict_by_age(rd_kafka_t *rk, rd_ts_t ts) {
+ int cnt = 0;
+ struct rd_kafka_metadata_cache_entry *rkmce, *tmp;
+
+ TAILQ_FOREACH_SAFE(rkmce, &rk->rk_metadata_cache.rkmc_expiry,
+ rkmce_link, tmp) {
+ if (rkmce->rkmce_ts_insert <= ts) {
+ rd_kafka_metadata_cache_delete(rk, rkmce, 1);
+ cnt++;
+ }
+ }
+
+ /* Update expiry timer */
+ rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry);
+ if (rkmce)
+ rd_kafka_timer_start(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_expiry_tmr,
+ rkmce->rkmce_ts_expires - rd_clock(),
+ rd_kafka_metadata_cache_evict_tmr_cb, rk);
+ else
+ rd_kafka_timer_stop(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
+
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "Expired %d entries older than %dms from metadata cache "
+ "(%d entries remain)",
+ cnt, (int)((rd_clock() - ts) / 1000),
+ rk->rk_metadata_cache.rkmc_cnt);
+
+ if (cnt)
+ rd_kafka_metadata_cache_propagate_changes(rk);
+
+ return cnt;
+}
+
+
+/**
+ * @brief Find cache entry by topic name
+ *
+ * @param valid: entry must be valid (not hint)
+ *
+ * @locks rd_kafka_*lock()
+ */
+struct rd_kafka_metadata_cache_entry *
+rd_kafka_metadata_cache_find(rd_kafka_t *rk, const char *topic, int valid) {
+ struct rd_kafka_metadata_cache_entry skel, *rkmce;
+ skel.rkmce_mtopic.topic = (char *)topic;
+ rkmce = RD_AVL_FIND(&rk->rk_metadata_cache.rkmc_avl, &skel);
+ if (rkmce && (!valid || RD_KAFKA_METADATA_CACHE_VALID(rkmce)))
+ return rkmce;
+ return NULL;
+}
+
+
+/**
+ * @brief Partition (id) comparator
+ */
+int rd_kafka_metadata_partition_id_cmp(const void *_a, const void *_b) {
+ const rd_kafka_metadata_partition_t *a = _a, *b = _b;
+ return RD_CMP(a->id, b->id);
+}
+
+
+/**
+ * @brief Add (and replace) cache entry for topic.
+ *
+ * This makes a copy of \p topic
+ *
+ * @locks_required rd_kafka_wrlock()
+ */
+static struct rd_kafka_metadata_cache_entry *
+rd_kafka_metadata_cache_insert(rd_kafka_t *rk,
+ const rd_kafka_metadata_topic_t *mtopic,
+ rd_ts_t now,
+ rd_ts_t ts_expires) {
+ struct rd_kafka_metadata_cache_entry *rkmce, *old;
+ size_t topic_len;
+ rd_tmpabuf_t tbuf;
+ int i;
+
+ /* Metadata is stored in one contigious buffer where structs and
+ * and pointed-to fields are layed out in a memory aligned fashion.
+ * rd_tmpabuf_t provides the infrastructure to do this.
+ * Because of this we copy all the structs verbatim but
+ * any pointer fields needs to be copied explicitly to update
+ * the pointer address. */
+ topic_len = strlen(mtopic->topic) + 1;
+ rd_tmpabuf_new(&tbuf,
+ RD_ROUNDUP(sizeof(*rkmce), 8) +
+ RD_ROUNDUP(topic_len, 8) +
+ (mtopic->partition_cnt *
+ RD_ROUNDUP(sizeof(*mtopic->partitions), 8)),
+ 1 /*assert on fail*/);
+
+ rkmce = rd_tmpabuf_alloc(&tbuf, sizeof(*rkmce));
+
+ rkmce->rkmce_mtopic = *mtopic;
+
+ /* Copy topic name and update pointer */
+ rkmce->rkmce_mtopic.topic = rd_tmpabuf_write_str(&tbuf, mtopic->topic);
+
+ /* Copy partition array and update pointer */
+ rkmce->rkmce_mtopic.partitions = rd_tmpabuf_write(
+ &tbuf, mtopic->partitions,
+ mtopic->partition_cnt * sizeof(*mtopic->partitions));
+
+ /* Clear uncached fields. */
+ for (i = 0; i < mtopic->partition_cnt; i++) {
+ rkmce->rkmce_mtopic.partitions[i].replicas = NULL;
+ rkmce->rkmce_mtopic.partitions[i].replica_cnt = 0;
+ rkmce->rkmce_mtopic.partitions[i].isrs = NULL;
+ rkmce->rkmce_mtopic.partitions[i].isr_cnt = 0;
+ }
+
+ /* Sort partitions for future bsearch() lookups. */
+ qsort(rkmce->rkmce_mtopic.partitions, rkmce->rkmce_mtopic.partition_cnt,
+ sizeof(*rkmce->rkmce_mtopic.partitions),
+ rd_kafka_metadata_partition_id_cmp);
+
+ TAILQ_INSERT_TAIL(&rk->rk_metadata_cache.rkmc_expiry, rkmce,
+ rkmce_link);
+ rk->rk_metadata_cache.rkmc_cnt++;
+ rkmce->rkmce_ts_expires = ts_expires;
+ rkmce->rkmce_ts_insert = now;
+
+ /* Insert (and replace existing) entry. */
+ old = RD_AVL_INSERT(&rk->rk_metadata_cache.rkmc_avl, rkmce,
+ rkmce_avlnode);
+ if (old)
+ rd_kafka_metadata_cache_delete(rk, old, 0);
+
+ /* Explicitly not freeing the tmpabuf since rkmce points to its
+ * memory. */
+ return rkmce;
+}
+
+
+/**
+ * @brief Purge the metadata cache
+ *
+ * @locks_required rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_purge(rd_kafka_t *rk, rd_bool_t purge_observers) {
+ struct rd_kafka_metadata_cache_entry *rkmce;
+ int was_empty = TAILQ_EMPTY(&rk->rk_metadata_cache.rkmc_expiry);
+
+ while ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
+ rd_kafka_metadata_cache_delete(rk, rkmce, 1);
+
+ rd_kafka_timer_stop(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_expiry_tmr, 1);
+
+ if (!was_empty)
+ rd_kafka_metadata_cache_propagate_changes(rk);
+
+ if (purge_observers)
+ rd_list_clear(&rk->rk_metadata_cache.rkmc_observers);
+}
+
+
+/**
+ * @brief Start or update the cache expiry timer.
+ * Typically done after a series of cache_topic_update()
+ *
+ * @locks rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_expiry_start(rd_kafka_t *rk) {
+ struct rd_kafka_metadata_cache_entry *rkmce;
+
+ if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
+ rd_kafka_timer_start(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_expiry_tmr,
+ rkmce->rkmce_ts_expires - rd_clock(),
+ rd_kafka_metadata_cache_evict_tmr_cb, rk);
+}
+
+/**
+ * @brief Update the metadata cache for a single topic
+ * with the provided metadata.
+ *
+ * If the topic has a temporary error the existing entry is removed
+ * and no new entry is added, which avoids the topic to be
+ * suppressed in upcoming metadata requests because being in the cache.
+ * In other words: we want to re-query errored topics.
+ * If the broker reports ERR_UNKNOWN_TOPIC_OR_PART we add a negative cache
+ * entry with an low expiry time, this is so that client code (cgrp) knows
+ * the topic has been queried but did not exist, otherwise it would wait
+ * forever for the unknown topic to surface.
+ *
+ * For permanent errors (authorization failures), we keep
+ * the entry cached for metadata.max.age.ms.
+ *
+ * @remark The cache expiry timer will not be updated/started,
+ * call rd_kafka_metadata_cache_expiry_start() instead.
+ *
+ * @locks rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_topic_update(rd_kafka_t *rk,
+ const rd_kafka_metadata_topic_t *mdt,
+ rd_bool_t propagate) {
+ rd_ts_t now = rd_clock();
+ rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000);
+ int changed = 1;
+
+ /* Cache unknown topics for a short while (100ms) to allow the cgrp
+ * logic to find negative cache hits. */
+ if (mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+ ts_expires = RD_MIN(ts_expires, now + (100 * 1000));
+
+ if (!mdt->err ||
+ mdt->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED ||
+ mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+ rd_kafka_metadata_cache_insert(rk, mdt, now, ts_expires);
+ else
+ changed =
+ rd_kafka_metadata_cache_delete_by_name(rk, mdt->topic);
+
+ if (changed && propagate)
+ rd_kafka_metadata_cache_propagate_changes(rk);
+}
+
+
+/**
+ * @brief Update the metadata cache with the provided metadata.
+ *
+ * @param abs_update int: absolute update: purge cache before updating.
+ *
+ * @locks rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_update(rd_kafka_t *rk,
+ const rd_kafka_metadata_t *md,
+ int abs_update) {
+ struct rd_kafka_metadata_cache_entry *rkmce;
+ rd_ts_t now = rd_clock();
+ rd_ts_t ts_expires = now + (rk->rk_conf.metadata_max_age_ms * 1000);
+ int i;
+
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "%s of metadata cache with %d topic(s)",
+ abs_update ? "Absolute update" : "Update", md->topic_cnt);
+
+ if (abs_update)
+ rd_kafka_metadata_cache_purge(rk, rd_false /*not observers*/);
+
+
+ for (i = 0; i < md->topic_cnt; i++)
+ rd_kafka_metadata_cache_insert(rk, &md->topics[i], now,
+ ts_expires);
+
+ /* Update expiry timer */
+ if ((rkmce = TAILQ_FIRST(&rk->rk_metadata_cache.rkmc_expiry)))
+ rd_kafka_timer_start(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_expiry_tmr,
+ rkmce->rkmce_ts_expires - now,
+ rd_kafka_metadata_cache_evict_tmr_cb, rk);
+
+ if (md->topic_cnt > 0 || abs_update)
+ rd_kafka_metadata_cache_propagate_changes(rk);
+}
+
+
+/**
+ * @brief Remove cache hints for topics in \p topics
+ * This is done when the Metadata response has been parsed and
+ * replaced hints with existing topic information, thus this will
+ * only remove unmatched topics from the cache.
+ *
+ * @locks rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_purge_hints(rd_kafka_t *rk,
+ const rd_list_t *topics) {
+ const char *topic;
+ int i;
+ int cnt = 0;
+
+ RD_LIST_FOREACH(topic, topics, i) {
+ struct rd_kafka_metadata_cache_entry *rkmce;
+
+ if (!(rkmce =
+ rd_kafka_metadata_cache_find(rk, topic, 0 /*any*/)) ||
+ RD_KAFKA_METADATA_CACHE_VALID(rkmce))
+ continue;
+
+ rd_kafka_metadata_cache_delete(rk, rkmce, 1 /*unlink avl*/);
+ cnt++;
+ }
+
+ if (cnt > 0) {
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "Purged %d/%d cached topic hint(s)", cnt,
+ rd_list_cnt(topics));
+ rd_kafka_metadata_cache_propagate_changes(rk);
+ }
+}
+
+
+/**
+ * @brief Inserts a non-valid entry for topics in \p topics indicating
+ * that a MetadataRequest is in progress.
+ * This avoids sending multiple MetadataRequests for the same topics
+ * if there are already outstanding requests, see
+ * \c rd_kafka_metadata_refresh_topics().
+ *
+ * @remark These non-valid cache entries' expire time is set to the
+ * MetadataRequest timeout.
+ *
+ * @param dst rd_list_t(char *topicname): if not NULL: populated with
+ * topics that were added as hints to cache, e.q., topics to query.
+ * @param dst rd_list_t(char *topicname)
+ * @param err is the error to set on hint cache entries,
+ * typically ERR__WAIT_CACHE.
+ * @param replace replace existing valid entries
+ *
+ * @returns the number of topic hints inserted.
+ *
+ * @locks_required rd_kafka_wrlock()
+ */
+int rd_kafka_metadata_cache_hint(rd_kafka_t *rk,
+ const rd_list_t *topics,
+ rd_list_t *dst,
+ rd_kafka_resp_err_t err,
+ rd_bool_t replace) {
+ const char *topic;
+ rd_ts_t now = rd_clock();
+ rd_ts_t ts_expires = now + (rk->rk_conf.socket_timeout_ms * 1000);
+ int i;
+ int cnt = 0;
+
+ RD_LIST_FOREACH(topic, topics, i) {
+ rd_kafka_metadata_topic_t mtopic = {.topic = (char *)topic,
+ .err = err};
+ /*const*/ struct rd_kafka_metadata_cache_entry *rkmce;
+
+ /* !replace: Dont overwrite valid entries */
+ if (!replace && (rkmce = rd_kafka_metadata_cache_find(
+ rk, topic, 0 /*any*/))) {
+ if (RD_KAFKA_METADATA_CACHE_VALID(rkmce) ||
+ (dst && rkmce->rkmce_mtopic.err !=
+ RD_KAFKA_RESP_ERR__NOENT))
+ continue;
+ rkmce->rkmce_mtopic.err = err;
+ /* FALLTHRU */
+ }
+
+ rd_kafka_metadata_cache_insert(rk, &mtopic, now, ts_expires);
+ cnt++;
+
+ if (dst)
+ rd_list_add(dst, rd_strdup(topic));
+ }
+
+ if (cnt > 0)
+ rd_kafka_dbg(rk, METADATA, "METADATA",
+ "Hinted cache of %d/%d topic(s) being queried",
+ cnt, rd_list_cnt(topics));
+
+ return cnt;
+}
+
+
+/**
+ * @brief Same as rd_kafka_metadata_cache_hint() but takes
+ * a topic+partition list as input instead.
+ *
+ * @locks_acquired rd_kafka_wrlock()
+ */
+int rd_kafka_metadata_cache_hint_rktparlist(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *dst,
+ int replace) {
+ rd_list_t topics;
+ int r;
+
+ rd_list_init(&topics, rktparlist->cnt, rd_free);
+ rd_kafka_topic_partition_list_get_topic_names(rktparlist, &topics,
+ 0 /*dont include regex*/);
+ rd_kafka_wrlock(rk);
+ r = rd_kafka_metadata_cache_hint(
+ rk, &topics, dst, RD_KAFKA_RESP_ERR__WAIT_CACHE, replace);
+ rd_kafka_wrunlock(rk);
+
+ rd_list_destroy(&topics);
+ return r;
+}
+
+
+/**
+ * @brief Cache entry comparator (on topic name)
+ */
+static int rd_kafka_metadata_cache_entry_cmp(const void *_a, const void *_b) {
+ const struct rd_kafka_metadata_cache_entry *a = _a, *b = _b;
+ return strcmp(a->rkmce_mtopic.topic, b->rkmce_mtopic.topic);
+}
+
+
+/**
+ * @brief Initialize the metadata cache
+ *
+ * @locks rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_init(rd_kafka_t *rk) {
+ rd_avl_init(&rk->rk_metadata_cache.rkmc_avl,
+ rd_kafka_metadata_cache_entry_cmp, 0);
+ TAILQ_INIT(&rk->rk_metadata_cache.rkmc_expiry);
+ mtx_init(&rk->rk_metadata_cache.rkmc_full_lock, mtx_plain);
+ mtx_init(&rk->rk_metadata_cache.rkmc_cnd_lock, mtx_plain);
+ cnd_init(&rk->rk_metadata_cache.rkmc_cnd);
+ rd_list_init(&rk->rk_metadata_cache.rkmc_observers, 8,
+ rd_kafka_enq_once_trigger_destroy);
+}
+
+/**
+ * @brief Purge and destroy metadata cache.
+ *
+ * @locks_required rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_destroy(rd_kafka_t *rk) {
+ rd_list_destroy(&rk->rk_metadata_cache.rkmc_observers);
+ rd_kafka_timer_stop(&rk->rk_timers,
+ &rk->rk_metadata_cache.rkmc_query_tmr, 1 /*lock*/);
+ rd_kafka_metadata_cache_purge(rk, rd_true /*observers too*/);
+ mtx_destroy(&rk->rk_metadata_cache.rkmc_full_lock);
+ mtx_destroy(&rk->rk_metadata_cache.rkmc_cnd_lock);
+ cnd_destroy(&rk->rk_metadata_cache.rkmc_cnd);
+ rd_avl_destroy(&rk->rk_metadata_cache.rkmc_avl);
+}
+
+
+
+/**
+ * @brief Add eonce to list of async cache observers.
+ *
+ * @locks_required rd_kafka_wrlock()
+ */
+void rd_kafka_metadata_cache_wait_state_change_async(
+ rd_kafka_t *rk,
+ rd_kafka_enq_once_t *eonce) {
+ rd_kafka_enq_once_add_source(eonce, "wait metadata cache change");
+ rd_list_add(&rk->rk_metadata_cache.rkmc_observers, eonce);
+}
+
+
+/**
+ * @brief Wait for cache update, or timeout.
+ *
+ * @returns 1 on cache update or 0 on timeout.
+ * @locks none
+ * @locality any
+ */
+int rd_kafka_metadata_cache_wait_change(rd_kafka_t *rk, int timeout_ms) {
+ int r;
+#if ENABLE_DEVEL
+ rd_ts_t ts_start = rd_clock();
+#endif
+ mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock);
+ r = cnd_timedwait_ms(&rk->rk_metadata_cache.rkmc_cnd,
+ &rk->rk_metadata_cache.rkmc_cnd_lock, timeout_ms);
+ mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock);
+
+#if ENABLE_DEVEL
+ rd_kafka_dbg(rk, METADATA, "CACHEWAIT", "%s wait took %dms: %s",
+ __FUNCTION__, (int)((rd_clock() - ts_start) / 1000),
+ r == thrd_success ? "succeeded" : "timed out");
+#endif
+ return r == thrd_success;
+}
+
+
+/**
+ * @brief eonce trigger callback for rd_list_apply() call in
+ * rd_kafka_metadata_cache_propagate_changes()
+ */
+static int
+rd_kafka_metadata_cache_propagate_changes_trigger_eonce(void *elem,
+ void *opaque) {
+ rd_kafka_enq_once_t *eonce = elem;
+ rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR,
+ "wait metadata cache change");
+ return 0; /* remove eonce from list */
+}
+
+
+/**
+ * @brief Propagate that the cache changed (but not what changed) to
+ * any cnd listeners and eonce observers.
+ * @locks_required rd_kafka_wrlock(rk)
+ * @locks_acquired rkmc_cnd_lock
+ * @locality any
+ */
+void rd_kafka_metadata_cache_propagate_changes(rd_kafka_t *rk) {
+ mtx_lock(&rk->rk_metadata_cache.rkmc_cnd_lock);
+ cnd_broadcast(&rk->rk_metadata_cache.rkmc_cnd);
+ mtx_unlock(&rk->rk_metadata_cache.rkmc_cnd_lock);
+
+ /* Trigger observers */
+ rd_list_apply(&rk->rk_metadata_cache.rkmc_observers,
+ rd_kafka_metadata_cache_propagate_changes_trigger_eonce,
+ NULL);
+}
+
+/**
+ * @returns the shared metadata for a topic, or NULL if not found in
+ * cache.
+ *
+ * @locks rd_kafka_*lock()
+ */
+const rd_kafka_metadata_topic_t *
+rd_kafka_metadata_cache_topic_get(rd_kafka_t *rk,
+ const char *topic,
+ int valid) {
+ struct rd_kafka_metadata_cache_entry *rkmce;
+
+ if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic, valid)))
+ return NULL;
+
+ return &rkmce->rkmce_mtopic;
+}
+
+
+
+/**
+ * @brief Looks up the shared metadata for a partition along with its topic.
+ *
+ * Cache entries with errors (such as auth errors) will not be returned unless
+ * \p valid is set to false.
+ *
+ * @param mtopicp: pointer to topic metadata
+ * @param mpartp: pointer to partition metadata
+ * @param valid: only return valid entries (no hints)
+ *
+ * @returns -1 if topic was not found in cache, 0 if topic was found
+ * but not the partition, 1 if both topic and partition was found.
+ *
+ * @locks rd_kafka_*lock()
+ */
+int rd_kafka_metadata_cache_topic_partition_get(
+ rd_kafka_t *rk,
+ const rd_kafka_metadata_topic_t **mtopicp,
+ const rd_kafka_metadata_partition_t **mpartp,
+ const char *topic,
+ int32_t partition,
+ int valid) {
+
+ const rd_kafka_metadata_topic_t *mtopic;
+ const rd_kafka_metadata_partition_t *mpart;
+ rd_kafka_metadata_partition_t skel = {.id = partition};
+
+ *mtopicp = NULL;
+ *mpartp = NULL;
+
+ if (!(mtopic = rd_kafka_metadata_cache_topic_get(rk, topic, valid)))
+ return -1;
+
+ *mtopicp = mtopic;
+
+ if (mtopic->err)
+ return -1;
+
+ /* Partitions array may be sparse so use bsearch lookup. */
+ mpart = bsearch(&skel, mtopic->partitions, mtopic->partition_cnt,
+ sizeof(*mtopic->partitions),
+ rd_kafka_metadata_partition_id_cmp);
+
+ if (!mpart)
+ return 0;
+
+ *mpartp = mpart;
+
+ return 1;
+}
+
+
+/**
+ * @returns the number of topics in \p topics that are in the cache.
+ *
+ * @param topics rd_list(const char *): topic names
+ * @param metadata_agep: age of oldest entry will be returned.
+ *
+ * @locks rd_kafka_*lock()
+ */
+int rd_kafka_metadata_cache_topics_count_exists(rd_kafka_t *rk,
+ const rd_list_t *topics,
+ int *metadata_agep) {
+ const char *topic;
+ int i;
+ int cnt = 0;
+ int max_age = -1;
+
+ RD_LIST_FOREACH(topic, topics, i) {
+ const struct rd_kafka_metadata_cache_entry *rkmce;
+ int age;
+
+ if (!(rkmce = rd_kafka_metadata_cache_find(rk, topic,
+ 1 /*valid only*/)))
+ continue;
+
+ age = (int)((rd_clock() - rkmce->rkmce_ts_insert) / 1000);
+ if (age > max_age)
+ max_age = age;
+ cnt++;
+ }
+
+ *metadata_agep = max_age;
+
+ return cnt;
+}
+
+
+/**
+ * @brief Add all topics in the metadata cache to \p topics, avoid duplicates.
+ *
+ * Element type is (char *topic_name).
+ *
+ * @returns the number of elements added to \p topics
+ *
+ * @locks_required rd_kafka_*lock()
+ */
+int rd_kafka_metadata_cache_topics_to_list(rd_kafka_t *rk, rd_list_t *topics) {
+ const struct rd_kafka_metadata_cache_entry *rkmce;
+ int precnt = rd_list_cnt(topics);
+
+ TAILQ_FOREACH(rkmce, &rk->rk_metadata_cache.rkmc_expiry, rkmce_link) {
+ /* Ignore topics that have up to date metadata info */
+ if (RD_KAFKA_METADATA_CACHE_VALID(rkmce))
+ continue;
+
+ if (rd_list_find(topics, rkmce->rkmce_mtopic.topic,
+ rd_list_cmp_str))
+ continue;
+
+ rd_list_add(topics, rd_strdup(rkmce->rkmce_mtopic.topic));
+ }
+
+ return rd_list_cnt(topics) - precnt;
+}
+
+
+/**
+ * @brief Dump cache to \p fp
+ *
+ * @locks rd_kafka_*lock()
+ */
+void rd_kafka_metadata_cache_dump(FILE *fp, rd_kafka_t *rk) {
+ const struct rd_kafka_metadata_cache *rkmc = &rk->rk_metadata_cache;
+ const struct rd_kafka_metadata_cache_entry *rkmce;
+ rd_ts_t now = rd_clock();
+
+ fprintf(fp, "Metadata cache with %d entries:\n", rkmc->rkmc_cnt);
+ TAILQ_FOREACH(rkmce, &rkmc->rkmc_expiry, rkmce_link) {
+ fprintf(fp,
+ " %s (inserted %dms ago, expires in %dms, "
+ "%d partition(s), %s)%s%s\n",
+ rkmce->rkmce_mtopic.topic,
+ (int)((now - rkmce->rkmce_ts_insert) / 1000),
+ (int)((rkmce->rkmce_ts_expires - now) / 1000),
+ rkmce->rkmce_mtopic.partition_cnt,
+ RD_KAFKA_METADATA_CACHE_VALID(rkmce) ? "valid" : "hint",
+ rkmce->rkmce_mtopic.err ? " error: " : "",
+ rkmce->rkmce_mtopic.err
+ ? rd_kafka_err2str(rkmce->rkmce_mtopic.err)
+ : "");
+ }
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c
new file mode 100644
index 000000000..ae7940533
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.c
@@ -0,0 +1,2585 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Mocks
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdbuf.h"
+#include "rdrand.h"
+#include "rdkafka_interceptor.h"
+#include "rdkafka_mock_int.h"
+#include "rdkafka_transport_int.h"
+
+#include <stdarg.h>
+
+static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster);
+
+
+static rd_kafka_mock_broker_t *
+rd_kafka_mock_broker_find(const rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id) {
+ const rd_kafka_mock_broker_t *mrkb;
+
+ TAILQ_FOREACH(mrkb, &mcluster->brokers, link)
+ if (mrkb->id == broker_id)
+ return (rd_kafka_mock_broker_t *)mrkb;
+
+ return NULL;
+}
+
+
+
+/**
+ * @brief Unlink and free message set.
+ */
+static void rd_kafka_mock_msgset_destroy(rd_kafka_mock_partition_t *mpart,
+ rd_kafka_mock_msgset_t *mset) {
+ const rd_kafka_mock_msgset_t *next = TAILQ_NEXT(mset, link);
+
+ /* Removing last messageset */
+ if (!next)
+ mpart->start_offset = mpart->end_offset;
+ else if (mset == TAILQ_FIRST(&mpart->msgsets))
+ /* Removing first messageset */
+ mpart->start_offset = next->first_offset;
+
+ if (mpart->update_follower_start_offset)
+ mpart->follower_start_offset = mpart->start_offset;
+
+ rd_assert(mpart->cnt > 0);
+ mpart->cnt--;
+ mpart->size -= RD_KAFKAP_BYTES_LEN(&mset->bytes);
+ TAILQ_REMOVE(&mpart->msgsets, mset, link);
+ rd_free(mset);
+}
+
+
+/**
+ * @brief Create a new msgset object with a copy of \p bytes
+ * and appends it to the partition log.
+ */
+static rd_kafka_mock_msgset_t *
+rd_kafka_mock_msgset_new(rd_kafka_mock_partition_t *mpart,
+ const rd_kafkap_bytes_t *bytes,
+ size_t msgcnt) {
+ rd_kafka_mock_msgset_t *mset;
+ size_t totsize = sizeof(*mset) + RD_KAFKAP_BYTES_LEN(bytes);
+ int64_t BaseOffset;
+ int32_t PartitionLeaderEpoch;
+ int64_t orig_start_offset = mpart->start_offset;
+
+ rd_assert(!RD_KAFKAP_BYTES_IS_NULL(bytes));
+
+ mset = rd_malloc(totsize);
+ rd_assert(mset != NULL);
+
+ mset->first_offset = mpart->end_offset;
+ mset->last_offset = mset->first_offset + msgcnt - 1;
+ mpart->end_offset = mset->last_offset + 1;
+ if (mpart->update_follower_end_offset)
+ mpart->follower_end_offset = mpart->end_offset;
+ mpart->cnt++;
+
+ mset->bytes.len = bytes->len;
+ mset->leader_epoch = mpart->leader_epoch;
+
+
+ mset->bytes.data = (void *)(mset + 1);
+ memcpy((void *)mset->bytes.data, bytes->data, mset->bytes.len);
+ mpart->size += mset->bytes.len;
+
+ /* Update the base Offset in the MessageSet with the
+ * actual absolute log offset. */
+ BaseOffset = htobe64(mset->first_offset);
+ memcpy((void *)mset->bytes.data, &BaseOffset, sizeof(BaseOffset));
+ /* Update the base PartitionLeaderEpoch in the MessageSet with the
+ * actual partition leader epoch. */
+ PartitionLeaderEpoch = htobe32(mset->leader_epoch);
+ memcpy(((char *)mset->bytes.data) + 12, &PartitionLeaderEpoch,
+ sizeof(PartitionLeaderEpoch));
+
+ /* Remove old msgsets until within limits */
+ while (mpart->cnt > 1 &&
+ (mpart->cnt > mpart->max_cnt || mpart->size > mpart->max_size))
+ rd_kafka_mock_msgset_destroy(mpart,
+ TAILQ_FIRST(&mpart->msgsets));
+
+ TAILQ_INSERT_TAIL(&mpart->msgsets, mset, link);
+
+ rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, "MOCK",
+ "Broker %" PRId32 ": Log append %s [%" PRId32
+ "] "
+ "%" PRIusz " messages, %" PRId32
+ " bytes at offset %" PRId64 " (log now %" PRId64
+ "..%" PRId64
+ ", "
+ "original start %" PRId64 ")",
+ mpart->leader->id, mpart->topic->name, mpart->id, msgcnt,
+ RD_KAFKAP_BYTES_LEN(&mset->bytes), mset->first_offset,
+ mpart->start_offset, mpart->end_offset, orig_start_offset);
+
+ return mset;
+}
+
+/**
+ * @brief Find message set containing \p offset
+ */
+const rd_kafka_mock_msgset_t *
+rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart,
+ int64_t offset,
+ rd_bool_t on_follower) {
+ const rd_kafka_mock_msgset_t *mset;
+
+ if (!on_follower &&
+ (offset < mpart->start_offset || offset > mpart->end_offset))
+ return NULL;
+
+ if (on_follower && (offset < mpart->follower_start_offset ||
+ offset > mpart->follower_end_offset))
+ return NULL;
+
+ /* FIXME: Maintain an index */
+
+ TAILQ_FOREACH(mset, &mpart->msgsets, link) {
+ if (mset->first_offset <= offset && offset <= mset->last_offset)
+ return mset;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Looks up or creates a new pidstate for the given partition and PID.
+ *
+ * The pidstate is used to verify per-partition per-producer BaseSequences
+ * for the idempotent/txn producer.
+ */
+static rd_kafka_mock_pid_t *
+rd_kafka_mock_partition_pidstate_get(rd_kafka_mock_partition_t *mpart,
+ const rd_kafka_mock_pid_t *mpid) {
+ rd_kafka_mock_pid_t *pidstate;
+ size_t tidlen;
+
+ pidstate = rd_list_find(&mpart->pidstates, mpid, rd_kafka_mock_pid_cmp);
+ if (pidstate)
+ return pidstate;
+
+ tidlen = strlen(mpid->TransactionalId);
+ pidstate = rd_malloc(sizeof(*pidstate) + tidlen);
+ pidstate->pid = mpid->pid;
+ memcpy(pidstate->TransactionalId, mpid->TransactionalId, tidlen);
+ pidstate->TransactionalId[tidlen] = '\0';
+
+ pidstate->lo = pidstate->hi = pidstate->window = 0;
+ memset(pidstate->seq, 0, sizeof(pidstate->seq));
+
+ rd_list_add(&mpart->pidstates, pidstate);
+
+ return pidstate;
+}
+
+
+/**
+ * @brief Validate ProduceRequest records in \p rkbuf.
+ *
+ * @warning The \p rkbuf must not be read, just peek()ed.
+ *
+ * This is a very selective validation, currently only:
+ * - verify idempotency TransactionalId,PID,Epoch,Seq
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_validate_records(rd_kafka_mock_partition_t *mpart,
+ rd_kafka_buf_t *rkbuf,
+ size_t RecordCount,
+ const rd_kafkap_str_t *TransactionalId,
+ rd_bool_t *is_dupd) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster;
+ rd_kafka_mock_pid_t *mpid;
+ rd_kafka_mock_pid_t *mpidstate = NULL;
+ rd_kafka_pid_t pid;
+ int32_t expected_BaseSequence = -1, BaseSequence = -1;
+ rd_kafka_resp_err_t err;
+
+ *is_dupd = rd_false;
+
+ if (!TransactionalId || RD_KAFKAP_STR_LEN(TransactionalId) < 1)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rd_kafka_buf_peek_i64(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerId,
+ &pid.id);
+ rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch,
+ &pid.epoch);
+ rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_BaseSequence,
+ &BaseSequence);
+
+ mtx_lock(&mcluster->lock);
+ err = rd_kafka_mock_pid_find(mcluster, TransactionalId, pid, &mpid);
+ mtx_unlock(&mcluster->lock);
+
+ if (likely(!err)) {
+
+ if (mpid->pid.epoch != pid.epoch)
+ err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
+
+ /* Each partition tracks the 5 last Produce requests per PID.*/
+ mpidstate = rd_kafka_mock_partition_pidstate_get(mpart, mpid);
+
+ expected_BaseSequence = mpidstate->seq[mpidstate->hi];
+
+ /* A BaseSequence within the range of the last 5 requests is
+ * considered a legal duplicate and will be successfully acked
+ * but not written to the log. */
+ if (BaseSequence < mpidstate->seq[mpidstate->lo])
+ err = RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER;
+ else if (BaseSequence > mpidstate->seq[mpidstate->hi])
+ err = RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER;
+ else if (BaseSequence != expected_BaseSequence)
+ *is_dupd = rd_true;
+ }
+
+ if (unlikely(err)) {
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Broker %" PRId32 ": Log append %s [%" PRId32
+ "] failed: PID mismatch: TransactionalId=%.*s "
+ "expected %s BaseSeq %" PRId32
+ ", not %s BaseSeq %" PRId32 ": %s",
+ mpart->leader->id, mpart->topic->name, mpart->id,
+ RD_KAFKAP_STR_PR(TransactionalId),
+ mpid ? rd_kafka_pid2str(mpid->pid) : "n/a",
+ expected_BaseSequence, rd_kafka_pid2str(pid),
+ BaseSequence, rd_kafka_err2name(err));
+ return err;
+ }
+
+ /* Update BaseSequence window */
+ if (unlikely(mpidstate->window < 5))
+ mpidstate->window++;
+ else
+ mpidstate->lo = (mpidstate->lo + 1) % mpidstate->window;
+ mpidstate->hi = (mpidstate->hi + 1) % mpidstate->window;
+ mpidstate->seq[mpidstate->hi] = (int32_t)(BaseSequence + RecordCount);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ return rkbuf->rkbuf_err;
+}
+
+/**
+ * @brief Append the MessageSets in \p bytes to the \p mpart partition log.
+ *
+ * @param BaseOffset will contain the first assigned offset of the message set.
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart,
+ const rd_kafkap_bytes_t *records,
+ const rd_kafkap_str_t *TransactionalId,
+ int64_t *BaseOffset) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_buf_t *rkbuf;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int8_t MagicByte;
+ int32_t RecordCount;
+ int16_t Attributes;
+ rd_kafka_mock_msgset_t *mset;
+ rd_bool_t is_dup = rd_false;
+
+ /* Partially parse the MessageSet in \p bytes to get
+ * the message count. */
+ rkbuf = rd_kafka_buf_new_shadow(records->data,
+ RD_KAFKAP_BYTES_LEN(records), NULL);
+
+ rd_kafka_buf_peek_i8(rkbuf, RD_KAFKAP_MSGSET_V2_OF_MagicByte,
+ &MagicByte);
+ if (MagicByte != 2) {
+ /* We only support MsgVersion 2 for now */
+ err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION;
+ goto err;
+ }
+
+ rd_kafka_buf_peek_i32(rkbuf, RD_KAFKAP_MSGSET_V2_OF_RecordCount,
+ &RecordCount);
+ rd_kafka_buf_peek_i16(rkbuf, RD_KAFKAP_MSGSET_V2_OF_Attributes,
+ &Attributes);
+
+ if (RecordCount < 1 ||
+ (!(Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) &&
+ (size_t)RecordCount > RD_KAFKAP_BYTES_LEN(records) /
+ RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD)) {
+ err = RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE;
+ goto err;
+ }
+
+ if ((err = rd_kafka_mock_validate_records(
+ mpart, rkbuf, (size_t)RecordCount, TransactionalId, &is_dup)))
+ goto err;
+
+ /* If this is a legit duplicate, don't write it to the log. */
+ if (is_dup)
+ goto err;
+
+ rd_kafka_buf_destroy(rkbuf);
+
+ mset = rd_kafka_mock_msgset_new(mpart, records, (size_t)RecordCount);
+
+ *BaseOffset = mset->first_offset;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+ rd_kafka_buf_destroy(rkbuf);
+ return err;
+}
+
+
+/**
+ * @brief Set the partition leader, or NULL for leader-less.
+ */
+static void
+rd_kafka_mock_partition_set_leader0(rd_kafka_mock_partition_t *mpart,
+ rd_kafka_mock_broker_t *mrkb) {
+ mpart->leader = mrkb;
+ mpart->leader_epoch++;
+}
+
+
+/**
+ * @brief Verifies that the client-provided leader_epoch matches that of the
+ * partition, else returns the appropriate error.
+ */
+rd_kafka_resp_err_t rd_kafka_mock_partition_leader_epoch_check(
+ const rd_kafka_mock_partition_t *mpart,
+ int32_t leader_epoch) {
+ if (likely(leader_epoch == -1 || mpart->leader_epoch == leader_epoch))
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ else if (mpart->leader_epoch < leader_epoch)
+ return RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH;
+ else if (mpart->leader_epoch > leader_epoch)
+ return RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH;
+
+ /* NOTREACHED, but avoids warning */
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Returns the end offset (last offset + 1)
+ * for the passed leader epoch in the mock partition.
+ *
+ * @param mpart The mock partition
+ * @param leader_epoch The leader epoch
+ *
+ * @return The end offset for the passed \p leader_epoch in \p mpart
+ */
+int64_t rd_kafka_mock_partition_offset_for_leader_epoch(
+ const rd_kafka_mock_partition_t *mpart,
+ int32_t leader_epoch) {
+ const rd_kafka_mock_msgset_t *mset = NULL;
+
+ if (leader_epoch < 0)
+ return -1;
+
+ TAILQ_FOREACH_REVERSE(mset, &mpart->msgsets,
+ rd_kafka_mock_msgset_tailq_s, link) {
+ if (mset->leader_epoch == leader_epoch)
+ return mset->last_offset + 1;
+ }
+
+ return -1;
+}
+
+
+/**
+ * @brief Automatically assign replicas for partition
+ */
+static void
+rd_kafka_mock_partition_assign_replicas(rd_kafka_mock_partition_t *mpart) {
+ rd_kafka_mock_cluster_t *mcluster = mpart->topic->cluster;
+ int replica_cnt =
+ RD_MIN(mcluster->defaults.replication_factor, mcluster->broker_cnt);
+ rd_kafka_mock_broker_t *mrkb;
+ int i = 0;
+
+ if (mpart->replicas)
+ rd_free(mpart->replicas);
+
+ mpart->replicas = rd_calloc(replica_cnt, sizeof(*mpart->replicas));
+ mpart->replica_cnt = replica_cnt;
+
+ /* FIXME: randomize this using perhaps reservoir sampling */
+ TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
+ if (i == mpart->replica_cnt)
+ break;
+ mpart->replicas[i++] = mrkb;
+ }
+
+ /* Select a random leader */
+ rd_kafka_mock_partition_set_leader0(
+ mpart, mpart->replicas[rd_jitter(0, replica_cnt - 1)]);
+}
+
+
+
+/**
+ * @brief Unlink and destroy committed offset
+ */
+static void
+rd_kafka_mock_committed_offset_destroy(rd_kafka_mock_partition_t *mpart,
+ rd_kafka_mock_committed_offset_t *coff) {
+ rd_kafkap_str_destroy(coff->metadata);
+ TAILQ_REMOVE(&mpart->committed_offsets, coff, link);
+ rd_free(coff);
+}
+
+
+/**
+ * @brief Find previously committed offset for group.
+ */
+rd_kafka_mock_committed_offset_t *
+rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart,
+ const rd_kafkap_str_t *group) {
+ const rd_kafka_mock_committed_offset_t *coff;
+
+ TAILQ_FOREACH(coff, &mpart->committed_offsets, link) {
+ if (!rd_kafkap_str_cmp_str(group, coff->group))
+ return (rd_kafka_mock_committed_offset_t *)coff;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Commit offset for group
+ */
+rd_kafka_mock_committed_offset_t *
+rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart,
+ const rd_kafkap_str_t *group,
+ int64_t offset,
+ const rd_kafkap_str_t *metadata) {
+ rd_kafka_mock_committed_offset_t *coff;
+
+ if (!(coff = rd_kafka_mock_committed_offset_find(mpart, group))) {
+ size_t slen = (size_t)RD_KAFKAP_STR_LEN(group);
+
+ coff = rd_malloc(sizeof(*coff) + slen + 1);
+
+ coff->group = (char *)(coff + 1);
+ memcpy(coff->group, group->str, slen);
+ coff->group[slen] = '\0';
+
+ coff->metadata = NULL;
+
+ TAILQ_INSERT_HEAD(&mpart->committed_offsets, coff, link);
+ }
+
+ if (coff->metadata)
+ rd_kafkap_str_destroy(coff->metadata);
+
+ coff->metadata = rd_kafkap_str_copy(metadata);
+
+ coff->offset = offset;
+
+ rd_kafka_dbg(mpart->topic->cluster->rk, MOCK, "MOCK",
+ "Topic %s [%" PRId32 "] committing offset %" PRId64
+ " for group %.*s",
+ mpart->topic->name, mpart->id, offset,
+ RD_KAFKAP_STR_PR(group));
+
+ return coff;
+}
+
+/**
+ * @brief Destroy resources for partition, but the \p mpart itself is not freed.
+ */
+static void rd_kafka_mock_partition_destroy(rd_kafka_mock_partition_t *mpart) {
+ rd_kafka_mock_msgset_t *mset, *tmp;
+ rd_kafka_mock_committed_offset_t *coff, *tmpcoff;
+
+ TAILQ_FOREACH_SAFE(mset, &mpart->msgsets, link, tmp)
+ rd_kafka_mock_msgset_destroy(mpart, mset);
+
+ TAILQ_FOREACH_SAFE(coff, &mpart->committed_offsets, link, tmpcoff)
+ rd_kafka_mock_committed_offset_destroy(mpart, coff);
+
+ rd_list_destroy(&mpart->pidstates);
+
+ rd_free(mpart->replicas);
+}
+
+
+static void rd_kafka_mock_partition_init(rd_kafka_mock_topic_t *mtopic,
+ rd_kafka_mock_partition_t *mpart,
+ int id,
+ int replication_factor) {
+ mpart->topic = mtopic;
+ mpart->id = id;
+
+ mpart->follower_id = -1;
+ mpart->leader_epoch = -1; /* Start at -1 since assign_replicas() will
+ * bump it right away to 0. */
+
+ TAILQ_INIT(&mpart->msgsets);
+
+ mpart->max_size = 1024 * 1024 * 5;
+ mpart->max_cnt = 100000;
+
+ mpart->update_follower_start_offset = rd_true;
+ mpart->update_follower_end_offset = rd_true;
+
+ TAILQ_INIT(&mpart->committed_offsets);
+
+ rd_list_init(&mpart->pidstates, 0, rd_free);
+
+ rd_kafka_mock_partition_assign_replicas(mpart);
+}
+
+rd_kafka_mock_partition_t *
+rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic,
+ int32_t partition) {
+ if (!mtopic || partition < 0 || partition >= mtopic->partition_cnt)
+ return NULL;
+
+ return (rd_kafka_mock_partition_t *)&mtopic->partitions[partition];
+}
+
+
+static void rd_kafka_mock_topic_destroy(rd_kafka_mock_topic_t *mtopic) {
+ int i;
+
+ for (i = 0; i < mtopic->partition_cnt; i++)
+ rd_kafka_mock_partition_destroy(&mtopic->partitions[i]);
+
+ TAILQ_REMOVE(&mtopic->cluster->topics, mtopic, link);
+ mtopic->cluster->topic_cnt--;
+
+ rd_free(mtopic->partitions);
+ rd_free(mtopic->name);
+ rd_free(mtopic);
+}
+
+
+static rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_new(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int partition_cnt,
+ int replication_factor) {
+ rd_kafka_mock_topic_t *mtopic;
+ int i;
+
+ mtopic = rd_calloc(1, sizeof(*mtopic));
+ mtopic->name = rd_strdup(topic);
+ mtopic->cluster = mcluster;
+
+ mtopic->partition_cnt = partition_cnt;
+ mtopic->partitions =
+ rd_calloc(partition_cnt, sizeof(*mtopic->partitions));
+
+ for (i = 0; i < partition_cnt; i++)
+ rd_kafka_mock_partition_init(mtopic, &mtopic->partitions[i], i,
+ replication_factor);
+
+ TAILQ_INSERT_TAIL(&mcluster->topics, mtopic, link);
+ mcluster->topic_cnt++;
+
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Created topic \"%s\" with %d partition(s) and "
+ "replication-factor %d",
+ mtopic->name, mtopic->partition_cnt, replication_factor);
+
+ return mtopic;
+}
+
+
+rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster,
+ const char *name) {
+ const rd_kafka_mock_topic_t *mtopic;
+
+ TAILQ_FOREACH(mtopic, &mcluster->topics, link) {
+ if (!strcmp(mtopic->name, name))
+ return (rd_kafka_mock_topic_t *)mtopic;
+ }
+
+ return NULL;
+}
+
+
+rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *kname) {
+ const rd_kafka_mock_topic_t *mtopic;
+
+ TAILQ_FOREACH(mtopic, &mcluster->topics, link) {
+ if (!strncmp(mtopic->name, kname->str,
+ RD_KAFKAP_STR_LEN(kname)) &&
+ mtopic->name[RD_KAFKAP_STR_LEN(kname)] == '\0')
+ return (rd_kafka_mock_topic_t *)mtopic;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Create a topic using default settings.
+ * The topic must not already exist.
+ *
+ * @param errp will be set to an error code that is consistent with
+ * new topics on real clusters.
+ */
+rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int partition_cnt,
+ rd_kafka_resp_err_t *errp) {
+ rd_assert(!rd_kafka_mock_topic_find(mcluster, topic));
+ *errp = 0; // FIXME? RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE;
+ return rd_kafka_mock_topic_new(mcluster, topic,
+ partition_cnt == -1
+ ? mcluster->defaults.partition_cnt
+ : partition_cnt,
+ mcluster->defaults.replication_factor);
+}
+
+
+/**
+ * @brief Find or create topic.
+ *
+ * @param partition_cnt If not -1 and the topic does not exist, the automatic
+ * topic creation will create this number of topics.
+ * Otherwise use the default.
+ */
+rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_get(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int partition_cnt) {
+ rd_kafka_mock_topic_t *mtopic;
+ rd_kafka_resp_err_t err;
+
+ if ((mtopic = rd_kafka_mock_topic_find(mcluster, topic)))
+ return mtopic;
+
+ return rd_kafka_mock_topic_auto_create(mcluster, topic, partition_cnt,
+ &err);
+}
+
+/**
+ * @brief Find or create a partition.
+ *
+ * @returns NULL if topic already exists and partition is out of range.
+ */
+static rd_kafka_mock_partition_t *
+rd_kafka_mock_partition_get(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition) {
+ rd_kafka_mock_topic_t *mtopic;
+ rd_kafka_resp_err_t err;
+
+ if (!(mtopic = rd_kafka_mock_topic_find(mcluster, topic)))
+ mtopic = rd_kafka_mock_topic_auto_create(mcluster, topic,
+ partition + 1, &err);
+
+ if (partition >= mtopic->partition_cnt)
+ return NULL;
+
+ return &mtopic->partitions[partition];
+}
+
+
+/**
+ * @brief Set IO events for fd
+ */
+static void
+rd_kafka_mock_cluster_io_set_events(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd,
+ int events) {
+ int i;
+
+ for (i = 0; i < mcluster->fd_cnt; i++) {
+ if (mcluster->fds[i].fd == fd) {
+ mcluster->fds[i].events |= events;
+ return;
+ }
+ }
+
+ rd_assert(!*"mock_cluster_io_set_events: fd not found");
+}
+
+/**
+ * @brief Set or clear single IO events for fd
+ */
+static void
+rd_kafka_mock_cluster_io_set_event(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd,
+ rd_bool_t set,
+ int event) {
+ int i;
+
+ for (i = 0; i < mcluster->fd_cnt; i++) {
+ if (mcluster->fds[i].fd == fd) {
+ if (set)
+ mcluster->fds[i].events |= event;
+ else
+ mcluster->fds[i].events &= ~event;
+ return;
+ }
+ }
+
+ rd_assert(!*"mock_cluster_io_set_event: fd not found");
+}
+
+
+/**
+ * @brief Clear IO events for fd
+ */
+static void
+rd_kafka_mock_cluster_io_clear_events(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd,
+ int events) {
+ int i;
+
+ for (i = 0; i < mcluster->fd_cnt; i++) {
+ if (mcluster->fds[i].fd == fd) {
+ mcluster->fds[i].events &= ~events;
+ return;
+ }
+ }
+
+ rd_assert(!*"mock_cluster_io_set_events: fd not found");
+}
+
+
+static void rd_kafka_mock_cluster_io_del(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd) {
+ int i;
+
+ for (i = 0; i < mcluster->fd_cnt; i++) {
+ if (mcluster->fds[i].fd == fd) {
+ if (i + 1 < mcluster->fd_cnt) {
+ memmove(&mcluster->fds[i],
+ &mcluster->fds[i + 1],
+ sizeof(*mcluster->fds) *
+ (mcluster->fd_cnt - i));
+ memmove(&mcluster->handlers[i],
+ &mcluster->handlers[i + 1],
+ sizeof(*mcluster->handlers) *
+ (mcluster->fd_cnt - i));
+ }
+
+ mcluster->fd_cnt--;
+ return;
+ }
+ }
+
+ rd_assert(!*"mock_cluster_io_del: fd not found");
+}
+
+
+/**
+ * @brief Add \p fd to IO poll with initial desired events (POLLIN, et.al).
+ */
+static void rd_kafka_mock_cluster_io_add(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd,
+ int events,
+ rd_kafka_mock_io_handler_t handler,
+ void *opaque) {
+
+ if (mcluster->fd_cnt + 1 >= mcluster->fd_size) {
+ mcluster->fd_size += 8;
+
+ mcluster->fds = rd_realloc(
+ mcluster->fds, sizeof(*mcluster->fds) * mcluster->fd_size);
+ mcluster->handlers =
+ rd_realloc(mcluster->handlers,
+ sizeof(*mcluster->handlers) * mcluster->fd_size);
+ }
+
+ memset(&mcluster->fds[mcluster->fd_cnt], 0,
+ sizeof(mcluster->fds[mcluster->fd_cnt]));
+ mcluster->fds[mcluster->fd_cnt].fd = fd;
+ mcluster->fds[mcluster->fd_cnt].events = events;
+ mcluster->fds[mcluster->fd_cnt].revents = 0;
+ mcluster->handlers[mcluster->fd_cnt].cb = handler;
+ mcluster->handlers[mcluster->fd_cnt].opaque = opaque;
+ mcluster->fd_cnt++;
+}
+
+
+static void rd_kafka_mock_connection_close(rd_kafka_mock_connection_t *mconn,
+ const char *reason) {
+ rd_kafka_buf_t *rkbuf;
+
+ rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK",
+ "Broker %" PRId32 ": Connection from %s closed: %s",
+ mconn->broker->id,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT),
+ reason);
+
+ rd_kafka_mock_cgrps_connection_closed(mconn->broker->cluster, mconn);
+
+ rd_kafka_timer_stop(&mconn->broker->cluster->timers, &mconn->write_tmr,
+ rd_true);
+
+ while ((rkbuf = TAILQ_FIRST(&mconn->outbufs.rkbq_bufs))) {
+ rd_kafka_bufq_deq(&mconn->outbufs, rkbuf);
+ rd_kafka_buf_destroy(rkbuf);
+ }
+
+ if (mconn->rxbuf)
+ rd_kafka_buf_destroy(mconn->rxbuf);
+
+ rd_kafka_mock_cluster_io_del(mconn->broker->cluster,
+ mconn->transport->rktrans_s);
+ TAILQ_REMOVE(&mconn->broker->connections, mconn, link);
+ rd_kafka_transport_close(mconn->transport);
+ rd_free(mconn);
+}
+
+
+void rd_kafka_mock_connection_send_response(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp) {
+
+ if (resp->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
+ /* Empty struct tags */
+ rd_kafka_buf_write_i8(resp, 0);
+ }
+
+ /* rkbuf_ts_sent might be initialized with a RTT delay, else 0. */
+ resp->rkbuf_ts_sent += rd_clock();
+
+ resp->rkbuf_reshdr.Size =
+ (int32_t)(rd_buf_write_pos(&resp->rkbuf_buf) - 4);
+
+ rd_kafka_buf_update_i32(resp, 0, resp->rkbuf_reshdr.Size);
+
+ rd_kafka_dbg(mconn->broker->cluster->rk, MOCK, "MOCK",
+ "Broker %" PRId32 ": Sending %sResponseV%hd to %s",
+ mconn->broker->id,
+ rd_kafka_ApiKey2str(resp->rkbuf_reqhdr.ApiKey),
+ resp->rkbuf_reqhdr.ApiVersion,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
+
+ /* Set up a buffer reader for sending the buffer. */
+ rd_slice_init_full(&resp->rkbuf_reader, &resp->rkbuf_buf);
+
+ rd_kafka_bufq_enq(&mconn->outbufs, resp);
+
+ rd_kafka_mock_cluster_io_set_events(
+ mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT);
+}
+
+
+/**
+ * @returns 1 if a complete request is available in which case \p slicep
+ * is set to a new slice containing the data,
+ * 0 if a complete request is not yet available,
+ * -1 on error.
+ */
+static int
+rd_kafka_mock_connection_read_request(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t **rkbufp) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_t *rk = mcluster->rk;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *rkbuf;
+ char errstr[128];
+ ssize_t r;
+
+ if (!(rkbuf = mconn->rxbuf)) {
+ /* Initial read for a protocol request.
+ * Allocate enough room for the protocol header
+ * (where the total size is located). */
+ rkbuf = mconn->rxbuf =
+ rd_kafka_buf_new(2, RD_KAFKAP_REQHDR_SIZE);
+
+ /* Protocol parsing code needs the rkb for logging */
+ rkbuf->rkbuf_rkb = mconn->broker->cluster->dummy_rkb;
+ rd_kafka_broker_keep(rkbuf->rkbuf_rkb);
+
+ /* Make room for request header */
+ rd_buf_write_ensure(&rkbuf->rkbuf_buf, RD_KAFKAP_REQHDR_SIZE,
+ RD_KAFKAP_REQHDR_SIZE);
+ }
+
+ /* Read as much data as possible from the socket into the
+ * connection receive buffer. */
+ r = rd_kafka_transport_recv(mconn->transport, &rkbuf->rkbuf_buf, errstr,
+ sizeof(errstr));
+ if (r == -1) {
+ rd_kafka_dbg(
+ rk, MOCK, "MOCK",
+ "Broker %" PRId32
+ ": Connection %s: "
+ "receive failed: %s",
+ mconn->broker->id,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT),
+ errstr);
+ return -1;
+ } else if (r == 0) {
+ return 0; /* Need more data */
+ }
+
+ if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == RD_KAFKAP_REQHDR_SIZE) {
+ /* Received the full header, now check full request
+ * size and allocate the buffer accordingly. */
+
+ /* Initialize reader */
+ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0,
+ RD_KAFKAP_REQHDR_SIZE);
+
+ rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.Size);
+ rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiKey);
+ rd_kafka_buf_read_i16(rkbuf, &rkbuf->rkbuf_reqhdr.ApiVersion);
+
+ if (rkbuf->rkbuf_reqhdr.ApiKey < 0 ||
+ rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM) {
+ rd_kafka_buf_parse_fail(
+ rkbuf, "Invalid ApiKey %hd from %s",
+ rkbuf->rkbuf_reqhdr.ApiKey,
+ rd_sockaddr2str(&mconn->peer,
+ RD_SOCKADDR2STR_F_PORT));
+ RD_NOTREACHED();
+ }
+
+ /* Check if request version has flexible fields (KIP-482) */
+ if (mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey]
+ .FlexVersion != -1 &&
+ rkbuf->rkbuf_reqhdr.ApiVersion >=
+ mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey]
+ .FlexVersion)
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;
+
+
+ rd_kafka_buf_read_i32(rkbuf, &rkbuf->rkbuf_reqhdr.CorrId);
+
+ rkbuf->rkbuf_totlen = rkbuf->rkbuf_reqhdr.Size + 4;
+
+ if (rkbuf->rkbuf_totlen < RD_KAFKAP_REQHDR_SIZE + 2 ||
+ rkbuf->rkbuf_totlen >
+ (size_t)rk->rk_conf.recv_max_msg_size) {
+ rd_kafka_buf_parse_fail(
+ rkbuf, "Invalid request size %" PRId32 " from %s",
+ rkbuf->rkbuf_reqhdr.Size,
+ rd_sockaddr2str(&mconn->peer,
+ RD_SOCKADDR2STR_F_PORT));
+ RD_NOTREACHED();
+ }
+
+ /* Now adjust totlen to skip the header */
+ rkbuf->rkbuf_totlen -= RD_KAFKAP_REQHDR_SIZE;
+
+ if (!rkbuf->rkbuf_totlen) {
+ /* Empty request (valid) */
+ *rkbufp = rkbuf;
+ mconn->rxbuf = NULL;
+ return 1;
+ }
+
+ /* Allocate space for the request payload */
+ rd_buf_write_ensure(&rkbuf->rkbuf_buf, rkbuf->rkbuf_totlen,
+ rkbuf->rkbuf_totlen);
+
+ } else if (rd_buf_write_pos(&rkbuf->rkbuf_buf) -
+ RD_KAFKAP_REQHDR_SIZE ==
+ rkbuf->rkbuf_totlen) {
+ /* The full request is now read into the buffer. */
+
+ /* Set up response reader slice starting past the
+ * request header */
+ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf,
+ RD_KAFKAP_REQHDR_SIZE,
+ rd_buf_len(&rkbuf->rkbuf_buf) -
+ RD_KAFKAP_REQHDR_SIZE);
+
+ /* For convenience, shave off the ClientId */
+ rd_kafka_buf_skip_str(rkbuf);
+
+ /* And the flexible versions header tags, if any */
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ /* Return the buffer to the caller */
+ *rkbufp = rkbuf;
+ mconn->rxbuf = NULL;
+ return 1;
+ }
+
+ return 0;
+
+
+err_parse:
+ return -1;
+}
+
+rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request) {
+ rd_kafka_buf_t *rkbuf = rd_kafka_buf_new(1, 100);
+
+ /* Copy request header so the ApiVersion remains known */
+ rkbuf->rkbuf_reqhdr = request->rkbuf_reqhdr;
+
+ /* Size, updated later */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* CorrId */
+ rd_kafka_buf_write_i32(rkbuf, request->rkbuf_reqhdr.CorrId);
+
+ if (request->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_FLEXVER;
+ /* Write empty response header tags, unless this is the
+ * ApiVersionResponse which needs to be backwards compatible. */
+ if (request->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion)
+ rd_kafka_buf_write_i8(rkbuf, 0);
+ }
+
+ return rkbuf;
+}
+
+
+
+/**
+ * @brief Parse protocol request.
+ *
+ * @returns 0 on success, -1 on parse error.
+ */
+static int
+rd_kafka_mock_connection_parse_request(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_t *rk = mcluster->rk;
+
+ if (rkbuf->rkbuf_reqhdr.ApiKey < 0 ||
+ rkbuf->rkbuf_reqhdr.ApiKey >= RD_KAFKAP__NUM ||
+ !mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey].cb) {
+ rd_kafka_log(
+ rk, LOG_ERR, "MOCK",
+ "Broker %" PRId32
+ ": unsupported %sRequestV%hd "
+ "from %s",
+ mconn->broker->id,
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_reqhdr.ApiVersion,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
+ return -1;
+ }
+
+ /* ApiVersionRequest handles future versions, for everything else
+ * make sure the ApiVersion is supported. */
+ if (rkbuf->rkbuf_reqhdr.ApiKey != RD_KAFKAP_ApiVersion &&
+ !rd_kafka_mock_cluster_ApiVersion_check(
+ mcluster, rkbuf->rkbuf_reqhdr.ApiKey,
+ rkbuf->rkbuf_reqhdr.ApiVersion)) {
+ rd_kafka_log(
+ rk, LOG_ERR, "MOCK",
+ "Broker %" PRId32
+ ": unsupported %sRequest "
+ "version %hd from %s",
+ mconn->broker->id,
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_reqhdr.ApiVersion,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
+ return -1;
+ }
+
+ rd_kafka_dbg(rk, MOCK, "MOCK",
+ "Broker %" PRId32 ": Received %sRequestV%hd from %s",
+ mconn->broker->id,
+ rd_kafka_ApiKey2str(rkbuf->rkbuf_reqhdr.ApiKey),
+ rkbuf->rkbuf_reqhdr.ApiVersion,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
+
+ return mcluster->api_handlers[rkbuf->rkbuf_reqhdr.ApiKey].cb(mconn,
+ rkbuf);
+}
+
+
+/**
+ * @brief Timer callback to set the POLLOUT flag for a connection after
+ * the delay has expired.
+ */
+static void rd_kafka_mock_connection_write_out_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_mock_connection_t *mconn = arg;
+
+ rd_kafka_mock_cluster_io_set_events(
+ mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT);
+}
+
+
+/**
+ * @brief Send as many bytes as possible from the output buffer.
+ *
+ * @returns 1 if all buffers were sent, 0 if more buffers need to be sent, or
+ * -1 on error.
+ */
+static ssize_t
+rd_kafka_mock_connection_write_out(rd_kafka_mock_connection_t *mconn) {
+ rd_kafka_buf_t *rkbuf;
+ rd_ts_t now = rd_clock();
+ rd_ts_t rtt = mconn->broker->rtt;
+
+ while ((rkbuf = TAILQ_FIRST(&mconn->outbufs.rkbq_bufs))) {
+ ssize_t r;
+ char errstr[128];
+ rd_ts_t ts_delay = 0;
+
+ /* Connection delay/rtt is set. */
+ if (rkbuf->rkbuf_ts_sent + rtt > now)
+ ts_delay = rkbuf->rkbuf_ts_sent + rtt;
+
+ /* Response is being delayed */
+ if (rkbuf->rkbuf_ts_retry && rkbuf->rkbuf_ts_retry > now)
+ ts_delay = rkbuf->rkbuf_ts_retry + rtt;
+
+ if (ts_delay) {
+ /* Delay response */
+ rd_kafka_timer_start_oneshot(
+ &mconn->broker->cluster->timers, &mconn->write_tmr,
+ rd_false, ts_delay - now,
+ rd_kafka_mock_connection_write_out_tmr_cb, mconn);
+ break;
+ }
+
+ if ((r = rd_kafka_transport_send(mconn->transport,
+ &rkbuf->rkbuf_reader, errstr,
+ sizeof(errstr))) == -1)
+ return -1;
+
+ if (rd_slice_remains(&rkbuf->rkbuf_reader) > 0)
+ return 0; /* Partial send, continue next time */
+
+ /* Entire buffer sent, unlink and free */
+ rd_kafka_bufq_deq(&mconn->outbufs, rkbuf);
+
+ rd_kafka_buf_destroy(rkbuf);
+ }
+
+ rd_kafka_mock_cluster_io_clear_events(
+ mconn->broker->cluster, mconn->transport->rktrans_s, POLLOUT);
+
+ return 1;
+}
+
+
+/**
+ * @brief Call connection_write_out() for all the broker's connections.
+ *
+ * Use to check if any responses should be sent when RTT has changed.
+ */
+static void
+rd_kafka_mock_broker_connections_write_out(rd_kafka_mock_broker_t *mrkb) {
+ rd_kafka_mock_connection_t *mconn, *tmp;
+
+ /* Need a safe loop since connections may be removed on send error */
+ TAILQ_FOREACH_SAFE(mconn, &mrkb->connections, link, tmp) {
+ rd_kafka_mock_connection_write_out(mconn);
+ }
+}
+
+
+/**
+ * @brief Per-Connection IO handler
+ */
+static void rd_kafka_mock_connection_io(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd,
+ int events,
+ void *opaque) {
+ rd_kafka_mock_connection_t *mconn = opaque;
+
+ if (events & POLLIN) {
+ rd_kafka_buf_t *rkbuf;
+ int r;
+
+ while (1) {
+ /* Read full request */
+ r = rd_kafka_mock_connection_read_request(mconn,
+ &rkbuf);
+ if (r == 0)
+ break; /* Need more data */
+ else if (r == -1) {
+ rd_kafka_mock_connection_close(mconn,
+ "Read error");
+ return;
+ }
+
+ /* Parse and handle request */
+ r = rd_kafka_mock_connection_parse_request(mconn,
+ rkbuf);
+ rd_kafka_buf_destroy(rkbuf);
+ if (r == -1) {
+ rd_kafka_mock_connection_close(mconn,
+ "Parse error");
+ return;
+ }
+ }
+ }
+
+ if (events & (POLLERR | POLLHUP)) {
+ rd_kafka_mock_connection_close(mconn, "Disconnected");
+ return;
+ }
+
+ if (events & POLLOUT) {
+ if (rd_kafka_mock_connection_write_out(mconn) == -1) {
+ rd_kafka_mock_connection_close(mconn, "Write error");
+ return;
+ }
+ }
+}
+
+
+/**
+ * @brief Set connection as blocking, POLLIN will not be served.
+ */
+void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn,
+ rd_bool_t blocking) {
+ rd_kafka_mock_cluster_io_set_event(mconn->broker->cluster,
+ mconn->transport->rktrans_s,
+ !blocking, POLLIN);
+}
+
+
+static rd_kafka_mock_connection_t *
+rd_kafka_mock_connection_new(rd_kafka_mock_broker_t *mrkb,
+ rd_socket_t fd,
+ const struct sockaddr_in *peer) {
+ rd_kafka_mock_connection_t *mconn;
+ rd_kafka_transport_t *rktrans;
+ char errstr[128];
+
+ if (!mrkb->up) {
+ rd_socket_close(fd);
+ return NULL;
+ }
+
+ rktrans = rd_kafka_transport_new(mrkb->cluster->dummy_rkb, fd, errstr,
+ sizeof(errstr));
+ if (!rktrans) {
+ rd_kafka_log(mrkb->cluster->rk, LOG_ERR, "MOCK",
+ "Failed to create transport for new "
+ "mock connection: %s",
+ errstr);
+ rd_socket_close(fd);
+ return NULL;
+ }
+
+ rd_kafka_transport_post_connect_setup(rktrans);
+
+ mconn = rd_calloc(1, sizeof(*mconn));
+ mconn->broker = mrkb;
+ mconn->transport = rktrans;
+ mconn->peer = *peer;
+ rd_kafka_bufq_init(&mconn->outbufs);
+
+ TAILQ_INSERT_TAIL(&mrkb->connections, mconn, link);
+
+ rd_kafka_mock_cluster_io_add(mrkb->cluster, mconn->transport->rktrans_s,
+ POLLIN, rd_kafka_mock_connection_io,
+ mconn);
+
+ rd_kafka_dbg(mrkb->cluster->rk, MOCK, "MOCK",
+ "Broker %" PRId32 ": New connection from %s", mrkb->id,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
+
+ return mconn;
+}
+
+
+
+static void rd_kafka_mock_cluster_op_io(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd,
+ int events,
+ void *opaque) {
+ /* Read wake-up fd data and throw away, just used for wake-ups*/
+ char buf[1024];
+ while (rd_socket_read(fd, buf, sizeof(buf)) > 0)
+ ; /* Read all buffered signalling bytes */
+}
+
+
+static int rd_kafka_mock_cluster_io_poll(rd_kafka_mock_cluster_t *mcluster,
+ int timeout_ms) {
+ int r;
+ int i;
+
+ r = rd_socket_poll(mcluster->fds, mcluster->fd_cnt, timeout_ms);
+ if (r == RD_SOCKET_ERROR) {
+ rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+ "Mock cluster failed to poll %d fds: %d: %s",
+ mcluster->fd_cnt, r,
+ rd_socket_strerror(rd_socket_errno));
+ return -1;
+ }
+
+ /* Serve ops, if any */
+ rd_kafka_q_serve(mcluster->ops, RD_POLL_NOWAIT, 0,
+ RD_KAFKA_Q_CB_CALLBACK, NULL, NULL);
+
+ /* Handle IO events, if any, and if not terminating */
+ for (i = 0; mcluster->run && r > 0 && i < mcluster->fd_cnt; i++) {
+ if (!mcluster->fds[i].revents)
+ continue;
+
+ /* Call IO handler */
+ mcluster->handlers[i].cb(mcluster, mcluster->fds[i].fd,
+ mcluster->fds[i].revents,
+ mcluster->handlers[i].opaque);
+ r--;
+ }
+
+ return 0;
+}
+
+
+static int rd_kafka_mock_cluster_thread_main(void *arg) {
+ rd_kafka_mock_cluster_t *mcluster = arg;
+
+ rd_kafka_set_thread_name("mock");
+ rd_kafka_set_thread_sysname("rdk:mock");
+ rd_kafka_interceptors_on_thread_start(mcluster->rk,
+ RD_KAFKA_THREAD_BACKGROUND);
+ rd_atomic32_add(&rd_kafka_thread_cnt_curr, 1);
+
+ /* Op wakeup fd */
+ rd_kafka_mock_cluster_io_add(mcluster, mcluster->wakeup_fds[0], POLLIN,
+ rd_kafka_mock_cluster_op_io, NULL);
+
+ mcluster->run = rd_true;
+
+ while (mcluster->run) {
+ int sleeptime = (int)((rd_kafka_timers_next(&mcluster->timers,
+ 1000 * 1000 /*1s*/,
+ 1 /*lock*/) +
+ 999) /
+ 1000);
+
+ if (rd_kafka_mock_cluster_io_poll(mcluster, sleeptime) == -1)
+ break;
+
+ rd_kafka_timers_run(&mcluster->timers, RD_POLL_NOWAIT);
+ }
+
+ rd_kafka_mock_cluster_io_del(mcluster, mcluster->wakeup_fds[0]);
+
+
+ rd_kafka_interceptors_on_thread_exit(mcluster->rk,
+ RD_KAFKA_THREAD_BACKGROUND);
+ rd_atomic32_sub(&rd_kafka_thread_cnt_curr, 1);
+
+ rd_kafka_mock_cluster_destroy0(mcluster);
+
+ return 0;
+}
+
+
+
+static void rd_kafka_mock_broker_listen_io(rd_kafka_mock_cluster_t *mcluster,
+ rd_socket_t fd,
+ int events,
+ void *opaque) {
+ rd_kafka_mock_broker_t *mrkb = opaque;
+
+ if (events & (POLLERR | POLLHUP))
+ rd_assert(!*"Mock broker listen socket error");
+
+ if (events & POLLIN) {
+ rd_socket_t new_s;
+ struct sockaddr_in peer;
+ socklen_t peer_size = sizeof(peer);
+
+ new_s = accept(mrkb->listen_s, (struct sockaddr *)&peer,
+ &peer_size);
+ if (new_s == RD_SOCKET_ERROR) {
+ rd_kafka_log(mcluster->rk, LOG_ERR, "MOCK",
+ "Failed to accept mock broker socket: %s",
+ rd_socket_strerror(rd_socket_errno));
+ return;
+ }
+
+ rd_kafka_mock_connection_new(mrkb, new_s, &peer);
+ }
+}
+
+
+/**
+ * @brief Close all connections to broker.
+ */
+static void rd_kafka_mock_broker_close_all(rd_kafka_mock_broker_t *mrkb,
+ const char *reason) {
+ rd_kafka_mock_connection_t *mconn;
+
+ while ((mconn = TAILQ_FIRST(&mrkb->connections)))
+ rd_kafka_mock_connection_close(mconn, reason);
+}
+
+/**
+ * @brief Destroy error stack, must be unlinked.
+ */
+static void
+rd_kafka_mock_error_stack_destroy(rd_kafka_mock_error_stack_t *errstack) {
+ if (errstack->errs)
+ rd_free(errstack->errs);
+ rd_free(errstack);
+}
+
+
+static void rd_kafka_mock_broker_destroy(rd_kafka_mock_broker_t *mrkb) {
+ rd_kafka_mock_error_stack_t *errstack;
+
+ rd_kafka_mock_broker_close_all(mrkb, "Destroying broker");
+
+ if (mrkb->listen_s != -1) {
+ if (mrkb->up)
+ rd_kafka_mock_cluster_io_del(mrkb->cluster,
+ mrkb->listen_s);
+ rd_socket_close(mrkb->listen_s);
+ }
+
+ while ((errstack = TAILQ_FIRST(&mrkb->errstacks))) {
+ TAILQ_REMOVE(&mrkb->errstacks, errstack, link);
+ rd_kafka_mock_error_stack_destroy(errstack);
+ }
+
+ TAILQ_REMOVE(&mrkb->cluster->brokers, mrkb, link);
+ mrkb->cluster->broker_cnt--;
+
+ rd_free(mrkb);
+}
+
+
+/**
+ * @brief Starts listening on the mock broker socket.
+ *
+ * @returns 0 on success or -1 on error (logged).
+ */
+static int rd_kafka_mock_broker_start_listener(rd_kafka_mock_broker_t *mrkb) {
+ rd_assert(mrkb->listen_s != -1);
+
+ if (listen(mrkb->listen_s, 5) == RD_SOCKET_ERROR) {
+ rd_kafka_log(mrkb->cluster->rk, LOG_CRIT, "MOCK",
+ "Failed to listen on mock broker socket: %s",
+ rd_socket_strerror(rd_socket_errno));
+ return -1;
+ }
+
+ rd_kafka_mock_cluster_io_add(mrkb->cluster, mrkb->listen_s, POLLIN,
+ rd_kafka_mock_broker_listen_io, mrkb);
+
+ return 0;
+}
+
+
+/**
+ * @brief Creates a new listener socket for \p mrkb but does NOT starts
+ * listening.
+ *
+ * @param sin is the address and port to bind. If the port is zero a random
+ * port will be assigned (by the kernel) and the address and port
+ * will be returned in this pointer.
+ *
+ * @returns listener socket on success or -1 on error (errors are logged).
+ */
+static int rd_kafka_mock_broker_new_listener(rd_kafka_mock_cluster_t *mcluster,
+ struct sockaddr_in *sinp) {
+ struct sockaddr_in sin = *sinp;
+ socklen_t sin_len = sizeof(sin);
+ int listen_s;
+ int on = 1;
+
+ if (!sin.sin_family)
+ sin.sin_family = AF_INET;
+
+ /*
+ * Create and bind socket to any loopback port
+ */
+ listen_s =
+ rd_kafka_socket_cb_linux(AF_INET, SOCK_STREAM, IPPROTO_TCP, NULL);
+ if (listen_s == RD_SOCKET_ERROR) {
+ rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+ "Unable to create mock broker listen socket: %s",
+ rd_socket_strerror(rd_socket_errno));
+ return -1;
+ }
+
+ if (setsockopt(listen_s, SOL_SOCKET, SO_REUSEADDR, (void *)&on,
+ sizeof(on)) == -1) {
+ rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+ "Failed to set SO_REUSEADDR on mock broker "
+ "listen socket: %s",
+ rd_socket_strerror(rd_socket_errno));
+ rd_socket_close(listen_s);
+ return -1;
+ }
+
+ if (bind(listen_s, (struct sockaddr *)&sin, sizeof(sin)) ==
+ RD_SOCKET_ERROR) {
+ rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+ "Failed to bind mock broker socket to %s: %s",
+ rd_socket_strerror(rd_socket_errno),
+ rd_sockaddr2str(&sin, RD_SOCKADDR2STR_F_PORT));
+ rd_socket_close(listen_s);
+ return -1;
+ }
+
+ if (getsockname(listen_s, (struct sockaddr *)&sin, &sin_len) ==
+ RD_SOCKET_ERROR) {
+ rd_kafka_log(mcluster->rk, LOG_CRIT, "MOCK",
+ "Failed to get mock broker socket name: %s",
+ rd_socket_strerror(rd_socket_errno));
+ rd_socket_close(listen_s);
+ return -1;
+ }
+ rd_assert(sin.sin_family == AF_INET);
+ /* If a filled in sinp was passed make sure nothing changed. */
+ rd_assert(!sinp->sin_port || !memcmp(sinp, &sin, sizeof(sin)));
+
+ *sinp = sin;
+
+ return listen_s;
+}
+
+
+static rd_kafka_mock_broker_t *
+rd_kafka_mock_broker_new(rd_kafka_mock_cluster_t *mcluster, int32_t broker_id) {
+ rd_kafka_mock_broker_t *mrkb;
+ rd_socket_t listen_s;
+ struct sockaddr_in sin = {
+ .sin_family = AF_INET,
+ .sin_addr = {.s_addr = htonl(INADDR_LOOPBACK)}};
+
+ listen_s = rd_kafka_mock_broker_new_listener(mcluster, &sin);
+ if (listen_s == -1)
+ return NULL;
+
+ /*
+ * Create mock broker object
+ */
+ mrkb = rd_calloc(1, sizeof(*mrkb));
+
+ mrkb->id = broker_id;
+ mrkb->cluster = mcluster;
+ mrkb->up = rd_true;
+ mrkb->listen_s = listen_s;
+ mrkb->sin = sin;
+ mrkb->port = ntohs(sin.sin_port);
+ rd_snprintf(mrkb->advertised_listener,
+ sizeof(mrkb->advertised_listener), "%s",
+ rd_sockaddr2str(&sin, 0));
+
+ TAILQ_INIT(&mrkb->connections);
+ TAILQ_INIT(&mrkb->errstacks);
+
+ TAILQ_INSERT_TAIL(&mcluster->brokers, mrkb, link);
+ mcluster->broker_cnt++;
+
+ if (rd_kafka_mock_broker_start_listener(mrkb) == -1) {
+ rd_kafka_mock_broker_destroy(mrkb);
+ return NULL;
+ }
+
+ return mrkb;
+}
+
+
+/**
+ * @returns the coordtype_t for a coord type string, or -1 on error.
+ */
+static rd_kafka_coordtype_t rd_kafka_mock_coord_str2type(const char *str) {
+ if (!strcmp(str, "transaction"))
+ return RD_KAFKA_COORD_TXN;
+ else if (!strcmp(str, "group"))
+ return RD_KAFKA_COORD_GROUP;
+ else
+ return (rd_kafka_coordtype_t)-1;
+}
+
+
+/**
+ * @brief Unlink and destroy coordinator.
+ */
+static void rd_kafka_mock_coord_destroy(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_mock_coord_t *mcoord) {
+ TAILQ_REMOVE(&mcluster->coords, mcoord, link);
+ rd_free(mcoord->key);
+ rd_free(mcoord);
+}
+
+/**
+ * @brief Find coordinator by type and key.
+ */
+static rd_kafka_mock_coord_t *
+rd_kafka_mock_coord_find(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_coordtype_t type,
+ const char *key) {
+ rd_kafka_mock_coord_t *mcoord;
+
+ TAILQ_FOREACH(mcoord, &mcluster->coords, link) {
+ if (mcoord->type == type && !strcmp(mcoord->key, key))
+ return mcoord;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @returns the coordinator for KeyType,Key (e.g., GROUP,mygroup).
+ */
+rd_kafka_mock_broker_t *
+rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_coordtype_t KeyType,
+ const rd_kafkap_str_t *Key) {
+ rd_kafka_mock_broker_t *mrkb;
+ rd_kafka_mock_coord_t *mcoord;
+ char *key;
+ rd_crc32_t hash;
+ int idx;
+
+ /* Try the explicit coord list first */
+ RD_KAFKAP_STR_DUPA(&key, Key);
+ if ((mcoord = rd_kafka_mock_coord_find(mcluster, KeyType, key)))
+ return rd_kafka_mock_broker_find(mcluster, mcoord->broker_id);
+
+ /* Else hash the key to select an available broker. */
+ hash = rd_crc32(Key->str, RD_KAFKAP_STR_LEN(Key));
+ idx = (int)(hash % mcluster->broker_cnt);
+
+ /* Use the broker index in the list */
+ TAILQ_FOREACH(mrkb, &mcluster->brokers, link)
+ if (idx-- == 0)
+ return mrkb;
+
+ RD_NOTREACHED();
+ return NULL;
+}
+
+
+/**
+ * @brief Explicitly set coordinator for \p key_type ("transaction", "group")
+ * and \p key.
+ */
+static rd_kafka_mock_coord_t *
+rd_kafka_mock_coord_set(rd_kafka_mock_cluster_t *mcluster,
+ const char *key_type,
+ const char *key,
+ int32_t broker_id) {
+ rd_kafka_mock_coord_t *mcoord;
+ rd_kafka_coordtype_t type;
+
+ if ((int)(type = rd_kafka_mock_coord_str2type(key_type)) == -1)
+ return NULL;
+
+ if ((mcoord = rd_kafka_mock_coord_find(mcluster, type, key)))
+ rd_kafka_mock_coord_destroy(mcluster, mcoord);
+
+ mcoord = rd_calloc(1, sizeof(*mcoord));
+ mcoord->type = type;
+ mcoord->key = rd_strdup(key);
+ mcoord->broker_id = broker_id;
+
+ TAILQ_INSERT_TAIL(&mcluster->coords, mcoord, link);
+
+ return mcoord;
+}
+
+
+/**
+ * @brief Remove and return the next error, or RD_KAFKA_RESP_ERR_NO_ERROR
+ * if no error.
+ */
+static rd_kafka_mock_error_rtt_t
+rd_kafka_mock_error_stack_next(rd_kafka_mock_error_stack_t *errstack) {
+ rd_kafka_mock_error_rtt_t err_rtt = {RD_KAFKA_RESP_ERR_NO_ERROR, 0};
+
+ if (likely(errstack->cnt == 0))
+ return err_rtt;
+
+ err_rtt = errstack->errs[0];
+ errstack->cnt--;
+ if (errstack->cnt > 0)
+ memmove(errstack->errs, &errstack->errs[1],
+ sizeof(*errstack->errs) * errstack->cnt);
+
+ return err_rtt;
+}
+
+
+/**
+ * @brief Find an error stack based on \p ApiKey
+ */
+static rd_kafka_mock_error_stack_t *
+rd_kafka_mock_error_stack_find(const rd_kafka_mock_error_stack_head_t *shead,
+ int16_t ApiKey) {
+ const rd_kafka_mock_error_stack_t *errstack;
+
+ TAILQ_FOREACH(errstack, shead, link)
+ if (errstack->ApiKey == ApiKey)
+ return (rd_kafka_mock_error_stack_t *)errstack;
+
+ return NULL;
+}
+
+
+
+/**
+ * @brief Find or create an error stack based on \p ApiKey
+ */
+static rd_kafka_mock_error_stack_t *
+rd_kafka_mock_error_stack_get(rd_kafka_mock_error_stack_head_t *shead,
+ int16_t ApiKey) {
+ rd_kafka_mock_error_stack_t *errstack;
+
+ if ((errstack = rd_kafka_mock_error_stack_find(shead, ApiKey)))
+ return errstack;
+
+ errstack = rd_calloc(1, sizeof(*errstack));
+
+ errstack->ApiKey = ApiKey;
+ TAILQ_INSERT_TAIL(shead, errstack, link);
+
+ return errstack;
+}
+
+
+
+/**
+ * @brief Removes and returns the next request error for response's ApiKey.
+ *
+ * If the error stack has a corresponding rtt/delay it is set on the
+ * provided response \p resp buffer.
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_mock_error_stack_t *errstack;
+ rd_kafka_mock_error_rtt_t err_rtt;
+
+ mtx_lock(&mcluster->lock);
+
+ errstack = rd_kafka_mock_error_stack_find(&mconn->broker->errstacks,
+ resp->rkbuf_reqhdr.ApiKey);
+ if (likely(!errstack)) {
+ errstack = rd_kafka_mock_error_stack_find(
+ &mcluster->errstacks, resp->rkbuf_reqhdr.ApiKey);
+ if (likely(!errstack)) {
+ mtx_unlock(&mcluster->lock);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+ }
+
+ err_rtt = rd_kafka_mock_error_stack_next(errstack);
+ resp->rkbuf_ts_sent = err_rtt.rtt;
+
+ mtx_unlock(&mcluster->lock);
+
+ /* If the error is ERR__TRANSPORT (a librdkafka-specific error code
+ * that will never be returned by a broker), we close the connection.
+ * This allows closing the connection as soon as a certain
+ * request is seen.
+ * The handler code in rdkafka_mock_handlers.c does not need to
+ * handle this case specifically and will generate a response and
+ * enqueue it, but the connection will be down by the time it will
+ * be sent.
+ * Note: Delayed disconnects (rtt-based) are not supported. */
+ if (err_rtt.err == RD_KAFKA_RESP_ERR__TRANSPORT) {
+ rd_kafka_dbg(
+ mcluster->rk, MOCK, "MOCK",
+ "Broker %" PRId32
+ ": Forcing close of connection "
+ "from %s",
+ mconn->broker->id,
+ rd_sockaddr2str(&mconn->peer, RD_SOCKADDR2STR_F_PORT));
+ rd_kafka_transport_shutdown(mconn->transport);
+ }
+
+
+ return err_rtt.err;
+}
+
+
+void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey) {
+ rd_kafka_mock_error_stack_t *errstack;
+
+ mtx_lock(&mcluster->lock);
+
+ errstack = rd_kafka_mock_error_stack_find(&mcluster->errstacks, ApiKey);
+ if (errstack)
+ errstack->cnt = 0;
+
+ mtx_unlock(&mcluster->lock);
+}
+
+
+void rd_kafka_mock_push_request_errors_array(
+ rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ size_t cnt,
+ const rd_kafka_resp_err_t *errors) {
+ rd_kafka_mock_error_stack_t *errstack;
+ size_t totcnt;
+ size_t i;
+
+ mtx_lock(&mcluster->lock);
+
+ errstack = rd_kafka_mock_error_stack_get(&mcluster->errstacks, ApiKey);
+
+ totcnt = errstack->cnt + cnt;
+
+ if (totcnt > errstack->size) {
+ errstack->size = totcnt + 4;
+ errstack->errs = rd_realloc(
+ errstack->errs, errstack->size * sizeof(*errstack->errs));
+ }
+
+ for (i = 0; i < cnt; i++) {
+ errstack->errs[errstack->cnt].err = errors[i];
+ errstack->errs[errstack->cnt++].rtt = 0;
+ }
+
+ mtx_unlock(&mcluster->lock);
+}
+
+void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ size_t cnt,
+ ...) {
+ va_list ap;
+ rd_kafka_resp_err_t *errors = rd_alloca(sizeof(*errors) * cnt);
+ size_t i;
+
+ va_start(ap, cnt);
+ for (i = 0; i < cnt; i++)
+ errors[i] = va_arg(ap, rd_kafka_resp_err_t);
+ va_end(ap);
+
+ rd_kafka_mock_push_request_errors_array(mcluster, ApiKey, cnt, errors);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ int16_t ApiKey,
+ size_t cnt,
+ ...) {
+ rd_kafka_mock_broker_t *mrkb;
+ va_list ap;
+ rd_kafka_mock_error_stack_t *errstack;
+ size_t totcnt;
+
+ mtx_lock(&mcluster->lock);
+
+ if (!(mrkb = rd_kafka_mock_broker_find(mcluster, broker_id))) {
+ mtx_unlock(&mcluster->lock);
+ return RD_KAFKA_RESP_ERR__UNKNOWN_BROKER;
+ }
+
+ errstack = rd_kafka_mock_error_stack_get(&mrkb->errstacks, ApiKey);
+
+ totcnt = errstack->cnt + cnt;
+
+ if (totcnt > errstack->size) {
+ errstack->size = totcnt + 4;
+ errstack->errs = rd_realloc(
+ errstack->errs, errstack->size * sizeof(*errstack->errs));
+ }
+
+ va_start(ap, cnt);
+ while (cnt-- > 0) {
+ errstack->errs[errstack->cnt].err =
+ va_arg(ap, rd_kafka_resp_err_t);
+ errstack->errs[errstack->cnt++].rtt =
+ ((rd_ts_t)va_arg(ap, int)) * 1000;
+ }
+ va_end(ap);
+
+ mtx_unlock(&mcluster->lock);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ int16_t ApiKey,
+ size_t *cntp) {
+ rd_kafka_mock_broker_t *mrkb;
+ rd_kafka_mock_error_stack_t *errstack;
+
+ if (!mcluster || !cntp)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ mtx_lock(&mcluster->lock);
+
+ if (!(mrkb = rd_kafka_mock_broker_find(mcluster, broker_id))) {
+ mtx_unlock(&mcluster->lock);
+ return RD_KAFKA_RESP_ERR__UNKNOWN_BROKER;
+ }
+
+ if ((errstack =
+ rd_kafka_mock_error_stack_find(&mrkb->errstacks, ApiKey)))
+ *cntp = errstack->cnt;
+
+ mtx_unlock(&mcluster->lock);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.name = rd_strdup(topic);
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR;
+ rko->rko_u.mock.err = err;
+
+ rko = rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE);
+ if (rko)
+ rd_kafka_op_destroy(rko);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int partition_cnt,
+ int replication_factor) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.name = rd_strdup(topic);
+ rko->rko_u.mock.lo = partition_cnt;
+ rko->rko_u.mock.hi = replication_factor;
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_TOPIC_CREATE;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition,
+ int32_t broker_id) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.name = rd_strdup(topic);
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_LEADER;
+ rko->rko_u.mock.partition = partition;
+ rko->rko_u.mock.broker_id = broker_id;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition,
+ int32_t broker_id) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.name = rd_strdup(topic);
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER;
+ rko->rko_u.mock.partition = partition;
+ rko->rko_u.mock.broker_id = broker_id;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition,
+ int64_t lo,
+ int64_t hi) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.name = rd_strdup(topic);
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS;
+ rko->rko_u.mock.partition = partition;
+ rko->rko_u.mock.lo = lo;
+ rko->rko_u.mock.hi = hi;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.broker_id = broker_id;
+ rko->rko_u.mock.lo = rd_false;
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.broker_id = broker_id;
+ rko->rko_u.mock.lo = rd_true;
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ int rtt_ms) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.broker_id = broker_id;
+ rko->rko_u.mock.lo = rtt_ms;
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RTT;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ const char *rack) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.broker_id = broker_id;
+ rko->rko_u.mock.name = rd_strdup(rack);
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_BROKER_SET_RACK;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster,
+ const char *key_type,
+ const char *key,
+ int32_t broker_id) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.name = rd_strdup(key_type);
+ rko->rko_u.mock.str = rd_strdup(key);
+ rko->rko_u.mock.broker_id = broker_id;
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_COORD_SET;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+rd_kafka_resp_err_t
+rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ int16_t MinVersion,
+ int16_t MaxVersion) {
+ rd_kafka_op_t *rko = rd_kafka_op_new(RD_KAFKA_OP_MOCK);
+
+ rko->rko_u.mock.partition = ApiKey;
+ rko->rko_u.mock.lo = MinVersion;
+ rko->rko_u.mock.hi = MaxVersion;
+ rko->rko_u.mock.cmd = RD_KAFKA_MOCK_CMD_APIVERSION_SET;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(mcluster->ops, rko, RD_POLL_INFINITE));
+}
+
+
+/**
+ * @brief Apply command to specific broker.
+ *
+ * @locality mcluster thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_broker_cmd(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_mock_broker_t *mrkb,
+ rd_kafka_op_t *rko) {
+ switch (rko->rko_u.mock.cmd) {
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
+ if ((rd_bool_t)rko->rko_u.mock.lo == mrkb->up)
+ break;
+
+ mrkb->up = (rd_bool_t)rko->rko_u.mock.lo;
+
+ if (!mrkb->up) {
+ rd_kafka_mock_cluster_io_del(mcluster, mrkb->listen_s);
+ rd_socket_close(mrkb->listen_s);
+ /* Re-create the listener right away so we retain the
+ * same port. The listener is not started until
+ * the broker is set up (below). */
+ mrkb->listen_s = rd_kafka_mock_broker_new_listener(
+ mcluster, &mrkb->sin);
+ rd_assert(mrkb->listen_s != -1 ||
+ !*"Failed to-create mock broker listener");
+
+ rd_kafka_mock_broker_close_all(mrkb, "Broker down");
+
+ } else {
+ int r;
+ rd_assert(mrkb->listen_s != -1);
+ r = rd_kafka_mock_broker_start_listener(mrkb);
+ rd_assert(r == 0 || !*"broker_start_listener() failed");
+ }
+ break;
+
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT:
+ mrkb->rtt = (rd_ts_t)rko->rko_u.mock.lo * 1000;
+
+ /* Check if there is anything to send now that the RTT
+ * has changed or if a timer is to be started. */
+ rd_kafka_mock_broker_connections_write_out(mrkb);
+ break;
+
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK:
+ if (mrkb->rack)
+ rd_free(mrkb->rack);
+
+ if (rko->rko_u.mock.name)
+ mrkb->rack = rd_strdup(rko->rko_u.mock.name);
+ else
+ mrkb->rack = NULL;
+ break;
+
+ default:
+ RD_BUG("Unhandled mock cmd %d", rko->rko_u.mock.cmd);
+ break;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Apply command to to one or all brokers, depending on the value of
+ * broker_id, where -1 means all, and != -1 means a specific broker.
+ *
+ * @locality mcluster thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_brokers_cmd(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_op_t *rko) {
+ rd_kafka_mock_broker_t *mrkb;
+
+ if (rko->rko_u.mock.broker_id != -1) {
+ /* Specific broker */
+ mrkb = rd_kafka_mock_broker_find(mcluster,
+ rko->rko_u.mock.broker_id);
+ if (!mrkb)
+ return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
+
+ return rd_kafka_mock_broker_cmd(mcluster, mrkb, rko);
+ }
+
+ /* All brokers */
+ TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
+ rd_kafka_resp_err_t err;
+
+ if ((err = rd_kafka_mock_broker_cmd(mcluster, mrkb, rko)))
+ return err;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Handle command op
+ *
+ * @locality mcluster thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_cluster_cmd(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_op_t *rko) {
+ rd_kafka_mock_topic_t *mtopic;
+ rd_kafka_mock_partition_t *mpart;
+ rd_kafka_mock_broker_t *mrkb;
+
+ switch (rko->rko_u.mock.cmd) {
+ case RD_KAFKA_MOCK_CMD_TOPIC_CREATE:
+ if (rd_kafka_mock_topic_find(mcluster, rko->rko_u.mock.name))
+ return RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS;
+
+ if (!rd_kafka_mock_topic_new(mcluster, rko->rko_u.mock.name,
+ /* partition_cnt */
+ (int)rko->rko_u.mock.lo,
+ /* replication_factor */
+ (int)rko->rko_u.mock.hi))
+ return RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION;
+ break;
+
+ case RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR:
+ mtopic =
+ rd_kafka_mock_topic_get(mcluster, rko->rko_u.mock.name, -1);
+ mtopic->err = rko->rko_u.mock.err;
+ break;
+
+ case RD_KAFKA_MOCK_CMD_PART_SET_LEADER:
+ mpart = rd_kafka_mock_partition_get(
+ mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition);
+ if (!mpart)
+ return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ if (rko->rko_u.mock.broker_id != -1) {
+ mrkb = rd_kafka_mock_broker_find(
+ mcluster, rko->rko_u.mock.broker_id);
+ if (!mrkb)
+ return RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE;
+ } else {
+ mrkb = NULL;
+ }
+
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Set %s [%" PRId32 "] leader to %" PRId32,
+ rko->rko_u.mock.name, rko->rko_u.mock.partition,
+ rko->rko_u.mock.broker_id);
+
+ rd_kafka_mock_partition_set_leader0(mpart, mrkb);
+ break;
+
+ case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER:
+ mpart = rd_kafka_mock_partition_get(
+ mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition);
+ if (!mpart)
+ return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Set %s [%" PRId32
+ "] preferred follower "
+ "to %" PRId32,
+ rko->rko_u.mock.name, rko->rko_u.mock.partition,
+ rko->rko_u.mock.broker_id);
+
+ mpart->follower_id = rko->rko_u.mock.broker_id;
+ break;
+
+ case RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS:
+ mpart = rd_kafka_mock_partition_get(
+ mcluster, rko->rko_u.mock.name, rko->rko_u.mock.partition);
+ if (!mpart)
+ return RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Set %s [%" PRId32
+ "] follower "
+ "watermark offsets to %" PRId64 "..%" PRId64,
+ rko->rko_u.mock.name, rko->rko_u.mock.partition,
+ rko->rko_u.mock.lo, rko->rko_u.mock.hi);
+
+ if (rko->rko_u.mock.lo == -1) {
+ mpart->follower_start_offset = mpart->start_offset;
+ mpart->update_follower_start_offset = rd_true;
+ } else {
+ mpart->follower_start_offset = rko->rko_u.mock.lo;
+ mpart->update_follower_start_offset = rd_false;
+ }
+
+ if (rko->rko_u.mock.hi == -1) {
+ mpart->follower_end_offset = mpart->end_offset;
+ mpart->update_follower_end_offset = rd_true;
+ } else {
+ mpart->follower_end_offset = rko->rko_u.mock.hi;
+ mpart->update_follower_end_offset = rd_false;
+ }
+ break;
+
+ /* Broker commands */
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN:
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_RTT:
+ case RD_KAFKA_MOCK_CMD_BROKER_SET_RACK:
+ return rd_kafka_mock_brokers_cmd(mcluster, rko);
+
+ case RD_KAFKA_MOCK_CMD_COORD_SET:
+ if (!rd_kafka_mock_coord_set(mcluster, rko->rko_u.mock.name,
+ rko->rko_u.mock.str,
+ rko->rko_u.mock.broker_id))
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ break;
+
+ case RD_KAFKA_MOCK_CMD_APIVERSION_SET:
+ if (rko->rko_u.mock.partition < 0 ||
+ rko->rko_u.mock.partition >= RD_KAFKAP__NUM)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ mcluster->api_handlers[(int)rko->rko_u.mock.partition]
+ .MinVersion = (int16_t)rko->rko_u.mock.lo;
+ mcluster->api_handlers[(int)rko->rko_u.mock.partition]
+ .MaxVersion = (int16_t)rko->rko_u.mock.hi;
+ break;
+
+ default:
+ rd_assert(!*"unknown mock cmd");
+ break;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static rd_kafka_op_res_t
+rd_kafka_mock_cluster_op_serve(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque) {
+ rd_kafka_mock_cluster_t *mcluster = opaque;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ switch ((int)rko->rko_type) {
+ case RD_KAFKA_OP_TERMINATE:
+ mcluster->run = rd_false;
+ break;
+
+ case RD_KAFKA_OP_MOCK:
+ err = rd_kafka_mock_cluster_cmd(mcluster, rko);
+ break;
+
+ default:
+ rd_assert(!"*unhandled op");
+ break;
+ }
+
+ rd_kafka_op_reply(rko, err);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Destroy cluster (internal)
+ */
+static void rd_kafka_mock_cluster_destroy0(rd_kafka_mock_cluster_t *mcluster) {
+ rd_kafka_mock_topic_t *mtopic;
+ rd_kafka_mock_broker_t *mrkb;
+ rd_kafka_mock_cgrp_t *mcgrp;
+ rd_kafka_mock_coord_t *mcoord;
+ rd_kafka_mock_error_stack_t *errstack;
+ thrd_t dummy_rkb_thread;
+ int ret;
+
+ while ((mtopic = TAILQ_FIRST(&mcluster->topics)))
+ rd_kafka_mock_topic_destroy(mtopic);
+
+ while ((mrkb = TAILQ_FIRST(&mcluster->brokers)))
+ rd_kafka_mock_broker_destroy(mrkb);
+
+ while ((mcgrp = TAILQ_FIRST(&mcluster->cgrps)))
+ rd_kafka_mock_cgrp_destroy(mcgrp);
+
+ while ((mcoord = TAILQ_FIRST(&mcluster->coords)))
+ rd_kafka_mock_coord_destroy(mcluster, mcoord);
+
+ rd_list_destroy(&mcluster->pids);
+
+ while ((errstack = TAILQ_FIRST(&mcluster->errstacks))) {
+ TAILQ_REMOVE(&mcluster->errstacks, errstack, link);
+ rd_kafka_mock_error_stack_destroy(errstack);
+ }
+
+ /*
+ * Destroy dummy broker
+ */
+ rd_kafka_q_enq(mcluster->dummy_rkb->rkb_ops,
+ rd_kafka_op_new(RD_KAFKA_OP_TERMINATE));
+
+ dummy_rkb_thread = mcluster->dummy_rkb->rkb_thread;
+
+ rd_kafka_broker_destroy(mcluster->dummy_rkb);
+
+ if (thrd_join(dummy_rkb_thread, &ret) != thrd_success)
+ rd_assert(!*"failed to join mock dummy broker thread");
+
+
+ rd_kafka_q_destroy_owner(mcluster->ops);
+
+ rd_kafka_timers_destroy(&mcluster->timers);
+
+ if (mcluster->fd_size > 0) {
+ rd_free(mcluster->fds);
+ rd_free(mcluster->handlers);
+ }
+
+ mtx_destroy(&mcluster->lock);
+
+ rd_free(mcluster->bootstraps);
+
+ rd_socket_close(mcluster->wakeup_fds[0]);
+ rd_socket_close(mcluster->wakeup_fds[1]);
+}
+
+
+
+void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) {
+ int res;
+ rd_kafka_op_t *rko;
+
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Destroying cluster");
+
+ rd_assert(rd_atomic32_get(&mcluster->rk->rk_mock.cluster_cnt) > 0);
+ rd_atomic32_sub(&mcluster->rk->rk_mock.cluster_cnt, 1);
+
+ rko = rd_kafka_op_req2(mcluster->ops, RD_KAFKA_OP_TERMINATE);
+
+ if (rko)
+ rd_kafka_op_destroy(rko);
+
+ if (thrd_join(mcluster->thread, &res) != thrd_success)
+ rd_assert(!*"failed to join mock thread");
+
+ rd_free(mcluster);
+}
+
+
+
+rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk,
+ int broker_cnt) {
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_mock_broker_t *mrkb;
+ int i, r;
+ size_t bootstraps_len = 0;
+ size_t of;
+
+ mcluster = rd_calloc(1, sizeof(*mcluster));
+ mcluster->rk = rk;
+
+ mcluster->dummy_rkb =
+ rd_kafka_broker_add(rk, RD_KAFKA_INTERNAL, RD_KAFKA_PROTO_PLAINTEXT,
+ "mock", 0, RD_KAFKA_NODEID_UA);
+ rd_snprintf(mcluster->id, sizeof(mcluster->id), "mockCluster%lx",
+ (intptr_t)mcluster >> 2);
+
+ TAILQ_INIT(&mcluster->brokers);
+
+ for (i = 1; i <= broker_cnt; i++) {
+ if (!(mrkb = rd_kafka_mock_broker_new(mcluster, i))) {
+ rd_kafka_mock_cluster_destroy(mcluster);
+ return NULL;
+ }
+
+ /* advertised listener + ":port" + "," */
+ bootstraps_len += strlen(mrkb->advertised_listener) + 6 + 1;
+ }
+
+ mtx_init(&mcluster->lock, mtx_plain);
+
+ TAILQ_INIT(&mcluster->topics);
+ mcluster->defaults.partition_cnt = 4;
+ mcluster->defaults.replication_factor = RD_MIN(3, broker_cnt);
+
+ TAILQ_INIT(&mcluster->cgrps);
+
+ TAILQ_INIT(&mcluster->coords);
+
+ rd_list_init(&mcluster->pids, 16, rd_free);
+
+ TAILQ_INIT(&mcluster->errstacks);
+
+ memcpy(mcluster->api_handlers, rd_kafka_mock_api_handlers,
+ sizeof(mcluster->api_handlers));
+
+ /* Use an op queue for controlling the cluster in
+ * a thread-safe manner without locking. */
+ mcluster->ops = rd_kafka_q_new(rk);
+ mcluster->ops->rkq_serve = rd_kafka_mock_cluster_op_serve;
+ mcluster->ops->rkq_opaque = mcluster;
+
+ rd_kafka_timers_init(&mcluster->timers, rk, mcluster->ops);
+
+ if ((r = rd_pipe_nonblocking(mcluster->wakeup_fds)) == -1) {
+ rd_kafka_log(rk, LOG_ERR, "MOCK",
+ "Failed to setup mock cluster wake-up fds: %s",
+ rd_socket_strerror(r));
+ } else {
+ const char onebyte = 1;
+ rd_kafka_q_io_event_enable(mcluster->ops,
+ mcluster->wakeup_fds[1], &onebyte,
+ sizeof(onebyte));
+ }
+
+
+ if (thrd_create(&mcluster->thread, rd_kafka_mock_cluster_thread_main,
+ mcluster) != thrd_success) {
+ rd_kafka_log(rk, LOG_CRIT, "MOCK",
+ "Failed to create mock cluster thread: %s",
+ rd_strerror(errno));
+ rd_kafka_mock_cluster_destroy(mcluster);
+ return NULL;
+ }
+
+
+ /* Construct bootstrap.servers list */
+ mcluster->bootstraps = rd_malloc(bootstraps_len + 1);
+ of = 0;
+ TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
+ r = rd_snprintf(&mcluster->bootstraps[of], bootstraps_len - of,
+ "%s%s:%hu", of > 0 ? "," : "",
+ mrkb->advertised_listener, mrkb->port);
+ of += r;
+ rd_assert(of < bootstraps_len);
+ }
+ mcluster->bootstraps[of] = '\0';
+
+ rd_kafka_dbg(rk, MOCK, "MOCK", "Mock cluster %s bootstrap.servers=%s",
+ mcluster->id, mcluster->bootstraps);
+
+ rd_atomic32_add(&rk->rk_mock.cluster_cnt, 1);
+
+ return mcluster;
+}
+
+
+rd_kafka_t *
+rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster) {
+ return (rd_kafka_t *)mcluster->rk;
+}
+
+rd_kafka_mock_cluster_t *rd_kafka_handle_mock_cluster(const rd_kafka_t *rk) {
+ return (rd_kafka_mock_cluster_t *)rk->rk_mock.cluster;
+}
+
+
+const char *
+rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster) {
+ return mcluster->bootstraps;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h
new file mode 100644
index 000000000..f06efe8fd
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock.h
@@ -0,0 +1,373 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019-2022 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_MOCK_H_
+#define _RDKAFKA_MOCK_H_
+
+#ifndef _RDKAFKA_H_
+#error "rdkafka_mock.h must be included after rdkafka.h"
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#if 0
+} /* Restore indent */
+#endif
+#endif
+
+
+/**
+ * @name Mock cluster
+ *
+ * Provides a mock Kafka cluster with a configurable number of brokers
+ * that support a reasonable subset of Kafka protocol operations,
+ * error injection, etc.
+ *
+ * There are two ways to use the mock clusters, the most simple approach
+ * is to configure `test.mock.num.brokers` (to e.g. 3) on the rd_kafka_t
+ * in an existing application, which will replace the configured
+ * `bootstrap.servers` with the mock cluster brokers.
+ * This approach is convenient to easily test existing applications.
+ *
+ * The second approach is to explicitly create a mock cluster on an
+ * rd_kafka_t instance by using rd_kafka_mock_cluster_new().
+ *
+ * Mock clusters provide localhost listeners that can be used as the bootstrap
+ * servers by multiple rd_kafka_t instances.
+ *
+ * Currently supported functionality:
+ * - Producer
+ * - Idempotent Producer
+ * - Transactional Producer
+ * - Low-level consumer
+ * - High-level balanced consumer groups with offset commits
+ * - Topic Metadata and auto creation
+ *
+ * @remark This is an experimental public API that is NOT covered by the
+ * librdkafka API or ABI stability guarantees.
+ *
+ *
+ * @warning THIS IS AN EXPERIMENTAL API, SUBJECT TO CHANGE OR REMOVAL.
+ *
+ * @{
+ */
+
+typedef struct rd_kafka_mock_cluster_s rd_kafka_mock_cluster_t;
+
+
+/**
+ * @brief Create new mock cluster with \p broker_cnt brokers.
+ *
+ * The broker ids will start at 1 up to and including \p broker_cnt.
+ *
+ * The \p rk instance is required for internal book keeping but continues
+ * to operate as usual.
+ */
+RD_EXPORT
+rd_kafka_mock_cluster_t *rd_kafka_mock_cluster_new(rd_kafka_t *rk,
+ int broker_cnt);
+
+
+/**
+ * @brief Destroy mock cluster.
+ */
+RD_EXPORT
+void rd_kafka_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
+
+
+
+/**
+ * @returns the rd_kafka_t instance for a cluster as passed to
+ * rd_kafka_mock_cluster_new().
+ */
+RD_EXPORT rd_kafka_t *
+rd_kafka_mock_cluster_handle(const rd_kafka_mock_cluster_t *mcluster);
+
+
+/**
+ * @returns the rd_kafka_mock_cluster_t instance as created by
+ * setting the `test.mock.num.brokers` configuration property,
+ * or NULL if no such instance.
+ */
+RD_EXPORT rd_kafka_mock_cluster_t *
+rd_kafka_handle_mock_cluster(const rd_kafka_t *rk);
+
+
+
+/**
+ * @returns the mock cluster's bootstrap.servers list
+ */
+RD_EXPORT const char *
+rd_kafka_mock_cluster_bootstraps(const rd_kafka_mock_cluster_t *mcluster);
+
+
+/**
+ * @brief Clear the cluster's error state for the given \p ApiKey.
+ */
+RD_EXPORT
+void rd_kafka_mock_clear_request_errors(rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey);
+
+
+/**
+ * @brief Push \p cnt errors in the \p ... va-arg list onto the cluster's
+ * error stack for the given \p ApiKey.
+ *
+ * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ *
+ * The following \p cnt protocol requests matching \p ApiKey will fail with the
+ * provided error code and removed from the stack, starting with
+ * the first error code, then the second, etc.
+ *
+ * Passing \c RD_KAFKA_RESP_ERR__TRANSPORT will make the mock broker
+ * disconnect the client which can be useful to trigger a disconnect on certain
+ * requests.
+ */
+RD_EXPORT
+void rd_kafka_mock_push_request_errors(rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ size_t cnt,
+ ...);
+
+
+/**
+ * @brief Same as rd_kafka_mock_push_request_errors() but takes
+ * an array of errors.
+ */
+RD_EXPORT void
+rd_kafka_mock_push_request_errors_array(rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ size_t cnt,
+ const rd_kafka_resp_err_t *errors);
+
+
+/**
+ * @brief Push \p cnt errors and RTT tuples in the \p ... va-arg list onto
+ * the broker's error stack for the given \p ApiKey.
+ *
+ * \p ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ *
+ * Each entry is a tuple of:
+ * rd_kafka_resp_err_t err - error to return (or 0)
+ * int rtt_ms - response RTT/delay in milliseconds (or 0)
+ *
+ * The following \p cnt protocol requests matching \p ApiKey will fail with the
+ * provided error code and removed from the stack, starting with
+ * the first error code, then the second, etc.
+ *
+ * @remark The broker errors take precedence over the cluster errors.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_push_request_error_rtts(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ int16_t ApiKey,
+ size_t cnt,
+ ...);
+
+
+
+/**
+ * @brief Get the count of errors in the broker's error stack for
+ * the given \p ApiKey.
+ *
+ * @param mcluster the mock cluster.
+ * @param broker_id id of the broker in the cluster.
+ * @param ApiKey is the Kafka protocol request type, e.g., ProduceRequest (0).
+ * @param cntp pointer for receiving the count.
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR if the count was retrieved,
+ * \c RD_KAFKA_RESP_ERR__UNKNOWN_BROKER if there was no broker with this id,
+ * \c RD_KAFKA_RESP_ERR__INVALID_ARG if some of the parameters are not valid.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_error_stack_cnt(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ int16_t ApiKey,
+ size_t *cntp);
+
+
+/**
+ * @brief Set the topic error to return in protocol requests.
+ *
+ * Currently only used for TopicMetadataRequest and AddPartitionsToTxnRequest.
+ */
+RD_EXPORT
+void rd_kafka_mock_topic_set_error(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ rd_kafka_resp_err_t err);
+
+
+/**
+ * @brief Creates a topic.
+ *
+ * This is an alternative to automatic topic creation as performed by
+ * the client itself.
+ *
+ * @remark The Topic Admin API (CreateTopics) is not supported by the
+ * mock broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_topic_create(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int partition_cnt,
+ int replication_factor);
+
+
+/**
+ * @brief Sets the partition leader.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * \p broker_id needs to be an existing broker, or -1 to make the
+ * partition leader-less.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_leader(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition,
+ int32_t broker_id);
+
+/**
+ * @brief Sets the partition's preferred replica / follower.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * \p broker_id does not need to point to an existing broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition,
+ int32_t broker_id);
+
+/**
+ * @brief Sets the partition's preferred replica / follower low and high
+ * watermarks.
+ *
+ * The topic will be created if it does not exist.
+ *
+ * Setting an offset to -1 will revert back to the leader's corresponding
+ * watermark.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_partition_set_follower_wmarks(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int32_t partition,
+ int64_t lo,
+ int64_t hi);
+
+
+/**
+ * @brief Disconnects the broker and disallows any new connections.
+ * This does NOT trigger leader change.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_down(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id);
+
+/**
+ * @brief Makes the broker accept connections again.
+ * This does NOT trigger leader change.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_up(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id);
+
+
+/**
+ * @brief Set broker round-trip-time delay in milliseconds.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rtt(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ int rtt_ms);
+
+/**
+ * @brief Sets the broker's rack as reported in Metadata to the client.
+ *
+ * @param mcluster Mock cluster instance.
+ * @param broker_id Use -1 for all brokers, or >= 0 for a specific broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_broker_set_rack(rd_kafka_mock_cluster_t *mcluster,
+ int32_t broker_id,
+ const char *rack);
+
+
+
+/**
+ * @brief Explicitly sets the coordinator. If this API is not a standard
+ * hashing scheme will be used.
+ *
+ * @param key_type "transaction" or "group"
+ * @param key The transactional.id or group.id
+ * @param broker_id The new coordinator, does not have to be a valid broker.
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_coordinator_set(rd_kafka_mock_cluster_t *mcluster,
+ const char *key_type,
+ const char *key,
+ int32_t broker_id);
+
+
+
+/**
+ * @brief Set the allowed ApiVersion range for \p ApiKey.
+ *
+ * Set \p MinVersion and \p MaxVersion to -1 to disable the API
+ * completely.
+ *
+ * \p MaxVersion MUST not exceed the maximum implemented value,
+ * see rdkafka_mock_handlers.c.
+ *
+ * @param ApiKey Protocol request type/key
+ * @param MinVersion Minimum version supported (or -1 to disable).
+ * @param MinVersion Maximum version supported (or -1 to disable).
+ */
+RD_EXPORT rd_kafka_resp_err_t
+rd_kafka_mock_set_apiversion(rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ int16_t MinVersion,
+ int16_t MaxVersion);
+
+
+/**@}*/
+
+#ifdef __cplusplus
+}
+#endif
+#endif /* _RDKAFKA_MOCK_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c
new file mode 100644
index 000000000..8f71fb48c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_cgrp.c
@@ -0,0 +1,687 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Mocks
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdbuf.h"
+#include "rdkafka_mock_int.h"
+
+
+static const char *rd_kafka_mock_cgrp_state_names[] = {
+ "Empty", "Joining", "Syncing", "Rebalancing", "Up"};
+
+
+static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp,
+ const char *reason);
+static void
+rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member);
+
+static void rd_kafka_mock_cgrp_set_state(rd_kafka_mock_cgrp_t *mcgrp,
+ unsigned int new_state,
+ const char *reason) {
+ if (mcgrp->state == new_state)
+ return;
+
+ rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+ "Mock consumer group %s with %d member(s) "
+ "changing state %s -> %s: %s",
+ mcgrp->id, mcgrp->member_cnt,
+ rd_kafka_mock_cgrp_state_names[mcgrp->state],
+ rd_kafka_mock_cgrp_state_names[new_state], reason);
+
+ mcgrp->state = new_state;
+}
+
+
+/**
+ * @brief Mark member as active (restart session timer)
+ */
+void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member) {
+ rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+ "Marking mock consumer group member %s as active",
+ member->id);
+ member->ts_last_activity = rd_clock();
+}
+
+
+/**
+ * @brief Verify that the protocol request is valid in the current state.
+ *
+ * @param member may be NULL.
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member,
+ const rd_kafka_buf_t *request,
+ int32_t generation_id) {
+ int16_t ApiKey = request->rkbuf_reqhdr.ApiKey;
+ rd_bool_t has_generation_id = ApiKey == RD_KAFKAP_SyncGroup ||
+ ApiKey == RD_KAFKAP_Heartbeat ||
+ ApiKey == RD_KAFKAP_OffsetCommit;
+
+ if (has_generation_id && generation_id != mcgrp->generation_id)
+ return RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION;
+
+ if (ApiKey == RD_KAFKAP_OffsetCommit && !member)
+ return RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+
+ switch (mcgrp->state) {
+ case RD_KAFKA_MOCK_CGRP_STATE_EMPTY:
+ if (ApiKey == RD_KAFKAP_JoinGroup)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ break;
+
+ case RD_KAFKA_MOCK_CGRP_STATE_JOINING:
+ if (ApiKey == RD_KAFKAP_JoinGroup ||
+ ApiKey == RD_KAFKAP_LeaveGroup)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ else
+ return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS;
+
+ case RD_KAFKA_MOCK_CGRP_STATE_SYNCING:
+ if (ApiKey == RD_KAFKAP_SyncGroup ||
+ ApiKey == RD_KAFKAP_JoinGroup ||
+ ApiKey == RD_KAFKAP_LeaveGroup)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ else
+ return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS;
+
+ case RD_KAFKA_MOCK_CGRP_STATE_REBALANCING:
+ if (ApiKey == RD_KAFKAP_JoinGroup ||
+ ApiKey == RD_KAFKAP_LeaveGroup ||
+ ApiKey == RD_KAFKAP_OffsetCommit)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ else
+ return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS;
+
+ case RD_KAFKA_MOCK_CGRP_STATE_UP:
+ if (ApiKey == RD_KAFKAP_JoinGroup ||
+ ApiKey == RD_KAFKAP_LeaveGroup ||
+ ApiKey == RD_KAFKAP_Heartbeat ||
+ ApiKey == RD_KAFKAP_OffsetCommit)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ break;
+ }
+
+ return RD_KAFKA_RESP_ERR_INVALID_REQUEST;
+}
+
+
+/**
+ * @brief Set a member's assignment (from leader's SyncGroupRequest)
+ */
+void rd_kafka_mock_cgrp_member_assignment_set(
+ rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member,
+ const rd_kafkap_bytes_t *Metadata) {
+ if (member->assignment) {
+ rd_assert(mcgrp->assignment_cnt > 0);
+ mcgrp->assignment_cnt--;
+ rd_kafkap_bytes_destroy(member->assignment);
+ member->assignment = NULL;
+ }
+
+ if (Metadata) {
+ mcgrp->assignment_cnt++;
+ member->assignment = rd_kafkap_bytes_copy(Metadata);
+ }
+}
+
+
+/**
+ * @brief Sync done (successfully) or failed, send responses back to members.
+ */
+static void rd_kafka_mock_cgrp_sync_done(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_mock_cgrp_member_t *member;
+
+ TAILQ_FOREACH(member, &mcgrp->members, link) {
+ rd_kafka_buf_t *resp;
+
+ if ((resp = member->resp)) {
+ member->resp = NULL;
+ rd_assert(resp->rkbuf_reqhdr.ApiKey ==
+ RD_KAFKAP_SyncGroup);
+
+ rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
+ /* MemberState */
+ rd_kafka_buf_write_kbytes(
+ resp, !err ? member->assignment : NULL);
+ }
+
+ rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member, NULL);
+
+ if (member->conn) {
+ rd_kafka_mock_connection_set_blocking(member->conn,
+ rd_false);
+ if (resp)
+ rd_kafka_mock_connection_send_response(
+ member->conn, resp);
+ } else if (resp) {
+ /* Member has disconnected. */
+ rd_kafka_buf_destroy(resp);
+ }
+ }
+}
+
+
+/**
+ * @brief Check if all members have sent SyncGroupRequests, if so, propagate
+ * assignment to members.
+ */
+static void rd_kafka_mock_cgrp_sync_check(rd_kafka_mock_cgrp_t *mcgrp) {
+
+ rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+ "Mock consumer group %s: awaiting %d/%d syncing members "
+ "in state %s",
+ mcgrp->id, mcgrp->assignment_cnt, mcgrp->member_cnt,
+ rd_kafka_mock_cgrp_state_names[mcgrp->state]);
+
+ if (mcgrp->assignment_cnt < mcgrp->member_cnt)
+ return;
+
+ rd_kafka_mock_cgrp_sync_done(mcgrp, RD_KAFKA_RESP_ERR_NO_ERROR);
+ rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_UP,
+ "all members synced");
+}
+
+
+/**
+ * @brief Member has sent SyncGroupRequest and is waiting for a response,
+ * which will be sent when the all group member SyncGroupRequest are
+ * received.
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member,
+ rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp) {
+
+ if (mcgrp->state != RD_KAFKA_MOCK_CGRP_STATE_SYNCING)
+ return RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS; /* FIXME */
+
+ rd_kafka_mock_cgrp_member_active(mcgrp, member);
+
+ rd_assert(!member->resp);
+
+ member->resp = resp;
+ member->conn = mconn;
+ rd_kafka_mock_connection_set_blocking(member->conn, rd_true);
+
+ /* Check if all members now have an assignment, if so, send responses */
+ rd_kafka_mock_cgrp_sync_check(mcgrp);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Member is explicitly leaving the group (through LeaveGroupRequest)
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member) {
+
+ rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+ "Member %s is leaving group %s", member->id, mcgrp->id);
+
+ rd_kafka_mock_cgrp_member_destroy(mcgrp, member);
+
+ rd_kafka_mock_cgrp_rebalance(mcgrp, "explicit member leave");
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Destroys/frees an array of protocols, including the array itself.
+ */
+void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos,
+ int proto_cnt) {
+ int i;
+
+ for (i = 0; i < proto_cnt; i++) {
+ rd_free(protos[i].name);
+ if (protos[i].metadata)
+ rd_free(protos[i].metadata);
+ }
+
+ rd_free(protos);
+}
+
+static void
+rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp,
+ int timeout_ms);
+
+/**
+ * @brief Elect consumer group leader and send JoinGroup responses
+ */
+static void rd_kafka_mock_cgrp_elect_leader(rd_kafka_mock_cgrp_t *mcgrp) {
+ rd_kafka_mock_cgrp_member_t *member;
+
+ rd_assert(mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING);
+ rd_assert(!TAILQ_EMPTY(&mcgrp->members));
+
+ mcgrp->generation_id++;
+
+ /* Elect a leader.
+ * FIXME: For now we'll use the first member */
+ mcgrp->leader = TAILQ_FIRST(&mcgrp->members);
+
+ rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+ "Consumer group %s with %d member(s) is rebalancing: "
+ "elected leader is %s, generation id %d",
+ mcgrp->id, mcgrp->member_cnt, mcgrp->leader->id,
+ mcgrp->generation_id);
+
+ /* Find the most commonly supported protocol name among the members.
+ * FIXME: For now we'll blindly use the first protocol of the leader. */
+ if (mcgrp->protocol_name)
+ rd_free(mcgrp->protocol_name);
+ mcgrp->protocol_name = RD_KAFKAP_STR_DUP(mcgrp->leader->protos[0].name);
+
+ /* Send JoinGroupResponses to all members */
+ TAILQ_FOREACH(member, &mcgrp->members, link) {
+ rd_bool_t is_leader = member == mcgrp->leader;
+ int member_cnt = is_leader ? mcgrp->member_cnt : 0;
+ rd_kafka_buf_t *resp;
+ rd_kafka_mock_cgrp_member_t *member2;
+ rd_kafka_mock_connection_t *mconn;
+
+ /* Member connection has been closed, it will eventually
+ * reconnect or time out from the group. */
+ if (!member->conn || !member->resp)
+ continue;
+ mconn = member->conn;
+ member->conn = NULL;
+ resp = member->resp;
+ member->resp = NULL;
+
+ rd_assert(resp->rkbuf_reqhdr.ApiKey == RD_KAFKAP_JoinGroup);
+
+ rd_kafka_buf_write_i16(resp, 0); /* ErrorCode */
+ rd_kafka_buf_write_i32(resp, mcgrp->generation_id);
+ rd_kafka_buf_write_str(resp, mcgrp->protocol_name, -1);
+ rd_kafka_buf_write_str(resp, mcgrp->leader->id, -1);
+ rd_kafka_buf_write_str(resp, member->id, -1);
+ rd_kafka_buf_write_i32(resp, member_cnt);
+
+ /* Send full member list to leader */
+ if (member_cnt > 0) {
+ TAILQ_FOREACH(member2, &mcgrp->members, link) {
+ rd_kafka_buf_write_str(resp, member2->id, -1);
+ if (resp->rkbuf_reqhdr.ApiVersion >= 5)
+ rd_kafka_buf_write_str(
+ resp, member2->group_instance_id,
+ -1);
+ /* FIXME: look up correct protocol name */
+ rd_assert(!rd_kafkap_str_cmp_str(
+ member2->protos[0].name,
+ mcgrp->protocol_name));
+
+ rd_kafka_buf_write_kbytes(
+ resp, member2->protos[0].metadata);
+ }
+ }
+
+ /* Mark each member as active to avoid them timing out
+ * at the same time as a JoinGroup handler that blocks
+ * session.timeout.ms to elect a leader. */
+ rd_kafka_mock_cgrp_member_active(mcgrp, member);
+
+ rd_kafka_mock_connection_set_blocking(mconn, rd_false);
+ rd_kafka_mock_connection_send_response(mconn, resp);
+ }
+
+ mcgrp->last_member_cnt = mcgrp->member_cnt;
+
+ rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_SYNCING,
+ "leader elected, waiting for all "
+ "members to sync");
+
+ rd_kafka_mock_cgrp_rebalance_timer_restart(mcgrp,
+ mcgrp->session_timeout_ms);
+}
+
+
+/**
+ * @brief Trigger group rebalance.
+ */
+static void rd_kafka_mock_cgrp_rebalance(rd_kafka_mock_cgrp_t *mcgrp,
+ const char *reason) {
+ int timeout_ms;
+
+ if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_JOINING)
+ return; /* Do nothing, group is already rebalancing. */
+ else if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_EMPTY)
+ timeout_ms = 3000; /* First join, low timeout.
+ * Same as group.initial.rebalance.delay.ms
+ * on the broker. */
+ else if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_REBALANCING &&
+ mcgrp->member_cnt == mcgrp->last_member_cnt)
+ timeout_ms = 100; /* All members rejoined, quickly transition
+ * to election. */
+ else /* Let the rebalance delay be a bit shorter than the
+ * session timeout so that we don't time out waiting members
+ * who are also subject to the session timeout. */
+ timeout_ms = mcgrp->session_timeout_ms > 1000
+ ? mcgrp->session_timeout_ms - 1000
+ : mcgrp->session_timeout_ms;
+
+ if (mcgrp->state == RD_KAFKA_MOCK_CGRP_STATE_SYNCING)
+ /* Abort current Syncing state */
+ rd_kafka_mock_cgrp_sync_done(
+ mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS);
+
+ rd_kafka_mock_cgrp_set_state(mcgrp, RD_KAFKA_MOCK_CGRP_STATE_JOINING,
+ reason);
+ rd_kafka_mock_cgrp_rebalance_timer_restart(mcgrp, timeout_ms);
+}
+
+/**
+ * @brief Consumer group state machine triggered by timer events.
+ */
+static void rd_kafka_mock_cgrp_fsm_timeout(rd_kafka_mock_cgrp_t *mcgrp) {
+ rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+ "Mock consumer group %s FSM timeout in state %s",
+ mcgrp->id, rd_kafka_mock_cgrp_state_names[mcgrp->state]);
+
+ switch (mcgrp->state) {
+ case RD_KAFKA_MOCK_CGRP_STATE_EMPTY:
+ /* No members, do nothing */
+ break;
+ case RD_KAFKA_MOCK_CGRP_STATE_JOINING:
+ /* Timed out waiting for more members, elect a leader */
+ if (mcgrp->member_cnt > 0)
+ rd_kafka_mock_cgrp_elect_leader(mcgrp);
+ else
+ rd_kafka_mock_cgrp_set_state(
+ mcgrp, RD_KAFKA_MOCK_CGRP_STATE_EMPTY,
+ "no members joined");
+ break;
+
+ case RD_KAFKA_MOCK_CGRP_STATE_SYNCING:
+ /* Timed out waiting for all members to sync */
+
+ /* Send error response to all waiting members */
+ rd_kafka_mock_cgrp_sync_done(
+ mcgrp, RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS /* FIXME */);
+
+ rd_kafka_mock_cgrp_set_state(
+ mcgrp, RD_KAFKA_MOCK_CGRP_STATE_REBALANCING,
+ "timed out waiting for all members to synchronize");
+ break;
+
+ case RD_KAFKA_MOCK_CGRP_STATE_REBALANCING:
+ /* Timed out waiting for all members to Leave or re-Join */
+ rd_kafka_mock_cgrp_set_state(mcgrp,
+ RD_KAFKA_MOCK_CGRP_STATE_JOINING,
+ "timed out waiting for all "
+ "members to re-Join or Leave");
+ break;
+
+ case RD_KAFKA_MOCK_CGRP_STATE_UP:
+ /* No fsm timers triggered in this state, see
+ * the session_tmr instead */
+ break;
+ }
+}
+
+static void rd_kafka_mcgrp_rebalance_timer_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_mock_cgrp_t *mcgrp = arg;
+
+ rd_kafka_mock_cgrp_fsm_timeout(mcgrp);
+}
+
+
+/**
+ * @brief Restart the rebalance timer, postponing leader election.
+ */
+static void
+rd_kafka_mock_cgrp_rebalance_timer_restart(rd_kafka_mock_cgrp_t *mcgrp,
+ int timeout_ms) {
+ rd_kafka_timer_start_oneshot(
+ &mcgrp->cluster->timers, &mcgrp->rebalance_tmr, rd_true,
+ timeout_ms * 1000, rd_kafka_mcgrp_rebalance_timer_cb, mcgrp);
+}
+
+
+static void
+rd_kafka_mock_cgrp_member_destroy(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member) {
+ rd_assert(mcgrp->member_cnt > 0);
+ TAILQ_REMOVE(&mcgrp->members, member, link);
+ mcgrp->member_cnt--;
+
+ rd_free(member->id);
+
+ if (member->resp)
+ rd_kafka_buf_destroy(member->resp);
+
+ if (member->group_instance_id)
+ rd_free(member->group_instance_id);
+
+ rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member, NULL);
+
+ rd_kafka_mock_cgrp_protos_destroy(member->protos, member->proto_cnt);
+
+ rd_free(member);
+}
+
+
+/**
+ * @brief Find member in group.
+ */
+rd_kafka_mock_cgrp_member_t *
+rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp,
+ const rd_kafkap_str_t *MemberId) {
+ const rd_kafka_mock_cgrp_member_t *member;
+ TAILQ_FOREACH(member, &mcgrp->members, link) {
+ if (!rd_kafkap_str_cmp_str(MemberId, member->id))
+ return (rd_kafka_mock_cgrp_member_t *)member;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Update or add member to consumer group
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp,
+ const rd_kafkap_str_t *MemberId,
+ const rd_kafkap_str_t *ProtocolType,
+ rd_kafka_mock_cgrp_proto_t *protos,
+ int proto_cnt,
+ int session_timeout_ms) {
+ rd_kafka_mock_cgrp_member_t *member;
+ rd_kafka_resp_err_t err;
+
+ err = rd_kafka_mock_cgrp_check_state(mcgrp, NULL, resp, -1);
+ if (err)
+ return err;
+
+ /* Find member */
+ member = rd_kafka_mock_cgrp_member_find(mcgrp, MemberId);
+ if (!member) {
+ /* Not found, add member */
+ member = rd_calloc(1, sizeof(*member));
+
+ if (!RD_KAFKAP_STR_LEN(MemberId)) {
+ /* Generate a member id */
+ char memberid[32];
+ rd_snprintf(memberid, sizeof(memberid), "%p", member);
+ member->id = rd_strdup(memberid);
+ } else
+ member->id = RD_KAFKAP_STR_DUP(MemberId);
+
+ TAILQ_INSERT_TAIL(&mcgrp->members, member, link);
+ mcgrp->member_cnt++;
+ }
+
+ if (mcgrp->state != RD_KAFKA_MOCK_CGRP_STATE_JOINING)
+ rd_kafka_mock_cgrp_rebalance(mcgrp, "member join");
+
+ mcgrp->session_timeout_ms = session_timeout_ms;
+
+ if (member->protos)
+ rd_kafka_mock_cgrp_protos_destroy(member->protos,
+ member->proto_cnt);
+ member->protos = protos;
+ member->proto_cnt = proto_cnt;
+
+ rd_assert(!member->resp);
+ member->resp = resp;
+ member->conn = mconn;
+ rd_kafka_mock_cgrp_member_active(mcgrp, member);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Check if any members have exceeded the session timeout.
+ */
+static void rd_kafka_mock_cgrp_session_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_mock_cgrp_t *mcgrp = arg;
+ rd_kafka_mock_cgrp_member_t *member, *tmp;
+ rd_ts_t now = rd_clock();
+ int timeout_cnt = 0;
+
+ TAILQ_FOREACH_SAFE(member, &mcgrp->members, link, tmp) {
+ if (member->ts_last_activity +
+ (mcgrp->session_timeout_ms * 1000) >
+ now)
+ continue;
+
+ rd_kafka_dbg(mcgrp->cluster->rk, MOCK, "MOCK",
+ "Member %s session timed out for group %s",
+ member->id, mcgrp->id);
+
+ rd_kafka_mock_cgrp_member_destroy(mcgrp, member);
+ timeout_cnt++;
+ }
+
+ if (timeout_cnt)
+ rd_kafka_mock_cgrp_rebalance(mcgrp, "member timeout");
+}
+
+
+void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp) {
+ rd_kafka_mock_cgrp_member_t *member;
+
+ TAILQ_REMOVE(&mcgrp->cluster->cgrps, mcgrp, link);
+
+ rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->rebalance_tmr,
+ rd_true);
+ rd_kafka_timer_stop(&mcgrp->cluster->timers, &mcgrp->session_tmr,
+ rd_true);
+ rd_free(mcgrp->id);
+ rd_free(mcgrp->protocol_type);
+ if (mcgrp->protocol_name)
+ rd_free(mcgrp->protocol_name);
+ while ((member = TAILQ_FIRST(&mcgrp->members)))
+ rd_kafka_mock_cgrp_member_destroy(mcgrp, member);
+ rd_free(mcgrp);
+}
+
+
+rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *GroupId) {
+ rd_kafka_mock_cgrp_t *mcgrp;
+ TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) {
+ if (!rd_kafkap_str_cmp_str(GroupId, mcgrp->id))
+ return mcgrp;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Find or create a consumer group
+ */
+rd_kafka_mock_cgrp_t *
+rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *GroupId,
+ const rd_kafkap_str_t *ProtocolType) {
+ rd_kafka_mock_cgrp_t *mcgrp;
+
+ mcgrp = rd_kafka_mock_cgrp_find(mcluster, GroupId);
+ if (mcgrp)
+ return mcgrp;
+
+ /* FIXME: What to do with mismatching ProtocolTypes? */
+
+ mcgrp = rd_calloc(1, sizeof(*mcgrp));
+
+ mcgrp->cluster = mcluster;
+ mcgrp->id = RD_KAFKAP_STR_DUP(GroupId);
+ mcgrp->protocol_type = RD_KAFKAP_STR_DUP(ProtocolType);
+ mcgrp->generation_id = 1;
+ TAILQ_INIT(&mcgrp->members);
+ rd_kafka_timer_start(&mcluster->timers, &mcgrp->session_tmr,
+ 1000 * 1000 /*1s*/,
+ rd_kafka_mock_cgrp_session_tmr_cb, mcgrp);
+
+ TAILQ_INSERT_TAIL(&mcluster->cgrps, mcgrp, link);
+
+ return mcgrp;
+}
+
+
+/**
+ * @brief A client connection closed, check if any cgrp has any state
+ * for this connection that needs to be cleared.
+ */
+void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_mock_connection_t *mconn) {
+ rd_kafka_mock_cgrp_t *mcgrp;
+
+ TAILQ_FOREACH(mcgrp, &mcluster->cgrps, link) {
+ rd_kafka_mock_cgrp_member_t *member, *tmp;
+ TAILQ_FOREACH_SAFE(member, &mcgrp->members, link, tmp) {
+ if (member->conn == mconn) {
+ member->conn = NULL;
+ if (member->resp) {
+ rd_kafka_buf_destroy(member->resp);
+ member->resp = NULL;
+ }
+ }
+ }
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c
new file mode 100644
index 000000000..3a004d41d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_handlers.c
@@ -0,0 +1,2218 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Mocks - protocol request handlers
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdbuf.h"
+#include "rdrand.h"
+#include "rdkafka_interceptor.h"
+#include "rdkafka_mock_int.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_offset.h"
+
+
+
+/**
+ * @brief Handle ProduceRequest
+ */
+static int rd_kafka_mock_handle_Produce(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ int32_t TopicsCnt;
+ rd_kafkap_str_t TransactionalId = RD_KAFKAP_STR_INITIALIZER;
+ int16_t Acks;
+ int32_t TimeoutMs;
+ rd_kafka_resp_err_t all_err;
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
+ rd_kafka_buf_read_str(rkbuf, &TransactionalId);
+
+ rd_kafka_buf_read_i16(rkbuf, &Acks);
+ rd_kafka_buf_read_i32(rkbuf, &TimeoutMs);
+ rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
+
+ /* Response: #Topics */
+ rd_kafka_buf_write_i32(resp, TopicsCnt);
+
+ /* Inject error, if any */
+ all_err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ while (TopicsCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t PartitionCnt;
+ rd_kafka_mock_topic_t *mtopic;
+
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+ /* Response: #Partitions */
+ rd_kafka_buf_write_i32(resp, PartitionCnt);
+
+ while (PartitionCnt-- > 0) {
+ int32_t Partition;
+ rd_kafka_mock_partition_t *mpart = NULL;
+ rd_kafkap_bytes_t records;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int64_t BaseOffset = -1;
+
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+
+ if (mtopic)
+ mpart = rd_kafka_mock_partition_find(mtopic,
+ Partition);
+
+ rd_kafka_buf_read_bytes(rkbuf, &records);
+
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+
+ if (all_err)
+ err = all_err;
+ else if (!mpart)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ else if (mpart->leader != mconn->broker)
+ err =
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION;
+
+ /* Append to partition log */
+ if (!err)
+ err = rd_kafka_mock_partition_log_append(
+ mpart, &records, &TransactionalId,
+ &BaseOffset);
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ if (err) {
+ /* Response: BaseOffset */
+ rd_kafka_buf_write_i64(resp, BaseOffset);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
+ /* Response: LogAppendTimeMs */
+ rd_kafka_buf_write_i64(resp, -1);
+ }
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) {
+ /* Response: LogStartOffset */
+ rd_kafka_buf_write_i64(resp, -1);
+ }
+
+ } else {
+ /* Response: BaseOffset */
+ rd_kafka_buf_write_i64(resp, BaseOffset);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
+ /* Response: LogAppendTimeMs */
+ rd_kafka_buf_write_i64(resp, 1234);
+ }
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) {
+ /* Response: LogStartOffset */
+ rd_kafka_buf_write_i64(
+ resp, mpart->start_offset);
+ }
+ }
+ }
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: ThrottleTime */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Handle FetchRequest
+ */
+static int rd_kafka_mock_handle_Fetch(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_resp_err_t all_err;
+ int32_t ReplicaId, MaxWait, MinBytes, MaxBytes = -1, SessionId = -1,
+ Epoch, TopicsCnt;
+ int8_t IsolationLevel;
+ size_t totsize = 0;
+
+ rd_kafka_buf_read_i32(rkbuf, &ReplicaId);
+ rd_kafka_buf_read_i32(rkbuf, &MaxWait);
+ rd_kafka_buf_read_i32(rkbuf, &MinBytes);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
+ rd_kafka_buf_read_i32(rkbuf, &MaxBytes);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4)
+ rd_kafka_buf_read_i8(rkbuf, &IsolationLevel);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) {
+ rd_kafka_buf_read_i32(rkbuf, &SessionId);
+ rd_kafka_buf_read_i32(rkbuf, &Epoch);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: ThrottleTime */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+
+ /* Inject error, if any */
+ all_err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) {
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, all_err);
+
+ /* Response: SessionId */
+ rd_kafka_buf_write_i32(resp, SessionId);
+ }
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
+
+ /* Response: #Topics */
+ rd_kafka_buf_write_i32(resp, TopicsCnt);
+
+ while (TopicsCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t PartitionCnt;
+ rd_kafka_mock_topic_t *mtopic;
+
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+ /* Response: #Partitions */
+ rd_kafka_buf_write_i32(resp, PartitionCnt);
+
+ while (PartitionCnt-- > 0) {
+ int32_t Partition, CurrentLeaderEpoch = -1,
+ PartMaxBytes;
+ int64_t FetchOffset, LogStartOffset;
+ rd_kafka_mock_partition_t *mpart = NULL;
+ rd_kafka_resp_err_t err = all_err;
+ rd_bool_t on_follower;
+ size_t partsize = 0;
+ const rd_kafka_mock_msgset_t *mset = NULL;
+
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 9)
+ rd_kafka_buf_read_i32(rkbuf,
+ &CurrentLeaderEpoch);
+
+ rd_kafka_buf_read_i64(rkbuf, &FetchOffset);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5)
+ rd_kafka_buf_read_i64(rkbuf, &LogStartOffset);
+
+ rd_kafka_buf_read_i32(rkbuf, &PartMaxBytes);
+
+ if (mtopic)
+ mpart = rd_kafka_mock_partition_find(mtopic,
+ Partition);
+
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+
+ /* Fetch is directed at follower and this is
+ * the follower broker. */
+ on_follower =
+ mpart && mpart->follower_id == mconn->broker->id;
+
+ if (!all_err && !mpart)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ else if (!all_err && mpart->leader != mconn->broker &&
+ !on_follower)
+ err =
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION;
+
+ if (!err && mpart)
+ err =
+ rd_kafka_mock_partition_leader_epoch_check(
+ mpart, CurrentLeaderEpoch);
+
+ /* Find MessageSet for FetchOffset */
+ if (!err && FetchOffset != mpart->end_offset) {
+ /* Kafka currently only returns
+ * OFFSET_NOT_AVAILABLE
+ * in ListOffsets calls */
+ if (!(mset = rd_kafka_mock_msgset_find(
+ mpart, FetchOffset, on_follower)))
+ err =
+ RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE;
+ rd_kafka_dbg(
+ mcluster->rk, MOCK, "MOCK",
+ "Topic %.*s [%" PRId32
+ "] fetch err %s for offset %" PRId64
+ " mset %p, on_follower %d, "
+ "start %" PRId64 ", end_offset %" PRId64
+ ", current epoch %" PRId32,
+ RD_KAFKAP_STR_PR(&Topic), Partition,
+ rd_kafka_err2name(err), FetchOffset, mset,
+ on_follower, mpart->start_offset,
+ mpart->end_offset, mpart->leader_epoch);
+ }
+
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ /* Response: Highwatermark */
+ rd_kafka_buf_write_i64(
+ resp,
+ mpart ? (on_follower ? mpart->follower_end_offset
+ : mpart->end_offset)
+ : -1);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) {
+ /* Response: LastStableOffset */
+ rd_kafka_buf_write_i64(
+ resp, mpart ? mpart->end_offset : -1);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) {
+ /* Response: LogStartOffset */
+ rd_kafka_buf_write_i64(
+ resp,
+ !mpart ? -1
+ : (on_follower
+ ? mpart->follower_start_offset
+ : mpart->start_offset));
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) {
+ /* Response: #Aborted */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 11) {
+ int32_t PreferredReadReplica =
+ mpart && mpart->leader == mconn->broker &&
+ mpart->follower_id != -1
+ ? mpart->follower_id
+ : -1;
+
+ /* Response: PreferredReplica */
+ rd_kafka_buf_write_i32(resp,
+ PreferredReadReplica);
+
+ if (PreferredReadReplica != -1) {
+ /* Don't return any data when
+ * PreferredReadReplica is set */
+ mset = NULL;
+ MaxWait = 0;
+ }
+ }
+
+
+ if (mset && partsize < (size_t)PartMaxBytes &&
+ totsize < (size_t)MaxBytes) {
+ /* Response: Records */
+ rd_kafka_buf_write_kbytes(resp, &mset->bytes);
+ partsize += RD_KAFKAP_BYTES_SIZE(&mset->bytes);
+ totsize += RD_KAFKAP_BYTES_SIZE(&mset->bytes);
+
+ /* FIXME: Multiple messageSets ? */
+ } else {
+ /* Empty Response: Records: Null */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+ }
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7) {
+ int32_t ForgottenTopicCnt;
+ rd_kafka_buf_read_i32(rkbuf, &ForgottenTopicCnt);
+ while (ForgottenTopicCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t ForgPartCnt;
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ rd_kafka_buf_read_i32(rkbuf, &ForgPartCnt);
+ while (ForgPartCnt-- > 0) {
+ int32_t Partition;
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+ }
+ }
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 11) {
+ rd_kafkap_str_t RackId;
+ char *rack;
+ rd_kafka_buf_read_str(rkbuf, &RackId);
+ RD_KAFKAP_STR_DUPA(&rack, &RackId);
+ /* Matt might do something sensible with this */
+ }
+
+ /* If there was no data, delay up to MaxWait.
+ * This isn't strictly correct since we should cut the wait short
+ * and feed newly produced data if a producer writes to the
+ * partitions, but that is too much of a hassle here since we
+ * can't block the thread. */
+ if (!totsize && MaxWait > 0)
+ resp->rkbuf_ts_retry = rd_clock() + (MaxWait * 1000);
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Handle ListOffsets
+ */
+static int rd_kafka_mock_handle_ListOffsets(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_resp_err_t all_err;
+ int32_t ReplicaId, TopicsCnt;
+ int8_t IsolationLevel;
+
+ rd_kafka_buf_read_i32(rkbuf, &ReplicaId);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2)
+ rd_kafka_buf_read_i8(rkbuf, &IsolationLevel);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
+ /* Response: ThrottleTime */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+
+ /* Inject error, if any */
+ all_err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
+
+ /* Response: #Topics */
+ rd_kafka_buf_write_i32(resp, TopicsCnt);
+
+ while (TopicsCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t PartitionCnt;
+ rd_kafka_mock_topic_t *mtopic;
+
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+ /* Response: #Partitions */
+ rd_kafka_buf_write_i32(resp, PartitionCnt);
+
+ while (PartitionCnt-- > 0) {
+ int32_t Partition, CurrentLeaderEpoch = -1;
+ int64_t Timestamp, Offset = -1;
+ int32_t MaxNumOffsets;
+ rd_kafka_mock_partition_t *mpart = NULL;
+ rd_kafka_resp_err_t err = all_err;
+
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4)
+ rd_kafka_buf_read_i32(rkbuf,
+ &CurrentLeaderEpoch);
+
+ rd_kafka_buf_read_i64(rkbuf, &Timestamp);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion == 0)
+ rd_kafka_buf_read_i32(rkbuf, &MaxNumOffsets);
+
+ if (mtopic)
+ mpart = rd_kafka_mock_partition_find(mtopic,
+ Partition);
+
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+
+ if (!all_err && !mpart)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ else if (!all_err && mpart->leader != mconn->broker)
+ err =
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION;
+
+ if (!err && mpart)
+ err =
+ rd_kafka_mock_partition_leader_epoch_check(
+ mpart, CurrentLeaderEpoch);
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ if (!err && mpart) {
+ if (Timestamp == RD_KAFKA_OFFSET_BEGINNING)
+ Offset = mpart->start_offset;
+ else if (Timestamp == RD_KAFKA_OFFSET_END)
+ Offset = mpart->end_offset;
+ else if (Timestamp < 0)
+ Offset = -1;
+ else /* FIXME: by timestamp */
+ Offset = -1;
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion == 0) {
+ /* Response: #OldStyleOffsets */
+ rd_kafka_buf_write_i32(resp,
+ Offset != -1 ? 1 : 0);
+ /* Response: OldStyleOffsets[0] */
+ if (Offset != -1)
+ rd_kafka_buf_write_i64(resp, Offset);
+ } else {
+ /* Response: Timestamp (FIXME) */
+ rd_kafka_buf_write_i64(resp, -1);
+
+ /* Response: Offset */
+ rd_kafka_buf_write_i64(resp, Offset);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4) {
+ /* Response: LeaderEpoch */
+ rd_kafka_buf_write_i32(
+ resp, mpart ? mpart->leader_epoch : -1);
+ }
+
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Topic %.*s [%" PRId32
+ "] returning "
+ "offset %" PRId64 " (leader epoch %" PRId32
+ ") for %s: %s",
+ RD_KAFKAP_STR_PR(&Topic), Partition,
+ Offset, mpart ? mpart->leader_epoch : -1,
+ rd_kafka_offset2str(Timestamp),
+ rd_kafka_err2str(err));
+ }
+ }
+
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+/**
+ * @brief Handle OffsetFetch (fetch committed offsets)
+ */
+static int rd_kafka_mock_handle_OffsetFetch(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_mock_broker_t *mrkb;
+ rd_kafka_resp_err_t all_err;
+ int32_t TopicsCnt;
+ rd_kafkap_str_t GroupId;
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
+ /* Response: ThrottleTime */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+
+ /* Inject error, if any */
+ all_err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ mrkb = rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP,
+ &GroupId);
+ if (!mrkb && !all_err)
+ all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR; // FIXME? check if
+ // its this mrkb?
+
+
+ rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, 100000);
+
+ /* Response: #Topics */
+ rd_kafka_buf_write_arraycnt(resp, TopicsCnt);
+
+ while (TopicsCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t PartitionCnt;
+ rd_kafka_mock_topic_t *mtopic;
+
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ rd_kafka_buf_read_arraycnt(rkbuf, &PartitionCnt, 100000);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+ /* Response: #Partitions */
+ rd_kafka_buf_write_arraycnt(resp, PartitionCnt);
+
+ while (PartitionCnt-- > 0) {
+ int32_t Partition;
+ rd_kafka_mock_partition_t *mpart = NULL;
+ const rd_kafka_mock_committed_offset_t *coff = NULL;
+ rd_kafka_resp_err_t err = all_err;
+
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+
+ if (mtopic)
+ mpart = rd_kafka_mock_partition_find(mtopic,
+ Partition);
+
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+
+ if (!all_err && !mpart)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ if (!err)
+ coff = rd_kafka_mock_committed_offset_find(
+ mpart, &GroupId);
+
+ /* Response: CommittedOffset */
+ rd_kafka_buf_write_i64(resp, coff ? coff->offset : -1);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5) {
+ /* Response: CommittedLeaderEpoch */
+ rd_kafka_buf_write_i32(
+ resp, mpart ? mpart->leader_epoch : -1);
+ }
+
+ /* Response: Metadata */
+ rd_kafka_buf_write_kstr(resp,
+ coff ? coff->metadata : NULL);
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ /* Response: Struct tags */
+ rd_kafka_buf_write_tags(resp);
+
+ if (coff)
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Topic %s [%" PRId32
+ "] returning "
+ "committed offset %" PRId64
+ " for group %s",
+ mtopic->name, mpart->id,
+ coff->offset, coff->group);
+ else
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "Topic %.*s [%" PRId32
+ "] has no "
+ "committed offset for group %.*s: "
+ "%s",
+ RD_KAFKAP_STR_PR(&Topic),
+ Partition,
+ RD_KAFKAP_STR_PR(&GroupId),
+ rd_kafka_err2str(err));
+ }
+
+ /* Request: Skip struct tags */
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ /* Response: Struct tags */
+ rd_kafka_buf_write_tags(resp);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
+ /* Response: Outer ErrorCode */
+ rd_kafka_buf_write_i16(resp, all_err);
+ }
+
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Handle OffsetCommit
+ */
+static int rd_kafka_mock_handle_OffsetCommit(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_mock_broker_t *mrkb;
+ rd_kafka_resp_err_t all_err;
+ int32_t GenerationId = -1, TopicsCnt;
+ rd_kafkap_str_t GroupId, MemberId, GroupInstanceId;
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
+ /* Response: ThrottleTime */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ rd_kafka_buf_read_i32(rkbuf, &GenerationId);
+ rd_kafka_buf_read_str(rkbuf, &MemberId);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 7)
+ rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2 &&
+ rkbuf->rkbuf_reqhdr.ApiVersion <= 4) {
+ int64_t RetentionTimeMs;
+ rd_kafka_buf_read_i64(rkbuf, &RetentionTimeMs);
+ }
+
+
+ /* Inject error, if any */
+ all_err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ mrkb = rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP,
+ &GroupId);
+ if (!mrkb && !all_err)
+ all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+
+
+ if (!all_err) {
+ rd_kafka_mock_cgrp_t *mcgrp;
+
+ mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
+ if (mcgrp) {
+ rd_kafka_mock_cgrp_member_t *member = NULL;
+
+ if (!RD_KAFKAP_STR_IS_NULL(&MemberId))
+ member = rd_kafka_mock_cgrp_member_find(
+ mcgrp, &MemberId);
+
+ if (!member)
+ all_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+ else
+ all_err = rd_kafka_mock_cgrp_check_state(
+ mcgrp, member, rkbuf, GenerationId);
+ }
+
+ /* FIXME: also check that partitions are assigned to member */
+ }
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
+
+ /* Response: #Topics */
+ rd_kafka_buf_write_i32(resp, TopicsCnt);
+
+ while (TopicsCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t PartitionCnt;
+ rd_kafka_mock_topic_t *mtopic;
+
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ rd_kafka_buf_read_i32(rkbuf, &PartitionCnt);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+ /* Response: #Partitions */
+ rd_kafka_buf_write_i32(resp, PartitionCnt);
+
+ while (PartitionCnt-- > 0) {
+ int32_t Partition;
+ rd_kafka_mock_partition_t *mpart = NULL;
+ rd_kafka_resp_err_t err = all_err;
+ int64_t CommittedOffset;
+ rd_kafkap_str_t Metadata;
+
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+
+ if (mtopic)
+ mpart = rd_kafka_mock_partition_find(mtopic,
+ Partition);
+
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+
+ if (!all_err && !mpart)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ rd_kafka_buf_read_i64(rkbuf, &CommittedOffset);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 6) {
+ int32_t CommittedLeaderEpoch;
+ rd_kafka_buf_read_i32(rkbuf,
+ &CommittedLeaderEpoch);
+
+ if (!err && mpart)
+ err =
+ rd_kafka_mock_partition_leader_epoch_check(
+ mpart, CommittedLeaderEpoch);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion == 1) {
+ int64_t CommitTimestamp;
+ rd_kafka_buf_read_i64(rkbuf, &CommitTimestamp);
+ }
+
+ rd_kafka_buf_read_str(rkbuf, &Metadata);
+
+ if (!err)
+ rd_kafka_mock_commit_offset(mpart, &GroupId,
+ CommittedOffset,
+ &Metadata);
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+ }
+ }
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Handle ApiVersionRequest
+ */
+static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf);
+
+
+/**
+ * @brief Write a MetadataResponse.Topics. entry to \p resp.
+ *
+ * @param mtopic may be NULL
+ */
+static void
+rd_kafka_mock_buf_write_Metadata_Topic(rd_kafka_buf_t *resp,
+ int16_t ApiVersion,
+ const char *topic,
+ const rd_kafka_mock_topic_t *mtopic,
+ rd_kafka_resp_err_t err) {
+ int i;
+ int partition_cnt =
+ (!mtopic || err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+ ? 0
+ : mtopic->partition_cnt;
+
+ /* Response: Topics.ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+ /* Response: Topics.Name */
+ rd_kafka_buf_write_str(resp, topic, -1);
+ if (ApiVersion >= 1) {
+ /* Response: Topics.IsInternal */
+ rd_kafka_buf_write_bool(resp, rd_false);
+ }
+ /* Response: Topics.#Partitions */
+ rd_kafka_buf_write_arraycnt(resp, partition_cnt);
+
+ for (i = 0; mtopic && i < partition_cnt; i++) {
+ const rd_kafka_mock_partition_t *mpart = &mtopic->partitions[i];
+ int r;
+
+ /* Response: ..Partitions.ErrorCode */
+ rd_kafka_buf_write_i16(resp, 0);
+ /* Response: ..Partitions.PartitionIndex */
+ rd_kafka_buf_write_i32(resp, mpart->id);
+ /* Response: ..Partitions.Leader */
+ rd_kafka_buf_write_i32(resp,
+ mpart->leader ? mpart->leader->id : -1);
+
+ if (ApiVersion >= 7) {
+ /* Response: ..Partitions.LeaderEpoch */
+ rd_kafka_buf_write_i32(resp, mpart->leader_epoch);
+ }
+
+ /* Response: ..Partitions.#ReplicaNodes */
+ rd_kafka_buf_write_arraycnt(resp, mpart->replica_cnt);
+ for (r = 0; r < mpart->replica_cnt; r++)
+ rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id);
+
+ /* Response: ..Partitions.#IsrNodes */
+ /* Let Replicas == ISRs for now */
+ rd_kafka_buf_write_arraycnt(resp, mpart->replica_cnt);
+ for (r = 0; r < mpart->replica_cnt; r++)
+ rd_kafka_buf_write_i32(resp, mpart->replicas[r]->id);
+
+ if (ApiVersion >= 5) {
+ /* Response: ...OfflineReplicas */
+ rd_kafka_buf_write_arraycnt(resp, 0);
+ }
+
+ rd_kafka_buf_write_tags(resp);
+ }
+
+ if (ApiVersion >= 8) {
+ /* Response: Topics.TopicAuthorizedOperations */
+ rd_kafka_buf_write_i32(resp, INT32_MIN);
+ }
+
+ rd_kafka_buf_write_tags(resp);
+}
+
+
+/**
+ * @brief Handle MetadataRequest
+ */
+static int rd_kafka_mock_handle_Metadata(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_bool_t AllowAutoTopicCreation = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ const rd_kafka_mock_broker_t *mrkb;
+ rd_kafka_topic_partition_list_t *requested_topics = NULL;
+ rd_bool_t list_all_topics = rd_false;
+ int32_t TopicsCnt;
+ int i;
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
+ /* Response: ThrottleTime */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ /* Response: #Brokers */
+ rd_kafka_buf_write_arraycnt(resp, mcluster->broker_cnt);
+
+ TAILQ_FOREACH(mrkb, &mcluster->brokers, link) {
+ /* Response: Brokers.Nodeid */
+ rd_kafka_buf_write_i32(resp, mrkb->id);
+ /* Response: Brokers.Host */
+ rd_kafka_buf_write_str(resp, mrkb->advertised_listener, -1);
+ /* Response: Brokers.Port */
+ rd_kafka_buf_write_i32(resp, (int32_t)mrkb->port);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: Brokers.Rack (Matt's going to love this) */
+ rd_kafka_buf_write_str(resp, mrkb->rack, -1);
+ }
+ rd_kafka_buf_write_tags(resp);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
+ /* Response: ClusterId */
+ rd_kafka_buf_write_str(resp, mcluster->id, -1);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: ControllerId */
+ rd_kafka_buf_write_i32(resp, mcluster->controller_id);
+ }
+
+ /* #Topics */
+ rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX);
+
+ if (TopicsCnt > 0)
+ requested_topics = rd_kafka_topic_partition_list_new(TopicsCnt);
+ else if (rkbuf->rkbuf_reqhdr.ApiVersion == 0 || TopicsCnt == -1)
+ list_all_topics = rd_true;
+
+ for (i = 0; i < TopicsCnt; i++) {
+ rd_kafkap_str_t Topic;
+ char *topic;
+
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ RD_KAFKAP_STR_DUPA(&topic, &Topic);
+
+ rd_kafka_topic_partition_list_add(requested_topics, topic,
+ RD_KAFKA_PARTITION_UA);
+ rd_kafka_buf_skip_tags(rkbuf);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 4)
+ rd_kafka_buf_read_bool(rkbuf, &AllowAutoTopicCreation);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8) {
+ rd_bool_t IncludeClusterAuthorizedOperations;
+ rd_bool_t IncludeTopicAuthorizedOperations;
+ rd_kafka_buf_read_bool(rkbuf,
+ &IncludeClusterAuthorizedOperations);
+ rd_kafka_buf_read_bool(rkbuf,
+ &IncludeTopicAuthorizedOperations);
+ }
+
+ if (list_all_topics) {
+ rd_kafka_mock_topic_t *mtopic;
+ /* Response: #Topics */
+ rd_kafka_buf_write_arraycnt(resp, mcluster->topic_cnt);
+
+ TAILQ_FOREACH(mtopic, &mcluster->topics, link) {
+ rd_kafka_mock_buf_write_Metadata_Topic(
+ resp, rkbuf->rkbuf_reqhdr.ApiVersion, mtopic->name,
+ mtopic, mtopic->err);
+ }
+
+ } else if (requested_topics) {
+ /* Response: #Topics */
+ rd_kafka_buf_write_arraycnt(resp, requested_topics->cnt);
+
+ for (i = 0; i < requested_topics->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &requested_topics->elems[i];
+ rd_kafka_mock_topic_t *mtopic;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ mtopic =
+ rd_kafka_mock_topic_find(mcluster, rktpar->topic);
+ if (!mtopic && AllowAutoTopicCreation)
+ mtopic = rd_kafka_mock_topic_auto_create(
+ mcluster, rktpar->topic, -1, &err);
+ else if (!mtopic)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ rd_kafka_mock_buf_write_Metadata_Topic(
+ resp, rkbuf->rkbuf_reqhdr.ApiVersion, rktpar->topic,
+ mtopic, err ? err : mtopic->err);
+ }
+
+ } else {
+ /* Response: #Topics: brokers only */
+ rd_kafka_buf_write_arraycnt(resp, 0);
+ }
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 8 &&
+ rkbuf->rkbuf_reqhdr.ApiVersion <= 10) {
+ /* ClusterAuthorizedOperations */
+ rd_kafka_buf_write_i32(resp, INT32_MIN);
+ }
+
+ rd_kafka_buf_skip_tags(rkbuf);
+ rd_kafka_buf_write_tags(resp);
+
+ if (requested_topics)
+ rd_kafka_topic_partition_list_destroy(requested_topics);
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ if (requested_topics)
+ rd_kafka_topic_partition_list_destroy(requested_topics);
+
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+/**
+ * @brief Handle FindCoordinatorRequest
+ */
+static int
+rd_kafka_mock_handle_FindCoordinator(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafkap_str_t Key;
+ int8_t KeyType = RD_KAFKA_COORD_GROUP;
+ const rd_kafka_mock_broker_t *mrkb = NULL;
+ rd_kafka_resp_err_t err;
+
+ /* Key */
+ rd_kafka_buf_read_str(rkbuf, &Key);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* KeyType */
+ rd_kafka_buf_read_i8(rkbuf, &KeyType);
+ }
+
+
+ /*
+ * Construct response
+ */
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: Throttle */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ /* Inject error, if any */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!err && RD_KAFKAP_STR_LEN(&Key) > 0) {
+ mrkb = rd_kafka_mock_cluster_get_coord(mcluster, KeyType, &Key);
+ rd_assert(mrkb);
+ }
+
+ if (!mrkb && !err)
+ err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
+
+ if (err) {
+ /* Response: ErrorCode and ErrorMessage */
+ rd_kafka_buf_write_i16(resp, err);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_write_str(resp, rd_kafka_err2str(err), -1);
+
+ /* Response: NodeId, Host, Port */
+ rd_kafka_buf_write_i32(resp, -1);
+ rd_kafka_buf_write_str(resp, NULL, -1);
+ rd_kafka_buf_write_i32(resp, -1);
+ } else {
+ /* Response: ErrorCode and ErrorMessage */
+ rd_kafka_buf_write_i16(resp, 0);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_write_str(resp, NULL, -1);
+
+ /* Response: NodeId, Host, Port */
+ rd_kafka_buf_write_i32(resp, mrkb->id);
+ rd_kafka_buf_write_str(resp, mrkb->advertised_listener, -1);
+ rd_kafka_buf_write_i32(resp, (int32_t)mrkb->port);
+ }
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Handle JoinGroupRequest
+ */
+static int rd_kafka_mock_handle_JoinGroup(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_mock_broker_t *mrkb;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafkap_str_t GroupId, MemberId, ProtocolType;
+ rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
+ int32_t SessionTimeoutMs;
+ int32_t MaxPollIntervalMs = -1;
+ int32_t ProtocolCnt = 0;
+ int32_t i;
+ rd_kafka_resp_err_t err;
+ rd_kafka_mock_cgrp_t *mcgrp;
+ rd_kafka_mock_cgrp_proto_t *protos = NULL;
+
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+ rd_kafka_buf_read_i32(rkbuf, &SessionTimeoutMs);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_i32(rkbuf, &MaxPollIntervalMs);
+ rd_kafka_buf_read_str(rkbuf, &MemberId);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 5)
+ rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
+ rd_kafka_buf_read_str(rkbuf, &ProtocolType);
+ rd_kafka_buf_read_i32(rkbuf, &ProtocolCnt);
+
+ if (ProtocolCnt > 1000) {
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "JoinGroupRequest: ProtocolCnt %" PRId32
+ " > max allowed 1000",
+ ProtocolCnt);
+ rd_kafka_buf_destroy(resp);
+ return -1;
+ }
+
+ protos = rd_malloc(sizeof(*protos) * ProtocolCnt);
+ for (i = 0; i < ProtocolCnt; i++) {
+ rd_kafkap_str_t ProtocolName;
+ rd_kafkap_bytes_t Metadata;
+ rd_kafka_buf_read_str(rkbuf, &ProtocolName);
+ rd_kafka_buf_read_bytes(rkbuf, &Metadata);
+ protos[i].name = rd_kafkap_str_copy(&ProtocolName);
+ protos[i].metadata = rd_kafkap_bytes_copy(&Metadata);
+ }
+
+ /*
+ * Construct response
+ */
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
+ /* Response: Throttle */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ /* Inject error, if any */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!err) {
+ mrkb = rd_kafka_mock_cluster_get_coord(
+ mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
+
+ if (!mrkb)
+ err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
+ else if (mrkb != mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+ }
+
+ if (!err) {
+ mcgrp =
+ rd_kafka_mock_cgrp_get(mcluster, &GroupId, &ProtocolType);
+ rd_assert(mcgrp);
+
+ /* This triggers an async rebalance, the response will be
+ * sent later. */
+ err = rd_kafka_mock_cgrp_member_add(
+ mcgrp, mconn, resp, &MemberId, &ProtocolType, protos,
+ ProtocolCnt, SessionTimeoutMs);
+ if (!err) {
+ /* .._add() assumes ownership of resp and protos */
+ protos = NULL;
+ rd_kafka_mock_connection_set_blocking(mconn, rd_true);
+ return 0;
+ }
+ }
+
+ rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt);
+
+ /* Error case */
+ rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
+ rd_kafka_buf_write_i32(resp, -1); /* GenerationId */
+ rd_kafka_buf_write_str(resp, NULL, -1); /* ProtocolName */
+ rd_kafka_buf_write_str(resp, NULL, -1); /* LeaderId */
+ rd_kafka_buf_write_kstr(resp, NULL); /* MemberId */
+ rd_kafka_buf_write_i32(resp, 0); /* MemberCnt */
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ if (protos)
+ rd_kafka_mock_cgrp_protos_destroy(protos, ProtocolCnt);
+ return -1;
+}
+
+
+/**
+ * @brief Handle HeartbeatRequest
+ */
+static int rd_kafka_mock_handle_Heartbeat(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_mock_broker_t *mrkb;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafkap_str_t GroupId, MemberId;
+ rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
+ int32_t GenerationId;
+ rd_kafka_resp_err_t err;
+ rd_kafka_mock_cgrp_t *mcgrp;
+ rd_kafka_mock_cgrp_member_t *member = NULL;
+
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+ rd_kafka_buf_read_i32(rkbuf, &GenerationId);
+ rd_kafka_buf_read_str(rkbuf, &MemberId);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
+ rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
+
+ /*
+ * Construct response
+ */
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: Throttle */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ /* Inject error, if any */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+ if (!err) {
+ mrkb = rd_kafka_mock_cluster_get_coord(
+ mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
+
+ if (!mrkb)
+ err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
+ else if (mrkb != mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+ }
+
+ if (!err) {
+ mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
+ if (!mcgrp)
+ err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
+ }
+
+ if (!err) {
+ member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId);
+ if (!member)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+ }
+
+ if (!err)
+ err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf,
+ GenerationId);
+
+ if (!err)
+ rd_kafka_mock_cgrp_member_active(mcgrp, member);
+
+ rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+/**
+ * @brief Handle LeaveGroupRequest
+ */
+static int rd_kafka_mock_handle_LeaveGroup(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_mock_broker_t *mrkb;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafkap_str_t GroupId, MemberId;
+ rd_kafka_resp_err_t err;
+ rd_kafka_mock_cgrp_t *mcgrp;
+ rd_kafka_mock_cgrp_member_t *member = NULL;
+
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+ rd_kafka_buf_read_str(rkbuf, &MemberId);
+
+ /*
+ * Construct response
+ */
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: Throttle */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ /* Inject error, if any */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+ if (!err) {
+ mrkb = rd_kafka_mock_cluster_get_coord(
+ mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
+
+ if (!mrkb)
+ err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
+ else if (mrkb != mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+ }
+
+ if (!err) {
+ mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
+ if (!mcgrp)
+ err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
+ }
+
+ if (!err) {
+ member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId);
+ if (!member)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+ }
+
+ if (!err)
+ err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf, -1);
+
+ if (!err)
+ rd_kafka_mock_cgrp_member_leave(mcgrp, member);
+
+ rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Handle SyncGroupRequest
+ */
+static int rd_kafka_mock_handle_SyncGroup(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_mock_broker_t *mrkb;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafkap_str_t GroupId, MemberId;
+ rd_kafkap_str_t GroupInstanceId = RD_KAFKAP_STR_INITIALIZER;
+ int32_t GenerationId, AssignmentCnt;
+ int32_t i;
+ rd_kafka_resp_err_t err;
+ rd_kafka_mock_cgrp_t *mcgrp = NULL;
+ rd_kafka_mock_cgrp_member_t *member = NULL;
+
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+ rd_kafka_buf_read_i32(rkbuf, &GenerationId);
+ rd_kafka_buf_read_str(rkbuf, &MemberId);
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3)
+ rd_kafka_buf_read_str(rkbuf, &GroupInstanceId);
+ rd_kafka_buf_read_i32(rkbuf, &AssignmentCnt);
+
+ /*
+ * Construct response
+ */
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* Response: Throttle */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ /* Inject error, if any */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+ if (!err) {
+ mrkb = rd_kafka_mock_cluster_get_coord(
+ mcluster, RD_KAFKA_COORD_GROUP, &GroupId);
+
+ if (!mrkb)
+ err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
+ else if (mrkb != mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+ }
+
+ if (!err) {
+ mcgrp = rd_kafka_mock_cgrp_find(mcluster, &GroupId);
+ if (!mcgrp)
+ err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
+ }
+
+ if (!err) {
+ member = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId);
+ if (!member)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+ }
+
+ if (!err)
+ err = rd_kafka_mock_cgrp_check_state(mcgrp, member, rkbuf,
+ GenerationId);
+
+ if (!err)
+ rd_kafka_mock_cgrp_member_active(mcgrp, member);
+
+ if (!err) {
+ rd_bool_t is_leader = mcgrp->leader && mcgrp->leader == member;
+
+ if (AssignmentCnt > 0 && !is_leader)
+ err =
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION; /* FIXME
+ */
+ else if (AssignmentCnt == 0 && is_leader)
+ err = RD_KAFKA_RESP_ERR_INVALID_PARTITIONS; /* FIXME */
+ }
+
+ for (i = 0; i < AssignmentCnt; i++) {
+ rd_kafkap_str_t MemberId2;
+ rd_kafkap_bytes_t Metadata;
+ rd_kafka_mock_cgrp_member_t *member2;
+
+ rd_kafka_buf_read_str(rkbuf, &MemberId2);
+ rd_kafka_buf_read_bytes(rkbuf, &Metadata);
+
+ if (err)
+ continue;
+
+ /* Find member */
+ member2 = rd_kafka_mock_cgrp_member_find(mcgrp, &MemberId2);
+ if (!member2)
+ continue;
+
+ rd_kafka_mock_cgrp_member_assignment_set(mcgrp, member2,
+ &Metadata);
+ }
+
+ if (!err) {
+ err = rd_kafka_mock_cgrp_member_sync_set(mcgrp, member, mconn,
+ resp);
+ /* .._sync_set() assumes ownership of resp */
+ if (!err)
+ return 0; /* Response will be sent when all members
+ * are synchronized */
+ }
+
+ /* Error case */
+ rd_kafka_buf_write_i16(resp, err); /* ErrorCode */
+ rd_kafka_buf_write_bytes(resp, NULL, -1); /* MemberState */
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Generate a unique ProducerID
+ */
+static const rd_kafka_pid_t
+rd_kafka_mock_pid_new(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId) {
+ size_t tidlen =
+ TransactionalId ? RD_KAFKAP_STR_LEN(TransactionalId) : 0;
+ rd_kafka_mock_pid_t *mpid = rd_malloc(sizeof(*mpid) + tidlen);
+ rd_kafka_pid_t ret;
+
+ mpid->pid.id = rd_jitter(1, 900000) * 1000;
+ mpid->pid.epoch = 0;
+
+ if (tidlen > 0)
+ memcpy(mpid->TransactionalId, TransactionalId->str, tidlen);
+ mpid->TransactionalId[tidlen] = '\0';
+
+ mtx_lock(&mcluster->lock);
+ rd_list_add(&mcluster->pids, mpid);
+ ret = mpid->pid;
+ mtx_unlock(&mcluster->lock);
+
+ return ret;
+}
+
+
+/**
+ * @brief Finds a matching mcluster mock PID for the given \p pid.
+ *
+ * @locks_required mcluster->lock
+ */
+rd_kafka_resp_err_t
+rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
+ const rd_kafka_pid_t pid,
+ rd_kafka_mock_pid_t **mpidp) {
+ rd_kafka_mock_pid_t *mpid;
+ rd_kafka_mock_pid_t skel = {pid};
+
+ *mpidp = NULL;
+ mpid = rd_list_find(&mcluster->pids, &skel, rd_kafka_mock_pid_cmp_pid);
+
+ if (!mpid)
+ return RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
+ else if (((TransactionalId != NULL) !=
+ (*mpid->TransactionalId != '\0')) ||
+ (TransactionalId &&
+ rd_kafkap_str_cmp_str(TransactionalId,
+ mpid->TransactionalId)))
+ return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING;
+
+ *mpidp = mpid;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Checks if the given pid is known, else returns an error.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_pid_check(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
+ const rd_kafka_pid_t check_pid) {
+ rd_kafka_mock_pid_t *mpid;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ mtx_lock(&mcluster->lock);
+ err =
+ rd_kafka_mock_pid_find(mcluster, TransactionalId, check_pid, &mpid);
+ if (!err && check_pid.epoch != mpid->pid.epoch)
+ err = RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
+ mtx_unlock(&mcluster->lock);
+
+ if (unlikely(err))
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK",
+ "PID check failed for TransactionalId=%.*s: "
+ "expected %s, not %s: %s",
+ RD_KAFKAP_STR_PR(TransactionalId),
+ mpid ? rd_kafka_pid2str(mpid->pid) : "none",
+ rd_kafka_pid2str(check_pid),
+ rd_kafka_err2name(err));
+ return err;
+}
+
+
+/**
+ * @brief Bump the epoch for an existing pid, or return an error
+ * if the current_pid does not match an existing pid.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_mock_pid_bump(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
+ rd_kafka_pid_t *current_pid) {
+ rd_kafka_mock_pid_t *mpid;
+ rd_kafka_resp_err_t err;
+
+ mtx_lock(&mcluster->lock);
+ err = rd_kafka_mock_pid_find(mcluster, TransactionalId, *current_pid,
+ &mpid);
+ if (err) {
+ mtx_unlock(&mcluster->lock);
+ return err;
+ }
+
+ if (current_pid->epoch != mpid->pid.epoch) {
+ mtx_unlock(&mcluster->lock);
+ return RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH;
+ }
+
+ mpid->pid.epoch++;
+ *current_pid = mpid->pid;
+ mtx_unlock(&mcluster->lock);
+
+ rd_kafka_dbg(mcluster->rk, MOCK, "MOCK", "Bumped PID %s",
+ rd_kafka_pid2str(*current_pid));
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Handle InitProducerId
+ */
+static int
+rd_kafka_mock_handle_InitProducerId(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafkap_str_t TransactionalId;
+ rd_kafka_pid_t pid = RD_KAFKA_PID_INITIALIZER;
+ rd_kafka_pid_t current_pid = RD_KAFKA_PID_INITIALIZER;
+ int32_t TxnTimeoutMs;
+ rd_kafka_resp_err_t err;
+
+ /* TransactionalId */
+ rd_kafka_buf_read_str(rkbuf, &TransactionalId);
+ /* TransactionTimeoutMs */
+ rd_kafka_buf_read_i32(rkbuf, &TxnTimeoutMs);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
+ /* ProducerId */
+ rd_kafka_buf_read_i64(rkbuf, &current_pid.id);
+ /* ProducerEpoch */
+ rd_kafka_buf_read_i16(rkbuf, &current_pid.epoch);
+ }
+
+ /*
+ * Construct response
+ */
+
+ /* ThrottleTimeMs */
+ rd_kafka_buf_write_i32(resp, 0);
+
+ /* Inject error */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!err && !RD_KAFKAP_STR_IS_NULL(&TransactionalId)) {
+ if (RD_KAFKAP_STR_LEN(&TransactionalId) == 0)
+ err = RD_KAFKA_RESP_ERR_INVALID_REQUEST;
+ else if (rd_kafka_mock_cluster_get_coord(
+ mcluster, RD_KAFKA_COORD_TXN, &TransactionalId) !=
+ mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+ }
+
+ if (!err) {
+ if (rd_kafka_pid_valid(current_pid)) {
+ /* Producer is asking for the transactional coordinator
+ * to bump the epoch (KIP-360).
+ * Verify that current_pid matches and then
+ * bump the epoch. */
+ err = rd_kafka_mock_pid_bump(mcluster, &TransactionalId,
+ &current_pid);
+ if (!err)
+ pid = current_pid;
+
+ } else {
+ /* Generate a new pid */
+ pid = rd_kafka_mock_pid_new(mcluster, &TransactionalId);
+ }
+ }
+
+ /* ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ /* ProducerId */
+ rd_kafka_buf_write_i64(resp, pid.id);
+ /* ProducerEpoch */
+ rd_kafka_buf_write_i16(resp, pid.epoch);
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+
+/**
+ * @brief Handle AddPartitionsToTxn
+ */
+static int
+rd_kafka_mock_handle_AddPartitionsToTxn(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_resp_err_t all_err;
+ rd_kafkap_str_t TransactionalId;
+ rd_kafka_pid_t pid;
+ int32_t TopicsCnt;
+
+ /* Response: ThrottleTimeMs */
+ rd_kafka_buf_write_i32(resp, 0);
+
+ /* TransactionalId */
+ rd_kafka_buf_read_str(rkbuf, &TransactionalId);
+ /* ProducerId */
+ rd_kafka_buf_read_i64(rkbuf, &pid.id);
+ /* Epoch */
+ rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
+ /* #Topics */
+ rd_kafka_buf_read_i32(rkbuf, &TopicsCnt);
+
+ /* Response: #Results */
+ rd_kafka_buf_write_i32(resp, TopicsCnt);
+
+ /* Inject error */
+ all_err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!all_err &&
+ rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN,
+ &TransactionalId) != mconn->broker)
+ all_err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+
+ if (!all_err)
+ all_err =
+ rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
+
+ while (TopicsCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t PartsCnt;
+ const rd_kafka_mock_topic_t *mtopic;
+
+ /* Topic */
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+
+ /* #Partitions */
+ rd_kafka_buf_read_i32(rkbuf, &PartsCnt);
+ /* Response: #Partitions */
+ rd_kafka_buf_write_i32(resp, PartsCnt);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ while (PartsCnt--) {
+ int32_t Partition;
+ rd_kafka_resp_err_t err = all_err;
+
+ /* Partition */
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+
+ if (!mtopic || Partition < 0 ||
+ Partition >= mtopic->partition_cnt)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ else if (mtopic && mtopic->err)
+ err = mtopic->err;
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+ }
+ }
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+/**
+ * @brief Handle AddOffsetsToTxn
+ */
+static int
+rd_kafka_mock_handle_AddOffsetsToTxn(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_resp_err_t err;
+ rd_kafkap_str_t TransactionalId, GroupId;
+ rd_kafka_pid_t pid;
+
+ /* TransactionalId */
+ rd_kafka_buf_read_str(rkbuf, &TransactionalId);
+ /* ProducerId */
+ rd_kafka_buf_read_i64(rkbuf, &pid.id);
+ /* Epoch */
+ rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
+ /* GroupIdId */
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+
+ /* Response: ThrottleTimeMs */
+ rd_kafka_buf_write_i32(resp, 0);
+
+ /* Inject error */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!err &&
+ rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN,
+ &TransactionalId) != mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+
+ if (!err)
+ err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+/**
+ * @brief Handle TxnOffsetCommit
+ */
+static int
+rd_kafka_mock_handle_TxnOffsetCommit(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_resp_err_t err;
+ rd_kafkap_str_t TransactionalId, GroupId;
+ rd_kafka_pid_t pid;
+ int32_t TopicsCnt;
+
+ /* Response: ThrottleTimeMs */
+ rd_kafka_buf_write_i32(resp, 0);
+
+ /* TransactionalId */
+ rd_kafka_buf_read_str(rkbuf, &TransactionalId);
+ /* GroupId */
+ rd_kafka_buf_read_str(rkbuf, &GroupId);
+ /* ProducerId */
+ rd_kafka_buf_read_i64(rkbuf, &pid.id);
+ /* Epoch */
+ rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
+ int32_t GenerationId;
+ rd_kafkap_str_t kMemberId, kGroupInstanceId;
+
+ /* GenerationId */
+ rd_kafka_buf_read_i32(rkbuf, &GenerationId);
+ /* MemberId */
+ rd_kafka_buf_read_str(rkbuf, &kMemberId);
+ /* GroupInstanceId */
+ rd_kafka_buf_read_str(rkbuf, &kGroupInstanceId);
+ }
+
+ /* #Topics */
+ rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, 100000);
+
+ /* Response: #Results */
+ rd_kafka_buf_write_arraycnt(resp, TopicsCnt);
+
+ /* Inject error */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!err &&
+ rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_GROUP,
+ &GroupId) != mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+
+ if (!err)
+ err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
+
+ while (TopicsCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ int32_t PartsCnt;
+ rd_kafka_mock_topic_t *mtopic;
+
+ /* Topic */
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ /* #Partitions */
+ rd_kafka_buf_read_arraycnt(rkbuf, &PartsCnt, 100000);
+
+ /* Response: #Partitions */
+ rd_kafka_buf_write_arraycnt(resp, PartsCnt);
+
+ while (PartsCnt-- > 0) {
+ int32_t Partition;
+ int64_t Offset;
+ rd_kafkap_str_t Metadata;
+ rd_kafka_mock_partition_t *mpart;
+
+ /* Partition */
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+
+ mpart = rd_kafka_mock_partition_find(mtopic, Partition);
+ if (!err && !mpart)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ /* CommittedOffset */
+ rd_kafka_buf_read_i64(rkbuf, &Offset);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 2) {
+ /* CommittedLeaderEpoch */
+ int32_t CommittedLeaderEpoch;
+ rd_kafka_buf_read_i32(rkbuf,
+ &CommittedLeaderEpoch);
+ if (!err && mpart)
+ err =
+ rd_kafka_mock_partition_leader_epoch_check(
+ mpart, CommittedLeaderEpoch);
+ }
+
+ /* CommittedMetadata */
+ rd_kafka_buf_read_str(rkbuf, &Metadata);
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ /* Request: Struct tags */
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ /* Response: Struct tags */
+ rd_kafka_buf_write_tags(resp);
+ }
+
+ /* Request: Struct tags */
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ /* Response: Struct tags */
+ rd_kafka_buf_write_tags(resp);
+ }
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+/**
+ * @brief Handle EndTxn
+ */
+static int rd_kafka_mock_handle_EndTxn(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_resp_err_t err;
+ rd_kafkap_str_t TransactionalId;
+ rd_kafka_pid_t pid;
+ rd_bool_t committed;
+
+ /* TransactionalId */
+ rd_kafka_buf_read_str(rkbuf, &TransactionalId);
+ /* ProducerId */
+ rd_kafka_buf_read_i64(rkbuf, &pid.id);
+ /* ProducerEpoch */
+ rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
+ /* Committed */
+ rd_kafka_buf_read_bool(rkbuf, &committed);
+
+ /*
+ * Construct response
+ */
+
+ /* ThrottleTimeMs */
+ rd_kafka_buf_write_i32(resp, 0);
+
+ /* Inject error */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!err &&
+ rd_kafka_mock_cluster_get_coord(mcluster, RD_KAFKA_COORD_TXN,
+ &TransactionalId) != mconn->broker)
+ err = RD_KAFKA_RESP_ERR_NOT_COORDINATOR;
+
+ if (!err)
+ err = rd_kafka_mock_pid_check(mcluster, &TransactionalId, pid);
+
+ /* ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+static int
+rd_kafka_mock_handle_OffsetForLeaderEpoch(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ const rd_bool_t log_decode_errors = rd_true;
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ rd_kafka_resp_err_t err;
+ int32_t TopicsCnt, i;
+
+ /* Response: ThrottleTimeMs */
+ rd_kafka_buf_write_i32(resp, 0);
+
+ /* #Topics */
+ rd_kafka_buf_read_arraycnt(rkbuf, &TopicsCnt, RD_KAFKAP_TOPICS_MAX);
+
+ /* Response: #Topics */
+ rd_kafka_buf_write_arraycnt(resp, TopicsCnt);
+
+ /* Inject error */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ for (i = 0; i < TopicsCnt; i++) {
+ rd_kafkap_str_t Topic;
+ int32_t PartitionsCnt, j;
+ rd_kafka_mock_topic_t *mtopic;
+
+ /* Topic */
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+
+ mtopic = rd_kafka_mock_topic_find_by_kstr(mcluster, &Topic);
+
+ /* Response: Topic */
+ rd_kafka_buf_write_kstr(resp, &Topic);
+
+ /* #Partitions */
+ rd_kafka_buf_read_arraycnt(rkbuf, &PartitionsCnt,
+ RD_KAFKAP_PARTITIONS_MAX);
+
+ /* Response: #Partitions */
+ rd_kafka_buf_write_arraycnt(resp, PartitionsCnt);
+
+ for (j = 0; j < PartitionsCnt; j++) {
+ rd_kafka_mock_partition_t *mpart;
+ int32_t Partition, CurrentLeaderEpoch, LeaderEpoch;
+ int64_t EndOffset = -1;
+
+ /* Partition */
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+ /* CurrentLeaderEpoch */
+ rd_kafka_buf_read_i32(rkbuf, &CurrentLeaderEpoch);
+ /* LeaderEpoch */
+ rd_kafka_buf_read_i32(rkbuf, &LeaderEpoch);
+
+ mpart = rd_kafka_mock_partition_find(mtopic, Partition);
+ if (!err && !mpart)
+ err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+
+ if (!err && mpart)
+ err =
+ rd_kafka_mock_partition_leader_epoch_check(
+ mpart, CurrentLeaderEpoch);
+
+ if (!err && mpart) {
+ EndOffset =
+ rd_kafka_mock_partition_offset_for_leader_epoch(
+ mpart, LeaderEpoch);
+ }
+
+ /* Response: ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+ /* Response: Partition */
+ rd_kafka_buf_write_i32(resp, Partition);
+ /* Response: LeaderEpoch */
+ rd_kafka_buf_write_i32(resp, LeaderEpoch);
+ /* Response: Partition */
+ rd_kafka_buf_write_i64(resp, EndOffset);
+ }
+ }
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+
+err_parse:
+ rd_kafka_buf_destroy(resp);
+ return -1;
+}
+
+
+/**
+ * @brief Default request handlers
+ */
+const struct rd_kafka_mock_api_handler
+ rd_kafka_mock_api_handlers[RD_KAFKAP__NUM] = {
+ /* [request-type] = { MinVersion, MaxVersion, FlexVersion, callback } */
+ [RD_KAFKAP_Produce] = {0, 7, -1, rd_kafka_mock_handle_Produce},
+ [RD_KAFKAP_Fetch] = {0, 11, -1, rd_kafka_mock_handle_Fetch},
+ [RD_KAFKAP_ListOffsets] = {0, 5, -1, rd_kafka_mock_handle_ListOffsets},
+ [RD_KAFKAP_OffsetFetch] = {0, 6, 6, rd_kafka_mock_handle_OffsetFetch},
+ [RD_KAFKAP_OffsetCommit] = {0, 8, 8, rd_kafka_mock_handle_OffsetCommit},
+ [RD_KAFKAP_ApiVersion] = {0, 2, 3, rd_kafka_mock_handle_ApiVersion},
+ [RD_KAFKAP_Metadata] = {0, 9, 9, rd_kafka_mock_handle_Metadata},
+ [RD_KAFKAP_FindCoordinator] = {0, 3, 3,
+ rd_kafka_mock_handle_FindCoordinator},
+ [RD_KAFKAP_InitProducerId] = {0, 4, 2,
+ rd_kafka_mock_handle_InitProducerId},
+ [RD_KAFKAP_JoinGroup] = {0, 6, 6, rd_kafka_mock_handle_JoinGroup},
+ [RD_KAFKAP_Heartbeat] = {0, 5, 4, rd_kafka_mock_handle_Heartbeat},
+ [RD_KAFKAP_LeaveGroup] = {0, 4, 4, rd_kafka_mock_handle_LeaveGroup},
+ [RD_KAFKAP_SyncGroup] = {0, 4, 4, rd_kafka_mock_handle_SyncGroup},
+ [RD_KAFKAP_AddPartitionsToTxn] =
+ {0, 1, -1, rd_kafka_mock_handle_AddPartitionsToTxn},
+ [RD_KAFKAP_AddOffsetsToTxn] = {0, 1, -1,
+ rd_kafka_mock_handle_AddOffsetsToTxn},
+ [RD_KAFKAP_TxnOffsetCommit] = {0, 3, 3,
+ rd_kafka_mock_handle_TxnOffsetCommit},
+ [RD_KAFKAP_EndTxn] = {0, 1, -1, rd_kafka_mock_handle_EndTxn},
+ [RD_KAFKAP_OffsetForLeaderEpoch] =
+ {2, 2, -1, rd_kafka_mock_handle_OffsetForLeaderEpoch},
+};
+
+
+
+/**
+ * @brief Handle ApiVersionRequest.
+ *
+ * @remark This is the only handler that needs to handle unsupported
+ * ApiVersions.
+ */
+static int rd_kafka_mock_handle_ApiVersion(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_mock_cluster_t *mcluster = mconn->broker->cluster;
+ rd_kafka_buf_t *resp = rd_kafka_mock_buf_new_response(rkbuf);
+ size_t of_ApiKeysCnt;
+ int cnt = 0;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int i;
+
+ /* Inject error */
+ err = rd_kafka_mock_next_request_error(mconn, resp);
+
+ if (!err && !rd_kafka_mock_cluster_ApiVersion_check(
+ mcluster, rkbuf->rkbuf_reqhdr.ApiKey,
+ rkbuf->rkbuf_reqhdr.ApiVersion))
+ err = RD_KAFKA_RESP_ERR_UNSUPPORTED_VERSION;
+
+ /* ApiVersionRequest/Response with flexver (>=v3) has a mix
+ * of flexver and standard fields for backwards compatibility reasons,
+ * so we handcraft the response instead. */
+ resp->rkbuf_flags &= ~RD_KAFKA_OP_F_FLEXVER;
+
+ /* ErrorCode */
+ rd_kafka_buf_write_i16(resp, err);
+
+ /* #ApiKeys (updated later) */
+ /* FIXME: FLEXVER: This is a uvarint and will require more than 1 byte
+ * if the array count exceeds 126. */
+ if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER)
+ of_ApiKeysCnt = rd_kafka_buf_write_i8(resp, 0);
+ else
+ of_ApiKeysCnt = rd_kafka_buf_write_i32(resp, 0);
+
+ for (i = 0; i < RD_KAFKAP__NUM; i++) {
+ if (!mcluster->api_handlers[i].cb ||
+ mcluster->api_handlers[i].MaxVersion == -1)
+ continue;
+
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 3) {
+ if (err && i != RD_KAFKAP_ApiVersion)
+ continue;
+ }
+
+ /* ApiKey */
+ rd_kafka_buf_write_i16(resp, (int16_t)i);
+ /* MinVersion */
+ rd_kafka_buf_write_i16(resp,
+ mcluster->api_handlers[i].MinVersion);
+ /* MaxVersion */
+ rd_kafka_buf_write_i16(resp,
+ mcluster->api_handlers[i].MaxVersion);
+
+ cnt++;
+ }
+
+ /* FIXME: uvarint */
+ if (rkbuf->rkbuf_flags & RD_KAFKA_OP_F_FLEXVER) {
+ rd_assert(cnt <= 126);
+ rd_kafka_buf_update_i8(resp, of_ApiKeysCnt, cnt);
+ } else
+ rd_kafka_buf_update_i32(resp, of_ApiKeysCnt, cnt);
+
+ if (rkbuf->rkbuf_reqhdr.ApiVersion >= 1) {
+ /* ThrottletimeMs */
+ rd_kafka_buf_write_i32(resp, 0);
+ }
+
+ rd_kafka_mock_connection_send_response(mconn, resp);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h
new file mode 100644
index 000000000..ea3b6cab4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_mock_int.h
@@ -0,0 +1,538 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_MOCK_INT_H_
+#define _RDKAFKA_MOCK_INT_H_
+
+/**
+ * @name Mock cluster - internal data types
+ *
+ */
+
+
+/**
+ * @struct Response error and/or RTT-delay to return to client.
+ */
+typedef struct rd_kafka_mock_error_rtt_s {
+ rd_kafka_resp_err_t err; /**< Error response (or 0) */
+ rd_ts_t rtt; /**< RTT/delay in microseconds (or 0) */
+} rd_kafka_mock_error_rtt_t;
+
+/**
+ * @struct A stack of errors or rtt latencies to return to the client,
+ * one by one until the stack is depleted.
+ */
+typedef struct rd_kafka_mock_error_stack_s {
+ TAILQ_ENTRY(rd_kafka_mock_error_stack_s) link;
+ int16_t ApiKey; /**< Optional ApiKey for which this stack
+ * applies to, else -1. */
+ size_t cnt; /**< Current number of errors in .errs */
+ size_t size; /**< Current allocated size for .errs (in elements) */
+ rd_kafka_mock_error_rtt_t *errs; /**< Array of errors/rtts */
+} rd_kafka_mock_error_stack_t;
+
+typedef TAILQ_HEAD(rd_kafka_mock_error_stack_head_s,
+ rd_kafka_mock_error_stack_s)
+ rd_kafka_mock_error_stack_head_t;
+
+
+/**
+ * @struct Consumer group protocol name and metadata.
+ */
+typedef struct rd_kafka_mock_cgrp_proto_s {
+ rd_kafkap_str_t *name;
+ rd_kafkap_bytes_t *metadata;
+} rd_kafka_mock_cgrp_proto_t;
+
+/**
+ * @struct Consumer group member
+ */
+typedef struct rd_kafka_mock_cgrp_member_s {
+ TAILQ_ENTRY(rd_kafka_mock_cgrp_member_s) link;
+ char *id; /**< MemberId */
+ char *group_instance_id; /**< Group instance id */
+ rd_ts_t ts_last_activity; /**< Last activity, e.g., Heartbeat */
+ rd_kafka_mock_cgrp_proto_t *protos; /**< Protocol names */
+ int proto_cnt; /**< Number of protocols */
+ rd_kafkap_bytes_t *assignment; /**< Current assignment */
+ rd_kafka_buf_t *resp; /**< Current response buffer */
+ struct rd_kafka_mock_connection_s *conn; /**< Connection, may be NULL
+ * if there is no ongoing
+ * request. */
+} rd_kafka_mock_cgrp_member_t;
+
+/**
+ * @struct Consumer group.
+ */
+typedef struct rd_kafka_mock_cgrp_s {
+ TAILQ_ENTRY(rd_kafka_mock_cgrp_s) link;
+ struct rd_kafka_mock_cluster_s *cluster; /**< Cluster */
+ struct rd_kafka_mock_connection_s *conn; /**< Connection */
+ char *id; /**< Group Id */
+ char *protocol_type; /**< Protocol type */
+ char *protocol_name; /**< Elected protocol name */
+ int32_t generation_id; /**< Generation Id */
+ int session_timeout_ms; /**< Session timeout */
+ enum { RD_KAFKA_MOCK_CGRP_STATE_EMPTY, /* No members */
+ RD_KAFKA_MOCK_CGRP_STATE_JOINING, /* Members are joining */
+ RD_KAFKA_MOCK_CGRP_STATE_SYNCING, /* Syncing assignments */
+ RD_KAFKA_MOCK_CGRP_STATE_REBALANCING, /* Rebalance triggered */
+ RD_KAFKA_MOCK_CGRP_STATE_UP, /* Group is operational */
+ } state; /**< Consumer group state */
+ rd_kafka_timer_t session_tmr; /**< Session timeout timer */
+ rd_kafka_timer_t rebalance_tmr; /**< Rebalance state timer */
+ TAILQ_HEAD(, rd_kafka_mock_cgrp_member_s) members; /**< Group members */
+ int member_cnt; /**< Number of group members */
+ int last_member_cnt; /**< Mumber of group members at last election */
+ int assignment_cnt; /**< Number of member assignments in last Sync */
+ rd_kafka_mock_cgrp_member_t *leader; /**< Elected leader */
+} rd_kafka_mock_cgrp_t;
+
+
+/**
+ * @struct TransactionalId + PID (+ optional sequence state)
+ */
+typedef struct rd_kafka_mock_pid_s {
+ rd_kafka_pid_t pid;
+
+ /* BaseSequence tracking (partition) */
+ int8_t window; /**< increases up to 5 */
+ int8_t lo; /**< Window low bucket: oldest */
+ int8_t hi; /**< Window high bucket: most recent */
+ int32_t seq[5]; /**< Next expected BaseSequence for each bucket */
+
+ char TransactionalId[1]; /**< Allocated after this structure */
+} rd_kafka_mock_pid_t;
+
+/**
+ * @brief rd_kafka_mock_pid_t.pid Pid (not epoch) comparator
+ */
+static RD_UNUSED int rd_kafka_mock_pid_cmp_pid(const void *_a, const void *_b) {
+ const rd_kafka_mock_pid_t *a = _a, *b = _b;
+
+ if (a->pid.id < b->pid.id)
+ return -1;
+ else if (a->pid.id > b->pid.id)
+ return 1;
+
+ return 0;
+}
+
+/**
+ * @brief rd_kafka_mock_pid_t.pid TransactionalId,Pid,epoch comparator
+ */
+static RD_UNUSED int rd_kafka_mock_pid_cmp(const void *_a, const void *_b) {
+ const rd_kafka_mock_pid_t *a = _a, *b = _b;
+ int r;
+
+ r = strcmp(a->TransactionalId, b->TransactionalId);
+ if (r)
+ return r;
+
+ if (a->pid.id < b->pid.id)
+ return -1;
+ else if (a->pid.id > b->pid.id)
+ return 1;
+
+ if (a->pid.epoch < b->pid.epoch)
+ return -1;
+ if (a->pid.epoch > b->pid.epoch)
+ return 1;
+
+ return 0;
+}
+
+
+
+/**
+ * @struct A real TCP connection from the client to a mock broker.
+ */
+typedef struct rd_kafka_mock_connection_s {
+ TAILQ_ENTRY(rd_kafka_mock_connection_s) link;
+ rd_kafka_transport_t *transport; /**< Socket transport */
+ rd_kafka_buf_t *rxbuf; /**< Receive buffer */
+ rd_kafka_bufq_t outbufs; /**< Send buffers */
+ short *poll_events; /**< Events to poll, points to
+ * the broker's pfd array */
+ struct sockaddr_in peer; /**< Peer address */
+ struct rd_kafka_mock_broker_s *broker;
+ rd_kafka_timer_t write_tmr; /**< Socket write delay timer */
+} rd_kafka_mock_connection_t;
+
+
+/**
+ * @struct Mock broker
+ */
+typedef struct rd_kafka_mock_broker_s {
+ TAILQ_ENTRY(rd_kafka_mock_broker_s) link;
+ int32_t id;
+ char advertised_listener[128];
+ struct sockaddr_in sin; /**< Bound address:port */
+ uint16_t port;
+ char *rack;
+ rd_bool_t up;
+ rd_ts_t rtt; /**< RTT in microseconds */
+
+ rd_socket_t listen_s; /**< listen() socket */
+
+ TAILQ_HEAD(, rd_kafka_mock_connection_s) connections;
+
+ /**< Per-protocol request error stack.
+ * @locks mcluster->lock */
+ rd_kafka_mock_error_stack_head_t errstacks;
+
+ struct rd_kafka_mock_cluster_s *cluster;
+} rd_kafka_mock_broker_t;
+
+
+/**
+ * @struct A Kafka-serialized MessageSet
+ */
+typedef struct rd_kafka_mock_msgset_s {
+ TAILQ_ENTRY(rd_kafka_mock_msgset_s) link;
+ int64_t first_offset; /**< First offset in batch */
+ int64_t last_offset; /**< Last offset in batch */
+ int32_t leader_epoch; /**< Msgset leader epoch */
+ rd_kafkap_bytes_t bytes;
+ /* Space for bytes.data is allocated after the msgset_t */
+} rd_kafka_mock_msgset_t;
+
+
+/**
+ * @struct Committed offset for a group and partition.
+ */
+typedef struct rd_kafka_mock_committed_offset_s {
+ /**< mpart.committed_offsets */
+ TAILQ_ENTRY(rd_kafka_mock_committed_offset_s) link;
+ char *group; /**< Allocated along with the struct */
+ int64_t offset; /**< Committed offset */
+ rd_kafkap_str_t *metadata; /**< Metadata, allocated separately */
+} rd_kafka_mock_committed_offset_t;
+
+
+TAILQ_HEAD(rd_kafka_mock_msgset_tailq_s, rd_kafka_mock_msgset_s);
+
+/**
+ * @struct Mock partition
+ */
+typedef struct rd_kafka_mock_partition_s {
+ TAILQ_ENTRY(rd_kafka_mock_partition_s) leader_link;
+ int32_t id;
+
+ int32_t leader_epoch; /**< Leader epoch, bumped on each
+ * partition leader change. */
+ int64_t start_offset; /**< Actual/leader start offset */
+ int64_t end_offset; /**< Actual/leader end offset */
+ int64_t follower_start_offset; /**< Follower's start offset */
+ int64_t follower_end_offset; /**< Follower's end offset */
+ rd_bool_t update_follower_start_offset; /**< Keep follower_start_offset
+ * in synch with start_offset
+ */
+ rd_bool_t update_follower_end_offset; /**< Keep follower_end_offset
+ * in synch with end_offset
+ */
+
+ struct rd_kafka_mock_msgset_tailq_s msgsets;
+ size_t size; /**< Total size of all .msgsets */
+ size_t cnt; /**< Total count of .msgsets */
+ size_t max_size; /**< Maximum size of all .msgsets, may be overshot. */
+ size_t max_cnt; /**< Maximum number of .msgsets */
+
+ /**< Committed offsets */
+ TAILQ_HEAD(, rd_kafka_mock_committed_offset_s) committed_offsets;
+
+ rd_kafka_mock_broker_t *leader;
+ rd_kafka_mock_broker_t **replicas;
+ int replica_cnt;
+
+ rd_list_t pidstates; /**< PID states */
+
+ int32_t follower_id; /**< Preferred replica/follower */
+
+ struct rd_kafka_mock_topic_s *topic;
+} rd_kafka_mock_partition_t;
+
+
+/**
+ * @struct Mock topic
+ */
+typedef struct rd_kafka_mock_topic_s {
+ TAILQ_ENTRY(rd_kafka_mock_topic_s) link;
+ char *name;
+
+ rd_kafka_mock_partition_t *partitions;
+ int partition_cnt;
+
+ rd_kafka_resp_err_t err; /**< Error to return in protocol requests
+ * for this topic. */
+
+ struct rd_kafka_mock_cluster_s *cluster;
+} rd_kafka_mock_topic_t;
+
+/**
+ * @struct Explicitly set coordinator.
+ */
+typedef struct rd_kafka_mock_coord_s {
+ TAILQ_ENTRY(rd_kafka_mock_coord_s) link;
+ rd_kafka_coordtype_t type;
+ char *key;
+ int32_t broker_id;
+} rd_kafka_mock_coord_t;
+
+
+typedef void(rd_kafka_mock_io_handler_t)(
+ struct rd_kafka_mock_cluster_s *mcluster,
+ rd_socket_t fd,
+ int events,
+ void *opaque);
+
+struct rd_kafka_mock_api_handler {
+ int16_t MinVersion;
+ int16_t MaxVersion;
+ int16_t FlexVersion; /**< First Flexible version */
+ int (*cb)(rd_kafka_mock_connection_t *mconn, rd_kafka_buf_t *rkbuf);
+};
+
+extern const struct rd_kafka_mock_api_handler
+ rd_kafka_mock_api_handlers[RD_KAFKAP__NUM];
+
+
+
+/**
+ * @struct Mock cluster.
+ *
+ * The cluster IO loop runs in a separate thread where all
+ * broker IO is handled.
+ *
+ * No locking is needed.
+ */
+struct rd_kafka_mock_cluster_s {
+ char id[32]; /**< Generated cluster id */
+
+ rd_kafka_t *rk;
+
+ int32_t controller_id; /**< Current controller */
+
+ TAILQ_HEAD(, rd_kafka_mock_broker_s) brokers;
+ int broker_cnt;
+
+ TAILQ_HEAD(, rd_kafka_mock_topic_s) topics;
+ int topic_cnt;
+
+ TAILQ_HEAD(, rd_kafka_mock_cgrp_s) cgrps;
+
+ /** Explicit coordinators (set with mock_set_coordinator()) */
+ TAILQ_HEAD(, rd_kafka_mock_coord_s) coords;
+
+ /** Current transactional producer PIDs.
+ * Element type is a malloced rd_kafka_mock_pid_t*. */
+ rd_list_t pids;
+
+ char *bootstraps; /**< bootstrap.servers */
+
+ thrd_t thread; /**< Mock thread */
+
+ rd_kafka_q_t *ops; /**< Control ops queue for interacting with the
+ * cluster. */
+
+ rd_socket_t wakeup_fds[2]; /**< Wake-up fds for use with .ops */
+
+ rd_bool_t run; /**< Cluster will run while this value is true */
+
+ int fd_cnt; /**< Number of file descriptors */
+ int fd_size; /**< Allocated size of .fds
+ * and .handlers */
+ struct pollfd *fds; /**< Dynamic array */
+
+ rd_kafka_broker_t *dummy_rkb; /**< Some internal librdkafka APIs
+ * that we are reusing requires a
+ * broker object, we use the
+ * internal broker and store it
+ * here for convenient access. */
+
+ struct {
+ int partition_cnt; /**< Auto topic create part cnt */
+ int replication_factor; /**< Auto topic create repl factor */
+ } defaults;
+
+ /**< Dynamic array of IO handlers for corresponding fd in .fds */
+ struct {
+ rd_kafka_mock_io_handler_t *cb; /**< Callback */
+ void *opaque; /**< Callbacks' opaque */
+ } * handlers;
+
+ /**< Per-protocol request error stack. */
+ rd_kafka_mock_error_stack_head_t errstacks;
+
+ /**< Request handlers */
+ struct rd_kafka_mock_api_handler api_handlers[RD_KAFKAP__NUM];
+
+ /**< Mutex for:
+ * .errstacks
+ * .apiversions
+ */
+ mtx_t lock;
+
+ rd_kafka_timers_t timers; /**< Timers */
+};
+
+
+
+rd_kafka_buf_t *rd_kafka_mock_buf_new_response(const rd_kafka_buf_t *request);
+void rd_kafka_mock_connection_send_response(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp);
+void rd_kafka_mock_connection_set_blocking(rd_kafka_mock_connection_t *mconn,
+ rd_bool_t blocking);
+
+rd_kafka_mock_partition_t *
+rd_kafka_mock_partition_find(const rd_kafka_mock_topic_t *mtopic,
+ int32_t partition);
+rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_auto_create(rd_kafka_mock_cluster_t *mcluster,
+ const char *topic,
+ int partition_cnt,
+ rd_kafka_resp_err_t *errp);
+rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_find(const rd_kafka_mock_cluster_t *mcluster,
+ const char *name);
+rd_kafka_mock_topic_t *
+rd_kafka_mock_topic_find_by_kstr(const rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *kname);
+rd_kafka_mock_broker_t *
+rd_kafka_mock_cluster_get_coord(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_coordtype_t KeyType,
+ const rd_kafkap_str_t *Key);
+
+rd_kafka_mock_committed_offset_t *
+rd_kafka_mock_committed_offset_find(const rd_kafka_mock_partition_t *mpart,
+ const rd_kafkap_str_t *group);
+rd_kafka_mock_committed_offset_t *
+rd_kafka_mock_commit_offset(rd_kafka_mock_partition_t *mpart,
+ const rd_kafkap_str_t *group,
+ int64_t offset,
+ const rd_kafkap_str_t *metadata);
+
+const rd_kafka_mock_msgset_t *
+rd_kafka_mock_msgset_find(const rd_kafka_mock_partition_t *mpart,
+ int64_t offset,
+ rd_bool_t on_follower);
+
+rd_kafka_resp_err_t
+rd_kafka_mock_next_request_error(rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp);
+
+rd_kafka_resp_err_t
+rd_kafka_mock_partition_log_append(rd_kafka_mock_partition_t *mpart,
+ const rd_kafkap_bytes_t *records,
+ const rd_kafkap_str_t *TransactionalId,
+ int64_t *BaseOffset);
+
+rd_kafka_resp_err_t rd_kafka_mock_partition_leader_epoch_check(
+ const rd_kafka_mock_partition_t *mpart,
+ int32_t leader_epoch);
+
+int64_t rd_kafka_mock_partition_offset_for_leader_epoch(
+ const rd_kafka_mock_partition_t *mpart,
+ int32_t leader_epoch);
+
+
+/**
+ * @returns true if the ApiVersion is supported, else false.
+ */
+static RD_UNUSED rd_bool_t
+rd_kafka_mock_cluster_ApiVersion_check(const rd_kafka_mock_cluster_t *mcluster,
+ int16_t ApiKey,
+ int16_t ApiVersion) {
+ return (ApiVersion >= mcluster->api_handlers[ApiKey].MinVersion &&
+ ApiVersion <= mcluster->api_handlers[ApiKey].MaxVersion);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_mock_pid_find(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *TransactionalId,
+ const rd_kafka_pid_t pid,
+ rd_kafka_mock_pid_t **mpidp);
+
+
+/**
+ * @name Mock consumer group (rdkafka_mock_cgrp.c)
+ * @{
+ */
+void rd_kafka_mock_cgrp_member_active(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member);
+void rd_kafka_mock_cgrp_member_assignment_set(
+ rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member,
+ const rd_kafkap_bytes_t *Metadata);
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_sync_set(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member,
+ rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp);
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_leave(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member);
+void rd_kafka_mock_cgrp_protos_destroy(rd_kafka_mock_cgrp_proto_t *protos,
+ int proto_cnt);
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_member_add(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_connection_t *mconn,
+ rd_kafka_buf_t *resp,
+ const rd_kafkap_str_t *MemberId,
+ const rd_kafkap_str_t *ProtocolType,
+ rd_kafka_mock_cgrp_proto_t *protos,
+ int proto_cnt,
+ int session_timeout_ms);
+rd_kafka_resp_err_t
+rd_kafka_mock_cgrp_check_state(rd_kafka_mock_cgrp_t *mcgrp,
+ rd_kafka_mock_cgrp_member_t *member,
+ const rd_kafka_buf_t *request,
+ int32_t generation_id);
+rd_kafka_mock_cgrp_member_t *
+rd_kafka_mock_cgrp_member_find(const rd_kafka_mock_cgrp_t *mcgrp,
+ const rd_kafkap_str_t *MemberId);
+void rd_kafka_mock_cgrp_destroy(rd_kafka_mock_cgrp_t *mcgrp);
+rd_kafka_mock_cgrp_t *rd_kafka_mock_cgrp_find(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *GroupId);
+rd_kafka_mock_cgrp_t *
+rd_kafka_mock_cgrp_get(rd_kafka_mock_cluster_t *mcluster,
+ const rd_kafkap_str_t *GroupId,
+ const rd_kafkap_str_t *ProtocolType);
+void rd_kafka_mock_cgrps_connection_closed(rd_kafka_mock_cluster_t *mcluster,
+ rd_kafka_mock_connection_t *mconn);
+
+
+/**
+ *@}
+ */
+
+
+#include "rdkafka_mock.h"
+
+#endif /* _RDKAFKA_MOCK_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c
new file mode 100644
index 000000000..17b67999b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.c
@@ -0,0 +1,2517 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_msg.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_interceptor.h"
+#include "rdkafka_header.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_txnmgr.h"
+#include "rdkafka_error.h"
+#include "rdcrc32.h"
+#include "rdfnv1a.h"
+#include "rdmurmur2.h"
+#include "rdrand.h"
+#include "rdtime.h"
+#include "rdsysqueue.h"
+#include "rdunittest.h"
+
+#include <stdarg.h>
+
+
+const char *rd_kafka_message_errstr(const rd_kafka_message_t *rkmessage) {
+ if (!rkmessage->err)
+ return NULL;
+
+ if (rkmessage->payload)
+ return (const char *)rkmessage->payload;
+
+ return rd_kafka_err2str(rkmessage->err);
+}
+
+
+/**
+ * @brief Check if producing is allowed.
+ *
+ * @param errorp If non-NULL and an producing is prohibited a new error_t
+ * object will be allocated and returned in this pointer.
+ *
+ * @returns an error if not allowed, else 0.
+ *
+ * @remarks Also sets the corresponding errno.
+ */
+static RD_INLINE rd_kafka_resp_err_t
+rd_kafka_check_produce(rd_kafka_t *rk, rd_kafka_error_t **errorp) {
+ rd_kafka_resp_err_t err;
+
+ if (unlikely((err = rd_kafka_fatal_error_code(rk)))) {
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__FATAL, ECANCELED);
+ if (errorp) {
+ rd_kafka_rdlock(rk);
+ *errorp = rd_kafka_error_new_fatal(
+ err,
+ "Producing not allowed since a previous fatal "
+ "error was raised: %s",
+ rk->rk_fatal.errstr);
+ rd_kafka_rdunlock(rk);
+ }
+ return RD_KAFKA_RESP_ERR__FATAL;
+ }
+
+ if (likely(rd_kafka_txn_may_enq_msg(rk)))
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Transactional state forbids producing */
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__STATE, ENOEXEC);
+
+ if (errorp) {
+ rd_kafka_rdlock(rk);
+ *errorp = rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__STATE,
+ "Producing not allowed in transactional state %s",
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state));
+ rd_kafka_rdunlock(rk);
+ }
+
+ return RD_KAFKA_RESP_ERR__STATE;
+}
+
+
+void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm) {
+ // FIXME
+ if (rkm->rkm_flags & RD_KAFKA_MSG_F_ACCOUNT) {
+ rd_dassert(rk || rkm->rkm_rkmessage.rkt);
+ rd_kafka_curr_msgs_sub(rk ? rk : rkm->rkm_rkmessage.rkt->rkt_rk,
+ 1, rkm->rkm_len);
+ }
+
+ if (rkm->rkm_headers)
+ rd_kafka_headers_destroy(rkm->rkm_headers);
+
+ if (likely(rkm->rkm_rkmessage.rkt != NULL))
+ rd_kafka_topic_destroy0(rkm->rkm_rkmessage.rkt);
+
+ if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE && rkm->rkm_payload)
+ rd_free(rkm->rkm_payload);
+
+ if (rkm->rkm_flags & RD_KAFKA_MSG_F_FREE_RKM)
+ rd_free(rkm);
+}
+
+
+
+/**
+ * @brief Create a new Producer message, copying the payload as
+ * indicated by msgflags.
+ *
+ * @returns the new message
+ */
+static rd_kafka_msg_t *rd_kafka_msg_new00(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int msgflags,
+ char *payload,
+ size_t len,
+ const void *key,
+ size_t keylen,
+ void *msg_opaque) {
+ rd_kafka_msg_t *rkm;
+ size_t mlen = sizeof(*rkm);
+ char *p;
+
+ /* If we are to make a copy of the payload, allocate space for it too */
+ if (msgflags & RD_KAFKA_MSG_F_COPY) {
+ msgflags &= ~RD_KAFKA_MSG_F_FREE;
+ mlen += len;
+ }
+
+ mlen += keylen;
+
+ /* Note: using rd_malloc here, not rd_calloc, so make sure all fields
+ * are properly set up. */
+ rkm = rd_malloc(mlen);
+ rkm->rkm_err = 0;
+ rkm->rkm_flags =
+ (RD_KAFKA_MSG_F_PRODUCER | RD_KAFKA_MSG_F_FREE_RKM | msgflags);
+ rkm->rkm_len = len;
+ rkm->rkm_opaque = msg_opaque;
+ rkm->rkm_rkmessage.rkt = rd_kafka_topic_keep(rkt);
+
+ rkm->rkm_broker_id = -1;
+ rkm->rkm_partition = partition;
+ rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID;
+ rkm->rkm_timestamp = 0;
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
+ rkm->rkm_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+ rkm->rkm_headers = NULL;
+
+ p = (char *)(rkm + 1);
+
+ if (payload && msgflags & RD_KAFKA_MSG_F_COPY) {
+ /* Copy payload to space following the ..msg_t */
+ rkm->rkm_payload = p;
+ memcpy(rkm->rkm_payload, payload, len);
+ p += len;
+
+ } else {
+ /* Just point to the provided payload. */
+ rkm->rkm_payload = payload;
+ }
+
+ if (key) {
+ rkm->rkm_key = p;
+ rkm->rkm_key_len = keylen;
+ memcpy(rkm->rkm_key, key, keylen);
+ } else {
+ rkm->rkm_key = NULL;
+ rkm->rkm_key_len = 0;
+ }
+
+ return rkm;
+}
+
+
+
+/**
+ * @brief Create a new Producer message.
+ *
+ * @remark Must only be used by producer code.
+ *
+ * Returns 0 on success or -1 on error.
+ * Both errno and 'errp' are set appropriately.
+ */
+static rd_kafka_msg_t *rd_kafka_msg_new0(rd_kafka_topic_t *rkt,
+ int32_t force_partition,
+ int msgflags,
+ char *payload,
+ size_t len,
+ const void *key,
+ size_t keylen,
+ void *msg_opaque,
+ rd_kafka_resp_err_t *errp,
+ int *errnop,
+ rd_kafka_headers_t *hdrs,
+ int64_t timestamp,
+ rd_ts_t now) {
+ rd_kafka_msg_t *rkm;
+ size_t hdrs_size = 0;
+
+ if (unlikely(!payload))
+ len = 0;
+ if (!key)
+ keylen = 0;
+ if (hdrs)
+ hdrs_size = rd_kafka_headers_serialized_size(hdrs);
+
+ if (unlikely(len > INT32_MAX || keylen > INT32_MAX ||
+ rd_kafka_msg_max_wire_size(keylen, len, hdrs_size) >
+ (size_t)rkt->rkt_rk->rk_conf.max_msg_size)) {
+ *errp = RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE;
+ if (errnop)
+ *errnop = EMSGSIZE;
+ return NULL;
+ }
+
+ if (msgflags & RD_KAFKA_MSG_F_BLOCK)
+ *errp = rd_kafka_curr_msgs_add(
+ rkt->rkt_rk, 1, len, 1 /*block*/,
+ (msgflags & RD_KAFKA_MSG_F_RKT_RDLOCKED) ? &rkt->rkt_lock
+ : NULL);
+ else
+ *errp = rd_kafka_curr_msgs_add(rkt->rkt_rk, 1, len, 0, NULL);
+
+ if (unlikely(*errp)) {
+ if (errnop)
+ *errnop = ENOBUFS;
+ return NULL;
+ }
+
+
+ rkm = rd_kafka_msg_new00(
+ rkt, force_partition,
+ msgflags | RD_KAFKA_MSG_F_ACCOUNT /* curr_msgs_add() */, payload,
+ len, key, keylen, msg_opaque);
+
+ memset(&rkm->rkm_u.producer, 0, sizeof(rkm->rkm_u.producer));
+
+ if (timestamp)
+ rkm->rkm_timestamp = timestamp;
+ else
+ rkm->rkm_timestamp = rd_uclock() / 1000;
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
+
+ if (hdrs) {
+ rd_dassert(!rkm->rkm_headers);
+ rkm->rkm_headers = hdrs;
+ }
+
+ rkm->rkm_ts_enq = now;
+
+ if (rkt->rkt_conf.message_timeout_ms == 0) {
+ rkm->rkm_ts_timeout = INT64_MAX;
+ } else {
+ rkm->rkm_ts_timeout =
+ now + (int64_t)rkt->rkt_conf.message_timeout_ms * 1000;
+ }
+
+ /* Call interceptor chain for on_send */
+ rd_kafka_interceptors_on_send(rkt->rkt_rk, &rkm->rkm_rkmessage);
+
+ return rkm;
+}
+
+
+/**
+ * @brief Produce: creates a new message, runs the partitioner and enqueues
+ * into on the selected partition.
+ *
+ * @returns 0 on success or -1 on error.
+ *
+ * If the function returns -1 and RD_KAFKA_MSG_F_FREE was specified, then
+ * the memory associated with the payload is still the caller's
+ * responsibility.
+ *
+ * @locks none
+ */
+int rd_kafka_msg_new(rd_kafka_topic_t *rkt,
+ int32_t force_partition,
+ int msgflags,
+ char *payload,
+ size_t len,
+ const void *key,
+ size_t keylen,
+ void *msg_opaque) {
+ rd_kafka_msg_t *rkm;
+ rd_kafka_resp_err_t err;
+ int errnox;
+
+ if (unlikely((err = rd_kafka_check_produce(rkt->rkt_rk, NULL))))
+ return -1;
+
+ /* Create message */
+ rkm = rd_kafka_msg_new0(rkt, force_partition, msgflags, payload, len,
+ key, keylen, msg_opaque, &err, &errnox, NULL, 0,
+ rd_clock());
+ if (unlikely(!rkm)) {
+ /* errno is already set by msg_new() */
+ rd_kafka_set_last_error(err, errnox);
+ return -1;
+ }
+
+
+ /* Partition the message */
+ err = rd_kafka_msg_partitioner(rkt, rkm, 1);
+ if (likely(!err)) {
+ rd_kafka_set_last_error(0, 0);
+ return 0;
+ }
+
+ /* Interceptor: unroll failing messages by triggering on_ack.. */
+ rkm->rkm_err = err;
+ rd_kafka_interceptors_on_acknowledgement(rkt->rkt_rk,
+ &rkm->rkm_rkmessage);
+
+ /* Handle partitioner failures: it only fails when the application
+ * attempts to force a destination partition that does not exist
+ * in the cluster. Note we must clear the RD_KAFKA_MSG_F_FREE
+ * flag since our contract says we don't free the payload on
+ * failure. */
+
+ rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
+ rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
+
+ /* Translate error codes to errnos. */
+ if (err == RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ rd_kafka_set_last_error(err, ESRCH);
+ else if (err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ rd_kafka_set_last_error(err, ENOENT);
+ else
+ rd_kafka_set_last_error(err, EINVAL); /* NOTREACHED */
+
+ return -1;
+}
+
+
+/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */
+rd_kafka_error_t *
+rd_kafka_produceva(rd_kafka_t *rk, const rd_kafka_vu_t *vus, size_t cnt) {
+ rd_kafka_msg_t s_rkm = {
+ /* Message defaults */
+ .rkm_partition = RD_KAFKA_PARTITION_UA,
+ .rkm_timestamp = 0, /* current time */
+ };
+ rd_kafka_msg_t *rkm = &s_rkm;
+ rd_kafka_topic_t *rkt = NULL;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_error_t *error = NULL;
+ rd_kafka_headers_t *hdrs = NULL;
+ rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */
+ size_t i;
+
+ if (unlikely(rd_kafka_check_produce(rk, &error)))
+ return error;
+
+ for (i = 0; i < cnt; i++) {
+ const rd_kafka_vu_t *vu = &vus[i];
+ switch (vu->vtype) {
+ case RD_KAFKA_VTYPE_TOPIC:
+ rkt =
+ rd_kafka_topic_new0(rk, vu->u.cstr, NULL, NULL, 1);
+ break;
+
+ case RD_KAFKA_VTYPE_RKT:
+ rkt = rd_kafka_topic_proper(vu->u.rkt);
+ rd_kafka_topic_keep(rkt);
+ break;
+
+ case RD_KAFKA_VTYPE_PARTITION:
+ rkm->rkm_partition = vu->u.i32;
+ break;
+
+ case RD_KAFKA_VTYPE_VALUE:
+ rkm->rkm_payload = vu->u.mem.ptr;
+ rkm->rkm_len = vu->u.mem.size;
+ break;
+
+ case RD_KAFKA_VTYPE_KEY:
+ rkm->rkm_key = vu->u.mem.ptr;
+ rkm->rkm_key_len = vu->u.mem.size;
+ break;
+
+ case RD_KAFKA_VTYPE_OPAQUE:
+ rkm->rkm_opaque = vu->u.ptr;
+ break;
+
+ case RD_KAFKA_VTYPE_MSGFLAGS:
+ rkm->rkm_flags = vu->u.i;
+ break;
+
+ case RD_KAFKA_VTYPE_TIMESTAMP:
+ rkm->rkm_timestamp = vu->u.i64;
+ break;
+
+ case RD_KAFKA_VTYPE_HEADER:
+ if (unlikely(app_hdrs != NULL)) {
+ error = rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__CONFLICT,
+ "VTYPE_HEADER and VTYPE_HEADERS "
+ "are mutually exclusive");
+ goto err;
+ }
+
+ if (unlikely(!hdrs))
+ hdrs = rd_kafka_headers_new(8);
+
+ err = rd_kafka_header_add(hdrs, vu->u.header.name, -1,
+ vu->u.header.val,
+ vu->u.header.size);
+ if (unlikely(err)) {
+ error = rd_kafka_error_new(
+ err, "Failed to add header: %s",
+ rd_kafka_err2str(err));
+ goto err;
+ }
+ break;
+
+ case RD_KAFKA_VTYPE_HEADERS:
+ if (unlikely(hdrs != NULL)) {
+ error = rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__CONFLICT,
+ "VTYPE_HEADERS and VTYPE_HEADER "
+ "are mutually exclusive");
+ goto err;
+ }
+ app_hdrs = vu->u.headers;
+ break;
+
+ default:
+ error = rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Unsupported VTYPE %d", (int)vu->vtype);
+ goto err;
+ }
+ }
+
+ rd_assert(!error);
+
+ if (unlikely(!rkt)) {
+ error = rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Topic name or object required");
+ goto err;
+ }
+
+ rkm = rd_kafka_msg_new0(
+ rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload,
+ rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len, rkm->rkm_opaque, &err,
+ NULL, app_hdrs ? app_hdrs : hdrs, rkm->rkm_timestamp, rd_clock());
+
+ if (unlikely(err)) {
+ error = rd_kafka_error_new(err, "Failed to produce message: %s",
+ rd_kafka_err2str(err));
+ goto err;
+ }
+
+ /* Partition the message */
+ err = rd_kafka_msg_partitioner(rkt, rkm, 1);
+ if (unlikely(err)) {
+ /* Handle partitioner failures: it only fails when
+ * the application attempts to force a destination
+ * partition that does not exist in the cluster. */
+
+ /* Interceptors: Unroll on_send by on_ack.. */
+ rkm->rkm_err = err;
+ rd_kafka_interceptors_on_acknowledgement(rk,
+ &rkm->rkm_rkmessage);
+
+ /* Note we must clear the RD_KAFKA_MSG_F_FREE
+ * flag since our contract says we don't free the payload on
+ * failure. */
+ rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
+
+ /* Deassociate application owned headers from message
+ * since headers remain in application ownership
+ * when producev() fails */
+ if (app_hdrs && app_hdrs == rkm->rkm_headers)
+ rkm->rkm_headers = NULL;
+
+ rd_kafka_msg_destroy(rk, rkm);
+
+ error = rd_kafka_error_new(err, "Failed to enqueue message: %s",
+ rd_kafka_err2str(err));
+ goto err;
+ }
+
+ rd_kafka_topic_destroy0(rkt);
+
+ return NULL;
+
+err:
+ if (rkt)
+ rd_kafka_topic_destroy0(rkt);
+
+ if (hdrs)
+ rd_kafka_headers_destroy(hdrs);
+
+ rd_assert(error != NULL);
+ return error;
+}
+
+
+
+/** @remark Keep rd_kafka_produceva() and rd_kafka_producev() in synch */
+rd_kafka_resp_err_t rd_kafka_producev(rd_kafka_t *rk, ...) {
+ va_list ap;
+ rd_kafka_msg_t s_rkm = {
+ /* Message defaults */
+ .rkm_partition = RD_KAFKA_PARTITION_UA,
+ .rkm_timestamp = 0, /* current time */
+ };
+ rd_kafka_msg_t *rkm = &s_rkm;
+ rd_kafka_vtype_t vtype;
+ rd_kafka_topic_t *rkt = NULL;
+ rd_kafka_resp_err_t err;
+ rd_kafka_headers_t *hdrs = NULL;
+ rd_kafka_headers_t *app_hdrs = NULL; /* App-provided headers list */
+
+ if (unlikely((err = rd_kafka_check_produce(rk, NULL))))
+ return err;
+
+ va_start(ap, rk);
+ while (!err &&
+ (vtype = va_arg(ap, rd_kafka_vtype_t)) != RD_KAFKA_VTYPE_END) {
+ switch (vtype) {
+ case RD_KAFKA_VTYPE_TOPIC:
+ rkt = rd_kafka_topic_new0(rk, va_arg(ap, const char *),
+ NULL, NULL, 1);
+ break;
+
+ case RD_KAFKA_VTYPE_RKT:
+ rkt = rd_kafka_topic_proper(
+ va_arg(ap, rd_kafka_topic_t *));
+ rd_kafka_topic_keep(rkt);
+ break;
+
+ case RD_KAFKA_VTYPE_PARTITION:
+ rkm->rkm_partition = va_arg(ap, int32_t);
+ break;
+
+ case RD_KAFKA_VTYPE_VALUE:
+ rkm->rkm_payload = va_arg(ap, void *);
+ rkm->rkm_len = va_arg(ap, size_t);
+ break;
+
+ case RD_KAFKA_VTYPE_KEY:
+ rkm->rkm_key = va_arg(ap, void *);
+ rkm->rkm_key_len = va_arg(ap, size_t);
+ break;
+
+ case RD_KAFKA_VTYPE_OPAQUE:
+ rkm->rkm_opaque = va_arg(ap, void *);
+ break;
+
+ case RD_KAFKA_VTYPE_MSGFLAGS:
+ rkm->rkm_flags = va_arg(ap, int);
+ break;
+
+ case RD_KAFKA_VTYPE_TIMESTAMP:
+ rkm->rkm_timestamp = va_arg(ap, int64_t);
+ break;
+
+ case RD_KAFKA_VTYPE_HEADER: {
+ const char *name;
+ const void *value;
+ ssize_t size;
+
+ if (unlikely(app_hdrs != NULL)) {
+ err = RD_KAFKA_RESP_ERR__CONFLICT;
+ break;
+ }
+
+ if (unlikely(!hdrs))
+ hdrs = rd_kafka_headers_new(8);
+
+ name = va_arg(ap, const char *);
+ value = va_arg(ap, const void *);
+ size = va_arg(ap, ssize_t);
+
+ err = rd_kafka_header_add(hdrs, name, -1, value, size);
+ } break;
+
+ case RD_KAFKA_VTYPE_HEADERS:
+ if (unlikely(hdrs != NULL)) {
+ err = RD_KAFKA_RESP_ERR__CONFLICT;
+ break;
+ }
+ app_hdrs = va_arg(ap, rd_kafka_headers_t *);
+ break;
+
+ default:
+ err = RD_KAFKA_RESP_ERR__INVALID_ARG;
+ break;
+ }
+ }
+
+ va_end(ap);
+
+ if (unlikely(!rkt))
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ if (likely(!err))
+ rkm = rd_kafka_msg_new0(
+ rkt, rkm->rkm_partition, rkm->rkm_flags, rkm->rkm_payload,
+ rkm->rkm_len, rkm->rkm_key, rkm->rkm_key_len,
+ rkm->rkm_opaque, &err, NULL, app_hdrs ? app_hdrs : hdrs,
+ rkm->rkm_timestamp, rd_clock());
+
+ if (unlikely(err)) {
+ rd_kafka_topic_destroy0(rkt);
+ if (hdrs)
+ rd_kafka_headers_destroy(hdrs);
+ return err;
+ }
+
+ /* Partition the message */
+ err = rd_kafka_msg_partitioner(rkt, rkm, 1);
+ if (unlikely(err)) {
+ /* Handle partitioner failures: it only fails when
+ * the application attempts to force a destination
+ * partition that does not exist in the cluster. */
+
+ /* Interceptors: Unroll on_send by on_ack.. */
+ rkm->rkm_err = err;
+ rd_kafka_interceptors_on_acknowledgement(rk,
+ &rkm->rkm_rkmessage);
+
+ /* Note we must clear the RD_KAFKA_MSG_F_FREE
+ * flag since our contract says we don't free the payload on
+ * failure. */
+ rkm->rkm_flags &= ~RD_KAFKA_MSG_F_FREE;
+
+ /* Deassociate application owned headers from message
+ * since headers remain in application ownership
+ * when producev() fails */
+ if (app_hdrs && app_hdrs == rkm->rkm_headers)
+ rkm->rkm_headers = NULL;
+
+ rd_kafka_msg_destroy(rk, rkm);
+ }
+
+ rd_kafka_topic_destroy0(rkt);
+
+ return err;
+}
+
+
+
+/**
+ * @brief Produce a single message.
+ * @locality any application thread
+ * @locks none
+ */
+int rd_kafka_produce(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int msgflags,
+ void *payload,
+ size_t len,
+ const void *key,
+ size_t keylen,
+ void *msg_opaque) {
+ return rd_kafka_msg_new(rkt, partition, msgflags, payload, len, key,
+ keylen, msg_opaque);
+}
+
+
+
+/**
+ * Produce a batch of messages.
+ * Returns the number of messages succesfully queued for producing.
+ * Each message's .err will be set accordingly.
+ */
+int rd_kafka_produce_batch(rd_kafka_topic_t *app_rkt,
+ int32_t partition,
+ int msgflags,
+ rd_kafka_message_t *rkmessages,
+ int message_cnt) {
+ rd_kafka_msgq_t tmpq = RD_KAFKA_MSGQ_INITIALIZER(tmpq);
+ int i;
+ int64_t utc_now = rd_uclock() / 1000;
+ rd_ts_t now = rd_clock();
+ int good = 0;
+ int multiple_partitions = (partition == RD_KAFKA_PARTITION_UA ||
+ (msgflags & RD_KAFKA_MSG_F_PARTITION));
+ rd_kafka_resp_err_t all_err;
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_toppar_t *rktp = NULL;
+
+ /* Propagated per-message below */
+ all_err = rd_kafka_check_produce(rkt->rkt_rk, NULL);
+
+ rd_kafka_topic_rdlock(rkt);
+ if (!multiple_partitions) {
+ /* Single partition: look up the rktp once. */
+ rktp = rd_kafka_toppar_get_avail(rkt, partition,
+ 1 /*ua on miss*/, &all_err);
+
+ } else {
+ /* Indicate to lower-level msg_new..() that rkt is locked
+ * so that they may unlock it momentarily if blocking. */
+ msgflags |= RD_KAFKA_MSG_F_RKT_RDLOCKED;
+ }
+
+ for (i = 0; i < message_cnt; i++) {
+ rd_kafka_msg_t *rkm;
+
+ /* Propagate error for all messages. */
+ if (unlikely(all_err)) {
+ rkmessages[i].err = all_err;
+ continue;
+ }
+
+ /* Create message */
+ rkm = rd_kafka_msg_new0(
+ rkt,
+ (msgflags & RD_KAFKA_MSG_F_PARTITION)
+ ? rkmessages[i].partition
+ : partition,
+ msgflags, rkmessages[i].payload, rkmessages[i].len,
+ rkmessages[i].key, rkmessages[i].key_len,
+ rkmessages[i]._private, &rkmessages[i].err, NULL, NULL,
+ utc_now, now);
+ if (unlikely(!rkm)) {
+ if (rkmessages[i].err == RD_KAFKA_RESP_ERR__QUEUE_FULL)
+ all_err = rkmessages[i].err;
+ continue;
+ }
+
+ /* Three cases here:
+ * partition==UA: run the partitioner (slow)
+ * RD_KAFKA_MSG_F_PARTITION: produce message to specified
+ * partition
+ * fixed partition: simply concatenate the queue
+ * to partit */
+ if (multiple_partitions) {
+ if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) {
+ /* Partition the message */
+ rkmessages[i].err = rd_kafka_msg_partitioner(
+ rkt, rkm, 0 /*already locked*/);
+ } else {
+ if (rktp == NULL || rkm->rkm_partition !=
+ rktp->rktp_partition) {
+ rd_kafka_resp_err_t err;
+ if (rktp != NULL)
+ rd_kafka_toppar_destroy(rktp);
+ rktp = rd_kafka_toppar_get_avail(
+ rkt, rkm->rkm_partition,
+ 1 /*ua on miss*/, &err);
+
+ if (unlikely(!rktp)) {
+ rkmessages[i].err = err;
+ continue;
+ }
+ }
+ rd_kafka_toppar_enq_msg(rktp, rkm, now);
+
+ if (rd_kafka_is_transactional(rkt->rkt_rk)) {
+ /* Add partition to transaction */
+ rd_kafka_txn_add_partition(rktp);
+ }
+ }
+
+ if (unlikely(rkmessages[i].err)) {
+ /* Interceptors: Unroll on_send by on_ack.. */
+ rd_kafka_interceptors_on_acknowledgement(
+ rkt->rkt_rk, &rkmessages[i]);
+
+ rd_kafka_msg_destroy(rkt->rkt_rk, rkm);
+ continue;
+ }
+
+
+ } else {
+ /* Single destination partition. */
+ rd_kafka_toppar_enq_msg(rktp, rkm, now);
+ }
+
+ rkmessages[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ good++;
+ }
+
+ rd_kafka_topic_rdunlock(rkt);
+
+ if (!multiple_partitions && good > 0 &&
+ rd_kafka_is_transactional(rkt->rkt_rk) &&
+ rktp->rktp_partition != RD_KAFKA_PARTITION_UA) {
+ /* Add single destination partition to transaction */
+ rd_kafka_txn_add_partition(rktp);
+ }
+
+ if (rktp != NULL)
+ rd_kafka_toppar_destroy(rktp);
+
+ return good;
+}
+
+/**
+ * @brief Scan \p rkmq for messages that have timed out and remove them from
+ * \p rkmq and add to \p timedout queue.
+ *
+ * @param abs_next_timeout will be set to the next message timeout, or 0
+ * if no timeout. Optional, may be NULL.
+ *
+ * @returns the number of messages timed out.
+ *
+ * @locality any
+ * @locks toppar_lock MUST be held
+ */
+int rd_kafka_msgq_age_scan(rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_msgq_t *timedout,
+ rd_ts_t now,
+ rd_ts_t *abs_next_timeout) {
+ rd_kafka_msg_t *rkm, *tmp, *first = NULL;
+ int cnt = timedout->rkmq_msg_cnt;
+
+ if (abs_next_timeout)
+ *abs_next_timeout = 0;
+
+ /* Assume messages are added in time sequencial order */
+ TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp) {
+ /* NOTE: this is not true for the deprecated (and soon removed)
+ * LIFO queuing strategy. */
+ if (likely(rkm->rkm_ts_timeout > now)) {
+ if (abs_next_timeout)
+ *abs_next_timeout = rkm->rkm_ts_timeout;
+ break;
+ }
+
+ if (!first)
+ first = rkm;
+
+ rd_kafka_msgq_deq(rkmq, rkm, 1);
+ rd_kafka_msgq_enq(timedout, rkm);
+ }
+
+ return timedout->rkmq_msg_cnt - cnt;
+}
+
+
+int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq,
+ rd_kafka_msg_t *rkm,
+ int (*order_cmp)(const void *, const void *)) {
+ TAILQ_INSERT_SORTED(&rkmq->rkmq_msgs, rkm, rd_kafka_msg_t *, rkm_link,
+ order_cmp);
+ rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len;
+ return ++rkmq->rkmq_msg_cnt;
+}
+
+int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_msg_t *rkm) {
+ rd_dassert(rkm->rkm_u.producer.msgid != 0);
+ return rd_kafka_msgq_enq_sorted0(rkmq, rkm,
+ rkt->rkt_conf.msg_order_cmp);
+}
+
+/**
+ * @brief Find the insert before position (i.e., the msg which comes
+ * after \p rkm sequencially) for message \p rkm.
+ *
+ * @param rkmq insert queue.
+ * @param start_pos the element in \p rkmq to start scanning at, or NULL
+ * to start with the first element.
+ * @param rkm message to insert.
+ * @param cmp message comparator.
+ * @param cntp the accumulated number of messages up to, but not including,
+ * the returned insert position. Optional (NULL).
+ * Do not use when start_pos is set.
+ * @param bytesp the accumulated number of bytes up to, but not inclduing,
+ * the returned insert position. Optional (NULL).
+ * Do not use when start_pos is set.
+ *
+ * @remark cntp and bytesp will NOT be accurate when \p start_pos is non-NULL.
+ *
+ * @returns the insert position element, or NULL if \p rkm should be
+ * added at tail of queue.
+ */
+rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq,
+ const rd_kafka_msg_t *start_pos,
+ const rd_kafka_msg_t *rkm,
+ int (*cmp)(const void *, const void *),
+ int *cntp,
+ int64_t *bytesp) {
+ const rd_kafka_msg_t *curr;
+ int cnt = 0;
+ int64_t bytes = 0;
+
+ for (curr = start_pos ? start_pos : rd_kafka_msgq_first(rkmq); curr;
+ curr = TAILQ_NEXT(curr, rkm_link)) {
+ if (cmp(rkm, curr) < 0) {
+ if (cntp) {
+ *cntp = cnt;
+ *bytesp = bytes;
+ }
+ return (rd_kafka_msg_t *)curr;
+ }
+ if (cntp) {
+ cnt++;
+ bytes += rkm->rkm_len + rkm->rkm_key_len;
+ }
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Split the original \p leftq into a left and right part,
+ * with element \p first_right being the first element in the
+ * right part (\p rightq).
+ *
+ * @param cnt is the number of messages up to, but not including \p first_right
+ * in \p leftq, namely the number of messages to remain in
+ * \p leftq after the split.
+ * @param bytes is the bytes counterpart to \p cnt.
+ */
+void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq,
+ rd_kafka_msgq_t *rightq,
+ rd_kafka_msg_t *first_right,
+ int cnt,
+ int64_t bytes) {
+ rd_kafka_msg_t *llast;
+
+ rd_assert(first_right != TAILQ_FIRST(&leftq->rkmq_msgs));
+
+ llast = TAILQ_PREV(first_right, rd_kafka_msg_head_s, rkm_link);
+
+ rd_kafka_msgq_init(rightq);
+
+ rightq->rkmq_msgs.tqh_first = first_right;
+ rightq->rkmq_msgs.tqh_last = leftq->rkmq_msgs.tqh_last;
+
+ first_right->rkm_link.tqe_prev = &rightq->rkmq_msgs.tqh_first;
+
+ leftq->rkmq_msgs.tqh_last = &llast->rkm_link.tqe_next;
+ llast->rkm_link.tqe_next = NULL;
+
+ rightq->rkmq_msg_cnt = leftq->rkmq_msg_cnt - cnt;
+ rightq->rkmq_msg_bytes = leftq->rkmq_msg_bytes - bytes;
+ leftq->rkmq_msg_cnt = cnt;
+ leftq->rkmq_msg_bytes = bytes;
+
+ rd_kafka_msgq_verify_order(NULL, leftq, 0, rd_false);
+ rd_kafka_msgq_verify_order(NULL, rightq, 0, rd_false);
+}
+
+
+/**
+ * @brief Set per-message metadata for all messages in \p rkmq
+ */
+void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq,
+ int32_t broker_id,
+ int64_t base_offset,
+ int64_t timestamp,
+ rd_kafka_msg_status_t status) {
+ rd_kafka_msg_t *rkm;
+
+ TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
+ rkm->rkm_broker_id = broker_id;
+ rkm->rkm_offset = base_offset++;
+ if (timestamp != -1) {
+ rkm->rkm_timestamp = timestamp;
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
+ }
+
+ /* Don't downgrade a message from any form of PERSISTED
+ * to NOT_PERSISTED, since the original cause of indicating
+ * PERSISTED can't be changed.
+ * E.g., a previous ack or in-flight timeout. */
+ if (unlikely(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
+ rkm->rkm_status !=
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED))
+ continue;
+
+ rkm->rkm_status = status;
+ }
+}
+
+
+/**
+ * @brief Move all messages in \p src to \p dst whose msgid <= last_msgid.
+ *
+ * @remark src must be ordered
+ */
+void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest,
+ rd_kafka_msgq_t *src,
+ uint64_t last_msgid,
+ rd_kafka_msg_status_t status) {
+ rd_kafka_msg_t *rkm;
+
+ while ((rkm = rd_kafka_msgq_first(src)) &&
+ rkm->rkm_u.producer.msgid <= last_msgid) {
+ rd_kafka_msgq_deq(src, rkm, 1);
+ rd_kafka_msgq_enq(dest, rkm);
+
+ rkm->rkm_status = status;
+ }
+
+ rd_kafka_msgq_verify_order(NULL, dest, 0, rd_false);
+ rd_kafka_msgq_verify_order(NULL, src, 0, rd_false);
+}
+
+
+
+int32_t rd_kafka_msg_partitioner_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ int32_t p = rd_jitter(0, partition_cnt - 1);
+ if (unlikely(!rd_kafka_topic_partition_available(rkt, p)))
+ return rd_jitter(0, partition_cnt - 1);
+ else
+ return p;
+}
+
+int32_t rd_kafka_msg_partitioner_consistent(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ return rd_crc32(key, keylen) % partition_cnt;
+}
+
+int32_t rd_kafka_msg_partitioner_consistent_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ if (keylen == 0)
+ return rd_kafka_msg_partitioner_random(
+ rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
+ else
+ return rd_kafka_msg_partitioner_consistent(
+ rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
+}
+
+int32_t rd_kafka_msg_partitioner_murmur2(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt;
+}
+
+int32_t rd_kafka_msg_partitioner_murmur2_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ if (!key)
+ return rd_kafka_msg_partitioner_random(
+ rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
+ else
+ return (rd_murmur2(key, keylen) & 0x7fffffff) % partition_cnt;
+}
+
+int32_t rd_kafka_msg_partitioner_fnv1a(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ return rd_fnv1a(key, keylen) % partition_cnt;
+}
+
+int32_t rd_kafka_msg_partitioner_fnv1a_random(const rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ if (!key)
+ return rd_kafka_msg_partitioner_random(
+ rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
+ else
+ return rd_fnv1a(key, keylen) % partition_cnt;
+}
+
+int32_t rd_kafka_msg_sticky_partition(rd_kafka_topic_t *rkt,
+ const void *key,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+
+ if (!rd_kafka_topic_partition_available(rkt, rkt->rkt_sticky_partition))
+ rd_interval_expedite(&rkt->rkt_sticky_intvl, 0);
+
+ if (rd_interval(&rkt->rkt_sticky_intvl,
+ rkt->rkt_rk->rk_conf.sticky_partition_linger_ms * 1000,
+ 0) > 0) {
+ rkt->rkt_sticky_partition = rd_kafka_msg_partitioner_random(
+ rkt, key, keylen, partition_cnt, rkt_opaque, msg_opaque);
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "PARTITIONER",
+ "%s [%" PRId32 "] is the new sticky partition",
+ rkt->rkt_topic->str, rkt->rkt_sticky_partition);
+ }
+
+ return rkt->rkt_sticky_partition;
+}
+
+/**
+ * @brief Assigns a message to a topic partition using a partitioner.
+ *
+ * @param do_lock if RD_DO_LOCK then acquire topic lock.
+ *
+ * @returns RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION or .._UNKNOWN_TOPIC if
+ * partitioning failed, or 0 on success.
+ *
+ * @locality any
+ * @locks rd_kafka_
+ */
+int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt,
+ rd_kafka_msg_t *rkm,
+ rd_dolock_t do_lock) {
+ int32_t partition;
+ rd_kafka_toppar_t *rktp_new;
+ rd_kafka_resp_err_t err;
+
+ if (do_lock)
+ rd_kafka_topic_rdlock(rkt);
+
+ switch (rkt->rkt_state) {
+ case RD_KAFKA_TOPIC_S_UNKNOWN:
+ /* No metadata received from cluster yet.
+ * Put message in UA partition and re-run partitioner when
+ * cluster comes up. */
+ partition = RD_KAFKA_PARTITION_UA;
+ break;
+
+ case RD_KAFKA_TOPIC_S_NOTEXISTS:
+ /* Topic not found in cluster.
+ * Fail message immediately. */
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ if (do_lock)
+ rd_kafka_topic_rdunlock(rkt);
+ return err;
+
+ case RD_KAFKA_TOPIC_S_ERROR:
+ /* Topic has permanent error.
+ * Fail message immediately. */
+ err = rkt->rkt_err;
+ if (do_lock)
+ rd_kafka_topic_rdunlock(rkt);
+ return err;
+
+ case RD_KAFKA_TOPIC_S_EXISTS:
+ /* Topic exists in cluster. */
+
+ /* Topic exists but has no partitions.
+ * This is usually an transient state following the
+ * auto-creation of a topic. */
+ if (unlikely(rkt->rkt_partition_cnt == 0)) {
+ partition = RD_KAFKA_PARTITION_UA;
+ break;
+ }
+
+ /* Partition not assigned, run partitioner. */
+ if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA) {
+
+ if (!rkt->rkt_conf.random_partitioner &&
+ (!rkm->rkm_key ||
+ (rkm->rkm_key_len == 0 &&
+ rkt->rkt_conf.partitioner ==
+ rd_kafka_msg_partitioner_consistent_random))) {
+ partition = rd_kafka_msg_sticky_partition(
+ rkt, rkm->rkm_key, rkm->rkm_key_len,
+ rkt->rkt_partition_cnt,
+ rkt->rkt_conf.opaque, rkm->rkm_opaque);
+ } else {
+ partition = rkt->rkt_conf.partitioner(
+ rkt, rkm->rkm_key, rkm->rkm_key_len,
+ rkt->rkt_partition_cnt,
+ rkt->rkt_conf.opaque, rkm->rkm_opaque);
+ }
+ } else
+ partition = rkm->rkm_partition;
+
+ /* Check that partition exists. */
+ if (partition >= rkt->rkt_partition_cnt) {
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ if (do_lock)
+ rd_kafka_topic_rdunlock(rkt);
+ return err;
+ }
+ break;
+
+ default:
+ rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
+ break;
+ }
+
+ /* Get new partition */
+ rktp_new = rd_kafka_toppar_get(rkt, partition, 0);
+
+ if (unlikely(!rktp_new)) {
+ /* Unknown topic or partition */
+ if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ else
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ if (do_lock)
+ rd_kafka_topic_rdunlock(rkt);
+
+ return err;
+ }
+
+ rd_atomic64_add(&rktp_new->rktp_c.producer_enq_msgs, 1);
+
+ /* Update message partition */
+ if (rkm->rkm_partition == RD_KAFKA_PARTITION_UA)
+ rkm->rkm_partition = partition;
+
+ /* Partition is available: enqueue msg on partition's queue */
+ rd_kafka_toppar_enq_msg(rktp_new, rkm, rd_clock());
+ if (do_lock)
+ rd_kafka_topic_rdunlock(rkt);
+
+ if (rktp_new->rktp_partition != RD_KAFKA_PARTITION_UA &&
+ rd_kafka_is_transactional(rkt->rkt_rk)) {
+ /* Add partition to transaction */
+ rd_kafka_txn_add_partition(rktp_new);
+ }
+
+ rd_kafka_toppar_destroy(rktp_new); /* from _get() */
+ return 0;
+}
+
+
+
+/**
+ * @name Public message type (rd_kafka_message_t)
+ */
+void rd_kafka_message_destroy(rd_kafka_message_t *rkmessage) {
+ rd_kafka_op_t *rko;
+
+ if (likely((rko = (rd_kafka_op_t *)rkmessage->_private) != NULL))
+ rd_kafka_op_destroy(rko);
+ else {
+ rd_kafka_msg_t *rkm = rd_kafka_message2msg(rkmessage);
+ rd_kafka_msg_destroy(NULL, rkm);
+ }
+}
+
+
+rd_kafka_message_t *rd_kafka_message_new(void) {
+ rd_kafka_msg_t *rkm = rd_calloc(1, sizeof(*rkm));
+ rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM;
+ rkm->rkm_broker_id = -1;
+ return (rd_kafka_message_t *)rkm;
+}
+
+
+/**
+ * @brief Set up a rkmessage from an rko for passing to the application.
+ * @remark Will trigger on_consume() interceptors if any.
+ */
+static rd_kafka_message_t *
+rd_kafka_message_setup(rd_kafka_op_t *rko, rd_kafka_message_t *rkmessage) {
+ rd_kafka_topic_t *rkt;
+ rd_kafka_toppar_t *rktp = NULL;
+
+ if (rko->rko_type == RD_KAFKA_OP_DR) {
+ rkt = rko->rko_u.dr.rkt;
+ } else {
+ if (rko->rko_rktp) {
+ rktp = rko->rko_rktp;
+ rkt = rktp->rktp_rkt;
+ } else
+ rkt = NULL;
+
+ rkmessage->_private = rko;
+ }
+
+
+ if (!rkmessage->rkt && rkt)
+ rkmessage->rkt = rd_kafka_topic_keep(rkt);
+
+ if (rktp)
+ rkmessage->partition = rktp->rktp_partition;
+
+ if (!rkmessage->err)
+ rkmessage->err = rko->rko_err;
+
+ /* Call on_consume interceptors */
+ switch (rko->rko_type) {
+ case RD_KAFKA_OP_FETCH:
+ if (!rkmessage->err && rkt)
+ rd_kafka_interceptors_on_consume(rkt->rkt_rk,
+ rkmessage);
+ break;
+
+ default:
+ break;
+ }
+
+ return rkmessage;
+}
+
+
+
+/**
+ * @brief Get rkmessage from rkm (for EVENT_DR)
+ * @remark Must only be called just prior to passing a dr to the application.
+ */
+rd_kafka_message_t *rd_kafka_message_get_from_rkm(rd_kafka_op_t *rko,
+ rd_kafka_msg_t *rkm) {
+ return rd_kafka_message_setup(rko, &rkm->rkm_rkmessage);
+}
+
+/**
+ * @brief Convert rko to rkmessage
+ * @remark Must only be called just prior to passing a consumed message
+ * or event to the application.
+ * @remark Will trigger on_consume() interceptors, if any.
+ * @returns a rkmessage (bound to the rko).
+ */
+rd_kafka_message_t *rd_kafka_message_get(rd_kafka_op_t *rko) {
+ rd_kafka_message_t *rkmessage;
+
+ if (!rko)
+ return rd_kafka_message_new(); /* empty */
+
+ switch (rko->rko_type) {
+ case RD_KAFKA_OP_FETCH:
+ /* Use embedded rkmessage */
+ rkmessage = &rko->rko_u.fetch.rkm.rkm_rkmessage;
+ break;
+
+ case RD_KAFKA_OP_ERR:
+ case RD_KAFKA_OP_CONSUMER_ERR:
+ rkmessage = &rko->rko_u.err.rkm.rkm_rkmessage;
+ rkmessage->payload = rko->rko_u.err.errstr;
+ rkmessage->len =
+ rkmessage->payload ? strlen(rkmessage->payload) : 0;
+ rkmessage->offset = rko->rko_u.err.offset;
+ break;
+
+ default:
+ rd_kafka_assert(NULL, !*"unhandled optype");
+ RD_NOTREACHED();
+ return NULL;
+ }
+
+ return rd_kafka_message_setup(rko, rkmessage);
+}
+
+
+int64_t rd_kafka_message_timestamp(const rd_kafka_message_t *rkmessage,
+ rd_kafka_timestamp_type_t *tstype) {
+ rd_kafka_msg_t *rkm;
+
+ if (rkmessage->err) {
+ if (tstype)
+ *tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
+ return -1;
+ }
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+
+ if (tstype)
+ *tstype = rkm->rkm_tstype;
+
+ return rkm->rkm_timestamp;
+}
+
+
+int64_t rd_kafka_message_latency(const rd_kafka_message_t *rkmessage) {
+ rd_kafka_msg_t *rkm;
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+
+ if (unlikely(!rkm->rkm_ts_enq))
+ return -1;
+
+ return rd_clock() - rkm->rkm_ts_enq;
+}
+
+
+int32_t rd_kafka_message_broker_id(const rd_kafka_message_t *rkmessage) {
+ rd_kafka_msg_t *rkm;
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+
+ return rkm->rkm_broker_id;
+}
+
+
+
+/**
+ * @brief Parse serialized message headers and populate
+ * rkm->rkm_headers (which must be NULL).
+ */
+static rd_kafka_resp_err_t rd_kafka_msg_headers_parse(rd_kafka_msg_t *rkm) {
+ rd_kafka_buf_t *rkbuf;
+ int64_t HeaderCount;
+ const int log_decode_errors = 0;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__BAD_MSG;
+ int i;
+ rd_kafka_headers_t *hdrs = NULL;
+
+ rd_dassert(!rkm->rkm_headers);
+
+ if (RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs) == 0)
+ return RD_KAFKA_RESP_ERR__NOENT;
+
+ rkbuf = rd_kafka_buf_new_shadow(
+ rkm->rkm_u.consumer.binhdrs.data,
+ RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs), NULL);
+
+ rd_kafka_buf_read_varint(rkbuf, &HeaderCount);
+
+ if (HeaderCount <= 0) {
+ rd_kafka_buf_destroy(rkbuf);
+ return RD_KAFKA_RESP_ERR__NOENT;
+ } else if (unlikely(HeaderCount > 100000)) {
+ rd_kafka_buf_destroy(rkbuf);
+ return RD_KAFKA_RESP_ERR__BAD_MSG;
+ }
+
+ hdrs = rd_kafka_headers_new((size_t)HeaderCount);
+
+ for (i = 0; (int64_t)i < HeaderCount; i++) {
+ int64_t KeyLen, ValueLen;
+ const char *Key, *Value;
+
+ rd_kafka_buf_read_varint(rkbuf, &KeyLen);
+ rd_kafka_buf_read_ptr(rkbuf, &Key, (size_t)KeyLen);
+
+ rd_kafka_buf_read_varint(rkbuf, &ValueLen);
+ if (unlikely(ValueLen == -1))
+ Value = NULL;
+ else
+ rd_kafka_buf_read_ptr(rkbuf, &Value, (size_t)ValueLen);
+
+ rd_kafka_header_add(hdrs, Key, (ssize_t)KeyLen, Value,
+ (ssize_t)ValueLen);
+ }
+
+ rkm->rkm_headers = hdrs;
+
+ rd_kafka_buf_destroy(rkbuf);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+ rd_kafka_buf_destroy(rkbuf);
+ if (hdrs)
+ rd_kafka_headers_destroy(hdrs);
+ return err;
+}
+
+
+
+rd_kafka_resp_err_t
+rd_kafka_message_headers(const rd_kafka_message_t *rkmessage,
+ rd_kafka_headers_t **hdrsp) {
+ rd_kafka_msg_t *rkm;
+ rd_kafka_resp_err_t err;
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+
+ if (rkm->rkm_headers) {
+ *hdrsp = rkm->rkm_headers;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ /* Producer (rkm_headers will be set if there were any headers) */
+ if (rkm->rkm_flags & RD_KAFKA_MSG_F_PRODUCER)
+ return RD_KAFKA_RESP_ERR__NOENT;
+
+ /* Consumer */
+
+ /* No previously parsed headers, check if the underlying
+ * protocol message had headers and if so, parse them. */
+ if (unlikely(!RD_KAFKAP_BYTES_LEN(&rkm->rkm_u.consumer.binhdrs)))
+ return RD_KAFKA_RESP_ERR__NOENT;
+
+ err = rd_kafka_msg_headers_parse(rkm);
+ if (unlikely(err))
+ return err;
+
+ *hdrsp = rkm->rkm_headers;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_message_detach_headers(rd_kafka_message_t *rkmessage,
+ rd_kafka_headers_t **hdrsp) {
+ rd_kafka_msg_t *rkm;
+ rd_kafka_resp_err_t err;
+
+ err = rd_kafka_message_headers(rkmessage, hdrsp);
+ if (err)
+ return err;
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+ rkm->rkm_headers = NULL;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+void rd_kafka_message_set_headers(rd_kafka_message_t *rkmessage,
+ rd_kafka_headers_t *hdrs) {
+ rd_kafka_msg_t *rkm;
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+
+ if (rkm->rkm_headers) {
+ assert(rkm->rkm_headers != hdrs);
+ rd_kafka_headers_destroy(rkm->rkm_headers);
+ }
+
+ rkm->rkm_headers = hdrs;
+}
+
+
+
+rd_kafka_msg_status_t
+rd_kafka_message_status(const rd_kafka_message_t *rkmessage) {
+ rd_kafka_msg_t *rkm;
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+
+ return rkm->rkm_status;
+}
+
+
+int32_t rd_kafka_message_leader_epoch(const rd_kafka_message_t *rkmessage) {
+ rd_kafka_msg_t *rkm;
+
+ if (unlikely(!rkmessage->rkt ||
+ rkmessage->rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER))
+ return -1;
+
+ rkm = rd_kafka_message2msg((rd_kafka_message_t *)rkmessage);
+
+ return rkm->rkm_u.consumer.leader_epoch;
+}
+
+
+void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq) {
+ rd_kafka_msg_t *rkm;
+ int cnt = 0;
+
+ fprintf(fp, "%s msgq_dump (%d messages, %" PRIusz " bytes):\n", what,
+ rd_kafka_msgq_len(rkmq), rd_kafka_msgq_size(rkmq));
+ TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
+ fprintf(fp,
+ " [%" PRId32 "]@%" PRId64 ": rkm msgid %" PRIu64
+ ": \"%.*s\"\n",
+ rkm->rkm_partition, rkm->rkm_offset,
+ rkm->rkm_u.producer.msgid, (int)rkm->rkm_len,
+ (const char *)rkm->rkm_payload);
+ rd_assert(cnt++ < rkmq->rkmq_msg_cnt);
+ }
+}
+
+
+
+/**
+ * @brief Destroy resources associated with msgbatch
+ */
+void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb) {
+ if (rkmb->rktp) {
+ rd_kafka_toppar_destroy(rkmb->rktp);
+ rkmb->rktp = NULL;
+ }
+
+ rd_assert(RD_KAFKA_MSGQ_EMPTY(&rkmb->msgq));
+}
+
+
+/**
+ * @brief Initialize a message batch for the Idempotent Producer.
+ */
+void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_pid_t pid,
+ uint64_t epoch_base_msgid) {
+ memset(rkmb, 0, sizeof(*rkmb));
+
+ rkmb->rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_msgq_init(&rkmb->msgq);
+
+ rkmb->pid = pid;
+ rkmb->first_seq = -1;
+ rkmb->epoch_base_msgid = epoch_base_msgid;
+}
+
+
+/**
+ * @brief Set the first message in the batch. which is used to set
+ * the BaseSequence and keep track of batch reconstruction range.
+ *
+ * @param rkm is the first message in the batch.
+ */
+void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb,
+ rd_kafka_msg_t *rkm) {
+ rd_assert(rkmb->first_msgid == 0);
+
+ if (!rd_kafka_pid_valid(rkmb->pid))
+ return;
+
+ rkmb->first_msgid = rkm->rkm_u.producer.msgid;
+
+ /* Our msgid counter is 64-bits, but the
+ * Kafka protocol's sequence is only 31 (signed), so we'll
+ * need to handle wrapping. */
+ rkmb->first_seq = rd_kafka_seq_wrap(rkm->rkm_u.producer.msgid -
+ rkmb->epoch_base_msgid);
+
+ /* Check if there is a stored last message
+ * on the first msg, which means an entire
+ * batch of messages are being retried and
+ * we need to maintain the exact messages
+ * of the original batch.
+ * Simply tracking the last message, on
+ * the first message, is sufficient for now.
+ * Will be 0 if not applicable. */
+ rkmb->last_msgid = rkm->rkm_u.producer.last_msgid;
+}
+
+
+
+/**
+ * @brief Message batch is ready to be transmitted.
+ *
+ * @remark This function assumes the batch will be transmitted and increases
+ * the toppar's in-flight count.
+ */
+void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb) {
+ rd_kafka_toppar_t *rktp = rkmb->rktp;
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+
+ /* Keep track of number of requests in-flight per partition,
+ * and the number of partitions with in-flight requests when
+ * idempotent producer - this is used to drain partitions
+ * before resetting the PID. */
+ if (rd_atomic32_add(&rktp->rktp_msgs_inflight,
+ rd_kafka_msgq_len(&rkmb->msgq)) ==
+ rd_kafka_msgq_len(&rkmb->msgq) &&
+ rd_kafka_is_idempotent(rk))
+ rd_kafka_idemp_inflight_toppar_add(rk, rktp);
+}
+
+
+
+/**
+ * @brief Allow queue wakeups after \p abstime, or when the
+ * given \p batch_msg_cnt or \p batch_msg_bytes have been reached.
+ *
+ * @param rkmq Queue to monitor and set wakeup parameters on.
+ * @param dest_rkmq Destination queue used to meter current queue depths
+ * and oldest message. May be the same as \p rkmq but is
+ * typically the rktp_xmit_msgq.
+ * @param next_wakeup If non-NULL: update the caller's next scheduler wakeup
+ * according to the wakeup time calculated by this function.
+ * @param now The current time.
+ * @param linger_us The configured queue linger / batching time.
+ * @param batch_msg_cnt Queue threshold before signalling.
+ * @param batch_msg_bytes Queue threshold before signalling.
+ *
+ * @returns true if the wakeup conditions are already met and messages are ready
+ * to be sent, else false.
+ *
+ * @locks_required rd_kafka_toppar_lock()
+ *
+ *
+ * Producer queue and broker thread wake-up behaviour.
+ *
+ * There are contradicting requirements at play here:
+ * - Latency: queued messages must be batched and sent according to
+ * batch size and linger.ms configuration.
+ * - Wakeups: keep the number of thread wake-ups to a minimum to avoid
+ * high CPU utilization and context switching.
+ *
+ * The message queue (rd_kafka_msgq_t) has functionality for the writer (app)
+ * to wake up the reader (broker thread) when there's a new message added.
+ * This wakeup is done thru a combination of cndvar signalling and IO writes
+ * to make sure a thread wakeup is triggered regardless if the broker thread
+ * is blocking on cnd_timedwait() or on IO poll.
+ * When the broker thread is woken up it will scan all the partitions it is
+ * the leader for to check if there are messages to be sent - all according
+ * to the configured batch size and linger.ms - and then decide its next
+ * wait time depending on the lowest remaining linger.ms setting of any
+ * partition with messages enqueued.
+ *
+ * This wait time must also be set as a threshold on the message queue, telling
+ * the writer (app) that it must not trigger a wakeup until the wait time
+ * has expired, or the batch sizes have been exceeded.
+ *
+ * The message queue wakeup time is per partition, while the broker thread
+ * wakeup time is the lowest of all its partitions' wakeup times.
+ *
+ * The per-partition wakeup constraints are calculated and set by
+ * rd_kafka_msgq_allow_wakeup_at() which is called from the broker thread's
+ * per-partition handler.
+ * This function is called each time there are changes to the broker-local
+ * partition transmit queue (rktp_xmit_msgq), such as:
+ * - messages are moved from the partition queue (rktp_msgq) to rktp_xmit_msgq
+ * - messages are moved to a ProduceRequest
+ * - messages are timed out from the rktp_xmit_msgq
+ * - the flushing state changed (rd_kafka_flush() is called or returned).
+ *
+ * If none of these things happen, the broker thread will simply read the
+ * last stored wakeup time for each partition and use that for calculating its
+ * minimum wait time.
+ *
+ *
+ * On the writer side, namely the application calling rd_kafka_produce(), the
+ * followings checks are performed to see if it may trigger a wakeup when
+ * it adds a new message to the partition queue:
+ * - the current time has reached the wakeup time (e.g., remaining linger.ms
+ * has expired), or
+ * - with the new message(s) being added, either the batch.size or
+ * batch.num.messages thresholds have been exceeded, or
+ * - the application is calling rd_kafka_flush(),
+ * - and no wakeup has been signalled yet. This is critical since it may take
+ * some time for the broker thread to do its work we'll want to avoid
+ * flooding it with wakeups. So a wakeup is only sent once per
+ * wakeup period.
+ */
+rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq,
+ const rd_kafka_msgq_t *dest_rkmq,
+ rd_ts_t *next_wakeup,
+ rd_ts_t now,
+ rd_ts_t linger_us,
+ int32_t batch_msg_cnt,
+ int64_t batch_msg_bytes) {
+ int32_t msg_cnt = rd_kafka_msgq_len(dest_rkmq);
+ int64_t msg_bytes = rd_kafka_msgq_size(dest_rkmq);
+
+ if (RD_KAFKA_MSGQ_EMPTY(dest_rkmq)) {
+ rkmq->rkmq_wakeup.on_first = rd_true;
+ rkmq->rkmq_wakeup.abstime = now + linger_us;
+ /* Leave next_wakeup untouched since the queue is empty */
+ msg_cnt = 0;
+ msg_bytes = 0;
+ } else {
+ const rd_kafka_msg_t *rkm = rd_kafka_msgq_first(dest_rkmq);
+
+ rkmq->rkmq_wakeup.on_first = rd_false;
+
+ if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) {
+ /* Honour retry.backoff.ms:
+ * wait for backoff to expire */
+ rkmq->rkmq_wakeup.abstime =
+ rkm->rkm_u.producer.ts_backoff;
+ } else {
+ /* Use message's produce() time + linger.ms */
+ rkmq->rkmq_wakeup.abstime =
+ rd_kafka_msg_enq_time(rkm) + linger_us;
+ if (rkmq->rkmq_wakeup.abstime <= now)
+ rkmq->rkmq_wakeup.abstime = now;
+ }
+
+ /* Update the caller's scheduler wakeup time */
+ if (next_wakeup && rkmq->rkmq_wakeup.abstime < *next_wakeup)
+ *next_wakeup = rkmq->rkmq_wakeup.abstime;
+
+ msg_cnt = rd_kafka_msgq_len(dest_rkmq);
+ msg_bytes = rd_kafka_msgq_size(dest_rkmq);
+ }
+
+ /*
+ * If there are more messages or bytes in queue than the batch limits,
+ * or the linger time has been exceeded,
+ * then there is no need for wakeup since the broker thread will
+ * produce those messages as quickly as it can.
+ */
+ if (msg_cnt >= batch_msg_cnt || msg_bytes >= batch_msg_bytes ||
+ (msg_cnt > 0 && now >= rkmq->rkmq_wakeup.abstime)) {
+ /* Prevent further signalling */
+ rkmq->rkmq_wakeup.signalled = rd_true;
+
+ /* Batch is ready */
+ return rd_true;
+ }
+
+ /* If the current msg or byte count is less than the batch limit
+ * then set the rkmq count to the remaining count or size to
+ * reach the batch limits.
+ * This is for the case where the producer is waiting for more
+ * messages to accumulate into a batch. The wakeup should only
+ * occur once a threshold is reached or the abstime has expired.
+ */
+ rkmq->rkmq_wakeup.signalled = rd_false;
+ rkmq->rkmq_wakeup.msg_cnt = batch_msg_cnt - msg_cnt;
+ rkmq->rkmq_wakeup.msg_bytes = batch_msg_bytes - msg_bytes;
+
+ return rd_false;
+}
+
+
+
+/**
+ * @brief Verify order (by msgid) in message queue.
+ * For development use only.
+ */
+void rd_kafka_msgq_verify_order0(const char *function,
+ int line,
+ const rd_kafka_toppar_t *rktp,
+ const rd_kafka_msgq_t *rkmq,
+ uint64_t exp_first_msgid,
+ rd_bool_t gapless) {
+ const rd_kafka_msg_t *rkm;
+ uint64_t exp;
+ int errcnt = 0;
+ int cnt = 0;
+ const char *topic = rktp ? rktp->rktp_rkt->rkt_topic->str : "n/a";
+ int32_t partition = rktp ? rktp->rktp_partition : -1;
+
+ if (rd_kafka_msgq_len(rkmq) == 0)
+ return;
+
+ if (exp_first_msgid)
+ exp = exp_first_msgid;
+ else {
+ exp = rd_kafka_msgq_first(rkmq)->rkm_u.producer.msgid;
+ if (exp == 0) /* message without msgid (e.g., UA partition) */
+ return;
+ }
+
+ TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
+#if 0
+ printf("%s:%d: %s [%"PRId32"]: rkm #%d (%p) "
+ "msgid %"PRIu64"\n",
+ function, line,
+ topic, partition,
+ cnt, rkm, rkm->rkm_u.producer.msgid);
+#endif
+ if (gapless && rkm->rkm_u.producer.msgid != exp) {
+ printf("%s:%d: %s [%" PRId32
+ "]: rkm #%d (%p) "
+ "msgid %" PRIu64
+ ": "
+ "expected msgid %" PRIu64 "\n",
+ function, line, topic, partition, cnt, rkm,
+ rkm->rkm_u.producer.msgid, exp);
+ errcnt++;
+ } else if (!gapless && rkm->rkm_u.producer.msgid < exp) {
+ printf("%s:%d: %s [%" PRId32
+ "]: rkm #%d (%p) "
+ "msgid %" PRIu64
+ ": "
+ "expected increased msgid >= %" PRIu64 "\n",
+ function, line, topic, partition, cnt, rkm,
+ rkm->rkm_u.producer.msgid, exp);
+ errcnt++;
+ } else
+ exp++;
+
+ if (cnt >= rkmq->rkmq_msg_cnt) {
+ printf("%s:%d: %s [%" PRId32
+ "]: rkm #%d (%p) "
+ "msgid %" PRIu64 ": loop in queue?\n",
+ function, line, topic, partition, cnt, rkm,
+ rkm->rkm_u.producer.msgid);
+ errcnt++;
+ break;
+ }
+
+ cnt++;
+ }
+
+ rd_assert(!errcnt);
+}
+
+
+
+/**
+ * @name Unit tests
+ */
+
+/**
+ * @brief Unittest: message allocator
+ */
+rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize) {
+ rd_kafka_msg_t *rkm;
+
+ rkm = rd_calloc(1, sizeof(*rkm));
+ rkm->rkm_flags = RD_KAFKA_MSG_F_FREE_RKM;
+ rkm->rkm_offset = RD_KAFKA_OFFSET_INVALID;
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_NOT_AVAILABLE;
+
+ if (msgsize) {
+ rd_assert(msgsize <= sizeof(*rkm));
+ rkm->rkm_payload = rkm;
+ rkm->rkm_len = msgsize;
+ }
+
+ return rkm;
+}
+
+
+
+/**
+ * @brief Unittest: destroy all messages in queue
+ */
+void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq) {
+ rd_kafka_msg_t *rkm, *tmp;
+
+ TAILQ_FOREACH_SAFE(rkm, &rkmq->rkmq_msgs, rkm_link, tmp)
+ rd_kafka_msg_destroy(NULL, rkm);
+
+
+ rd_kafka_msgq_init(rkmq);
+}
+
+
+
+static int ut_verify_msgq_order(const char *what,
+ const rd_kafka_msgq_t *rkmq,
+ uint64_t first,
+ uint64_t last,
+ rd_bool_t req_consecutive) {
+ const rd_kafka_msg_t *rkm;
+ uint64_t expected = first;
+ int incr = first < last ? +1 : -1;
+ int fails = 0;
+ int cnt = 0;
+
+ TAILQ_FOREACH(rkm, &rkmq->rkmq_msgs, rkm_link) {
+ if ((req_consecutive &&
+ rkm->rkm_u.producer.msgid != expected) ||
+ (!req_consecutive &&
+ rkm->rkm_u.producer.msgid < expected)) {
+ if (fails++ < 100)
+ RD_UT_SAY("%s: expected msgid %s %" PRIu64
+ " not %" PRIu64 " at index #%d",
+ what, req_consecutive ? "==" : ">=",
+ expected, rkm->rkm_u.producer.msgid,
+ cnt);
+ }
+
+ cnt++;
+ expected += incr;
+
+ if (cnt > rkmq->rkmq_msg_cnt) {
+ RD_UT_SAY("%s: loop in queue?", what);
+ fails++;
+ break;
+ }
+ }
+
+ RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails);
+ return fails;
+}
+
+/**
+ * @brief Verify ordering comparator for message queues.
+ */
+static int unittest_msgq_order(const char *what,
+ int fifo,
+ int (*cmp)(const void *, const void *)) {
+ rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
+ rd_kafka_msg_t *rkm;
+ rd_kafka_msgq_t sendq, sendq2;
+ const size_t msgsize = 100;
+ int i;
+
+ RD_UT_SAY("%s: testing in %s mode", what, fifo ? "FIFO" : "LIFO");
+
+ for (i = 1; i <= 6; i++) {
+ rkm = ut_rd_kafka_msg_new(msgsize);
+ rkm->rkm_u.producer.msgid = i;
+ rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp);
+ }
+
+ if (fifo) {
+ if (ut_verify_msgq_order("added", &rkmq, 1, 6, rd_true))
+ return 1;
+ } else {
+ if (ut_verify_msgq_order("added", &rkmq, 6, 1, rd_true))
+ return 1;
+ }
+
+ /* Move 3 messages to "send" queue which we then re-insert
+ * in the original queue (i.e., "retry"). */
+ rd_kafka_msgq_init(&sendq);
+ while (rd_kafka_msgq_len(&sendq) < 3)
+ rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));
+
+ if (fifo) {
+ if (ut_verify_msgq_order("send removed", &rkmq, 4, 6, rd_true))
+ return 1;
+
+ if (ut_verify_msgq_order("sendq", &sendq, 1, 3, rd_true))
+ return 1;
+ } else {
+ if (ut_verify_msgq_order("send removed", &rkmq, 3, 1, rd_true))
+ return 1;
+
+ if (ut_verify_msgq_order("sendq", &sendq, 6, 4, rd_true))
+ return 1;
+ }
+
+ /* Retry the messages, which moves them back to sendq
+ * maintaining the original order */
+ rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0,
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
+
+ RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0,
+ "sendq FIFO should be empty, not contain %d messages",
+ rd_kafka_msgq_len(&sendq));
+
+ if (fifo) {
+ if (ut_verify_msgq_order("readded", &rkmq, 1, 6, rd_true))
+ return 1;
+ } else {
+ if (ut_verify_msgq_order("readded", &rkmq, 6, 1, rd_true))
+ return 1;
+ }
+
+ /* Move 4 first messages to to "send" queue, then
+ * retry them with max_retries=1 which should now fail for
+ * the 3 first messages that were already retried. */
+ rd_kafka_msgq_init(&sendq);
+ while (rd_kafka_msgq_len(&sendq) < 4)
+ rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));
+
+ if (fifo) {
+ if (ut_verify_msgq_order("send removed #2", &rkmq, 5, 6,
+ rd_true))
+ return 1;
+
+ if (ut_verify_msgq_order("sendq #2", &sendq, 1, 4, rd_true))
+ return 1;
+ } else {
+ if (ut_verify_msgq_order("send removed #2", &rkmq, 2, 1,
+ rd_true))
+ return 1;
+
+ if (ut_verify_msgq_order("sendq #2", &sendq, 6, 3, rd_true))
+ return 1;
+ }
+
+ /* Retry the messages, which should now keep the 3 first messages
+ * on sendq (no more retries) and just number 4 moved back. */
+ rd_kafka_retry_msgq(&rkmq, &sendq, 1, 1, 0,
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
+
+ if (fifo) {
+ if (ut_verify_msgq_order("readded #2", &rkmq, 4, 6, rd_true))
+ return 1;
+
+ if (ut_verify_msgq_order("no more retries", &sendq, 1, 3,
+ rd_true))
+ return 1;
+
+ } else {
+ if (ut_verify_msgq_order("readded #2", &rkmq, 3, 1, rd_true))
+ return 1;
+
+ if (ut_verify_msgq_order("no more retries", &sendq, 6, 4,
+ rd_true))
+ return 1;
+ }
+
+ /* Move all messages back on rkmq */
+ rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0,
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
+
+
+ /* Move first half of messages to sendq (1,2,3).
+ * Move second half o messages to sendq2 (4,5,6).
+ * Add new message to rkmq (7).
+ * Move first half of messages back on rkmq (1,2,3,7).
+ * Move second half back on the rkmq (1,2,3,4,5,6,7). */
+ rd_kafka_msgq_init(&sendq);
+ rd_kafka_msgq_init(&sendq2);
+
+ while (rd_kafka_msgq_len(&sendq) < 3)
+ rd_kafka_msgq_enq(&sendq, rd_kafka_msgq_pop(&rkmq));
+
+ while (rd_kafka_msgq_len(&sendq2) < 3)
+ rd_kafka_msgq_enq(&sendq2, rd_kafka_msgq_pop(&rkmq));
+
+ rkm = ut_rd_kafka_msg_new(msgsize);
+ rkm->rkm_u.producer.msgid = i;
+ rd_kafka_msgq_enq_sorted0(&rkmq, rkm, cmp);
+
+ rd_kafka_retry_msgq(&rkmq, &sendq, 0, 1000, 0,
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
+ rd_kafka_retry_msgq(&rkmq, &sendq2, 0, 1000, 0,
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED, cmp);
+
+ RD_UT_ASSERT(rd_kafka_msgq_len(&sendq) == 0,
+ "sendq FIFO should be empty, not contain %d messages",
+ rd_kafka_msgq_len(&sendq));
+ RD_UT_ASSERT(rd_kafka_msgq_len(&sendq2) == 0,
+ "sendq2 FIFO should be empty, not contain %d messages",
+ rd_kafka_msgq_len(&sendq2));
+
+ if (fifo) {
+ if (ut_verify_msgq_order("inject", &rkmq, 1, 7, rd_true))
+ return 1;
+ } else {
+ if (ut_verify_msgq_order("readded #2", &rkmq, 7, 1, rd_true))
+ return 1;
+ }
+
+ RD_UT_ASSERT(rd_kafka_msgq_size(&rkmq) ==
+ rd_kafka_msgq_len(&rkmq) * msgsize,
+ "expected msgq size %" PRIusz ", not %" PRIusz,
+ (size_t)rd_kafka_msgq_len(&rkmq) * msgsize,
+ rd_kafka_msgq_size(&rkmq));
+
+
+ ut_rd_kafka_msgq_purge(&sendq);
+ ut_rd_kafka_msgq_purge(&sendq2);
+ ut_rd_kafka_msgq_purge(&rkmq);
+
+ return 0;
+}
+
+/**
+ * @brief Verify that rd_kafka_seq_wrap() works.
+ */
+static int unittest_msg_seq_wrap(void) {
+ static const struct exp {
+ int64_t in;
+ int32_t out;
+ } exp[] = {
+ {0, 0},
+ {1, 1},
+ {(int64_t)INT32_MAX + 2, 1},
+ {(int64_t)INT32_MAX + 1, 0},
+ {INT32_MAX, INT32_MAX},
+ {INT32_MAX - 1, INT32_MAX - 1},
+ {INT32_MAX - 2, INT32_MAX - 2},
+ {((int64_t)1 << 33) - 2, INT32_MAX - 1},
+ {((int64_t)1 << 33) - 1, INT32_MAX},
+ {((int64_t)1 << 34), 0},
+ {((int64_t)1 << 35) + 3, 3},
+ {1710 + 1229, 2939},
+ {-1, -1},
+ };
+ int i;
+
+ for (i = 0; exp[i].in != -1; i++) {
+ int32_t wseq = rd_kafka_seq_wrap(exp[i].in);
+ RD_UT_ASSERT(wseq == exp[i].out,
+ "Expected seq_wrap(%" PRId64 ") -> %" PRId32
+ ", not %" PRId32,
+ exp[i].in, exp[i].out, wseq);
+ }
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Populate message queue with message ids from lo..hi (inclusive)
+ */
+static void ut_msgq_populate(rd_kafka_msgq_t *rkmq,
+ uint64_t lo,
+ uint64_t hi,
+ size_t msgsize) {
+ uint64_t i;
+
+ for (i = lo; i <= hi; i++) {
+ rd_kafka_msg_t *rkm = ut_rd_kafka_msg_new(msgsize);
+ rkm->rkm_u.producer.msgid = i;
+ rd_kafka_msgq_enq(rkmq, rkm);
+ }
+}
+
+
+struct ut_msg_range {
+ uint64_t lo;
+ uint64_t hi;
+};
+
+/**
+ * @brief Verify that msgq insert sorts are optimized. Issue #2508.
+ * All source ranges are combined into a single queue before insert.
+ */
+static int
+unittest_msgq_insert_all_sort(const char *what,
+ double max_us_per_msg,
+ double *ret_us_per_msg,
+ const struct ut_msg_range *src_ranges,
+ const struct ut_msg_range *dest_ranges) {
+ rd_kafka_msgq_t destq, srcq;
+ int i;
+ uint64_t lo = UINT64_MAX, hi = 0;
+ uint64_t cnt = 0;
+ const size_t msgsize = 100;
+ size_t totsize = 0;
+ rd_ts_t ts;
+ double us_per_msg;
+
+ RD_UT_SAY("Testing msgq insert (all) efficiency: %s", what);
+
+ rd_kafka_msgq_init(&destq);
+ rd_kafka_msgq_init(&srcq);
+
+ for (i = 0; src_ranges[i].hi > 0; i++) {
+ uint64_t this_cnt;
+
+ ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi,
+ msgsize);
+ if (src_ranges[i].lo < lo)
+ lo = src_ranges[i].lo;
+ if (src_ranges[i].hi > hi)
+ hi = src_ranges[i].hi;
+ this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1;
+ cnt += this_cnt;
+ totsize += msgsize * (size_t)this_cnt;
+ }
+
+ for (i = 0; dest_ranges[i].hi > 0; i++) {
+ uint64_t this_cnt;
+
+ ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi,
+ msgsize);
+ if (dest_ranges[i].lo < lo)
+ lo = dest_ranges[i].lo;
+ if (dest_ranges[i].hi > hi)
+ hi = dest_ranges[i].hi;
+ this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1;
+ cnt += this_cnt;
+ totsize += msgsize * (size_t)this_cnt;
+ }
+
+ RD_UT_SAY("Begin insert of %d messages into destq with %d messages",
+ rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq));
+
+ ts = rd_clock();
+ rd_kafka_msgq_insert_msgq(&destq, &srcq, rd_kafka_msg_cmp_msgid);
+ ts = rd_clock() - ts;
+ us_per_msg = (double)ts / (double)cnt;
+
+ RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts, us_per_msg);
+
+ RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0,
+ "srcq should be empty, but contains %d messages",
+ rd_kafka_msgq_len(&srcq));
+ RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt,
+ "destq should contain %d messages, not %d", (int)cnt,
+ rd_kafka_msgq_len(&destq));
+
+ if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false))
+ return 1;
+
+ RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize,
+ "expected destq size to be %" PRIusz
+ " bytes, not %" PRIusz,
+ totsize, rd_kafka_msgq_size(&destq));
+
+ ut_rd_kafka_msgq_purge(&srcq);
+ ut_rd_kafka_msgq_purge(&destq);
+
+ if (!rd_unittest_slow)
+ RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001),
+ "maximum us/msg exceeded: %.4f > %.4f us/msg",
+ us_per_msg, max_us_per_msg);
+ else if (us_per_msg > max_us_per_msg + 0.0001)
+ RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg",
+ us_per_msg, max_us_per_msg);
+
+ if (ret_us_per_msg)
+ *ret_us_per_msg = us_per_msg;
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Verify that msgq insert sorts are optimized. Issue #2508.
+ * Inserts each source range individually.
+ */
+static int
+unittest_msgq_insert_each_sort(const char *what,
+ double max_us_per_msg,
+ double *ret_us_per_msg,
+ const struct ut_msg_range *src_ranges,
+ const struct ut_msg_range *dest_ranges) {
+ rd_kafka_msgq_t destq;
+ int i;
+ uint64_t lo = UINT64_MAX, hi = 0;
+ uint64_t cnt = 0;
+ uint64_t scnt = 0;
+ const size_t msgsize = 100;
+ size_t totsize = 0;
+ double us_per_msg;
+ rd_ts_t accum_ts = 0;
+
+ RD_UT_SAY("Testing msgq insert (each) efficiency: %s", what);
+
+ rd_kafka_msgq_init(&destq);
+
+ for (i = 0; dest_ranges[i].hi > 0; i++) {
+ uint64_t this_cnt;
+
+ ut_msgq_populate(&destq, dest_ranges[i].lo, dest_ranges[i].hi,
+ msgsize);
+ if (dest_ranges[i].lo < lo)
+ lo = dest_ranges[i].lo;
+ if (dest_ranges[i].hi > hi)
+ hi = dest_ranges[i].hi;
+ this_cnt = (dest_ranges[i].hi - dest_ranges[i].lo) + 1;
+ cnt += this_cnt;
+ totsize += msgsize * (size_t)this_cnt;
+ }
+
+
+ for (i = 0; src_ranges[i].hi > 0; i++) {
+ rd_kafka_msgq_t srcq;
+ uint64_t this_cnt;
+ rd_ts_t ts;
+
+ rd_kafka_msgq_init(&srcq);
+
+ ut_msgq_populate(&srcq, src_ranges[i].lo, src_ranges[i].hi,
+ msgsize);
+ if (src_ranges[i].lo < lo)
+ lo = src_ranges[i].lo;
+ if (src_ranges[i].hi > hi)
+ hi = src_ranges[i].hi;
+ this_cnt = (src_ranges[i].hi - src_ranges[i].lo) + 1;
+ cnt += this_cnt;
+ scnt += this_cnt;
+ totsize += msgsize * (size_t)this_cnt;
+
+ RD_UT_SAY(
+ "Begin insert of %d messages into destq with "
+ "%d messages",
+ rd_kafka_msgq_len(&srcq), rd_kafka_msgq_len(&destq));
+
+ ts = rd_clock();
+ rd_kafka_msgq_insert_msgq(&destq, &srcq,
+ rd_kafka_msg_cmp_msgid);
+ ts = rd_clock() - ts;
+ accum_ts += ts;
+
+ RD_UT_SAY("Done: took %" PRId64 "us, %.4fus/msg", ts,
+ (double)ts / (double)this_cnt);
+
+ RD_UT_ASSERT(rd_kafka_msgq_len(&srcq) == 0,
+ "srcq should be empty, but contains %d messages",
+ rd_kafka_msgq_len(&srcq));
+ RD_UT_ASSERT(rd_kafka_msgq_len(&destq) == (int)cnt,
+ "destq should contain %d messages, not %d",
+ (int)cnt, rd_kafka_msgq_len(&destq));
+
+ if (ut_verify_msgq_order("after", &destq, lo, hi, rd_false))
+ return 1;
+
+ RD_UT_ASSERT(rd_kafka_msgq_size(&destq) == totsize,
+ "expected destq size to be %" PRIusz
+ " bytes, not %" PRIusz,
+ totsize, rd_kafka_msgq_size(&destq));
+
+ ut_rd_kafka_msgq_purge(&srcq);
+ }
+
+ ut_rd_kafka_msgq_purge(&destq);
+
+ us_per_msg = (double)accum_ts / (double)scnt;
+
+ RD_UT_SAY("Total: %.4fus/msg over %" PRId64 " messages in %" PRId64
+ "us",
+ us_per_msg, scnt, accum_ts);
+
+ if (!rd_unittest_slow)
+ RD_UT_ASSERT(!(us_per_msg > max_us_per_msg + 0.0001),
+ "maximum us/msg exceeded: %.4f > %.4f us/msg",
+ us_per_msg, max_us_per_msg);
+ else if (us_per_msg > max_us_per_msg + 0.0001)
+ RD_UT_WARN("maximum us/msg exceeded: %.4f > %.4f us/msg",
+ us_per_msg, max_us_per_msg);
+
+
+ if (ret_us_per_msg)
+ *ret_us_per_msg = us_per_msg;
+
+ RD_UT_PASS();
+}
+
+
+
+/**
+ * @brief Calls both insert_all and insert_each
+ */
+static int unittest_msgq_insert_sort(const char *what,
+ double max_us_per_msg,
+ double *ret_us_per_msg,
+ const struct ut_msg_range *src_ranges,
+ const struct ut_msg_range *dest_ranges) {
+ double ret_all = 0.0, ret_each = 0.0;
+ int r;
+
+ r = unittest_msgq_insert_all_sort(what, max_us_per_msg, &ret_all,
+ src_ranges, dest_ranges);
+ if (r)
+ return r;
+
+ r = unittest_msgq_insert_each_sort(what, max_us_per_msg, &ret_each,
+ src_ranges, dest_ranges);
+ if (r)
+ return r;
+
+ if (ret_us_per_msg)
+ *ret_us_per_msg = RD_MAX(ret_all, ret_each);
+
+ return 0;
+}
+
+
+int unittest_msg(void) {
+ int fails = 0;
+ double insert_baseline = 0.0;
+
+ fails += unittest_msgq_order("FIFO", 1, rd_kafka_msg_cmp_msgid);
+ fails += unittest_msg_seq_wrap();
+
+ fails += unittest_msgq_insert_sort(
+ "get baseline insert time", 100000.0, &insert_baseline,
+ (const struct ut_msg_range[]) {{1, 1}, {3, 3}, {0, 0}},
+ (const struct ut_msg_range[]) {{2, 2}, {4, 4}, {0, 0}});
+
+ /* Allow some wiggle room in baseline time. */
+ if (insert_baseline < 0.1)
+ insert_baseline = 0.2;
+ insert_baseline *= 3;
+
+ fails += unittest_msgq_insert_sort(
+ "single-message ranges", insert_baseline, NULL,
+ (const struct ut_msg_range[]) {
+ {2, 2}, {4, 4}, {9, 9}, {33692864, 33692864}, {0, 0}},
+ (const struct ut_msg_range[]) {{1, 1},
+ {3, 3},
+ {5, 5},
+ {10, 10},
+ {33692865, 33692865},
+ {0, 0}});
+ fails += unittest_msgq_insert_sort(
+ "many messages", insert_baseline, NULL,
+ (const struct ut_msg_range[]) {{100000, 200000},
+ {400000, 450000},
+ {900000, 920000},
+ {33692864, 33751992},
+ {33906868, 33993690},
+ {40000000, 44000000},
+ {0, 0}},
+ (const struct ut_msg_range[]) {{1, 199},
+ {350000, 360000},
+ {500000, 500010},
+ {1000000, 1000200},
+ {33751993, 33906867},
+ {50000001, 50000001},
+ {0, 0}});
+ fails += unittest_msgq_insert_sort(
+ "issue #2508", insert_baseline, NULL,
+ (const struct ut_msg_range[]) {
+ {33692864, 33751992}, {33906868, 33993690}, {0, 0}},
+ (const struct ut_msg_range[]) {{33751993, 33906867}, {0, 0}});
+
+ /* The standard case where all of the srcq
+ * goes after the destq.
+ * Create a big destq and a number of small srcqs.
+ * Should not result in O(n) scans to find the insert position. */
+ fails += unittest_msgq_insert_sort(
+ "issue #2450 (v1.2.1 regression)", insert_baseline, NULL,
+ (const struct ut_msg_range[]) {{200000, 200001},
+ {200002, 200006},
+ {200009, 200012},
+ {200015, 200016},
+ {200020, 200022},
+ {200030, 200090},
+ {200091, 200092},
+ {200093, 200094},
+ {200095, 200096},
+ {200097, 200099},
+ {0, 0}},
+ (const struct ut_msg_range[]) {{1, 199999}, {0, 0}});
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h
new file mode 100644
index 000000000..877fac15c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msg.h
@@ -0,0 +1,583 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * PRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_MSG_H_
+#define _RDKAFKA_MSG_H_
+
+#include "rdsysqueue.h"
+
+#include "rdkafka_proto.h"
+#include "rdkafka_header.h"
+
+
+/**
+ * @brief Internal RD_KAFKA_MSG_F_.. flags
+ */
+#define RD_KAFKA_MSG_F_RKT_RDLOCKED 0x100000 /* rkt is rdlock():ed */
+
+
+/**
+ * @brief Message.MsgAttributes for MsgVersion v0..v1,
+ * also used for MessageSet.Attributes for MsgVersion v2.
+ */
+#define RD_KAFKA_MSG_ATTR_GZIP (1 << 0)
+#define RD_KAFKA_MSG_ATTR_SNAPPY (1 << 1)
+#define RD_KAFKA_MSG_ATTR_LZ4 (3)
+#define RD_KAFKA_MSG_ATTR_ZSTD (4)
+#define RD_KAFKA_MSG_ATTR_COMPRESSION_MASK 0x7
+#define RD_KAFKA_MSG_ATTR_CREATE_TIME (0 << 3)
+#define RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME (1 << 3)
+
+/**
+ * @brief MessageSet.Attributes for MsgVersion v2
+ *
+ * Attributes:
+ * -------------------------------------------------------------------------------------------------
+ * | Unused (6-15) | Control (5) | Transactional (4) | Timestamp Type (3) |
+ * Compression Type (0-2) |
+ * -------------------------------------------------------------------------------------------------
+ */
+/* Compression types same as MsgVersion 0 above */
+/* Timestamp type same as MsgVersion 0 above */
+#define RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL (1 << 4)
+#define RD_KAFKA_MSGSET_V2_ATTR_CONTROL (1 << 5)
+
+
+typedef struct rd_kafka_msg_s {
+ rd_kafka_message_t rkm_rkmessage; /* MUST be first field */
+#define rkm_len rkm_rkmessage.len
+#define rkm_payload rkm_rkmessage.payload
+#define rkm_opaque rkm_rkmessage._private
+#define rkm_partition rkm_rkmessage.partition
+#define rkm_offset rkm_rkmessage.offset
+#define rkm_key rkm_rkmessage.key
+#define rkm_key_len rkm_rkmessage.key_len
+#define rkm_err rkm_rkmessage.err
+
+ TAILQ_ENTRY(rd_kafka_msg_s) rkm_link;
+
+ int rkm_flags;
+ /* @remark These additional flags must not collide with
+ * the RD_KAFKA_MSG_F_* flags in rdkafka.h */
+#define RD_KAFKA_MSG_F_FREE_RKM 0x10000 /* msg_t is allocated */
+#define RD_KAFKA_MSG_F_ACCOUNT 0x20000 /* accounted for in curr_msgs */
+#define RD_KAFKA_MSG_F_PRODUCER 0x40000 /* Producer message */
+#define RD_KAFKA_MSG_F_CONTROL 0x80000 /* Control message */
+
+ rd_kafka_timestamp_type_t rkm_tstype; /* rkm_timestamp type */
+ int64_t rkm_timestamp; /* Message format V1.
+ * Meaning of timestamp depends on
+ * message Attribute LogAppendtime (broker)
+ * or CreateTime (producer).
+ * Unit is milliseconds since epoch (UTC).*/
+
+
+ rd_kafka_headers_t *rkm_headers; /**< Parsed headers list, if any. */
+
+ rd_kafka_msg_status_t rkm_status; /**< Persistence status. Updated in
+ * the ProduceResponse handler:
+ * this value is always up to date.
+ */
+ int32_t rkm_broker_id; /**< Broker message was produced to
+ * or fetched from. */
+
+ union {
+ struct {
+ rd_ts_t ts_timeout; /* Message timeout */
+ rd_ts_t ts_enq; /* Enqueue/Produce time */
+ rd_ts_t ts_backoff; /* Backoff next Produce until
+ * this time. */
+ uint64_t msgid; /**< Message sequencial id,
+ * used to maintain ordering.
+ * Starts at 1. */
+ uint64_t last_msgid; /**< On retry this is set
+ * on the first message
+ * in a batch to point
+ * out the last message
+ * of the batch so that
+ * the batch can be
+ * identically reconstructed.
+ */
+ int retries; /* Number of retries so far */
+ } producer;
+#define rkm_ts_timeout rkm_u.producer.ts_timeout
+#define rkm_ts_enq rkm_u.producer.ts_enq
+#define rkm_msgid rkm_u.producer.msgid
+
+ struct {
+ rd_kafkap_bytes_t binhdrs; /**< Unparsed
+ * binary headers in
+ * protocol msg */
+ int32_t leader_epoch; /**< Leader epoch at the time
+ * the message was fetched. */
+ } consumer;
+ } rkm_u;
+} rd_kafka_msg_t;
+
+TAILQ_HEAD(rd_kafka_msg_head_s, rd_kafka_msg_s);
+
+
+/** @returns the absolute time a message was enqueued (producer) */
+#define rd_kafka_msg_enq_time(rkm) ((rkm)->rkm_ts_enq)
+
+/**
+ * @returns the message's total maximum on-wire size.
+ * @remark Depending on message version (MagicByte) the actual size
+ * may be smaller.
+ */
+static RD_INLINE RD_UNUSED size_t
+rd_kafka_msg_wire_size(const rd_kafka_msg_t *rkm, int MsgVersion) {
+ static const size_t overheads[] = {
+ [0] = RD_KAFKAP_MESSAGE_V0_OVERHEAD,
+ [1] = RD_KAFKAP_MESSAGE_V1_OVERHEAD,
+ [2] = RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD};
+ size_t size;
+ rd_dassert(MsgVersion >= 0 && MsgVersion <= 2);
+
+ size = overheads[MsgVersion] + rkm->rkm_len + rkm->rkm_key_len;
+ if (MsgVersion == 2 && rkm->rkm_headers)
+ size += rd_kafka_headers_serialized_size(rkm->rkm_headers);
+
+ return size;
+}
+
+
+/**
+ * @returns the maximum total on-wire message size regardless of MsgVersion.
+ *
+ * @remark This does not account for the ProduceRequest, et.al, just the
+ * per-message overhead.
+ */
+static RD_INLINE RD_UNUSED size_t rd_kafka_msg_max_wire_size(size_t keylen,
+ size_t valuelen,
+ size_t hdrslen) {
+ return RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD + keylen + valuelen + hdrslen;
+}
+
+/**
+ * @returns the enveloping rd_kafka_msg_t pointer for a rd_kafka_msg_t
+ * wrapped rd_kafka_message_t.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_msg_t *
+rd_kafka_message2msg(rd_kafka_message_t *rkmessage) {
+ return (rd_kafka_msg_t *)rkmessage;
+}
+
+
+
+/**
+ * @brief Message queue with message and byte counters.
+ */
+TAILQ_HEAD(rd_kafka_msgs_head_s, rd_kafka_msg_s);
+typedef struct rd_kafka_msgq_s {
+ struct rd_kafka_msgs_head_s rkmq_msgs; /* TAILQ_HEAD */
+ int32_t rkmq_msg_cnt;
+ int64_t rkmq_msg_bytes;
+ struct {
+ rd_ts_t abstime; /**< Allow wake-ups after this point in time.*/
+ int32_t msg_cnt; /**< Signal wake-up when this message count
+ * is reached. */
+ int64_t msg_bytes; /**< .. or when this byte count is
+ * reached. */
+ rd_bool_t on_first; /**< Wake-up on first message enqueued
+ * regardless of .abstime. */
+ rd_bool_t signalled; /**< Wake-up (already) signalled. */
+ } rkmq_wakeup;
+} rd_kafka_msgq_t;
+
+#define RD_KAFKA_MSGQ_INITIALIZER(rkmq) \
+ { .rkmq_msgs = TAILQ_HEAD_INITIALIZER((rkmq).rkmq_msgs) }
+
+#define RD_KAFKA_MSGQ_FOREACH(elm, head) \
+ TAILQ_FOREACH(elm, &(head)->rkmq_msgs, rkm_link)
+
+/* @brief Check if queue is empty. Proper locks must be held. */
+#define RD_KAFKA_MSGQ_EMPTY(rkmq) TAILQ_EMPTY(&(rkmq)->rkmq_msgs)
+
+/**
+ * Returns the number of messages in the specified queue.
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_msgq_len(const rd_kafka_msgq_t *rkmq) {
+ return (int)rkmq->rkmq_msg_cnt;
+}
+
+/**
+ * Returns the total number of bytes in the specified queue.
+ */
+static RD_INLINE RD_UNUSED size_t
+rd_kafka_msgq_size(const rd_kafka_msgq_t *rkmq) {
+ return (size_t)rkmq->rkmq_msg_bytes;
+}
+
+
+void rd_kafka_msg_destroy(rd_kafka_t *rk, rd_kafka_msg_t *rkm);
+
+int rd_kafka_msg_new(rd_kafka_topic_t *rkt,
+ int32_t force_partition,
+ int msgflags,
+ char *payload,
+ size_t len,
+ const void *keydata,
+ size_t keylen,
+ void *msg_opaque);
+
+static RD_INLINE RD_UNUSED void rd_kafka_msgq_init(rd_kafka_msgq_t *rkmq) {
+ TAILQ_INIT(&rkmq->rkmq_msgs);
+ rkmq->rkmq_msg_cnt = 0;
+ rkmq->rkmq_msg_bytes = 0;
+}
+
+#if ENABLE_DEVEL
+#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \
+ rd_kafka_msgq_verify_order0(__FUNCTION__, __LINE__, rktp, rkmq, \
+ exp_first_msgid, gapless)
+#else
+#define rd_kafka_msgq_verify_order(rktp, rkmq, exp_first_msgid, gapless) \
+ do { \
+ } while (0)
+#endif
+
+void rd_kafka_msgq_verify_order0(const char *function,
+ int line,
+ const struct rd_kafka_toppar_s *rktp,
+ const rd_kafka_msgq_t *rkmq,
+ uint64_t exp_first_msgid,
+ rd_bool_t gapless);
+
+
+/**
+ * Concat all elements of 'src' onto tail of 'dst'.
+ * 'src' will be cleared.
+ * Proper locks for 'src' and 'dst' must be held.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_msgq_concat(rd_kafka_msgq_t *dst,
+ rd_kafka_msgq_t *src) {
+ TAILQ_CONCAT(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link);
+ dst->rkmq_msg_cnt += src->rkmq_msg_cnt;
+ dst->rkmq_msg_bytes += src->rkmq_msg_bytes;
+ rd_kafka_msgq_init(src);
+ rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false);
+}
+
+/**
+ * Move queue 'src' to 'dst' (overwrites dst)
+ * Source will be cleared.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_msgq_move(rd_kafka_msgq_t *dst,
+ rd_kafka_msgq_t *src) {
+ TAILQ_MOVE(&dst->rkmq_msgs, &src->rkmq_msgs, rkm_link);
+ dst->rkmq_msg_cnt = src->rkmq_msg_cnt;
+ dst->rkmq_msg_bytes = src->rkmq_msg_bytes;
+ rd_kafka_msgq_init(src);
+ rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false);
+}
+
+
+/**
+ * @brief Prepend all elements of \ src onto head of \p dst.
+ * \p src will be cleared/re-initialized.
+ *
+ * @locks proper locks for \p src and \p dst MUST be held.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_msgq_prepend(rd_kafka_msgq_t *dst,
+ rd_kafka_msgq_t *src) {
+ rd_kafka_msgq_concat(src, dst);
+ rd_kafka_msgq_move(dst, src);
+ rd_kafka_msgq_verify_order(NULL, dst, 0, rd_false);
+}
+
+
+/**
+ * rd_free all msgs in msgq and reinitialize the msgq.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_msgq_purge(rd_kafka_t *rk,
+ rd_kafka_msgq_t *rkmq) {
+ rd_kafka_msg_t *rkm, *next;
+
+ next = TAILQ_FIRST(&rkmq->rkmq_msgs);
+ while (next) {
+ rkm = next;
+ next = TAILQ_NEXT(next, rkm_link);
+
+ rd_kafka_msg_destroy(rk, rkm);
+ }
+
+ rd_kafka_msgq_init(rkmq);
+}
+
+
+/**
+ * Remove message from message queue
+ */
+static RD_INLINE RD_UNUSED rd_kafka_msg_t *
+rd_kafka_msgq_deq(rd_kafka_msgq_t *rkmq, rd_kafka_msg_t *rkm, int do_count) {
+ if (likely(do_count)) {
+ rd_kafka_assert(NULL, rkmq->rkmq_msg_cnt > 0);
+ rd_kafka_assert(NULL,
+ rkmq->rkmq_msg_bytes >=
+ (int64_t)(rkm->rkm_len + rkm->rkm_key_len));
+ rkmq->rkmq_msg_cnt--;
+ rkmq->rkmq_msg_bytes -= rkm->rkm_len + rkm->rkm_key_len;
+ }
+
+ TAILQ_REMOVE(&rkmq->rkmq_msgs, rkm, rkm_link);
+
+ return rkm;
+}
+
+static RD_INLINE RD_UNUSED rd_kafka_msg_t *
+rd_kafka_msgq_pop(rd_kafka_msgq_t *rkmq) {
+ rd_kafka_msg_t *rkm;
+
+ if (((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs))))
+ rd_kafka_msgq_deq(rkmq, rkm, 1);
+
+ return rkm;
+}
+
+
+/**
+ * @returns the first message in the queue, or NULL if empty.
+ *
+ * @locks caller's responsibility
+ */
+static RD_INLINE RD_UNUSED rd_kafka_msg_t *
+rd_kafka_msgq_first(const rd_kafka_msgq_t *rkmq) {
+ return TAILQ_FIRST(&rkmq->rkmq_msgs);
+}
+
+/**
+ * @returns the last message in the queue, or NULL if empty.
+ *
+ * @locks caller's responsibility
+ */
+static RD_INLINE RD_UNUSED rd_kafka_msg_t *
+rd_kafka_msgq_last(const rd_kafka_msgq_t *rkmq) {
+ return TAILQ_LAST(&rkmq->rkmq_msgs, rd_kafka_msgs_head_s);
+}
+
+
+/**
+ * @returns the MsgId of the first message in the queue, or 0 if empty.
+ *
+ * @locks caller's responsibility
+ */
+static RD_INLINE RD_UNUSED uint64_t
+rd_kafka_msgq_first_msgid(const rd_kafka_msgq_t *rkmq) {
+ const rd_kafka_msg_t *rkm = TAILQ_FIRST(&rkmq->rkmq_msgs);
+ if (rkm)
+ return rkm->rkm_u.producer.msgid;
+ else
+ return 0;
+}
+
+
+
+rd_bool_t rd_kafka_msgq_allow_wakeup_at(rd_kafka_msgq_t *rkmq,
+ const rd_kafka_msgq_t *dest_rkmq,
+ rd_ts_t *next_wakeup,
+ rd_ts_t now,
+ rd_ts_t linger_us,
+ int32_t batch_msg_cnt,
+ int64_t batch_msg_bytes);
+
+/**
+ * @returns true if msgq may be awoken.
+ */
+
+static RD_INLINE RD_UNUSED rd_bool_t
+rd_kafka_msgq_may_wakeup(const rd_kafka_msgq_t *rkmq, rd_ts_t now) {
+ /* No: Wakeup already signalled */
+ if (rkmq->rkmq_wakeup.signalled)
+ return rd_false;
+
+ /* Yes: Wakeup linger time has expired */
+ if (now >= rkmq->rkmq_wakeup.abstime)
+ return rd_true;
+
+ /* Yes: First message enqueued may trigger wakeup */
+ if (rkmq->rkmq_msg_cnt == 1 && rkmq->rkmq_wakeup.on_first)
+ return rd_true;
+
+ /* Yes: batch.size or batch.num.messages exceeded */
+ if (rkmq->rkmq_msg_cnt >= rkmq->rkmq_wakeup.msg_cnt ||
+ rkmq->rkmq_msg_bytes > rkmq->rkmq_wakeup.msg_bytes)
+ return rd_true;
+
+ /* No */
+ return rd_false;
+}
+
+
+/**
+ * @brief Message ordering comparator using the message id
+ * number to order messages in ascending order (FIFO).
+ */
+static RD_INLINE int rd_kafka_msg_cmp_msgid(const void *_a, const void *_b) {
+ const rd_kafka_msg_t *a = _a, *b = _b;
+
+ rd_dassert(a->rkm_u.producer.msgid);
+
+ return RD_CMP(a->rkm_u.producer.msgid, b->rkm_u.producer.msgid);
+}
+
+/**
+ * @brief Message ordering comparator using the message id
+ * number to order messages in descending order (LIFO).
+ */
+static RD_INLINE int rd_kafka_msg_cmp_msgid_lifo(const void *_a,
+ const void *_b) {
+ const rd_kafka_msg_t *a = _a, *b = _b;
+
+ rd_dassert(a->rkm_u.producer.msgid);
+
+ return RD_CMP(b->rkm_u.producer.msgid, a->rkm_u.producer.msgid);
+}
+
+
+/**
+ * @brief Insert message at its sorted position using the msgid.
+ * @remark This is an O(n) operation.
+ * @warning The message must have a msgid set.
+ * @returns the message count of the queue after enqueuing the message.
+ */
+int rd_kafka_msgq_enq_sorted0(rd_kafka_msgq_t *rkmq,
+ rd_kafka_msg_t *rkm,
+ int (*order_cmp)(const void *, const void *));
+
+/**
+ * @brief Insert message at its sorted position using the msgid.
+ * @remark This is an O(n) operation.
+ * @warning The message must have a msgid set.
+ * @returns the message count of the queue after enqueuing the message.
+ */
+int rd_kafka_msgq_enq_sorted(const rd_kafka_topic_t *rkt,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_msg_t *rkm);
+
+/**
+ * Insert message at head of message queue.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_msgq_insert(rd_kafka_msgq_t *rkmq,
+ rd_kafka_msg_t *rkm) {
+ TAILQ_INSERT_HEAD(&rkmq->rkmq_msgs, rkm, rkm_link);
+ rkmq->rkmq_msg_cnt++;
+ rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len;
+}
+
+/**
+ * Append message to tail of message queue.
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_msgq_enq(rd_kafka_msgq_t *rkmq,
+ rd_kafka_msg_t *rkm) {
+ TAILQ_INSERT_TAIL(&rkmq->rkmq_msgs, rkm, rkm_link);
+ rkmq->rkmq_msg_bytes += rkm->rkm_len + rkm->rkm_key_len;
+ return (int)++rkmq->rkmq_msg_cnt;
+}
+
+
+/**
+ * @returns true if the MsgId extents (first, last) in the two queues overlap.
+ */
+static RD_INLINE RD_UNUSED rd_bool_t
+rd_kafka_msgq_overlap(const rd_kafka_msgq_t *a, const rd_kafka_msgq_t *b) {
+ const rd_kafka_msg_t *fa, *la, *fb, *lb;
+
+ if (RD_KAFKA_MSGQ_EMPTY(a) || RD_KAFKA_MSGQ_EMPTY(b))
+ return rd_false;
+
+ fa = rd_kafka_msgq_first(a);
+ fb = rd_kafka_msgq_first(b);
+ la = rd_kafka_msgq_last(a);
+ lb = rd_kafka_msgq_last(b);
+
+ return (rd_bool_t)(
+ fa->rkm_u.producer.msgid <= lb->rkm_u.producer.msgid &&
+ fb->rkm_u.producer.msgid <= la->rkm_u.producer.msgid);
+}
+
+/**
+ * Scans a message queue for timed out messages and removes them from
+ * 'rkmq' and adds them to 'timedout', returning the number of timed out
+ * messages.
+ * 'timedout' must be initialized.
+ */
+int rd_kafka_msgq_age_scan(struct rd_kafka_toppar_s *rktp,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_msgq_t *timedout,
+ rd_ts_t now,
+ rd_ts_t *abs_next_timeout);
+
+void rd_kafka_msgq_split(rd_kafka_msgq_t *leftq,
+ rd_kafka_msgq_t *rightq,
+ rd_kafka_msg_t *first_right,
+ int cnt,
+ int64_t bytes);
+
+rd_kafka_msg_t *rd_kafka_msgq_find_pos(const rd_kafka_msgq_t *rkmq,
+ const rd_kafka_msg_t *start_pos,
+ const rd_kafka_msg_t *rkm,
+ int (*cmp)(const void *, const void *),
+ int *cntp,
+ int64_t *bytesp);
+
+void rd_kafka_msgq_set_metadata(rd_kafka_msgq_t *rkmq,
+ int32_t broker_id,
+ int64_t base_offset,
+ int64_t timestamp,
+ rd_kafka_msg_status_t status);
+
+void rd_kafka_msgq_move_acked(rd_kafka_msgq_t *dest,
+ rd_kafka_msgq_t *src,
+ uint64_t last_msgid,
+ rd_kafka_msg_status_t status);
+
+int rd_kafka_msg_partitioner(rd_kafka_topic_t *rkt,
+ rd_kafka_msg_t *rkm,
+ rd_dolock_t do_lock);
+
+
+rd_kafka_message_t *rd_kafka_message_get(struct rd_kafka_op_s *rko);
+rd_kafka_message_t *rd_kafka_message_get_from_rkm(struct rd_kafka_op_s *rko,
+ rd_kafka_msg_t *rkm);
+rd_kafka_message_t *rd_kafka_message_new(void);
+
+
+/**
+ * @returns a (possibly) wrapped Kafka protocol message sequence counter
+ * for the non-overflowing \p seq.
+ */
+static RD_INLINE RD_UNUSED int32_t rd_kafka_seq_wrap(int64_t seq) {
+ return (int32_t)(seq & (int64_t)INT32_MAX);
+}
+
+void rd_kafka_msgq_dump(FILE *fp, const char *what, rd_kafka_msgq_t *rkmq);
+
+rd_kafka_msg_t *ut_rd_kafka_msg_new(size_t msgsize);
+void ut_rd_kafka_msgq_purge(rd_kafka_msgq_t *rkmq);
+int unittest_msg(void);
+
+#endif /* _RDKAFKA_MSG_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h
new file mode 100644
index 000000000..09c797706
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgbatch.h
@@ -0,0 +1,62 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ * PRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_MSGBATCH_H_
+#define _RDKAFKA_MSGBATCH_H_
+
+typedef struct rd_kafka_msgbatch_s {
+ rd_kafka_toppar_t *rktp; /**< Reference to partition */
+
+ rd_kafka_msgq_t msgq; /**< Messages in batch */
+
+ /* Following fields are for Idempotent Producer use */
+ rd_kafka_pid_t pid; /**< Producer Id and Epoch */
+ int32_t first_seq; /**< Base sequence */
+ int64_t first_msgid; /**< Base msgid */
+ uint64_t epoch_base_msgid; /**< The partition epoch's
+ * base msgid. */
+ uint64_t last_msgid; /**< Last message to add to batch.
+ * This is used when reconstructing
+ * batches for resends with
+ * the idempotent producer which
+ * require retries to have the
+ * exact same messages in them. */
+
+} rd_kafka_msgbatch_t;
+
+
+
+/* defined in rdkafka_msg.c */
+void rd_kafka_msgbatch_destroy(rd_kafka_msgbatch_t *rkmb);
+void rd_kafka_msgbatch_init(rd_kafka_msgbatch_t *rkmb,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_pid_t pid,
+ uint64_t epoch_base_msgid);
+void rd_kafka_msgbatch_set_first_msg(rd_kafka_msgbatch_t *rkmb,
+ rd_kafka_msg_t *rkm);
+void rd_kafka_msgbatch_ready_produce(rd_kafka_msgbatch_t *rkmb);
+
+#endif /* _RDKAFKA_MSGBATCH_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h
new file mode 100644
index 000000000..b79f1c946
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset.h
@@ -0,0 +1,82 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_MSGSET_H_
+#define _RDKAFKA_MSGSET_H_
+
+
+
+/**
+ * @struct rd_kafka_aborted_txns_t
+ *
+ * @brief A collection of aborted transactions.
+ */
+typedef struct rd_kafka_aborted_txns_s {
+ rd_avl_t avl;
+ /* Note: A list of nodes is maintained alongside
+ * the AVL tree to facilitate traversal.
+ */
+ rd_list_t list;
+ int32_t cnt;
+} rd_kafka_aborted_txns_t;
+
+
+rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt);
+
+void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns);
+
+void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns);
+
+void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid,
+ int64_t first_offset);
+
+
+/**
+ * @name MessageSet writers
+ */
+rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq,
+ const rd_kafka_pid_t pid,
+ uint64_t epoch_base_msgid,
+ size_t *MessageSetSizep);
+
+/**
+ * @name MessageSet readers
+ */
+rd_kafka_resp_err_t
+rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_aborted_txns_t *aborted_txns,
+ const struct rd_kafka_toppar_ver *tver);
+
+int unittest_aborted_txns(void);
+
+#endif /* _RDKAFKA_MSGSET_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c
new file mode 100644
index 000000000..58779f3be
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_reader.c
@@ -0,0 +1,1794 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name MessageSet reader interface
+ *
+ * Parses FetchResponse for Messages
+ *
+ *
+ * @remark
+ * The broker may send partial messages, when this happens we bail out
+ * silently and keep the messages that we successfully parsed.
+ *
+ * "A Guide To The Kafka Protocol" states:
+ * "As an optimization the server is allowed to
+ * return a partial message at the end of the
+ * message set.
+ * Clients should handle this case."
+ *
+ * We're handling it by not passing the error upstream.
+ * This is why most err_parse: goto labels (that are called from buf parsing
+ * macros) suppress the error message and why log_decode_errors is off
+ * unless PROTOCOL debugging is enabled.
+ *
+ * When a FetchResponse contains multiple partitions, each partition's
+ * MessageSet may be partial, regardless of the other partitions.
+ * To make sure the next partition can be parsed, each partition parse
+ * uses its own sub-slice of only that partition's MessageSetSize length.
+ */
+
+#include "rd.h"
+#include "rdunittest.h"
+#include "rdavl.h"
+#include "rdlist.h"
+#include "rdkafka_int.h"
+#include "rdkafka_msg.h"
+#include "rdkafka_msgset.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_header.h"
+#include "rdkafka_lz4.h"
+
+#include "rdvarint.h"
+#include "crc32c.h"
+
+#if WITH_ZLIB
+#include "rdgz.h"
+#endif
+#if WITH_SNAPPY
+#include "snappy.h"
+#endif
+#if WITH_ZSTD
+#include "rdkafka_zstd.h"
+#endif
+
+
+static RD_INLINE int64_t
+rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid,
+ int64_t max_offset);
+static RD_INLINE int64_t
+rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid);
+
+
+struct msgset_v2_hdr {
+ int64_t BaseOffset;
+ int32_t Length;
+ int32_t PartitionLeaderEpoch;
+ int8_t MagicByte;
+ int32_t Crc;
+ int16_t Attributes;
+ int32_t LastOffsetDelta;
+ int64_t BaseTimestamp;
+ int64_t MaxTimestamp;
+ int64_t PID;
+ int16_t ProducerEpoch;
+ int32_t BaseSequence;
+ int32_t RecordCount;
+};
+
+
+/**
+ * @struct rd_kafka_aborted_txn_start_offsets_t
+ *
+ * @brief A sorted list of aborted transaction start offsets
+ * (ascending) for a PID, and an offset into that list.
+ */
+typedef struct rd_kafka_aborted_txn_start_offsets_s {
+ rd_avl_node_t avl_node;
+ int64_t pid;
+ int offsets_idx;
+ rd_list_t offsets;
+} rd_kafka_aborted_txn_start_offsets_t;
+
+
+typedef struct rd_kafka_msgset_reader_s {
+ rd_kafka_buf_t *msetr_rkbuf; /**< Response read buffer */
+
+ int msetr_relative_offsets; /**< Bool: using relative offsets */
+
+ /**< Outer/wrapper Message fields. */
+ struct {
+ int64_t offset; /**< Relative_offsets: outer message's
+ * Offset (last offset) */
+ rd_kafka_timestamp_type_t tstype; /**< Compressed
+ * MessageSet's
+ * timestamp type. */
+ int64_t timestamp; /**< ... timestamp*/
+ } msetr_outer;
+
+ struct msgset_v2_hdr *msetr_v2_hdr; /**< MessageSet v2 header */
+
+ /*
+ * Aborted Transaction Start Offsets. These are arranged in a map
+ * (ABORTED_TXN_OFFSETS), with PID as the key and value as follows:
+ * - OFFSETS: sorted list of aborted transaction start offsets
+ * (ascending)
+ * - IDX: an index into OFFSETS list, initialized to 0.
+ *
+ * The logic for processing fetched data is as follows (note: this is
+ * different from the Java client):
+ *
+ * 1. If the message is a transaction control message and the status is
+ * ABORT then increment ABORTED_TXN_OFFSETS(PID).IDX. note: sanity check
+ * that OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX] is less than the current
+ * offset before incrementing. If the status is COMMIT, do nothing.
+ *
+ * 2. If the message is a normal message, find the corresponding OFFSETS
+ * list in ABORTED_TXN_OFFSETS. If it doesn't exist, then keep the
+ * message. If the PID does exist, compare ABORTED_TXN_OFFSETS(PID).IDX
+ * with len(OFFSETS). If it's >= then the message should be kept. If
+ * not, compare the message offset with
+ * OFFSETS[ABORTED_TXN_OFFSETS(PID).IDX]. If it's greater than or equal
+ * to this value, then the message should be ignored. If it's less than,
+ * then the message should be kept.
+ *
+ * Note: A MessageSet comprises messages from at most one transaction,
+ * so the logic in step 2 is done at the message set level.
+ */
+ rd_kafka_aborted_txns_t *msetr_aborted_txns;
+
+ const struct rd_kafka_toppar_ver *msetr_tver; /**< Toppar op version of
+ * request. */
+
+ int32_t msetr_leader_epoch; /**< Current MessageSet's partition
+ * leader epoch (or -1). */
+
+ int32_t msetr_broker_id; /**< Broker id (of msetr_rkb) */
+ rd_kafka_broker_t *msetr_rkb; /* @warning Not a refcounted
+ * reference! */
+ rd_kafka_toppar_t *msetr_rktp; /* @warning Not a refcounted
+ * reference! */
+
+ int msetr_msgcnt; /**< Number of messages in rkq */
+ int64_t msetr_msg_bytes; /**< Number of bytes in rkq */
+ rd_kafka_q_t msetr_rkq; /**< Temp Message and error queue */
+ rd_kafka_q_t *msetr_par_rkq; /**< Parent message and error queue,
+ * the temp msetr_rkq will be moved
+ * to this queue when parsing
+ * is done.
+ * Refcount is not increased. */
+
+ int64_t msetr_next_offset; /**< Next offset to fetch after
+ * this reader run is done.
+ * Optional: only used for special
+ * cases where the per-message offset
+ * can't be relied on for next
+ * fetch offset, such as with
+ * compacted topics. */
+
+ int msetr_ctrl_cnt; /**< Number of control messages
+ * or MessageSets received. */
+
+ int msetr_aborted_cnt; /**< Number of aborted MessageSets
+ * encountered. */
+
+ const char *msetr_srcname; /**< Optional message source string,
+ * used in debug logging to
+ * indicate messages were
+ * from an inner compressed
+ * message set.
+ * Not freed (use const memory).
+ * Add trailing space. */
+
+ rd_kafka_compression_t msetr_compression; /**< Compression codec */
+} rd_kafka_msgset_reader_t;
+
+
+
+/* Forward declarations */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr);
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr);
+
+
+/**
+ * @brief Set up a MessageSet reader but don't start reading messages.
+ */
+static void rd_kafka_msgset_reader_init(rd_kafka_msgset_reader_t *msetr,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_toppar_t *rktp,
+ const struct rd_kafka_toppar_ver *tver,
+ rd_kafka_aborted_txns_t *aborted_txns,
+ rd_kafka_q_t *par_rkq) {
+
+ memset(msetr, 0, sizeof(*msetr));
+
+ msetr->msetr_rkb = rkbuf->rkbuf_rkb;
+ msetr->msetr_leader_epoch = -1;
+ msetr->msetr_broker_id = rd_kafka_broker_id(msetr->msetr_rkb);
+ msetr->msetr_rktp = rktp;
+ msetr->msetr_aborted_txns = aborted_txns;
+ msetr->msetr_tver = tver;
+ msetr->msetr_rkbuf = rkbuf;
+ msetr->msetr_srcname = "";
+
+ rkbuf->rkbuf_uflow_mitigation = "truncated response from broker (ok)";
+
+ /* All parsed messages are put on this temporary op
+ * queue first and then moved in one go to the real op queue. */
+ rd_kafka_q_init(&msetr->msetr_rkq, msetr->msetr_rkb->rkb_rk);
+
+ /* Make sure enqueued ops get the correct serve/opaque reflecting the
+ * original queue. */
+ msetr->msetr_rkq.rkq_serve = par_rkq->rkq_serve;
+ msetr->msetr_rkq.rkq_opaque = par_rkq->rkq_opaque;
+
+ /* Keep (non-refcounted) reference to parent queue for
+ * moving the messages and events in msetr_rkq to when
+ * parsing is done. */
+ msetr->msetr_par_rkq = par_rkq;
+}
+
+
+
+/**
+ * @brief Decompress MessageSet, pass the uncompressed MessageSet to
+ * the MessageSet reader.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_decompress(rd_kafka_msgset_reader_t *msetr,
+ int MsgVersion,
+ int Attributes,
+ int64_t Timestamp,
+ int64_t Offset,
+ const void *compressed,
+ size_t compressed_size) {
+ struct iovec iov = {.iov_base = NULL, .iov_len = 0};
+ rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
+ int codec = Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_buf_t *rkbufz;
+
+ msetr->msetr_compression = codec;
+
+ switch (codec) {
+#if WITH_ZLIB
+ case RD_KAFKA_COMPRESSION_GZIP: {
+ uint64_t outlenx = 0;
+
+ /* Decompress Message payload */
+ iov.iov_base = rd_gz_decompress(compressed,
+ (int)compressed_size, &outlenx);
+ if (unlikely(!iov.iov_base)) {
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "GZIP",
+ "Failed to decompress Gzip "
+ "message at offset %" PRId64 " of %" PRIusz
+ " bytes: "
+ "ignoring message",
+ Offset, compressed_size);
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto err;
+ }
+
+ iov.iov_len = (size_t)outlenx;
+ } break;
+#endif
+
+#if WITH_SNAPPY
+ case RD_KAFKA_COMPRESSION_SNAPPY: {
+ const char *inbuf = compressed;
+ size_t inlen = compressed_size;
+ int r;
+ static const unsigned char snappy_java_magic[] = {
+ 0x82, 'S', 'N', 'A', 'P', 'P', 'Y', 0};
+ static const size_t snappy_java_hdrlen = 8 + 4 + 4;
+
+ /* snappy-java adds its own header (SnappyCodec)
+ * which is not compatible with the official Snappy
+ * implementation.
+ * 8: magic, 4: version, 4: compatible
+ * followed by any number of chunks:
+ * 4: length
+ * ...: snappy-compressed data. */
+ if (likely(inlen > snappy_java_hdrlen + 4 &&
+ !memcmp(inbuf, snappy_java_magic, 8))) {
+ /* snappy-java framing */
+ char errstr[128];
+
+ inbuf = inbuf + snappy_java_hdrlen;
+ inlen -= snappy_java_hdrlen;
+ iov.iov_base = rd_kafka_snappy_java_uncompress(
+ inbuf, inlen, &iov.iov_len, errstr, sizeof(errstr));
+
+ if (unlikely(!iov.iov_base)) {
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
+ "%s [%" PRId32
+ "]: "
+ "Snappy decompression for message "
+ "at offset %" PRId64
+ " failed: %s: "
+ "ignoring message",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, Offset,
+ errstr);
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto err;
+ }
+
+
+ } else {
+ /* No framing */
+
+ /* Acquire uncompressed length */
+ if (unlikely(!rd_kafka_snappy_uncompressed_length(
+ inbuf, inlen, &iov.iov_len))) {
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
+ "Failed to get length of Snappy "
+ "compressed payload "
+ "for message at offset %" PRId64
+ " (%" PRIusz
+ " bytes): "
+ "ignoring message",
+ Offset, inlen);
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto err;
+ }
+
+ /* Allocate output buffer for uncompressed data */
+ iov.iov_base = rd_malloc(iov.iov_len);
+ if (unlikely(!iov.iov_base)) {
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
+ "Failed to allocate Snappy "
+ "decompress buffer of size %" PRIusz
+ "for message at offset %" PRId64
+ " (%" PRIusz
+ " bytes): %s: "
+ "ignoring message",
+ iov.iov_len, Offset, inlen,
+ rd_strerror(errno));
+ err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ goto err;
+ }
+
+ /* Uncompress to outbuf */
+ if (unlikely((r = rd_kafka_snappy_uncompress(
+ inbuf, inlen, iov.iov_base)))) {
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "SNAPPY",
+ "Failed to decompress Snappy "
+ "payload for message at offset "
+ "%" PRId64 " (%" PRIusz
+ " bytes): %s: "
+ "ignoring message",
+ Offset, inlen,
+ rd_strerror(-r /*negative errno*/));
+ rd_free(iov.iov_base);
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto err;
+ }
+ }
+
+ } break;
+#endif
+
+ case RD_KAFKA_COMPRESSION_LZ4: {
+ err =
+ rd_kafka_lz4_decompress(msetr->msetr_rkb,
+ /* Proper HC? */
+ MsgVersion >= 1 ? 1 : 0, Offset,
+ /* @warning Will modify compressed
+ * if no proper HC */
+ (char *)compressed, compressed_size,
+ &iov.iov_base, &iov.iov_len);
+ if (err)
+ goto err;
+ } break;
+
+#if WITH_ZSTD
+ case RD_KAFKA_COMPRESSION_ZSTD: {
+ err = rd_kafka_zstd_decompress(
+ msetr->msetr_rkb, (char *)compressed, compressed_size,
+ &iov.iov_base, &iov.iov_len);
+ if (err)
+ goto err;
+ } break;
+#endif
+
+ default:
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "CODEC",
+ "%s [%" PRId32 "]: Message at offset %" PRId64
+ " with unsupported "
+ "compression codec 0x%x: message ignored",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ Offset, (int)codec);
+
+ err = RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+ goto err;
+ }
+
+
+ rd_assert(iov.iov_base);
+
+ /*
+ * Decompression successful
+ */
+
+ /* Create a new buffer pointing to the uncompressed
+ * allocated buffer (outbuf) and let messages keep a reference to
+ * this new buffer. */
+ rkbufz = rd_kafka_buf_new_shadow(iov.iov_base, iov.iov_len, rd_free);
+ rkbufz->rkbuf_rkb = msetr->msetr_rkbuf->rkbuf_rkb;
+ rd_kafka_broker_keep(rkbufz->rkbuf_rkb);
+
+
+ /* In MsgVersion v0..1 the decompressed data contains
+ * an inner MessageSet, pass it to a new MessageSet reader.
+ *
+ * For MsgVersion v2 the decompressed data are the list of messages.
+ */
+
+ if (MsgVersion <= 1) {
+ /* Pass decompressed data (inner Messageset)
+ * to new instance of the MessageSet parser. */
+ rd_kafka_msgset_reader_t inner_msetr;
+ rd_kafka_msgset_reader_init(
+ &inner_msetr, rkbufz, msetr->msetr_rktp, msetr->msetr_tver,
+ /* there is no aborted transaction
+ * support for MsgVersion < 2 */
+ NULL, &msetr->msetr_rkq);
+
+ inner_msetr.msetr_srcname = "compressed ";
+
+ if (MsgVersion == 1) {
+ /* postproc() will convert relative to
+ * absolute offsets */
+ inner_msetr.msetr_relative_offsets = 1;
+ inner_msetr.msetr_outer.offset = Offset;
+
+ /* Apply single LogAppendTime timestamp for
+ * all messages. */
+ if (Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) {
+ inner_msetr.msetr_outer.tstype =
+ RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
+ inner_msetr.msetr_outer.timestamp = Timestamp;
+ }
+ }
+
+ /* Parse the inner MessageSet */
+ err = rd_kafka_msgset_reader_run(&inner_msetr);
+
+ /* Transfer message count from inner to outer */
+ msetr->msetr_msgcnt += inner_msetr.msetr_msgcnt;
+ msetr->msetr_msg_bytes += inner_msetr.msetr_msg_bytes;
+
+
+ } else {
+ /* MsgVersion 2 */
+ rd_kafka_buf_t *orig_rkbuf = msetr->msetr_rkbuf;
+
+ rkbufz->rkbuf_uflow_mitigation =
+ "truncated response from broker (ok)";
+
+ /* Temporarily replace read buffer with uncompressed buffer */
+ msetr->msetr_rkbuf = rkbufz;
+
+ /* Read messages */
+ err = rd_kafka_msgset_reader_msgs_v2(msetr);
+
+ /* Restore original buffer */
+ msetr->msetr_rkbuf = orig_rkbuf;
+ }
+
+ /* Loose our refcnt of the uncompressed rkbuf.
+ * Individual messages/rko's will have their own reference. */
+ rd_kafka_buf_destroy(rkbufz);
+
+ return err;
+
+err:
+ /* Enqueue error messsage:
+ * Create op and push on temporary queue. */
+ rd_kafka_consumer_err(
+ &msetr->msetr_rkq, msetr->msetr_broker_id, err,
+ msetr->msetr_tver->version, NULL, rktp, Offset,
+ "Decompression (codec 0x%x) of message at %" PRIu64 " of %" PRIusz
+ " bytes failed: %s",
+ codec, Offset, compressed_size, rd_kafka_err2str(err));
+
+ return err;
+}
+
+
+
+/**
+ * @brief Message parser for MsgVersion v0..1
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR on success or on single-message errors,
+ * or any other error code when the MessageSet parser should stop
+ * parsing (such as for partial Messages).
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_msg_v0_1(rd_kafka_msgset_reader_t *msetr) {
+ rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
+ rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
+ rd_kafka_broker_t *rkb = msetr->msetr_rkb;
+ struct {
+ int64_t Offset; /* MessageSet header */
+ int32_t MessageSize; /* MessageSet header */
+ int32_t Crc;
+ int8_t MagicByte; /* MsgVersion */
+ int8_t Attributes;
+ int64_t Timestamp; /* v1 */
+ } hdr; /* Message header */
+ rd_kafkap_bytes_t Key;
+ rd_kafkap_bytes_t Value;
+ int32_t Value_len;
+ rd_kafka_op_t *rko;
+ size_t hdrsize = 6; /* Header size following MessageSize */
+ rd_slice_t crc_slice;
+ rd_kafka_msg_t *rkm;
+ int relative_offsets = 0;
+ const char *reloff_str = "";
+ /* Only log decoding errors if protocol debugging enabled. */
+ int log_decode_errors =
+ (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
+ ? LOG_DEBUG
+ : 0;
+ size_t message_end;
+
+ rd_kafka_buf_read_i64(rkbuf, &hdr.Offset);
+ rd_kafka_buf_read_i32(rkbuf, &hdr.MessageSize);
+ message_end = rd_slice_offset(&rkbuf->rkbuf_reader) + hdr.MessageSize;
+
+ rd_kafka_buf_read_i32(rkbuf, &hdr.Crc);
+ if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader, &crc_slice,
+ hdr.MessageSize - 4))
+ rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - 4);
+
+ rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte);
+ rd_kafka_buf_read_i8(rkbuf, &hdr.Attributes);
+
+ if (hdr.MagicByte == 1) { /* MsgVersion */
+ rd_kafka_buf_read_i64(rkbuf, &hdr.Timestamp);
+ hdrsize += 8;
+ /* MsgVersion 1 has relative offsets for compressed
+ * MessageSets*/
+ if (!(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) &&
+ msetr->msetr_relative_offsets) {
+ relative_offsets = 1;
+ reloff_str = "relative ";
+ }
+ } else
+ hdr.Timestamp = 0;
+
+ /* Verify MessageSize */
+ if (unlikely(hdr.MessageSize < (ssize_t)hdrsize))
+ rd_kafka_buf_parse_fail(
+ rkbuf,
+ "Message at %soffset %" PRId64 " MessageSize %" PRId32
+ " < hdrsize %" PRIusz,
+ reloff_str, hdr.Offset, hdr.MessageSize, hdrsize);
+
+ /* Early check for partial messages */
+ rd_kafka_buf_check_len(rkbuf, hdr.MessageSize - hdrsize);
+
+ if (rkb->rkb_rk->rk_conf.check_crcs) {
+ /* Verify CRC32 if desired. */
+ uint32_t calc_crc;
+
+ calc_crc = rd_slice_crc32(&crc_slice);
+ rd_dassert(rd_slice_remains(&crc_slice) == 0);
+
+ if (unlikely(hdr.Crc != (int32_t)calc_crc)) {
+ /* Propagate CRC error to application and
+ * continue with next message. */
+ rd_kafka_consumer_err(
+ &msetr->msetr_rkq, msetr->msetr_broker_id,
+ RD_KAFKA_RESP_ERR__BAD_MSG,
+ msetr->msetr_tver->version, NULL, rktp, hdr.Offset,
+ "Message at %soffset %" PRId64 " (%" PRId32
+ " bytes) "
+ "failed CRC32 check "
+ "(original 0x%" PRIx32
+ " != "
+ "calculated 0x%" PRIx32 ")",
+ reloff_str, hdr.Offset, hdr.MessageSize, hdr.Crc,
+ calc_crc);
+ rd_kafka_buf_skip_to(rkbuf, message_end);
+ rd_atomic64_add(&rkb->rkb_c.rx_err, 1);
+ /* Continue with next message */
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+ }
+
+
+ /* Extract key */
+ rd_kafka_buf_read_bytes(rkbuf, &Key);
+
+ /* Extract Value */
+ rd_kafka_buf_read_bytes(rkbuf, &Value);
+ Value_len = RD_KAFKAP_BYTES_LEN(&Value);
+
+ /* MessageSets may contain offsets earlier than we
+ * requested (compressed MessageSets in particular),
+ * drop the earlier messages.
+ * Note: the inner offset may only be trusted for
+ * absolute offsets. KIP-31 introduced
+ * ApiVersion 2 that maintains relative offsets
+ * of compressed messages and the base offset
+ * in the outer message is the offset of
+ * the *LAST* message in the MessageSet.
+ * This requires us to assign offsets
+ * after all messages have been read from
+ * the messageset, and it also means
+ * we cant perform this offset check here
+ * in that case. */
+ if (!relative_offsets &&
+ hdr.Offset < rktp->rktp_offsets.fetch_pos.offset)
+ return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */
+
+ /* Handle compressed MessageSet */
+ if (unlikely(hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK))
+ return rd_kafka_msgset_reader_decompress(
+ msetr, hdr.MagicByte, hdr.Attributes, hdr.Timestamp,
+ hdr.Offset, Value.data, Value_len);
+
+
+ /* Pure uncompressed message, this is the innermost
+ * handler after all compression and cascaded
+ * MessageSets have been peeled off. */
+
+ /* Create op/message container for message. */
+ rko = rd_kafka_op_new_fetch_msg(
+ &rkm, rktp, msetr->msetr_tver->version, rkbuf, hdr.Offset,
+ (size_t)RD_KAFKAP_BYTES_LEN(&Key),
+ RD_KAFKAP_BYTES_IS_NULL(&Key) ? NULL : Key.data,
+ (size_t)RD_KAFKAP_BYTES_LEN(&Value),
+ RD_KAFKAP_BYTES_IS_NULL(&Value) ? NULL : Value.data);
+
+ rkm->rkm_u.consumer.leader_epoch = msetr->msetr_leader_epoch;
+ rkm->rkm_broker_id = msetr->msetr_broker_id;
+
+ /* Assign message timestamp.
+ * If message was in a compressed MessageSet and the outer/wrapper
+ * Message.Attribute had a LOG_APPEND_TIME set, use the
+ * outer timestamp */
+ if (msetr->msetr_outer.tstype == RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME) {
+ rkm->rkm_timestamp = msetr->msetr_outer.timestamp;
+ rkm->rkm_tstype = msetr->msetr_outer.tstype;
+
+ } else if (hdr.MagicByte >= 1 && hdr.Timestamp) {
+ rkm->rkm_timestamp = hdr.Timestamp;
+ if (hdr.Attributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
+ else
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
+ }
+
+ /* Enqueue message on temporary queue */
+ rd_kafka_q_enq(&msetr->msetr_rkq, rko);
+ msetr->msetr_msgcnt++;
+ msetr->msetr_msg_bytes += rkm->rkm_key_len + rkm->rkm_len;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue */
+
+err_parse:
+ /* Count all parse errors as partial message errors. */
+ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
+ return rkbuf->rkbuf_err;
+}
+
+
+
+/**
+ * @brief Message parser for MsgVersion v2
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_msg_v2(rd_kafka_msgset_reader_t *msetr) {
+ rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
+ rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
+ struct {
+ int64_t Length;
+ int8_t MsgAttributes;
+ int64_t TimestampDelta;
+ int64_t OffsetDelta;
+ int64_t Offset; /* Absolute offset */
+ rd_kafkap_bytes_t Key;
+ rd_kafkap_bytes_t Value;
+ rd_kafkap_bytes_t Headers;
+ } hdr;
+ rd_kafka_op_t *rko;
+ rd_kafka_msg_t *rkm;
+ /* Only log decoding errors if protocol debugging enabled. */
+ int log_decode_errors =
+ (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
+ ? LOG_DEBUG
+ : 0;
+ size_t message_end;
+
+ rd_kafka_buf_read_varint(rkbuf, &hdr.Length);
+ message_end =
+ rd_slice_offset(&rkbuf->rkbuf_reader) + (size_t)hdr.Length;
+ rd_kafka_buf_read_i8(rkbuf, &hdr.MsgAttributes);
+
+ rd_kafka_buf_read_varint(rkbuf, &hdr.TimestampDelta);
+ rd_kafka_buf_read_varint(rkbuf, &hdr.OffsetDelta);
+ hdr.Offset = msetr->msetr_v2_hdr->BaseOffset + hdr.OffsetDelta;
+
+ /* Skip message if outdated */
+ if (hdr.Offset < rktp->rktp_offsets.fetch_pos.offset) {
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG",
+ "%s [%" PRId32
+ "]: "
+ "Skip offset %" PRId64 " < fetch_offset %" PRId64,
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ hdr.Offset, rktp->rktp_offsets.fetch_pos.offset);
+ rd_kafka_buf_skip_to(rkbuf, message_end);
+ return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next msg */
+ }
+
+ /* Handle control messages */
+ if (msetr->msetr_v2_hdr->Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL) {
+ struct {
+ int64_t KeySize;
+ int16_t Version;
+ int16_t Type;
+ } ctrl_data;
+ int64_t aborted_txn_start_offset;
+
+ rd_kafka_buf_read_varint(rkbuf, &ctrl_data.KeySize);
+
+ if (unlikely(ctrl_data.KeySize < 2))
+ rd_kafka_buf_parse_fail(
+ rkbuf,
+ "%s [%" PRId32
+ "]: "
+ "Ctrl message at offset %" PRId64
+ " has invalid key size %" PRId64,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, hdr.Offset,
+ ctrl_data.KeySize);
+
+ rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Version);
+
+ if (ctrl_data.Version != 0) {
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG",
+ "%s [%" PRId32
+ "]: "
+ "Skipping ctrl msg with "
+ "unsupported version %" PRId16
+ " at offset %" PRId64,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, ctrl_data.Version,
+ hdr.Offset);
+ rd_kafka_buf_skip_to(rkbuf, message_end);
+ return RD_KAFKA_RESP_ERR_NO_ERROR; /* Continue with next
+ msg */
+ }
+
+ if (unlikely(ctrl_data.KeySize != 4))
+ rd_kafka_buf_parse_fail(
+ rkbuf,
+ "%s [%" PRId32
+ "]: "
+ "Ctrl message at offset %" PRId64
+ " has invalid key size %" PRId64,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, hdr.Offset,
+ ctrl_data.KeySize);
+
+ rd_kafka_buf_read_i16(rkbuf, &ctrl_data.Type);
+
+ /* Client is uninterested in value of commit marker */
+ rd_kafka_buf_skip(
+ rkbuf, (int32_t)(message_end -
+ rd_slice_offset(&rkbuf->rkbuf_reader)));
+
+ switch (ctrl_data.Type) {
+ case RD_KAFKA_CTRL_MSG_COMMIT:
+ /* always ignore. */
+ break;
+
+ case RD_KAFKA_CTRL_MSG_ABORT:
+ if (msetr->msetr_rkb->rkb_rk->rk_conf.isolation_level !=
+ RD_KAFKA_READ_COMMITTED)
+ break;
+
+ if (unlikely(!msetr->msetr_aborted_txns)) {
+ rd_rkb_dbg(msetr->msetr_rkb,
+ MSG | RD_KAFKA_DBG_EOS, "TXN",
+ "%s [%" PRId32
+ "] received abort txn "
+ "ctrl msg at offset %" PRId64
+ " for "
+ "PID %" PRId64
+ ", but there are no "
+ "known aborted transactions: "
+ "ignoring",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, hdr.Offset,
+ msetr->msetr_v2_hdr->PID);
+ break;
+ }
+
+ /* This marks the end of this (aborted) transaction,
+ * advance to next aborted transaction in list */
+ aborted_txn_start_offset =
+ rd_kafka_aborted_txns_pop_offset(
+ msetr->msetr_aborted_txns,
+ msetr->msetr_v2_hdr->PID, hdr.Offset);
+
+ if (unlikely(aborted_txn_start_offset == -1)) {
+ rd_rkb_dbg(msetr->msetr_rkb,
+ MSG | RD_KAFKA_DBG_EOS, "TXN",
+ "%s [%" PRId32
+ "] received abort txn "
+ "ctrl msg at offset %" PRId64
+ " for "
+ "PID %" PRId64
+ ", but this offset is "
+ "not listed as an aborted "
+ "transaction: aborted transaction "
+ "was possibly empty: ignoring",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, hdr.Offset,
+ msetr->msetr_v2_hdr->PID);
+ break;
+ }
+ break;
+
+
+ default:
+ rd_rkb_dbg(msetr->msetr_rkb, MSG,
+ "TXN"
+ "%s [%" PRId32
+ "]: "
+ "Unsupported ctrl message "
+ "type %" PRId16
+ " at offset"
+ " %" PRId64 ": ignoring",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, ctrl_data.Type,
+ hdr.Offset);
+ break;
+ }
+
+ rko = rd_kafka_op_new_ctrl_msg(rktp, msetr->msetr_tver->version,
+ rkbuf, hdr.Offset);
+ rd_kafka_q_enq(&msetr->msetr_rkq, rko);
+ msetr->msetr_msgcnt++;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ /* Regular message */
+
+ /* Note: messages in aborted transactions are skipped at the MessageSet
+ * level */
+
+ rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Key);
+ rd_kafka_buf_read_bytes_varint(rkbuf, &hdr.Value);
+
+ /* We parse the Headers later, just store the size (possibly truncated)
+ * and pointer to the headers. */
+ hdr.Headers.len =
+ (int32_t)(message_end - rd_slice_offset(&rkbuf->rkbuf_reader));
+ rd_kafka_buf_read_ptr(rkbuf, &hdr.Headers.data, hdr.Headers.len);
+
+ /* Create op/message container for message. */
+ rko = rd_kafka_op_new_fetch_msg(
+ &rkm, rktp, msetr->msetr_tver->version, rkbuf, hdr.Offset,
+ (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Key),
+ RD_KAFKAP_BYTES_IS_NULL(&hdr.Key) ? NULL : hdr.Key.data,
+ (size_t)RD_KAFKAP_BYTES_LEN(&hdr.Value),
+ RD_KAFKAP_BYTES_IS_NULL(&hdr.Value) ? NULL : hdr.Value.data);
+
+ rkm->rkm_u.consumer.leader_epoch = msetr->msetr_leader_epoch;
+ rkm->rkm_broker_id = msetr->msetr_broker_id;
+
+ /* Store pointer to unparsed message headers, they will
+ * be parsed on the first access.
+ * This pointer points to the rkbuf payload.
+ * Note: can't perform struct copy here due to const fields (MSVC) */
+ rkm->rkm_u.consumer.binhdrs.len = hdr.Headers.len;
+ rkm->rkm_u.consumer.binhdrs.data = hdr.Headers.data;
+
+ /* Set timestamp.
+ *
+ * When broker assigns the timestamps (LOG_APPEND_TIME) it will
+ * assign the same timestamp for all messages in a MessageSet
+ * using MaxTimestamp.
+ */
+ if ((msetr->msetr_v2_hdr->Attributes &
+ RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME) ||
+ (hdr.MsgAttributes & RD_KAFKA_MSG_ATTR_LOG_APPEND_TIME)) {
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_LOG_APPEND_TIME;
+ rkm->rkm_timestamp = msetr->msetr_v2_hdr->MaxTimestamp;
+ } else {
+ rkm->rkm_tstype = RD_KAFKA_TIMESTAMP_CREATE_TIME;
+ rkm->rkm_timestamp =
+ msetr->msetr_v2_hdr->BaseTimestamp + hdr.TimestampDelta;
+ }
+
+
+ /* Enqueue message on temporary queue */
+ rd_kafka_q_enq(&msetr->msetr_rkq, rko);
+ msetr->msetr_msgcnt++;
+ msetr->msetr_msg_bytes += rkm->rkm_key_len + rkm->rkm_len;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ /* Count all parse errors as partial message errors. */
+ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
+ return rkbuf->rkbuf_err;
+}
+
+
+/**
+ * @brief Read v2 messages from current buffer position.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_msgs_v2(rd_kafka_msgset_reader_t *msetr) {
+ rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
+ rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
+ /* Only log decoding errors if protocol debugging enabled. */
+ int log_decode_errors =
+ (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
+ ? LOG_DEBUG
+ : 0;
+
+ if (msetr->msetr_aborted_txns != NULL &&
+ (msetr->msetr_v2_hdr->Attributes &
+ (RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL |
+ RD_KAFKA_MSGSET_V2_ATTR_CONTROL)) ==
+ RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL) {
+ /* Transactional non-control MessageSet:
+ * check if it is part of an aborted transaction. */
+ int64_t txn_start_offset = rd_kafka_aborted_txns_get_offset(
+ msetr->msetr_aborted_txns, msetr->msetr_v2_hdr->PID);
+
+ if (txn_start_offset != -1 &&
+ msetr->msetr_v2_hdr->BaseOffset >= txn_start_offset) {
+ /* MessageSet is part of aborted transaction */
+ rd_rkb_dbg(msetr->msetr_rkb, MSG, "MSG",
+ "%s [%" PRId32
+ "]: "
+ "Skipping %" PRId32
+ " message(s) "
+ "in aborted transaction "
+ "at offset %" PRId64 " for PID %" PRId64,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ msetr->msetr_v2_hdr->RecordCount,
+ txn_start_offset, msetr->msetr_v2_hdr->PID);
+ rd_kafka_buf_skip(
+ msetr->msetr_rkbuf,
+ rd_slice_remains(
+ &msetr->msetr_rkbuf->rkbuf_reader));
+ msetr->msetr_aborted_cnt++;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+ }
+
+ while (rd_kafka_buf_read_remain(msetr->msetr_rkbuf)) {
+ rd_kafka_resp_err_t err;
+ err = rd_kafka_msgset_reader_msg_v2(msetr);
+ if (unlikely(err))
+ return err;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ /* Count all parse errors as partial message errors. */
+ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
+ msetr->msetr_v2_hdr = NULL;
+ return rkbuf->rkbuf_err;
+}
+
+
+
+/**
+ * @brief MessageSet reader for MsgVersion v2 (FetchRequest v4)
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_v2(rd_kafka_msgset_reader_t *msetr) {
+ rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
+ rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
+ struct msgset_v2_hdr hdr;
+ rd_slice_t save_slice;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ size_t len_start;
+ size_t payload_size;
+ int64_t LastOffset; /* Last absolute Offset in MessageSet header */
+ /* Only log decoding errors if protocol debugging enabled. */
+ int log_decode_errors =
+ (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
+ ? LOG_DEBUG
+ : 0;
+
+ rd_kafka_buf_read_i64(rkbuf, &hdr.BaseOffset);
+ rd_kafka_buf_read_i32(rkbuf, &hdr.Length);
+ len_start = rd_slice_offset(&rkbuf->rkbuf_reader);
+
+ if (unlikely(hdr.Length < RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4))
+ rd_kafka_buf_parse_fail(rkbuf,
+ "%s [%" PRId32
+ "] "
+ "MessageSet at offset %" PRId64
+ " length %" PRId32 " < header size %d",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, hdr.BaseOffset,
+ hdr.Length,
+ RD_KAFKAP_MSGSET_V2_SIZE - 8 - 4);
+
+ rd_kafka_buf_read_i32(rkbuf, &hdr.PartitionLeaderEpoch);
+ msetr->msetr_leader_epoch = hdr.PartitionLeaderEpoch;
+
+ rd_kafka_buf_read_i8(rkbuf, &hdr.MagicByte);
+ rd_kafka_buf_read_i32(rkbuf, &hdr.Crc);
+
+ if (msetr->msetr_rkb->rkb_rk->rk_conf.check_crcs) {
+ /* Verify CRC32C if desired. */
+ uint32_t calc_crc;
+ rd_slice_t crc_slice;
+ size_t crc_len = hdr.Length - 4 - 1 - 4;
+
+ if (!rd_slice_narrow_copy_relative(&rkbuf->rkbuf_reader,
+ &crc_slice, crc_len))
+ rd_kafka_buf_check_len(rkbuf, crc_len);
+
+ calc_crc = rd_slice_crc32c(&crc_slice);
+
+ if (unlikely((uint32_t)hdr.Crc != calc_crc)) {
+ /* Propagate CRC error to application and
+ * continue with next message. */
+ rd_kafka_consumer_err(
+ &msetr->msetr_rkq, msetr->msetr_broker_id,
+ RD_KAFKA_RESP_ERR__BAD_MSG,
+ msetr->msetr_tver->version, NULL, rktp,
+ hdr.BaseOffset,
+ "MessageSet at offset %" PRId64 " (%" PRId32
+ " bytes) "
+ "failed CRC32C check "
+ "(original 0x%" PRIx32
+ " != "
+ "calculated 0x%" PRIx32 ")",
+ hdr.BaseOffset, hdr.Length, hdr.Crc, calc_crc);
+ rd_kafka_buf_skip_to(rkbuf, crc_len);
+ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_err, 1);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+ }
+
+ rd_kafka_buf_read_i16(rkbuf, &hdr.Attributes);
+ rd_kafka_buf_read_i32(rkbuf, &hdr.LastOffsetDelta);
+ LastOffset = hdr.BaseOffset + hdr.LastOffsetDelta;
+ rd_kafka_buf_read_i64(rkbuf, &hdr.BaseTimestamp);
+ rd_kafka_buf_read_i64(rkbuf, &hdr.MaxTimestamp);
+ rd_kafka_buf_read_i64(rkbuf, &hdr.PID);
+ rd_kafka_buf_read_i16(rkbuf, &hdr.ProducerEpoch);
+ rd_kafka_buf_read_i32(rkbuf, &hdr.BaseSequence);
+ rd_kafka_buf_read_i32(rkbuf, &hdr.RecordCount);
+
+ /* Payload size is hdr.Length - MessageSet headers */
+ payload_size =
+ hdr.Length - (rd_slice_offset(&rkbuf->rkbuf_reader) - len_start);
+
+ if (unlikely(payload_size > rd_kafka_buf_read_remain(rkbuf)))
+ rd_kafka_buf_underflow_fail(
+ rkbuf, payload_size,
+ "%s [%" PRId32
+ "] "
+ "MessageSet at offset %" PRId64 " payload size %" PRIusz,
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ hdr.BaseOffset, payload_size);
+
+ /* If entire MessageSet contains old outdated offsets, skip it. */
+ if (LastOffset < rktp->rktp_offsets.fetch_pos.offset) {
+ rd_kafka_buf_skip(rkbuf, payload_size);
+ goto done;
+ }
+
+ if (hdr.Attributes & RD_KAFKA_MSGSET_V2_ATTR_CONTROL)
+ msetr->msetr_ctrl_cnt++;
+
+ msetr->msetr_v2_hdr = &hdr;
+
+ /* Handle compressed MessageSet */
+ if (hdr.Attributes & RD_KAFKA_MSG_ATTR_COMPRESSION_MASK) {
+ const void *compressed;
+
+ compressed =
+ rd_slice_ensure_contig(&rkbuf->rkbuf_reader, payload_size);
+ rd_assert(compressed);
+
+ err = rd_kafka_msgset_reader_decompress(
+ msetr, 2 /*MsgVersion v2*/, hdr.Attributes,
+ hdr.BaseTimestamp, hdr.BaseOffset, compressed,
+ payload_size);
+ if (err)
+ goto err;
+
+ } else {
+ /* Read uncompressed messages */
+
+ /* Save original slice, reduce size of the current one to
+ * be limited by the MessageSet.Length, and then start reading
+ * messages until the lesser slice is exhausted. */
+ if (!rd_slice_narrow_relative(&rkbuf->rkbuf_reader, &save_slice,
+ payload_size))
+ rd_kafka_buf_check_len(rkbuf, payload_size);
+
+ /* Read messages */
+ err = rd_kafka_msgset_reader_msgs_v2(msetr);
+
+ /* Restore wider slice */
+ rd_slice_widen(&rkbuf->rkbuf_reader, &save_slice);
+
+ if (unlikely(err))
+ goto err;
+ }
+
+
+done:
+ /* Set the next fetch offset to the MessageSet header's last offset + 1
+ * to avoid getting stuck on compacted MessageSets where the last
+ * Message in the MessageSet has an Offset < MessageSet header's
+ * last offset. See KAFKA-5443 */
+ msetr->msetr_next_offset = LastOffset + 1;
+
+ msetr->msetr_v2_hdr = NULL;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ /* Count all parse errors as partial message errors. */
+ rd_atomic64_add(&msetr->msetr_rkb->rkb_c.rx_partial, 1);
+ err = rkbuf->rkbuf_err;
+ /* FALLTHRU */
+err:
+ msetr->msetr_v2_hdr = NULL;
+ return err;
+}
+
+
+/**
+ * @brief Peek into the next MessageSet to find the MsgVersion.
+ *
+ * @param MagicBytep the MsgVersion is returned here on success.
+ *
+ * @returns an error on read underflow or if the MsgVersion is
+ * unsupported.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_peek_msg_version(rd_kafka_msgset_reader_t *msetr,
+ int8_t *MagicBytep) {
+ rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
+ rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
+ /* Only log decoding errors if protocol debugging enabled. */
+ int log_decode_errors =
+ (rkbuf->rkbuf_rkb->rkb_rk->rk_conf.debug & RD_KAFKA_DBG_PROTOCOL)
+ ? LOG_DEBUG
+ : 0;
+ size_t read_offset = rd_slice_offset(&rkbuf->rkbuf_reader);
+
+ rd_kafka_buf_peek_i8(rkbuf, read_offset + 8 + 4 + 4, MagicBytep);
+
+ if (unlikely(*MagicBytep < 0 || *MagicBytep > 2)) {
+ int64_t Offset; /* For error logging */
+ int32_t Length;
+
+ rd_kafka_buf_read_i64(rkbuf, &Offset);
+
+ rd_rkb_dbg(msetr->msetr_rkb,
+ MSG | RD_KAFKA_DBG_PROTOCOL | RD_KAFKA_DBG_FETCH,
+ "MAGICBYTE",
+ "%s [%" PRId32
+ "]: "
+ "Unsupported Message(Set) MagicByte %d at "
+ "offset %" PRId64
+ " "
+ "(buffer position %" PRIusz "/%" PRIusz
+ "): skipping",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ (int)*MagicBytep, Offset, read_offset,
+ rd_slice_size(&rkbuf->rkbuf_reader));
+
+ if (Offset >=
+ msetr->msetr_rktp->rktp_offsets.fetch_pos.offset) {
+ rd_kafka_consumer_err(
+ &msetr->msetr_rkq, msetr->msetr_broker_id,
+ RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED,
+ msetr->msetr_tver->version, NULL, rktp, Offset,
+ "Unsupported Message(Set) MagicByte %d "
+ "at offset %" PRId64,
+ (int)*MagicBytep, Offset);
+ /* Skip message(set) */
+ msetr->msetr_rktp->rktp_offsets.fetch_pos.offset =
+ Offset + 1;
+ }
+
+ /* Skip this Message(Set).
+ * If the message is malformed, the skip may trigger err_parse
+ * and return ERR__BAD_MSG. */
+ rd_kafka_buf_read_i32(rkbuf, &Length);
+ rd_kafka_buf_skip(rkbuf, Length);
+
+ return RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err_parse:
+ return RD_KAFKA_RESP_ERR__BAD_MSG;
+}
+
+
+/**
+ * @brief Parse and read messages from msgset reader buffer.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader(rd_kafka_msgset_reader_t *msetr) {
+ rd_kafka_buf_t *rkbuf = msetr->msetr_rkbuf;
+ rd_kafka_resp_err_t (*reader[])(rd_kafka_msgset_reader_t *) = {
+ /* Indexed by MsgVersion/MagicByte, pointing to
+ * a Msg(Set)Version reader */
+ [0] = rd_kafka_msgset_reader_msg_v0_1,
+ [1] = rd_kafka_msgset_reader_msg_v0_1,
+ [2] = rd_kafka_msgset_reader_v2};
+ rd_kafka_resp_err_t err;
+
+ /* Parse MessageSets until the slice is exhausted or an
+ * error occurs (typically a partial message). */
+ do {
+ int8_t MagicByte;
+
+ /* We dont know the MsgVersion at this point, peek where the
+ * MagicByte resides both in MsgVersion v0..1 and v2 to
+ * know which MessageSet reader to use. */
+ err =
+ rd_kafka_msgset_reader_peek_msg_version(msetr, &MagicByte);
+ if (unlikely(err)) {
+ if (err == RD_KAFKA_RESP_ERR__BAD_MSG)
+ /* Read underflow, not an error.
+ * Broker may return a partial Fetch response
+ * due to its use of sendfile(2). */
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Continue on unsupported MsgVersions, the
+ * MessageSet will be skipped. */
+ continue;
+ }
+
+ /* Use MsgVersion-specific reader */
+ err = reader[(int)MagicByte](msetr);
+
+ } while (!err && rd_slice_remains(&rkbuf->rkbuf_reader) > 0);
+
+ return err;
+}
+
+
+
+/**
+ * @brief MessageSet post-processing.
+ *
+ * @param last_offsetp will be set to the offset of the last message in the set,
+ * or -1 if not applicable.
+ */
+static void rd_kafka_msgset_reader_postproc(rd_kafka_msgset_reader_t *msetr,
+ int64_t *last_offsetp) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_q_last(&msetr->msetr_rkq, RD_KAFKA_OP_FETCH,
+ 0 /* no error ops */);
+ if (rko) {
+ *last_offsetp = rko->rko_u.fetch.rkm.rkm_offset;
+
+ if (*last_offsetp != -1 && msetr->msetr_relative_offsets) {
+ /* Update messages to absolute offsets
+ * and purge any messages older than the current
+ * fetch offset. */
+ rd_kafka_q_fix_offsets(
+ &msetr->msetr_rkq,
+ msetr->msetr_rktp->rktp_offsets.fetch_pos.offset,
+ msetr->msetr_outer.offset - *last_offsetp);
+ }
+ }
+}
+
+
+
+/**
+ * @brief Run the MessageSet reader, read messages until buffer is
+ * exhausted (or error encountered), enqueue parsed messages on
+ * partition queue.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if MessageSet was successfully
+ * or partially parsed. When other error codes are returned it
+ * indicates a semi-permanent error (such as unsupported MsgVersion)
+ * and the fetcher should back off this partition to avoid
+ * busy-looping.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_msgset_reader_run(rd_kafka_msgset_reader_t *msetr) {
+ rd_kafka_toppar_t *rktp = msetr->msetr_rktp;
+ rd_kafka_resp_err_t err;
+ int64_t last_offset = -1;
+
+ /* Parse MessageSets and messages */
+ err = rd_kafka_msgset_reader(msetr);
+
+ if (unlikely(rd_kafka_q_len(&msetr->msetr_rkq) == 0)) {
+ /* The message set didn't contain at least one full message
+ * or no error was posted on the response queue.
+ * This means the size limit perhaps was too tight,
+ * increase it automatically.
+ * If there was at least one control message there
+ * is probably not a size limit and nothing is done.
+ * If there were aborted messagesets and no underflow then
+ * there is no error either (#2993).
+ *
+ * Also; avoid propagating underflow errors, which cause
+ * backoffs, since we'll want to continue fetching the
+ * remaining truncated messages as soon as possible.
+ */
+ if (msetr->msetr_ctrl_cnt > 0) {
+ /* Noop */
+ if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ } else if (rktp->rktp_fetch_msg_max_bytes < (1 << 30)) {
+ rktp->rktp_fetch_msg_max_bytes *= 2;
+ rd_rkb_dbg(msetr->msetr_rkb, FETCH, "CONSUME",
+ "Topic %s [%" PRId32
+ "]: Increasing "
+ "max fetch bytes to %" PRId32,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rktp->rktp_fetch_msg_max_bytes);
+
+ if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ } else if (!err && msetr->msetr_aborted_cnt == 0) {
+ rd_kafka_consumer_err(
+ &msetr->msetr_rkq, msetr->msetr_broker_id,
+ RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
+ msetr->msetr_tver->version, NULL, rktp,
+ rktp->rktp_offsets.fetch_pos.offset,
+ "Message at offset %" PRId64
+ " might be too large to fetch, try increasing "
+ "receive.message.max.bytes",
+ rktp->rktp_offsets.fetch_pos.offset);
+
+ } else if (msetr->msetr_aborted_cnt > 0) {
+ /* Noop */
+ if (err == RD_KAFKA_RESP_ERR__UNDERFLOW)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ } else {
+ /* MessageSet post-processing. */
+ rd_kafka_msgset_reader_postproc(msetr, &last_offset);
+
+ /* Ignore parse errors if there was at least one
+ * good message since it probably indicates a
+ * partial response rather than an erroneous one. */
+ if (err == RD_KAFKA_RESP_ERR__UNDERFLOW &&
+ msetr->msetr_msgcnt > 0)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_rkb_dbg(msetr->msetr_rkb, MSG | RD_KAFKA_DBG_FETCH, "CONSUME",
+ "Enqueue %i %smessage(s) (%" PRId64
+ " bytes, %d ops) on %s [%" PRId32
+ "] fetch queue (qlen %d, v%d, last_offset %" PRId64
+ ", %d ctrl msgs, %d aborted msgsets, %s)",
+ msetr->msetr_msgcnt, msetr->msetr_srcname,
+ msetr->msetr_msg_bytes, rd_kafka_q_len(&msetr->msetr_rkq),
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_q_len(msetr->msetr_par_rkq),
+ msetr->msetr_tver->version, last_offset,
+ msetr->msetr_ctrl_cnt, msetr->msetr_aborted_cnt,
+ msetr->msetr_compression
+ ? rd_kafka_compression2str(msetr->msetr_compression)
+ : "uncompressed");
+
+ /* Concat all messages&errors onto the parent's queue
+ * (the partition's fetch queue) */
+ if (rd_kafka_q_concat(msetr->msetr_par_rkq, &msetr->msetr_rkq) != -1) {
+ /* Update partition's fetch offset based on
+ * last message's offest. */
+ if (likely(last_offset != -1))
+ rktp->rktp_offsets.fetch_pos.offset = last_offset + 1;
+ }
+
+ /* Adjust next fetch offset if outlier code has indicated
+ * an even later next offset. */
+ if (msetr->msetr_next_offset > rktp->rktp_offsets.fetch_pos.offset)
+ rktp->rktp_offsets.fetch_pos.offset = msetr->msetr_next_offset;
+
+ rktp->rktp_offsets.fetch_pos.leader_epoch = msetr->msetr_leader_epoch;
+
+ rd_kafka_q_destroy_owner(&msetr->msetr_rkq);
+
+ /* Skip remaining part of slice so caller can continue
+ * with next partition. */
+ rd_slice_read(&msetr->msetr_rkbuf->rkbuf_reader, NULL,
+ rd_slice_remains(&msetr->msetr_rkbuf->rkbuf_reader));
+ return err;
+}
+
+
+
+/**
+ * @brief Parse one MessageSet at the current buffer read position,
+ * enqueueing messages, propagating errors, etc.
+ * @remark The current rkbuf_reader slice must be limited to the MessageSet size
+ *
+ * @returns see rd_kafka_msgset_reader_run()
+ */
+rd_kafka_resp_err_t
+rd_kafka_msgset_parse(rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_aborted_txns_t *aborted_txns,
+ const struct rd_kafka_toppar_ver *tver) {
+ rd_kafka_msgset_reader_t msetr;
+ rd_kafka_resp_err_t err;
+
+ rd_kafka_msgset_reader_init(&msetr, rkbuf, rktp, tver, aborted_txns,
+ rktp->rktp_fetchq);
+
+ /* Parse and handle the message set */
+ err = rd_kafka_msgset_reader_run(&msetr);
+
+ rd_atomic64_add(&rktp->rktp_c.rx_msgs, msetr.msetr_msgcnt);
+ rd_atomic64_add(&rktp->rktp_c.rx_msg_bytes, msetr.msetr_msg_bytes);
+
+ rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchcnt,
+ (int64_t)msetr.msetr_msgcnt);
+ rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchsize,
+ (int64_t)msetr.msetr_msg_bytes);
+
+ return err;
+}
+
+
+/**
+ * @brief Offset comparator
+ */
+static int rd_kafka_offset_cmp(const void *_a, const void *_b) {
+ const int64_t *a = _a, *b = _b;
+ return (*a > *b) - (*a < *b);
+}
+
+
+/**
+ * @brief Pid comparator for rd_kafka_aborted_txn_start_offsets_t
+ */
+static int rd_kafka_aborted_txn_cmp_by_pid(const void *_a, const void *_b) {
+ const rd_kafka_aborted_txn_start_offsets_t *a = _a, *b = _b;
+ return (a->pid > b->pid) - (a->pid < b->pid);
+}
+
+
+/**
+ * @brief Free resources associated with an AVL tree node.
+ */
+static void rd_kafka_aborted_txn_node_destroy(void *_node_ptr) {
+ rd_kafka_aborted_txn_start_offsets_t *node_ptr = _node_ptr;
+ rd_list_destroy(&node_ptr->offsets);
+ rd_free(node_ptr);
+}
+
+
+/**
+ * @brief Allocate memory for, and initialize a new
+ * rd_kafka_aborted_txns_t struct.
+ */
+rd_kafka_aborted_txns_t *rd_kafka_aborted_txns_new(int32_t txn_cnt) {
+ rd_kafka_aborted_txns_t *aborted_txns;
+ aborted_txns = rd_malloc(sizeof(*aborted_txns));
+ rd_avl_init(&aborted_txns->avl, rd_kafka_aborted_txn_cmp_by_pid, 0);
+ rd_list_init(&aborted_txns->list, txn_cnt,
+ rd_kafka_aborted_txn_node_destroy);
+ aborted_txns->cnt = txn_cnt;
+ return aborted_txns;
+}
+
+
+/**
+ * @brief Free all resources associated with a
+ * rd_kafka_aborted_txns_t struct.
+ */
+void rd_kafka_aborted_txns_destroy(rd_kafka_aborted_txns_t *aborted_txns) {
+ rd_list_destroy(&aborted_txns->list);
+ rd_avl_destroy(&aborted_txns->avl);
+ rd_free(aborted_txns);
+}
+
+
+/**
+ * @brief Get the abort txn start offsets corresponding to
+ * the specified pid.
+ */
+static RD_INLINE rd_kafka_aborted_txn_start_offsets_t *
+rd_kafka_aborted_txns_offsets_for_pid(rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid) {
+ rd_kafka_aborted_txn_start_offsets_t node;
+ node.pid = pid;
+ return RD_AVL_FIND(&aborted_txns->avl, &node);
+}
+
+
+/**
+ * @brief Get the next aborted transaction start
+ * offset for the specified pid.
+ *
+ * @param increment_idx if true, the offset index will be incremented.
+ * @param max_offset If the next aborted offset is greater than \p max_offset
+ * then the index is not incremented (regardless of
+ * \p increment_idx) and the function returns -1.
+ * This may be the case for empty aborted transactions
+ * that have an ABORT marker but are not listed in the
+ * AbortedTxns list.
+ *
+ *
+ * @returns the start offset or -1 if there is none.
+ */
+static int64_t
+rd_kafka_aborted_txns_next_offset(rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid,
+ rd_bool_t increment_idx,
+ int64_t max_offset) {
+ int64_t abort_start_offset;
+ rd_kafka_aborted_txn_start_offsets_t *node_ptr =
+ rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid);
+
+ if (node_ptr == NULL)
+ return -1;
+
+ if (unlikely(node_ptr->offsets_idx >= rd_list_cnt(&node_ptr->offsets)))
+ return -1;
+
+ abort_start_offset = *(
+ (int64_t *)rd_list_elem(&node_ptr->offsets, node_ptr->offsets_idx));
+
+ if (unlikely(abort_start_offset > max_offset))
+ return -1;
+
+ if (increment_idx)
+ node_ptr->offsets_idx++;
+
+ return abort_start_offset;
+}
+
+
+/**
+ * @brief Get the next aborted transaction start
+ * offset for the specified pid and progress the
+ * current index to the next one.
+ *
+ * @param max_offset If the next aborted offset is greater than \p max_offset
+ * then no offset is popped and the function returns -1.
+ * This may be the case for empty aborted transactions
+ * that have an ABORT marker but are not listed in the
+ * AbortedTxns list.
+ *
+ * @returns the start offset or -1 if there is none.
+ */
+static RD_INLINE int64_t
+rd_kafka_aborted_txns_pop_offset(rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid,
+ int64_t max_offset) {
+ return rd_kafka_aborted_txns_next_offset(aborted_txns, pid, rd_true,
+ max_offset);
+}
+
+
+/**
+ * @brief Get the next aborted transaction start
+ * offset for the specified pid.
+ *
+ * @returns the start offset or -1 if there is none.
+ */
+static RD_INLINE int64_t
+rd_kafka_aborted_txns_get_offset(const rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid) {
+ return rd_kafka_aborted_txns_next_offset(
+ (rd_kafka_aborted_txns_t *)aborted_txns, pid, rd_false, INT64_MAX);
+}
+
+
+/**
+ * @brief Add a transaction start offset corresponding
+ * to the specified pid to the aborted_txns collection.
+ */
+void rd_kafka_aborted_txns_add(rd_kafka_aborted_txns_t *aborted_txns,
+ int64_t pid,
+ int64_t first_offset) {
+ int64_t *v;
+ rd_kafka_aborted_txn_start_offsets_t *node_ptr =
+ rd_kafka_aborted_txns_offsets_for_pid(aborted_txns, pid);
+
+ if (!node_ptr) {
+ node_ptr = rd_malloc(sizeof(*node_ptr));
+ node_ptr->pid = pid;
+ node_ptr->offsets_idx = 0;
+ rd_list_init(&node_ptr->offsets, 0, NULL);
+ /* Each PID list has no more than AbortedTxnCnt elements */
+ rd_list_prealloc_elems(&node_ptr->offsets, sizeof(int64_t),
+ aborted_txns->cnt, 0);
+ RD_AVL_INSERT(&aborted_txns->avl, node_ptr, avl_node);
+ rd_list_add(&aborted_txns->list, node_ptr);
+ }
+
+ v = rd_list_add(&node_ptr->offsets, NULL);
+ *v = first_offset;
+}
+
+
+/**
+ * @brief Sort each of the abort transaction start
+ * offset lists for each pid.
+ */
+void rd_kafka_aborted_txns_sort(rd_kafka_aborted_txns_t *aborted_txns) {
+ int k;
+ for (k = 0; k < rd_list_cnt(&aborted_txns->list); k++) {
+ rd_kafka_aborted_txn_start_offsets_t *el =
+ rd_list_elem(&aborted_txns->list, k);
+ rd_list_sort(&el->offsets, rd_kafka_offset_cmp);
+ }
+}
+
+
+/**
+ * @brief Unit tests for all functions that operate on
+ * rd_kafka_aborted_txns_t
+ */
+int unittest_aborted_txns(void) {
+ rd_kafka_aborted_txns_t *aborted_txns = NULL;
+ int64_t start_offset;
+
+ aborted_txns = rd_kafka_aborted_txns_new(7);
+ rd_kafka_aborted_txns_add(aborted_txns, 1, 42);
+ rd_kafka_aborted_txns_add(aborted_txns, 1, 44);
+ rd_kafka_aborted_txns_add(aborted_txns, 1, 10);
+ rd_kafka_aborted_txns_add(aborted_txns, 1, 100);
+ rd_kafka_aborted_txns_add(aborted_txns, 2, 11);
+ rd_kafka_aborted_txns_add(aborted_txns, 2, 7);
+ rd_kafka_aborted_txns_add(aborted_txns, 1, 3);
+ rd_kafka_aborted_txns_sort(aborted_txns);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
+ RD_UT_ASSERT(3 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 3",
+ start_offset);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
+ RD_UT_ASSERT(3 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 3",
+ start_offset);
+
+ start_offset =
+ rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
+ RD_UT_ASSERT(3 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 3",
+ start_offset);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
+ RD_UT_ASSERT(10 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 10",
+ start_offset);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
+ RD_UT_ASSERT(7 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 7",
+ start_offset);
+
+ rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
+ RD_UT_ASSERT(42 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 42",
+ start_offset);
+
+ rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
+ RD_UT_ASSERT(44 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 44",
+ start_offset);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
+ RD_UT_ASSERT(7 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 7",
+ start_offset);
+
+ rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
+ RD_UT_ASSERT(11 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected 11",
+ start_offset);
+
+ /* error cases */
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 3);
+ RD_UT_ASSERT(-1 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected -1",
+ start_offset);
+
+ rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
+ rd_kafka_aborted_txns_pop_offset(aborted_txns, 1, INT64_MAX);
+ rd_kafka_aborted_txns_pop_offset(aborted_txns, 2, INT64_MAX);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 1);
+ RD_UT_ASSERT(-1 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected -1",
+ start_offset);
+
+ start_offset = rd_kafka_aborted_txns_get_offset(aborted_txns, 2);
+ RD_UT_ASSERT(-1 == start_offset,
+ "queried start offset was %" PRId64
+ ", "
+ "expected -1",
+ start_offset);
+
+ rd_kafka_aborted_txns_destroy(aborted_txns);
+
+ RD_UT_PASS();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c
new file mode 100644
index 000000000..beb36bfac
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_msgset_writer.c
@@ -0,0 +1,1445 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_msg.h"
+#include "rdkafka_msgset.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_header.h"
+#include "rdkafka_lz4.h"
+
+#if WITH_ZSTD
+#include "rdkafka_zstd.h"
+#endif
+
+#include "snappy.h"
+#include "rdvarint.h"
+#include "crc32c.h"
+
+
+/** @brief The maxium ProduceRequestion ApiVersion supported by librdkafka */
+static const int16_t rd_kafka_ProduceRequest_max_version = 7;
+
+
+typedef struct rd_kafka_msgset_writer_s {
+ rd_kafka_buf_t *msetw_rkbuf; /* Backing store buffer (refcounted)*/
+
+ int16_t msetw_ApiVersion; /* ProduceRequest ApiVersion */
+ int msetw_MsgVersion; /* MsgVersion to construct */
+ int msetw_features; /* Protocol features to use */
+ rd_kafka_compression_t msetw_compression; /**< Compression type */
+ int msetw_msgcntmax; /* Max number of messages to send
+ * in a batch. */
+ size_t msetw_messages_len; /* Total size of Messages, with Message
+ * framing but without
+ * MessageSet header */
+ size_t msetw_messages_kvlen; /* Total size of Message keys
+ * and values */
+
+ size_t msetw_MessageSetSize; /* Current MessageSetSize value */
+ size_t msetw_of_MessageSetSize; /* offset of MessageSetSize */
+ size_t msetw_of_start; /* offset of MessageSet */
+
+ int msetw_relative_offsets; /* Bool: use relative offsets */
+
+ /* For MessageSet v2 */
+ int msetw_Attributes; /* MessageSet Attributes */
+ int64_t msetw_MaxTimestamp; /* Maximum timestamp in batch */
+ size_t msetw_of_CRC; /* offset of MessageSet.CRC */
+
+ rd_kafka_msgbatch_t *msetw_batch; /**< Convenience pointer to
+ * rkbuf_u.Produce.batch */
+
+ /* First message information */
+ struct {
+ size_t of; /* rkbuf's first message position */
+ int64_t timestamp;
+ } msetw_firstmsg;
+
+ rd_kafka_pid_t msetw_pid; /**< Idempotent producer's
+ * current Producer Id */
+ rd_kafka_broker_t *msetw_rkb; /* @warning Not a refcounted
+ * reference! */
+ rd_kafka_toppar_t *msetw_rktp; /* @warning Not a refcounted
+ * reference! */
+ rd_kafka_msgq_t *msetw_msgq; /**< Input message queue */
+} rd_kafka_msgset_writer_t;
+
+
+
+/**
+ * @brief Select ApiVersion and MsgVersion to use based on broker's
+ * feature compatibility.
+ *
+ * @returns -1 if a MsgVersion (or ApiVersion) could not be selected, else 0.
+ * @locality broker thread
+ */
+static RD_INLINE int
+rd_kafka_msgset_writer_select_MsgVersion(rd_kafka_msgset_writer_t *msetw) {
+ rd_kafka_broker_t *rkb = msetw->msetw_rkb;
+ rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
+ const int16_t max_ApiVersion = rd_kafka_ProduceRequest_max_version;
+ int16_t min_ApiVersion = 0;
+ int feature;
+ /* Map compression types to required feature and ApiVersion */
+ static const struct {
+ int feature;
+ int16_t ApiVersion;
+ } compr_req[RD_KAFKA_COMPRESSION_NUM] = {
+ [RD_KAFKA_COMPRESSION_LZ4] = {RD_KAFKA_FEATURE_LZ4, 0},
+#if WITH_ZSTD
+ [RD_KAFKA_COMPRESSION_ZSTD] = {RD_KAFKA_FEATURE_ZSTD, 7},
+#endif
+ };
+
+ if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER2)) {
+ min_ApiVersion = 3;
+ msetw->msetw_MsgVersion = 2;
+ msetw->msetw_features |= feature;
+ } else if ((feature = rkb->rkb_features & RD_KAFKA_FEATURE_MSGVER1)) {
+ min_ApiVersion = 2;
+ msetw->msetw_MsgVersion = 1;
+ msetw->msetw_features |= feature;
+ } else {
+ if ((feature =
+ rkb->rkb_features & RD_KAFKA_FEATURE_THROTTLETIME)) {
+ min_ApiVersion = 1;
+ msetw->msetw_features |= feature;
+ } else
+ min_ApiVersion = 0;
+ msetw->msetw_MsgVersion = 0;
+ }
+
+ msetw->msetw_compression = rktp->rktp_rkt->rkt_conf.compression_codec;
+
+ /*
+ * Check that the configured compression type is supported
+ * by both client and broker, else disable compression.
+ */
+ if (msetw->msetw_compression &&
+ (rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_Produce, 0,
+ compr_req[msetw->msetw_compression].ApiVersion, NULL) == -1 ||
+ (compr_req[msetw->msetw_compression].feature &&
+ !(msetw->msetw_rkb->rkb_features &
+ compr_req[msetw->msetw_compression].feature)))) {
+ if (unlikely(
+ rd_interval(&rkb->rkb_suppress.unsupported_compression,
+ /* at most once per day */
+ (rd_ts_t)86400 * 1000 * 1000, 0) > 0))
+ rd_rkb_log(
+ rkb, LOG_NOTICE, "COMPRESSION",
+ "%.*s [%" PRId32
+ "]: "
+ "Broker does not support compression "
+ "type %s: not compressing batch",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_compression2str(msetw->msetw_compression));
+ else
+ rd_rkb_dbg(
+ rkb, MSG, "PRODUCE",
+ "%.*s [%" PRId32
+ "]: "
+ "Broker does not support compression "
+ "type %s: not compressing batch",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_compression2str(msetw->msetw_compression));
+
+ msetw->msetw_compression = RD_KAFKA_COMPRESSION_NONE;
+ } else {
+ /* Broker supports this compression type. */
+ msetw->msetw_features |=
+ compr_req[msetw->msetw_compression].feature;
+
+ if (min_ApiVersion <
+ compr_req[msetw->msetw_compression].ApiVersion)
+ min_ApiVersion =
+ compr_req[msetw->msetw_compression].ApiVersion;
+ }
+
+ /* MsgVersion specific setup. */
+ switch (msetw->msetw_MsgVersion) {
+ case 2:
+ msetw->msetw_relative_offsets = 1; /* OffsetDelta */
+ break;
+ case 1:
+ if (msetw->msetw_compression != RD_KAFKA_COMPRESSION_NONE)
+ msetw->msetw_relative_offsets = 1;
+ break;
+ }
+
+ /* Set the highest ApiVersion supported by us and broker */
+ msetw->msetw_ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_Produce, min_ApiVersion, max_ApiVersion, NULL);
+
+ if (msetw->msetw_ApiVersion == -1) {
+ rd_kafka_msg_t *rkm;
+ /* This will only happen if the broker reports none, or
+ * no matching ProduceRequest versions, which should never
+ * happen. */
+ rd_rkb_log(rkb, LOG_ERR, "PRODUCE",
+ "%.*s [%" PRId32
+ "]: "
+ "No viable ProduceRequest ApiVersions (v%d..%d) "
+ "supported by broker: unable to produce",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, min_ApiVersion,
+ max_ApiVersion);
+
+ /* Back off and retry in 5s */
+ rkm = rd_kafka_msgq_first(msetw->msetw_msgq);
+ rd_assert(rkm);
+ rkm->rkm_u.producer.ts_backoff = rd_clock() + (5 * 1000 * 1000);
+ return -1;
+ }
+
+ /* It should not be possible to get a lower version than requested,
+ * otherwise the logic in this function is buggy. */
+ rd_assert(msetw->msetw_ApiVersion >= min_ApiVersion);
+
+ return 0;
+}
+
+
+/**
+ * @brief Allocate buffer for messageset writer based on a previously set
+ * up \p msetw.
+ *
+ * Allocate iovecs to hold all headers and messages,
+ * and allocate enough space to allow copies of small messages.
+ * The allocated size is the minimum of message.max.bytes
+ * or queued_bytes + msgcntmax * msg_overhead
+ */
+static void rd_kafka_msgset_writer_alloc_buf(rd_kafka_msgset_writer_t *msetw) {
+ rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
+ size_t msg_overhead = 0;
+ size_t hdrsize = 0;
+ size_t msgsetsize = 0;
+ size_t bufsize;
+
+ rd_kafka_assert(NULL, !msetw->msetw_rkbuf);
+
+ /* Calculate worst-case buffer size, produce header size,
+ * message size, etc, this isn't critical but avoids unnecesary
+ * extra allocations. The buffer will grow as needed if we get
+ * this wrong.
+ *
+ * ProduceRequest headers go in one iovec:
+ * ProduceRequest v0..2:
+ * RequiredAcks + Timeout +
+ * [Topic + [Partition + MessageSetSize]]
+ *
+ * ProduceRequest v3:
+ * TransactionalId + RequiredAcks + Timeout +
+ * [Topic + [Partition + MessageSetSize + MessageSet]]
+ */
+
+ /*
+ * ProduceRequest header sizes
+ */
+ switch (msetw->msetw_ApiVersion) {
+ case 7:
+ case 6:
+ case 5:
+ case 4:
+ case 3:
+ /* Add TransactionalId */
+ hdrsize += RD_KAFKAP_STR_SIZE(rk->rk_eos.transactional_id);
+ /* FALLTHRU */
+ case 0:
+ case 1:
+ case 2:
+ hdrsize +=
+ /* RequiredAcks + Timeout + TopicCnt */
+ 2 + 4 + 4 +
+ /* Topic */
+ RD_KAFKAP_STR_SIZE(msetw->msetw_rktp->rktp_rkt->rkt_topic) +
+ /* PartitionCnt + Partition + MessageSetSize */
+ 4 + 4 + 4;
+ msgsetsize += 4; /* MessageSetSize */
+ break;
+
+ default:
+ RD_NOTREACHED();
+ }
+
+ /*
+ * MsgVersion specific sizes:
+ * - (Worst-case) Message overhead: message fields
+ * - MessageSet header size
+ */
+ switch (msetw->msetw_MsgVersion) {
+ case 0:
+ /* MsgVer0 */
+ msg_overhead = RD_KAFKAP_MESSAGE_V0_OVERHEAD;
+ break;
+ case 1:
+ /* MsgVer1 */
+ msg_overhead = RD_KAFKAP_MESSAGE_V1_OVERHEAD;
+ break;
+
+ case 2:
+ /* MsgVer2 uses varints, we calculate for the worst-case. */
+ msg_overhead += RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD;
+
+ /* MessageSet header fields */
+ msgsetsize += 8 /* BaseOffset */ + 4 /* Length */ +
+ 4 /* PartitionLeaderEpoch */ +
+ 1 /* Magic (MsgVersion) */ +
+ 4 /* CRC (CRC32C) */ + 2 /* Attributes */ +
+ 4 /* LastOffsetDelta */ + 8 /* BaseTimestamp */ +
+ 8 /* MaxTimestamp */ + 8 /* ProducerId */ +
+ 2 /* ProducerEpoch */ + 4 /* BaseSequence */ +
+ 4 /* RecordCount */;
+ break;
+
+ default:
+ RD_NOTREACHED();
+ }
+
+ /*
+ * Calculate total buffer size to allocate
+ */
+ bufsize = hdrsize + msgsetsize;
+
+ /* If copying for small payloads is enabled, allocate enough
+ * space for each message to be copied based on this limit.
+ */
+ if (rk->rk_conf.msg_copy_max_size > 0) {
+ size_t queued_bytes = rd_kafka_msgq_size(msetw->msetw_msgq);
+ bufsize +=
+ RD_MIN(queued_bytes, (size_t)rk->rk_conf.msg_copy_max_size *
+ msetw->msetw_msgcntmax);
+ }
+
+ /* Add estimed per-message overhead */
+ bufsize += msg_overhead * msetw->msetw_msgcntmax;
+
+ /* Cap allocation at message.max.bytes */
+ if (bufsize > (size_t)rk->rk_conf.max_msg_size)
+ bufsize = (size_t)rk->rk_conf.max_msg_size;
+
+ /*
+ * Allocate iovecs to hold all headers and messages,
+ * and allocate auxilliery space for message headers, etc.
+ */
+ msetw->msetw_rkbuf =
+ rd_kafka_buf_new_request(msetw->msetw_rkb, RD_KAFKAP_Produce,
+ msetw->msetw_msgcntmax / 2 + 10, bufsize);
+
+ rd_kafka_buf_ApiVersion_set(msetw->msetw_rkbuf, msetw->msetw_ApiVersion,
+ msetw->msetw_features);
+}
+
+
+/**
+ * @brief Write the MessageSet header.
+ * @remark Must only be called for MsgVersion 2
+ */
+static void rd_kafka_msgset_writer_write_MessageSet_v2_header(
+ rd_kafka_msgset_writer_t *msetw) {
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+
+ rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3);
+ rd_kafka_assert(NULL, msetw->msetw_MsgVersion == 2);
+
+ /* BaseOffset (also store the offset to the start of
+ * the messageset header fields) */
+ msetw->msetw_of_start = rd_kafka_buf_write_i64(rkbuf, 0);
+
+ /* Length: updated later */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* PartitionLeaderEpoch (KIP-101) */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* Magic (MsgVersion) */
+ rd_kafka_buf_write_i8(rkbuf, msetw->msetw_MsgVersion);
+
+ /* CRC (CRC32C): updated later.
+ * CRC needs to be done after the entire messageset+messages has
+ * been constructed and the following header fields updated. :(
+ * Save the offset for this position. so it can be udpated later. */
+ msetw->msetw_of_CRC = rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* Attributes: updated later */
+ rd_kafka_buf_write_i16(rkbuf, 0);
+
+ /* LastOffsetDelta: updated later */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* BaseTimestamp: updated later */
+ rd_kafka_buf_write_i64(rkbuf, 0);
+
+ /* MaxTimestamp: updated later */
+ rd_kafka_buf_write_i64(rkbuf, 0);
+
+ /* ProducerId */
+ rd_kafka_buf_write_i64(rkbuf, msetw->msetw_pid.id);
+
+ /* ProducerEpoch */
+ rd_kafka_buf_write_i16(rkbuf, msetw->msetw_pid.epoch);
+
+ /* BaseSequence: updated later in case of Idempotent Producer */
+ rd_kafka_buf_write_i32(rkbuf, -1);
+
+ /* RecordCount: udpated later */
+ rd_kafka_buf_write_i32(rkbuf, 0);
+}
+
+
+/**
+ * @brief Write ProduceRequest headers.
+ * When this function returns the msgset is ready for
+ * writing individual messages.
+ * msetw_MessageSetSize will have been set to the messageset header.
+ */
+static void
+rd_kafka_msgset_writer_write_Produce_header(rd_kafka_msgset_writer_t *msetw) {
+
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+ rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
+ rd_kafka_topic_t *rkt = msetw->msetw_rktp->rktp_rkt;
+
+ /* V3: TransactionalId */
+ if (msetw->msetw_ApiVersion >= 3)
+ rd_kafka_buf_write_kstr(rkbuf, rk->rk_eos.transactional_id);
+
+ /* RequiredAcks */
+ rd_kafka_buf_write_i16(rkbuf, rkt->rkt_conf.required_acks);
+
+ /* Timeout */
+ rd_kafka_buf_write_i32(rkbuf, rkt->rkt_conf.request_timeout_ms);
+
+ /* TopicArrayCnt */
+ rd_kafka_buf_write_i32(rkbuf, 1);
+
+ /* Insert topic */
+ rd_kafka_buf_write_kstr(rkbuf, rkt->rkt_topic);
+
+ /* PartitionArrayCnt */
+ rd_kafka_buf_write_i32(rkbuf, 1);
+
+ /* Partition */
+ rd_kafka_buf_write_i32(rkbuf, msetw->msetw_rktp->rktp_partition);
+
+ /* MessageSetSize: Will be finalized later*/
+ msetw->msetw_of_MessageSetSize = rd_kafka_buf_write_i32(rkbuf, 0);
+
+ if (msetw->msetw_MsgVersion == 2) {
+ /* MessageSet v2 header */
+ rd_kafka_msgset_writer_write_MessageSet_v2_header(msetw);
+ msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V2_SIZE;
+ } else {
+ /* Older MessageSet */
+ msetw->msetw_MessageSetSize = RD_KAFKAP_MSGSET_V0_SIZE;
+ }
+}
+
+
+/**
+ * @brief Initialize a ProduceRequest MessageSet writer for
+ * the given broker and partition.
+ *
+ * A new buffer will be allocated to fit the pending messages in queue.
+ *
+ * @returns the number of messages to enqueue
+ *
+ * @remark This currently constructs the entire ProduceRequest, containing
+ * a single outer MessageSet for a single partition.
+ *
+ * @locality broker thread
+ */
+static int rd_kafka_msgset_writer_init(rd_kafka_msgset_writer_t *msetw,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq,
+ rd_kafka_pid_t pid,
+ uint64_t epoch_base_msgid) {
+ int msgcnt = rd_kafka_msgq_len(rkmq);
+
+ if (msgcnt == 0)
+ return 0;
+
+ memset(msetw, 0, sizeof(*msetw));
+
+ msetw->msetw_rktp = rktp;
+ msetw->msetw_rkb = rkb;
+ msetw->msetw_msgq = rkmq;
+ msetw->msetw_pid = pid;
+
+ /* Max number of messages to send in a batch,
+ * limited by current queue size or configured batch size,
+ * whichever is lower. */
+ msetw->msetw_msgcntmax =
+ RD_MIN(msgcnt, rkb->rkb_rk->rk_conf.batch_num_messages);
+ rd_dassert(msetw->msetw_msgcntmax > 0);
+
+ /* Select MsgVersion to use */
+ if (rd_kafka_msgset_writer_select_MsgVersion(msetw) == -1)
+ return -1;
+
+ /* Allocate backing buffer */
+ rd_kafka_msgset_writer_alloc_buf(msetw);
+
+ /* Construct first part of Produce header + MessageSet header */
+ rd_kafka_msgset_writer_write_Produce_header(msetw);
+
+ /* The current buffer position is now where the first message
+ * is located.
+ * Record the current buffer position so it can be rewound later
+ * in case of compression. */
+ msetw->msetw_firstmsg.of =
+ rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf);
+
+ rd_kafka_msgbatch_init(&msetw->msetw_rkbuf->rkbuf_u.Produce.batch, rktp,
+ pid, epoch_base_msgid);
+ msetw->msetw_batch = &msetw->msetw_rkbuf->rkbuf_u.Produce.batch;
+
+ return msetw->msetw_msgcntmax;
+}
+
+
+
+/**
+ * @brief Copy or link message payload to buffer.
+ */
+static RD_INLINE void
+rd_kafka_msgset_writer_write_msg_payload(rd_kafka_msgset_writer_t *msetw,
+ const rd_kafka_msg_t *rkm,
+ void (*free_cb)(void *)) {
+ const rd_kafka_t *rk = msetw->msetw_rkb->rkb_rk;
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+
+ /* If payload is below the copy limit and there is still
+ * room in the buffer we'll copy the payload to the buffer,
+ * otherwise we push a reference to the memory. */
+ if (rkm->rkm_len <= (size_t)rk->rk_conf.msg_copy_max_size &&
+ rd_buf_write_remains(&rkbuf->rkbuf_buf) > rkm->rkm_len) {
+ rd_kafka_buf_write(rkbuf, rkm->rkm_payload, rkm->rkm_len);
+ if (free_cb)
+ free_cb(rkm->rkm_payload);
+ } else
+ rd_kafka_buf_push(rkbuf, rkm->rkm_payload, rkm->rkm_len,
+ free_cb);
+}
+
+
+/**
+ * @brief Write message headers to buffer.
+ *
+ * @remark The enveloping HeaderCount varint must already have been written.
+ * @returns the number of bytes written to msetw->msetw_rkbuf
+ */
+static size_t
+rd_kafka_msgset_writer_write_msg_headers(rd_kafka_msgset_writer_t *msetw,
+ const rd_kafka_headers_t *hdrs) {
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+ const rd_kafka_header_t *hdr;
+ int i;
+ size_t start_pos = rd_buf_write_pos(&rkbuf->rkbuf_buf);
+ size_t written;
+
+ RD_LIST_FOREACH(hdr, &hdrs->rkhdrs_list, i) {
+ rd_kafka_buf_write_varint(rkbuf, hdr->rkhdr_name_size);
+ rd_kafka_buf_write(rkbuf, hdr->rkhdr_name,
+ hdr->rkhdr_name_size);
+ rd_kafka_buf_write_varint(
+ rkbuf,
+ hdr->rkhdr_value ? (int64_t)hdr->rkhdr_value_size : -1);
+ rd_kafka_buf_write(rkbuf, hdr->rkhdr_value,
+ hdr->rkhdr_value_size);
+ }
+
+ written = rd_buf_write_pos(&rkbuf->rkbuf_buf) - start_pos;
+ rd_dassert(written == hdrs->rkhdrs_ser_size);
+
+ return written;
+}
+
+
+
+/**
+ * @brief Write message to messageset buffer with MsgVersion 0 or 1.
+ * @returns the number of bytes written.
+ */
+static size_t
+rd_kafka_msgset_writer_write_msg_v0_1(rd_kafka_msgset_writer_t *msetw,
+ rd_kafka_msg_t *rkm,
+ int64_t Offset,
+ int8_t MsgAttributes,
+ void (*free_cb)(void *)) {
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+ size_t MessageSize;
+ size_t of_Crc;
+
+ /*
+ * MessageSet's (v0 and v1) per-Message header.
+ */
+
+ /* Offset (only relevant for compressed messages on MsgVersion v1) */
+ rd_kafka_buf_write_i64(rkbuf, Offset);
+
+ /* MessageSize */
+ MessageSize = 4 + 1 + 1 + /* Crc+MagicByte+Attributes */
+ 4 /* KeyLength */ + rkm->rkm_key_len +
+ 4 /* ValueLength */ + rkm->rkm_len;
+
+ if (msetw->msetw_MsgVersion == 1)
+ MessageSize += 8; /* Timestamp i64 */
+
+ rd_kafka_buf_write_i32(rkbuf, (int32_t)MessageSize);
+
+ /*
+ * Message
+ */
+ /* Crc: will be updated later */
+ of_Crc = rd_kafka_buf_write_i32(rkbuf, 0);
+
+ /* Start Crc calculation of all buf writes. */
+ rd_kafka_buf_crc_init(rkbuf);
+
+ /* MagicByte */
+ rd_kafka_buf_write_i8(rkbuf, msetw->msetw_MsgVersion);
+
+ /* Attributes */
+ rd_kafka_buf_write_i8(rkbuf, MsgAttributes);
+
+ /* V1: Timestamp */
+ if (msetw->msetw_MsgVersion == 1)
+ rd_kafka_buf_write_i64(rkbuf, rkm->rkm_timestamp);
+
+ /* Message Key */
+ rd_kafka_buf_write_bytes(rkbuf, rkm->rkm_key, rkm->rkm_key_len);
+
+ /* Write or copy Value/payload */
+ if (rkm->rkm_payload) {
+ rd_kafka_buf_write_i32(rkbuf, (int32_t)rkm->rkm_len);
+ rd_kafka_msgset_writer_write_msg_payload(msetw, rkm, free_cb);
+ } else
+ rd_kafka_buf_write_i32(rkbuf, RD_KAFKAP_BYTES_LEN_NULL);
+
+ /* Finalize Crc */
+ rd_kafka_buf_update_u32(rkbuf, of_Crc,
+ rd_kafka_buf_crc_finalize(rkbuf));
+
+
+ /* Return written message size */
+ return 8 /*Offset*/ + 4 /*MessageSize*/ + MessageSize;
+}
+
+/**
+ * @brief Write message to messageset buffer with MsgVersion 2.
+ * @returns the number of bytes written.
+ */
+static size_t
+rd_kafka_msgset_writer_write_msg_v2(rd_kafka_msgset_writer_t *msetw,
+ rd_kafka_msg_t *rkm,
+ int64_t Offset,
+ int8_t MsgAttributes,
+ void (*free_cb)(void *)) {
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+ size_t MessageSize = 0;
+ char varint_Length[RD_UVARINT_ENC_SIZEOF(int32_t)];
+ char varint_TimestampDelta[RD_UVARINT_ENC_SIZEOF(int64_t)];
+ char varint_OffsetDelta[RD_UVARINT_ENC_SIZEOF(int64_t)];
+ char varint_KeyLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
+ char varint_ValueLen[RD_UVARINT_ENC_SIZEOF(int32_t)];
+ char varint_HeaderCount[RD_UVARINT_ENC_SIZEOF(int32_t)];
+ size_t sz_Length;
+ size_t sz_TimestampDelta;
+ size_t sz_OffsetDelta;
+ size_t sz_KeyLen;
+ size_t sz_ValueLen;
+ size_t sz_HeaderCount;
+ int HeaderCount = 0;
+ size_t HeaderSize = 0;
+
+ if (rkm->rkm_headers) {
+ HeaderCount = rkm->rkm_headers->rkhdrs_list.rl_cnt;
+ HeaderSize = rkm->rkm_headers->rkhdrs_ser_size;
+ }
+
+ /* All varints, except for Length, needs to be pre-built
+ * so that the Length field can be set correctly and thus have
+ * correct varint encoded width. */
+
+ sz_TimestampDelta = rd_uvarint_enc_i64(
+ varint_TimestampDelta, sizeof(varint_TimestampDelta),
+ rkm->rkm_timestamp - msetw->msetw_firstmsg.timestamp);
+ sz_OffsetDelta = rd_uvarint_enc_i64(varint_OffsetDelta,
+ sizeof(varint_OffsetDelta), Offset);
+ sz_KeyLen = rd_uvarint_enc_i32(varint_KeyLen, sizeof(varint_KeyLen),
+ rkm->rkm_key
+ ? (int32_t)rkm->rkm_key_len
+ : (int32_t)RD_KAFKAP_BYTES_LEN_NULL);
+ sz_ValueLen = rd_uvarint_enc_i32(
+ varint_ValueLen, sizeof(varint_ValueLen),
+ rkm->rkm_payload ? (int32_t)rkm->rkm_len
+ : (int32_t)RD_KAFKAP_BYTES_LEN_NULL);
+ sz_HeaderCount =
+ rd_uvarint_enc_i32(varint_HeaderCount, sizeof(varint_HeaderCount),
+ (int32_t)HeaderCount);
+
+ /* Calculate MessageSize without length of Length (added later)
+ * to store it in Length. */
+ MessageSize = 1 /* MsgAttributes */ + sz_TimestampDelta +
+ sz_OffsetDelta + sz_KeyLen + rkm->rkm_key_len +
+ sz_ValueLen + rkm->rkm_len + sz_HeaderCount + HeaderSize;
+
+ /* Length */
+ sz_Length = rd_uvarint_enc_i64(varint_Length, sizeof(varint_Length),
+ MessageSize);
+ rd_kafka_buf_write(rkbuf, varint_Length, sz_Length);
+ MessageSize += sz_Length;
+
+ /* Attributes: The MsgAttributes argument is losely based on MsgVer0
+ * which don't apply for MsgVer2 */
+ rd_kafka_buf_write_i8(rkbuf, 0);
+
+ /* TimestampDelta */
+ rd_kafka_buf_write(rkbuf, varint_TimestampDelta, sz_TimestampDelta);
+
+ /* OffsetDelta */
+ rd_kafka_buf_write(rkbuf, varint_OffsetDelta, sz_OffsetDelta);
+
+ /* KeyLen */
+ rd_kafka_buf_write(rkbuf, varint_KeyLen, sz_KeyLen);
+
+ /* Key (if any) */
+ if (rkm->rkm_key)
+ rd_kafka_buf_write(rkbuf, rkm->rkm_key, rkm->rkm_key_len);
+
+ /* ValueLen */
+ rd_kafka_buf_write(rkbuf, varint_ValueLen, sz_ValueLen);
+
+ /* Write or copy Value/payload */
+ if (rkm->rkm_payload)
+ rd_kafka_msgset_writer_write_msg_payload(msetw, rkm, free_cb);
+
+ /* HeaderCount */
+ rd_kafka_buf_write(rkbuf, varint_HeaderCount, sz_HeaderCount);
+
+ /* Headers array */
+ if (rkm->rkm_headers)
+ rd_kafka_msgset_writer_write_msg_headers(msetw,
+ rkm->rkm_headers);
+
+ /* Return written message size */
+ return MessageSize;
+}
+
+
+/**
+ * @brief Write message to messageset buffer.
+ * @returns the number of bytes written.
+ */
+static size_t rd_kafka_msgset_writer_write_msg(rd_kafka_msgset_writer_t *msetw,
+ rd_kafka_msg_t *rkm,
+ int64_t Offset,
+ int8_t MsgAttributes,
+ void (*free_cb)(void *)) {
+ size_t outlen;
+ size_t (*writer[])(rd_kafka_msgset_writer_t *, rd_kafka_msg_t *,
+ int64_t, int8_t, void (*)(void *)) = {
+ [0] = rd_kafka_msgset_writer_write_msg_v0_1,
+ [1] = rd_kafka_msgset_writer_write_msg_v0_1,
+ [2] = rd_kafka_msgset_writer_write_msg_v2};
+ size_t actual_written;
+ size_t pre_pos;
+
+ if (likely(rkm->rkm_timestamp))
+ MsgAttributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME;
+
+ pre_pos = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf);
+
+ outlen = writer[msetw->msetw_MsgVersion](msetw, rkm, Offset,
+ MsgAttributes, free_cb);
+
+ actual_written =
+ rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) - pre_pos;
+ rd_assert(outlen <=
+ rd_kafka_msg_wire_size(rkm, msetw->msetw_MsgVersion));
+ rd_assert(outlen == actual_written);
+
+ return outlen;
+}
+
+/**
+ * @brief Write as many messages from the given message queue to
+ * the messageset.
+ *
+ * May not write any messages.
+ *
+ * @returns 1 on success or 0 on error.
+ */
+static int rd_kafka_msgset_writer_write_msgq(rd_kafka_msgset_writer_t *msetw,
+ rd_kafka_msgq_t *rkmq) {
+ rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
+ rd_kafka_broker_t *rkb = msetw->msetw_rkb;
+ size_t len = rd_buf_len(&msetw->msetw_rkbuf->rkbuf_buf);
+ size_t max_msg_size =
+ RD_MIN((size_t)msetw->msetw_rkb->rkb_rk->rk_conf.max_msg_size,
+ (size_t)msetw->msetw_rkb->rkb_rk->rk_conf.batch_size);
+ rd_ts_t int_latency_base;
+ rd_ts_t MaxTimestamp = 0;
+ rd_kafka_msg_t *rkm;
+ int msgcnt = 0;
+ const rd_ts_t now = rd_clock();
+
+ /* Internal latency calculation base.
+ * Uses rkm_ts_timeout which is enqueue time + timeout */
+ int_latency_base =
+ now + ((rd_ts_t)rktp->rktp_rkt->rkt_conf.message_timeout_ms * 1000);
+
+ /* Acquire BaseTimestamp from first message. */
+ rkm = TAILQ_FIRST(&rkmq->rkmq_msgs);
+ rd_kafka_assert(NULL, rkm);
+ msetw->msetw_firstmsg.timestamp = rkm->rkm_timestamp;
+
+ rd_kafka_msgbatch_set_first_msg(msetw->msetw_batch, rkm);
+
+ /*
+ * Write as many messages as possible until buffer is full
+ * or limit reached.
+ */
+ do {
+ if (unlikely(msetw->msetw_batch->last_msgid &&
+ msetw->msetw_batch->last_msgid <
+ rkm->rkm_u.producer.msgid)) {
+ rd_rkb_dbg(rkb, MSG, "PRODUCE",
+ "%.*s [%" PRId32
+ "]: "
+ "Reconstructed MessageSet "
+ "(%d message(s), %" PRIusz
+ " bytes, "
+ "MsgIds %" PRIu64 "..%" PRIu64 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, msgcnt, len,
+ msetw->msetw_batch->first_msgid,
+ msetw->msetw_batch->last_msgid);
+ break;
+ }
+
+ /* Check if there is enough space in the current messageset
+ * to add this message.
+ * Since calculating the total size of a request at produce()
+ * time is tricky (we don't know the protocol version or
+ * MsgVersion that will be used), we allow a messageset to
+ * overshoot the message.max.bytes limit by one message to
+ * avoid getting stuck here.
+ * The actual messageset size is enforced by the broker. */
+ if (unlikely(
+ msgcnt == msetw->msetw_msgcntmax ||
+ (msgcnt > 0 && len + rd_kafka_msg_wire_size(
+ rkm, msetw->msetw_MsgVersion) >
+ max_msg_size))) {
+ rd_rkb_dbg(rkb, MSG, "PRODUCE",
+ "%.*s [%" PRId32
+ "]: "
+ "No more space in current MessageSet "
+ "(%i message(s), %" PRIusz " bytes)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, msgcnt, len);
+ break;
+ }
+
+ if (unlikely(rkm->rkm_u.producer.ts_backoff > now)) {
+ /* Stop accumulation when we've reached
+ * a message with a retry backoff in the future */
+ break;
+ }
+
+ /* Move message to buffer's queue */
+ rd_kafka_msgq_deq(rkmq, rkm, 1);
+ rd_kafka_msgq_enq(&msetw->msetw_batch->msgq, rkm);
+
+ msetw->msetw_messages_kvlen += rkm->rkm_len + rkm->rkm_key_len;
+
+ /* Add internal latency metrics */
+ rd_avg_add(&rkb->rkb_avg_int_latency,
+ int_latency_base - rkm->rkm_ts_timeout);
+
+ /* MessageSet v2's .MaxTimestamp field */
+ if (unlikely(MaxTimestamp < rkm->rkm_timestamp))
+ MaxTimestamp = rkm->rkm_timestamp;
+
+ /* Write message to buffer */
+ len += rd_kafka_msgset_writer_write_msg(msetw, rkm, msgcnt, 0,
+ NULL);
+
+ msgcnt++;
+
+ } while ((rkm = TAILQ_FIRST(&rkmq->rkmq_msgs)));
+
+ msetw->msetw_MaxTimestamp = MaxTimestamp;
+
+ /* Idempotent Producer:
+ * When reconstructing a batch to retry make sure
+ * the original message sequence span matches identically
+ * or we can't guarantee exactly-once delivery.
+ * If this check fails we raise a fatal error since
+ * it is unrecoverable and most likely caused by a bug
+ * in the client implementation.
+ * This should not be considered an abortable error for
+ * the transactional producer. */
+ if (msgcnt > 0 && msetw->msetw_batch->last_msgid) {
+ rd_kafka_msg_t *lastmsg;
+
+ lastmsg = rd_kafka_msgq_last(&msetw->msetw_batch->msgq);
+ rd_assert(lastmsg);
+
+ if (unlikely(lastmsg->rkm_u.producer.msgid !=
+ msetw->msetw_batch->last_msgid)) {
+ rd_kafka_set_fatal_error(
+ rkb->rkb_rk, RD_KAFKA_RESP_ERR__INCONSISTENT,
+ "Unable to reconstruct MessageSet "
+ "(currently with %d message(s)) "
+ "with msgid range %" PRIu64 "..%" PRIu64
+ ": "
+ "last message added has msgid %" PRIu64
+ ": "
+ "unable to guarantee consistency",
+ msgcnt, msetw->msetw_batch->first_msgid,
+ msetw->msetw_batch->last_msgid,
+ lastmsg->rkm_u.producer.msgid);
+ return 0;
+ }
+ }
+ return 1;
+}
+
+
+#if WITH_ZLIB
+/**
+ * @brief Compress messageset using gzip/zlib
+ */
+static int rd_kafka_msgset_writer_compress_gzip(rd_kafka_msgset_writer_t *msetw,
+ rd_slice_t *slice,
+ struct iovec *ciov) {
+
+ rd_kafka_broker_t *rkb = msetw->msetw_rkb;
+ rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
+ z_stream strm;
+ size_t len = rd_slice_remains(slice);
+ const void *p;
+ size_t rlen;
+ int r;
+ int comp_level =
+ msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level;
+
+ memset(&strm, 0, sizeof(strm));
+ r = deflateInit2(&strm, comp_level, Z_DEFLATED, 15 + 16, 8,
+ Z_DEFAULT_STRATEGY);
+ if (r != Z_OK) {
+ rd_rkb_log(rkb, LOG_ERR, "GZIP",
+ "Failed to initialize gzip for "
+ "compressing %" PRIusz
+ " bytes in "
+ "topic %.*s [%" PRId32
+ "]: %s (%i): "
+ "sending uncompressed",
+ len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, strm.msg ? strm.msg : "", r);
+ return -1;
+ }
+
+ /* Calculate maximum compressed size and
+ * allocate an output buffer accordingly, being
+ * prefixed with the Message header. */
+ ciov->iov_len = deflateBound(&strm, (uLong)rd_slice_remains(slice));
+ ciov->iov_base = rd_malloc(ciov->iov_len);
+
+ strm.next_out = (void *)ciov->iov_base;
+ strm.avail_out = (uInt)ciov->iov_len;
+
+ /* Iterate through each segment and compress it. */
+ while ((rlen = rd_slice_reader(slice, &p))) {
+
+ strm.next_in = (void *)p;
+ strm.avail_in = (uInt)rlen;
+
+ /* Compress message */
+ if ((r = deflate(&strm, Z_NO_FLUSH)) != Z_OK) {
+ rd_rkb_log(rkb, LOG_ERR, "GZIP",
+ "Failed to gzip-compress "
+ "%" PRIusz " bytes (%" PRIusz
+ " total) for "
+ "topic %.*s [%" PRId32
+ "]: "
+ "%s (%i): "
+ "sending uncompressed",
+ rlen, len,
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ strm.msg ? strm.msg : "", r);
+ deflateEnd(&strm);
+ rd_free(ciov->iov_base);
+ return -1;
+ }
+
+ rd_kafka_assert(rkb->rkb_rk, strm.avail_in == 0);
+ }
+
+ /* Finish the compression */
+ if ((r = deflate(&strm, Z_FINISH)) != Z_STREAM_END) {
+ rd_rkb_log(rkb, LOG_ERR, "GZIP",
+ "Failed to finish gzip compression "
+ " of %" PRIusz
+ " bytes for "
+ "topic %.*s [%" PRId32
+ "]: "
+ "%s (%i): "
+ "sending uncompressed",
+ len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, strm.msg ? strm.msg : "", r);
+ deflateEnd(&strm);
+ rd_free(ciov->iov_base);
+ return -1;
+ }
+
+ ciov->iov_len = strm.total_out;
+
+ /* Deinitialize compression */
+ deflateEnd(&strm);
+
+ return 0;
+}
+#endif
+
+
+#if WITH_SNAPPY
+/**
+ * @brief Compress messageset using Snappy
+ */
+static int
+rd_kafka_msgset_writer_compress_snappy(rd_kafka_msgset_writer_t *msetw,
+ rd_slice_t *slice,
+ struct iovec *ciov) {
+ rd_kafka_broker_t *rkb = msetw->msetw_rkb;
+ rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
+ struct iovec *iov;
+ size_t iov_max, iov_cnt;
+ struct snappy_env senv;
+ size_t len = rd_slice_remains(slice);
+ int r;
+
+ /* Initialize snappy compression environment */
+ rd_kafka_snappy_init_env_sg(&senv, 1 /*iov enable*/);
+
+ /* Calculate maximum compressed size and
+ * allocate an output buffer accordingly. */
+ ciov->iov_len = rd_kafka_snappy_max_compressed_length(len);
+ ciov->iov_base = rd_malloc(ciov->iov_len);
+
+ iov_max = slice->buf->rbuf_segment_cnt;
+ iov = rd_alloca(sizeof(*iov) * iov_max);
+
+ rd_slice_get_iov(slice, iov, &iov_cnt, iov_max, len);
+
+ /* Compress each message */
+ if ((r = rd_kafka_snappy_compress_iov(&senv, iov, iov_cnt, len,
+ ciov)) != 0) {
+ rd_rkb_log(rkb, LOG_ERR, "SNAPPY",
+ "Failed to snappy-compress "
+ "%" PRIusz
+ " bytes for "
+ "topic %.*s [%" PRId32
+ "]: %s: "
+ "sending uncompressed",
+ len, RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_strerror(-r));
+ rd_free(ciov->iov_base);
+ return -1;
+ }
+
+ /* rd_free snappy environment */
+ rd_kafka_snappy_free_env(&senv);
+
+ return 0;
+}
+#endif
+
+/**
+ * @brief Compress messageset using LZ4F
+ */
+static int rd_kafka_msgset_writer_compress_lz4(rd_kafka_msgset_writer_t *msetw,
+ rd_slice_t *slice,
+ struct iovec *ciov) {
+ rd_kafka_resp_err_t err;
+ int comp_level =
+ msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level;
+ err = rd_kafka_lz4_compress(msetw->msetw_rkb,
+ /* Correct or incorrect HC */
+ msetw->msetw_MsgVersion >= 1 ? 1 : 0,
+ comp_level, slice, &ciov->iov_base,
+ &ciov->iov_len);
+ return (err ? -1 : 0);
+}
+
+#if WITH_ZSTD
+/**
+ * @brief Compress messageset using ZSTD
+ */
+static int rd_kafka_msgset_writer_compress_zstd(rd_kafka_msgset_writer_t *msetw,
+ rd_slice_t *slice,
+ struct iovec *ciov) {
+ rd_kafka_resp_err_t err;
+ int comp_level =
+ msetw->msetw_rktp->rktp_rkt->rkt_conf.compression_level;
+ err = rd_kafka_zstd_compress(msetw->msetw_rkb, comp_level, slice,
+ &ciov->iov_base, &ciov->iov_len);
+ return (err ? -1 : 0);
+}
+#endif
+
+/**
+ * @brief Compress the message set.
+ * @param outlenp in: total uncompressed messages size,
+ * out (on success): returns the compressed buffer size.
+ * @returns 0 on success or if -1 if compression failed.
+ * @remark Compression failures are not critical, we'll just send the
+ * the messageset uncompressed.
+ */
+static int rd_kafka_msgset_writer_compress(rd_kafka_msgset_writer_t *msetw,
+ size_t *outlenp) {
+ rd_buf_t *rbuf = &msetw->msetw_rkbuf->rkbuf_buf;
+ rd_slice_t slice;
+ size_t len = *outlenp;
+ struct iovec ciov = RD_ZERO_INIT; /* Compressed output buffer */
+ int r = -1;
+ size_t outlen;
+
+ rd_assert(rd_buf_len(rbuf) >= msetw->msetw_firstmsg.of + len);
+
+ /* Create buffer slice from firstmsg and onwards */
+ r = rd_slice_init(&slice, rbuf, msetw->msetw_firstmsg.of, len);
+ rd_assert(r == 0 || !*"invalid firstmsg position");
+
+ switch (msetw->msetw_compression) {
+#if WITH_ZLIB
+ case RD_KAFKA_COMPRESSION_GZIP:
+ r = rd_kafka_msgset_writer_compress_gzip(msetw, &slice, &ciov);
+ break;
+#endif
+
+#if WITH_SNAPPY
+ case RD_KAFKA_COMPRESSION_SNAPPY:
+ r = rd_kafka_msgset_writer_compress_snappy(msetw, &slice,
+ &ciov);
+ break;
+#endif
+
+ case RD_KAFKA_COMPRESSION_LZ4:
+ r = rd_kafka_msgset_writer_compress_lz4(msetw, &slice, &ciov);
+ break;
+
+#if WITH_ZSTD
+ case RD_KAFKA_COMPRESSION_ZSTD:
+ r = rd_kafka_msgset_writer_compress_zstd(msetw, &slice, &ciov);
+ break;
+#endif
+
+ default:
+ rd_kafka_assert(NULL,
+ !*"notreached: unsupported compression.codec");
+ break;
+ }
+
+ if (r == -1) /* Compression failed, send uncompressed */
+ return -1;
+
+
+ if (unlikely(ciov.iov_len > len)) {
+ /* If the compressed data is larger than the uncompressed size
+ * then throw it away and send as uncompressed. */
+ rd_free(ciov.iov_base);
+ return -1;
+ }
+
+ /* Set compression codec in MessageSet.Attributes */
+ msetw->msetw_Attributes |= msetw->msetw_compression;
+
+ /* Rewind rkbuf to the pre-message checkpoint (firstmsg)
+ * and replace the original message(s) with the compressed payload,
+ * possibly with version dependent enveloping. */
+ rd_buf_write_seek(rbuf, msetw->msetw_firstmsg.of);
+
+ rd_kafka_assert(msetw->msetw_rkb->rkb_rk, ciov.iov_len < INT32_MAX);
+
+ if (msetw->msetw_MsgVersion == 2) {
+ /* MsgVersion 2 has no inner MessageSet header or wrapping
+ * for compressed messages, just the messages back-to-back,
+ * so we can push the compressed memory directly to the
+ * buffer without wrapping it. */
+ rd_buf_push(rbuf, ciov.iov_base, ciov.iov_len, rd_free);
+ outlen = ciov.iov_len;
+
+ } else {
+ /* Older MessageSets envelope/wrap the compressed MessageSet
+ * in an outer Message. */
+ rd_kafka_msg_t rkm = {.rkm_len = ciov.iov_len,
+ .rkm_payload = ciov.iov_base,
+ .rkm_timestamp =
+ msetw->msetw_firstmsg.timestamp};
+ outlen = rd_kafka_msgset_writer_write_msg(
+ msetw, &rkm, 0, msetw->msetw_compression,
+ rd_free /*free for ciov.iov_base*/);
+ }
+
+ *outlenp = outlen;
+
+ return 0;
+}
+
+
+
+/**
+ * @brief Calculate MessageSet v2 CRC (CRC32C) when messageset is complete.
+ */
+static void
+rd_kafka_msgset_writer_calc_crc_v2(rd_kafka_msgset_writer_t *msetw) {
+ int32_t crc;
+ rd_slice_t slice;
+ int r;
+
+ r = rd_slice_init(&slice, &msetw->msetw_rkbuf->rkbuf_buf,
+ msetw->msetw_of_CRC + 4,
+ rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) -
+ msetw->msetw_of_CRC - 4);
+ rd_assert(!r && *"slice_init failed");
+
+ /* CRC32C calculation */
+ crc = rd_slice_crc32c(&slice);
+
+ /* Update CRC at MessageSet v2 CRC offset */
+ rd_kafka_buf_update_i32(msetw->msetw_rkbuf, msetw->msetw_of_CRC, crc);
+}
+
+/**
+ * @brief Finalize MessageSet v2 header fields.
+ */
+static void rd_kafka_msgset_writer_finalize_MessageSet_v2_header(
+ rd_kafka_msgset_writer_t *msetw) {
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+ int msgcnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq);
+
+ rd_kafka_assert(NULL, msgcnt > 0);
+ rd_kafka_assert(NULL, msetw->msetw_ApiVersion >= 3);
+
+ msetw->msetw_MessageSetSize =
+ RD_KAFKAP_MSGSET_V2_SIZE + msetw->msetw_messages_len;
+
+ /* MessageSet.Length is the same as
+ * MessageSetSize minus field widths for FirstOffset+Length */
+ rd_kafka_buf_update_i32(
+ rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Length,
+ (int32_t)msetw->msetw_MessageSetSize - (8 + 4));
+
+ msetw->msetw_Attributes |= RD_KAFKA_MSG_ATTR_CREATE_TIME;
+
+ if (rd_kafka_is_transactional(msetw->msetw_rkb->rkb_rk))
+ msetw->msetw_Attributes |=
+ RD_KAFKA_MSGSET_V2_ATTR_TRANSACTIONAL;
+
+ rd_kafka_buf_update_i16(
+ rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_Attributes,
+ msetw->msetw_Attributes);
+
+ rd_kafka_buf_update_i32(rkbuf,
+ msetw->msetw_of_start +
+ RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta,
+ msgcnt - 1);
+
+ rd_kafka_buf_update_i64(
+ rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp,
+ msetw->msetw_firstmsg.timestamp);
+
+ rd_kafka_buf_update_i64(
+ rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp,
+ msetw->msetw_MaxTimestamp);
+
+ rd_kafka_buf_update_i32(
+ rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_BaseSequence,
+ msetw->msetw_batch->first_seq);
+
+ rd_kafka_buf_update_i32(
+ rkbuf, msetw->msetw_of_start + RD_KAFKAP_MSGSET_V2_OF_RecordCount,
+ msgcnt);
+
+ rd_kafka_msgset_writer_calc_crc_v2(msetw);
+}
+
+
+
+/**
+ * @brief Finalize the MessageSet header, if applicable.
+ */
+static void
+rd_kafka_msgset_writer_finalize_MessageSet(rd_kafka_msgset_writer_t *msetw) {
+ rd_dassert(msetw->msetw_messages_len > 0);
+
+ if (msetw->msetw_MsgVersion == 2)
+ rd_kafka_msgset_writer_finalize_MessageSet_v2_header(msetw);
+ else
+ msetw->msetw_MessageSetSize =
+ RD_KAFKAP_MSGSET_V0_SIZE + msetw->msetw_messages_len;
+
+ /* Update MessageSetSize */
+ rd_kafka_buf_update_i32(msetw->msetw_rkbuf,
+ msetw->msetw_of_MessageSetSize,
+ (int32_t)msetw->msetw_MessageSetSize);
+}
+
+
+/**
+ * @brief Finalize the messageset - call when no more messages are to be
+ * added to the messageset.
+ *
+ * Will compress, update final values, CRCs, etc.
+ *
+ * The messageset writer is destroyed and the buffer is returned
+ * and ready to be transmitted.
+ *
+ * @param MessagetSetSizep will be set to the finalized MessageSetSize
+ *
+ * @returns the buffer to transmit or NULL if there were no messages
+ * in messageset.
+ */
+static rd_kafka_buf_t *
+rd_kafka_msgset_writer_finalize(rd_kafka_msgset_writer_t *msetw,
+ size_t *MessageSetSizep) {
+ rd_kafka_buf_t *rkbuf = msetw->msetw_rkbuf;
+ rd_kafka_toppar_t *rktp = msetw->msetw_rktp;
+ size_t len;
+ int cnt;
+
+ /* No messages added, bail out early. */
+ if (unlikely((cnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq)) ==
+ 0)) {
+ rd_kafka_buf_destroy(rkbuf);
+ return NULL;
+ }
+
+ /* Total size of messages */
+ len = rd_buf_write_pos(&msetw->msetw_rkbuf->rkbuf_buf) -
+ msetw->msetw_firstmsg.of;
+ rd_assert(len > 0);
+ rd_assert(len <= (size_t)rktp->rktp_rkt->rkt_rk->rk_conf.max_msg_size);
+
+ rd_atomic64_add(&rktp->rktp_c.tx_msgs, cnt);
+ rd_atomic64_add(&rktp->rktp_c.tx_msg_bytes,
+ msetw->msetw_messages_kvlen);
+
+ /* Idempotent Producer:
+ * Store request's PID for matching on response
+ * if the instance PID has changed and thus made
+ * the request obsolete. */
+ msetw->msetw_rkbuf->rkbuf_u.Produce.batch.pid = msetw->msetw_pid;
+
+ /* Compress the message set */
+ if (msetw->msetw_compression) {
+ if (rd_kafka_msgset_writer_compress(msetw, &len) == -1)
+ msetw->msetw_compression = 0;
+ }
+
+ msetw->msetw_messages_len = len;
+
+ /* Finalize MessageSet header fields */
+ rd_kafka_msgset_writer_finalize_MessageSet(msetw);
+
+ /* Return final MessageSetSize */
+ *MessageSetSizep = msetw->msetw_MessageSetSize;
+
+ rd_rkb_dbg(msetw->msetw_rkb, MSG, "PRODUCE",
+ "%s [%" PRId32
+ "]: "
+ "Produce MessageSet with %i message(s) (%" PRIusz
+ " bytes, "
+ "ApiVersion %d, MsgVersion %d, MsgId %" PRIu64
+ ", "
+ "BaseSeq %" PRId32 ", %s, %s)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, cnt,
+ msetw->msetw_MessageSetSize, msetw->msetw_ApiVersion,
+ msetw->msetw_MsgVersion, msetw->msetw_batch->first_msgid,
+ msetw->msetw_batch->first_seq,
+ rd_kafka_pid2str(msetw->msetw_pid),
+ msetw->msetw_compression
+ ? rd_kafka_compression2str(msetw->msetw_compression)
+ : "uncompressed");
+
+ rd_kafka_msgq_verify_order(rktp, &msetw->msetw_batch->msgq,
+ msetw->msetw_batch->first_msgid, rd_false);
+
+ rd_kafka_msgbatch_ready_produce(msetw->msetw_batch);
+
+ return rkbuf;
+}
+
+
+/**
+ * @brief Create ProduceRequest containing as many messages from
+ * the toppar's transmit queue as possible, limited by configuration,
+ * size, etc.
+ *
+ * @param rkb broker to create buffer for
+ * @param rktp toppar to transmit messages for
+ * @param MessagetSetSizep will be set to the final MessageSetSize
+ *
+ * @returns the buffer to transmit or NULL if there were no messages
+ * in messageset.
+ *
+ * @locality broker thread
+ */
+rd_kafka_buf_t *rd_kafka_msgset_create_ProduceRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq,
+ const rd_kafka_pid_t pid,
+ uint64_t epoch_base_msgid,
+ size_t *MessageSetSizep) {
+
+ rd_kafka_msgset_writer_t msetw;
+
+ if (rd_kafka_msgset_writer_init(&msetw, rkb, rktp, rkmq, pid,
+ epoch_base_msgid) <= 0)
+ return NULL;
+
+ if (!rd_kafka_msgset_writer_write_msgq(&msetw, msetw.msetw_msgq)) {
+ /* Error while writing messages to MessageSet,
+ * move all messages back on the xmit queue. */
+ rd_kafka_msgq_insert_msgq(
+ rkmq, &msetw.msetw_batch->msgq,
+ rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+ }
+
+ return rd_kafka_msgset_writer_finalize(&msetw, MessageSetSizep);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c
new file mode 100644
index 000000000..ffa6a9d52
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.c
@@ -0,0 +1,1548 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// FIXME: Revise this documentation:
+/**
+ * This file implements the consumer offset storage.
+ * It currently supports local file storage and broker OffsetCommit storage.
+ *
+ * Regardless of commit method (file, broker, ..) this is how it works:
+ * - When rdkafka, or the application, depending on if auto.offset.commit
+ * is enabled or not, calls rd_kafka_offset_store() with an offset to store,
+ * all it does is set rktp->rktp_stored_offset to this value.
+ * This can happen from any thread and is locked by the rktp lock.
+ * - The actual commit/write of the offset to its backing store (filesystem)
+ * is performed by the main rdkafka thread and scheduled at the configured
+ * auto.commit.interval.ms interval.
+ * - The write is performed in the main rdkafka thread (in a blocking manner
+ * for file based offsets) and once the write has
+ * succeeded rktp->rktp_committed_offset is updated to the new value.
+ * - If offset.store.sync.interval.ms is configured the main rdkafka thread
+ * will also make sure to fsync() each offset file accordingly. (file)
+ */
+
+
+#include "rdkafka_int.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_request.h"
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <fcntl.h>
+
+#ifdef _WIN32
+#include <io.h>
+#include <share.h>
+#include <sys/stat.h>
+#include <shlwapi.h>
+#endif
+
+
+/**
+ * Convert an absolute or logical offset to string.
+ */
+const char *rd_kafka_offset2str(int64_t offset) {
+ static RD_TLS char ret[16][32];
+ static RD_TLS int i = 0;
+
+ i = (i + 1) % 16;
+
+ if (offset >= 0)
+ rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64, offset);
+ else if (offset == RD_KAFKA_OFFSET_BEGINNING)
+ return "BEGINNING";
+ else if (offset == RD_KAFKA_OFFSET_END)
+ return "END";
+ else if (offset == RD_KAFKA_OFFSET_STORED)
+ return "STORED";
+ else if (offset == RD_KAFKA_OFFSET_INVALID)
+ return "INVALID";
+ else if (offset <= RD_KAFKA_OFFSET_TAIL_BASE)
+ rd_snprintf(ret[i], sizeof(ret[i]), "TAIL(%lld)",
+ llabs(offset - RD_KAFKA_OFFSET_TAIL_BASE));
+ else
+ rd_snprintf(ret[i], sizeof(ret[i]), "%" PRId64 "?", offset);
+
+ return ret[i];
+}
+
+static void rd_kafka_offset_file_close(rd_kafka_toppar_t *rktp) {
+ if (!rktp->rktp_offset_fp)
+ return;
+
+ fclose(rktp->rktp_offset_fp);
+ rktp->rktp_offset_fp = NULL;
+}
+
+
+#ifndef _WIN32
+/**
+ * Linux version of open callback providing racefree CLOEXEC.
+ */
+int rd_kafka_open_cb_linux(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque) {
+#ifdef O_CLOEXEC
+ return open(pathname, flags | O_CLOEXEC, mode);
+#else
+ return rd_kafka_open_cb_generic(pathname, flags, mode, opaque);
+#endif
+}
+#endif
+
+/**
+ * Fallback version of open_cb NOT providing racefree CLOEXEC,
+ * but setting CLOEXEC after file open (if FD_CLOEXEC is defined).
+ */
+int rd_kafka_open_cb_generic(const char *pathname,
+ int flags,
+ mode_t mode,
+ void *opaque) {
+#ifndef _WIN32
+ int fd;
+ int on = 1;
+ fd = open(pathname, flags, mode);
+ if (fd == -1)
+ return -1;
+#ifdef FD_CLOEXEC
+ fcntl(fd, F_SETFD, FD_CLOEXEC, &on);
+#endif
+ return fd;
+#else
+ int fd;
+ if (_sopen_s(&fd, pathname, flags, _SH_DENYNO, mode) != 0)
+ return -1;
+ return fd;
+#endif
+}
+
+
+static int rd_kafka_offset_file_open(rd_kafka_toppar_t *rktp) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ int fd;
+
+#ifndef _WIN32
+ mode_t mode = 0644;
+#else
+ mode_t mode = _S_IREAD | _S_IWRITE;
+#endif
+ if ((fd = rk->rk_conf.open_cb(rktp->rktp_offset_path, O_CREAT | O_RDWR,
+ mode, rk->rk_conf.opaque)) == -1) {
+ rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
+ "%s [%" PRId32
+ "]: "
+ "Failed to open offset file %s: %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, rktp->rktp_offset_path,
+ rd_strerror(errno));
+ return -1;
+ }
+
+ rktp->rktp_offset_fp =
+#ifndef _WIN32
+ fdopen(fd, "r+");
+#else
+ _fdopen(fd, "r+");
+#endif
+
+ return 0;
+}
+
+
+static int64_t rd_kafka_offset_file_read(rd_kafka_toppar_t *rktp) {
+ char buf[22];
+ char *end;
+ int64_t offset;
+ size_t r;
+
+ if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
+ rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
+ "%s [%" PRId32
+ "]: "
+ "Seek (for read) failed on offset file %s: %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, rktp->rktp_offset_path,
+ rd_strerror(errno));
+ rd_kafka_offset_file_close(rktp);
+ return RD_KAFKA_OFFSET_INVALID;
+ }
+
+ r = fread(buf, 1, sizeof(buf) - 1, rktp->rktp_offset_fp);
+ if (r == 0) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: offset file (%s) is empty",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, rktp->rktp_offset_path);
+ return RD_KAFKA_OFFSET_INVALID;
+ }
+
+ buf[r] = '\0';
+
+ offset = strtoull(buf, &end, 10);
+ if (buf == end) {
+ rd_kafka_op_err(rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
+ "%s [%" PRId32
+ "]: "
+ "Unable to parse offset in %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, rktp->rktp_offset_path);
+ return RD_KAFKA_OFFSET_INVALID;
+ }
+
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: Read offset %" PRId64
+ " from offset "
+ "file (%s)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ offset, rktp->rktp_offset_path);
+
+ return offset;
+}
+
+
+/**
+ * Sync/flush offset file.
+ */
+static int rd_kafka_offset_file_sync(rd_kafka_toppar_t *rktp) {
+ if (!rktp->rktp_offset_fp)
+ return 0;
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "SYNC",
+ "%s [%" PRId32 "]: offset file sync",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
+
+#ifndef _WIN32
+ (void)fflush(rktp->rktp_offset_fp);
+ (void)fsync(fileno(rktp->rktp_offset_fp)); // FIXME
+#else
+ // FIXME
+ // FlushFileBuffers(_get_osfhandle(fileno(rktp->rktp_offset_fp)));
+#endif
+ return 0;
+}
+
+
+/**
+ * Write offset to offset file.
+ *
+ * Locality: toppar's broker thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_offset_file_commit(rd_kafka_toppar_t *rktp) {
+ rd_kafka_topic_t *rkt = rktp->rktp_rkt;
+ int attempt;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int64_t offset = rktp->rktp_stored_pos.offset;
+
+ for (attempt = 0; attempt < 2; attempt++) {
+ char buf[22];
+ int len;
+
+ if (!rktp->rktp_offset_fp)
+ if (rd_kafka_offset_file_open(rktp) == -1)
+ continue;
+
+ if (fseek(rktp->rktp_offset_fp, 0, SEEK_SET) == -1) {
+ rd_kafka_op_err(
+ rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
+ "%s [%" PRId32
+ "]: "
+ "Seek failed on offset file %s: %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, rktp->rktp_offset_path,
+ rd_strerror(errno));
+ err = RD_KAFKA_RESP_ERR__FS;
+ rd_kafka_offset_file_close(rktp);
+ continue;
+ }
+
+ len = rd_snprintf(buf, sizeof(buf), "%" PRId64 "\n", offset);
+
+ if (fwrite(buf, 1, len, rktp->rktp_offset_fp) < 1) {
+ rd_kafka_op_err(
+ rktp->rktp_rkt->rkt_rk, RD_KAFKA_RESP_ERR__FS,
+ "%s [%" PRId32
+ "]: "
+ "Failed to write offset %" PRId64
+ " to "
+ "offset file %s: %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, offset,
+ rktp->rktp_offset_path, rd_strerror(errno));
+ err = RD_KAFKA_RESP_ERR__FS;
+ rd_kafka_offset_file_close(rktp);
+ continue;
+ }
+
+ /* Need to flush before truncate to preserve write ordering */
+ (void)fflush(rktp->rktp_offset_fp);
+
+ /* Truncate file */
+#ifdef _WIN32
+ if (_chsize_s(_fileno(rktp->rktp_offset_fp), len) == -1)
+ ; /* Ignore truncate failures */
+#else
+ if (ftruncate(fileno(rktp->rktp_offset_fp), len) == -1)
+ ; /* Ignore truncate failures */
+#endif
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: wrote offset %" PRId64
+ " to "
+ "file %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, offset,
+ rktp->rktp_offset_path);
+
+ rktp->rktp_committed_pos.offset = offset;
+
+ /* If sync interval is set to immediate we sync right away. */
+ if (rkt->rkt_conf.offset_store_sync_interval_ms == 0)
+ rd_kafka_offset_file_sync(rktp);
+
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+
+ return err;
+}
+
+
+
+/**
+ * Commit a list of offsets asynchronously. Response will be queued on 'replyq'.
+ * Optional \p cb will be set on requesting op.
+ *
+ * Makes a copy of \p offsets (may be NULL for current assignment)
+ */
+static rd_kafka_resp_err_t
+rd_kafka_commit0(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_replyq_t replyq,
+ void (*cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque),
+ void *opaque,
+ const char *reason) {
+ rd_kafka_cgrp_t *rkcg;
+ rd_kafka_op_t *rko;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_COMMIT);
+ rko->rko_u.offset_commit.reason = rd_strdup(reason);
+ rko->rko_replyq = replyq;
+ rko->rko_u.offset_commit.cb = cb;
+ rko->rko_u.offset_commit.opaque = opaque;
+ if (rktp)
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ if (offsets)
+ rko->rko_u.offset_commit.partitions =
+ rd_kafka_topic_partition_list_copy(offsets);
+
+ rd_kafka_q_enq(rkcg->rkcg_ops, rko);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * NOTE: 'offsets' may be NULL, see official documentation.
+ */
+rd_kafka_resp_err_t
+rd_kafka_commit(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ int async) {
+ rd_kafka_cgrp_t *rkcg;
+ rd_kafka_resp_err_t err;
+ rd_kafka_q_t *repq = NULL;
+ rd_kafka_replyq_t rq = RD_KAFKA_NO_REPLYQ;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ if (!async) {
+ repq = rd_kafka_q_new(rk);
+ rq = RD_KAFKA_REPLYQ(repq, 0);
+ }
+
+ err = rd_kafka_commit0(rk, offsets, NULL, rq, NULL, NULL, "manual");
+
+ if (!err && !async)
+ err = rd_kafka_q_wait_result(repq, RD_POLL_INFINITE);
+
+ if (!async)
+ rd_kafka_q_destroy_owner(repq);
+
+ return err;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_commit_message(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ int async) {
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_topic_partition_t *rktpar;
+ rd_kafka_resp_err_t err;
+
+ if (rkmessage->err)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition);
+ rktpar->offset = rkmessage->offset + 1;
+
+ err = rd_kafka_commit(rk, offsets, async);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ return err;
+}
+
+
+
+rd_kafka_resp_err_t
+rd_kafka_commit_queue(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_queue_t *rkqu,
+ void (*cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque),
+ void *opaque) {
+ rd_kafka_q_t *rkq;
+ rd_kafka_resp_err_t err;
+
+ if (!rd_kafka_cgrp_get(rk))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ if (rkqu)
+ rkq = rkqu->rkqu_q;
+ else
+ rkq = rd_kafka_q_new(rk);
+
+ err = rd_kafka_commit0(rk, offsets, NULL, RD_KAFKA_REPLYQ(rkq, 0), cb,
+ opaque, "manual");
+
+ if (!rkqu) {
+ rd_kafka_op_t *rko = rd_kafka_q_pop_serve(
+ rkq, RD_POLL_INFINITE, 0, RD_KAFKA_Q_CB_FORCE_RETURN, NULL,
+ NULL);
+ if (!rko)
+ err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ else {
+ if (cb)
+ cb(rk, rko->rko_err,
+ rko->rko_u.offset_commit.partitions, opaque);
+ err = rko->rko_err;
+ rd_kafka_op_destroy(rko);
+ }
+
+ if (rkqu)
+ rd_kafka_q_destroy(rkq);
+ else
+ rd_kafka_q_destroy_owner(rkq);
+ }
+
+ return err;
+}
+
+
+
+/**
+ * Called when a broker commit is done.
+ *
+ * Locality: toppar handler thread
+ * Locks: none
+ */
+static void
+rd_kafka_offset_broker_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_topic_partition_t *rktpar;
+
+ if (offsets->cnt == 0) {
+ rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
+ "No offsets to commit (commit_cb)");
+ return;
+ }
+
+ rktpar = &offsets->elems[0];
+
+ if (!(rktp =
+ rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false))) {
+ rd_kafka_dbg(rk, TOPIC, "OFFSETCOMMIT",
+ "No local partition found for %s [%" PRId32
+ "] "
+ "while parsing OffsetCommit response "
+ "(offset %" PRId64 ", error \"%s\")",
+ rktpar->topic, rktpar->partition, rktpar->offset,
+ rd_kafka_err2str(rktpar->err));
+ return;
+ }
+
+ if (!err)
+ err = rktpar->err;
+
+ rd_kafka_toppar_offset_commit_result(rktp, err, offsets);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: offset %" PRId64 " %scommitted: %s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rktpar->offset, err ? "not " : "", rd_kafka_err2str(err));
+
+ rktp->rktp_committing_pos.offset = 0;
+
+ rd_kafka_toppar_lock(rktp);
+ if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING)
+ rd_kafka_offset_store_term(rktp, err);
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp);
+}
+
+
+/**
+ * @locks_required rd_kafka_toppar_lock(rktp) MUST be held.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_offset_broker_commit(rd_kafka_toppar_t *rktp, const char *reason) {
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_topic_partition_t *rktpar;
+
+ rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_cgrp != NULL);
+ rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
+ rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE);
+
+ rktp->rktp_committing_pos = rktp->rktp_stored_pos;
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
+ rd_kafka_topic_partition_set_from_fetch_pos(rktpar,
+ rktp->rktp_committing_pos);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSETCMT",
+ "%.*s [%" PRId32 "]: committing %s: %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_committing_pos), reason);
+
+ rd_kafka_commit0(rktp->rktp_rkt->rkt_rk, offsets, rktp,
+ RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
+ rd_kafka_offset_broker_commit_cb, NULL, reason);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ return RD_KAFKA_RESP_ERR__IN_PROGRESS;
+}
+
+
+
+/**
+ * Commit offset to backing store.
+ * This might be an async operation.
+ *
+ * Locality: toppar handler thread
+ */
+static rd_kafka_resp_err_t rd_kafka_offset_commit(rd_kafka_toppar_t *rktp,
+ const char *reason) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: commit: stored %s > committed %s?",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_stored_pos),
+ rd_kafka_fetch_pos2str(rktp->rktp_committed_pos));
+
+ /* Already committed */
+ if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
+ &rktp->rktp_committed_pos) <= 0)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Already committing (for async ops) */
+ if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
+ &rktp->rktp_committing_pos) <= 0)
+ return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
+
+ switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
+ case RD_KAFKA_OFFSET_METHOD_FILE:
+ return rd_kafka_offset_file_commit(rktp);
+ case RD_KAFKA_OFFSET_METHOD_BROKER:
+ return rd_kafka_offset_broker_commit(rktp, reason);
+ default:
+ /* UNREACHABLE */
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+}
+
+
+
+/**
+ * Sync offset backing store. This is only used for METHOD_FILE.
+ *
+ * Locality: rktp's broker thread.
+ */
+rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp) {
+ switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
+ case RD_KAFKA_OFFSET_METHOD_FILE:
+ return rd_kafka_offset_file_sync(rktp);
+ default:
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+}
+
+
+/**
+ * Store offset.
+ * Typically called from application code.
+ *
+ * NOTE: No locks must be held.
+ *
+ * @deprecated Use rd_kafka_offsets_store().
+ */
+rd_kafka_resp_err_t rd_kafka_offset_store(rd_kafka_topic_t *app_rkt,
+ int32_t partition,
+ int64_t offset) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_proper(app_rkt);
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_resp_err_t err;
+ rd_kafka_fetch_pos_t pos = {offset + 1, -1 /*no leader epoch known*/};
+
+ /* Find toppar */
+ rd_kafka_topic_rdlock(rkt);
+ if (!(rktp = rd_kafka_toppar_get(rkt, partition, 0 /*!ua_on_miss*/))) {
+ rd_kafka_topic_rdunlock(rkt);
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ }
+ rd_kafka_topic_rdunlock(rkt);
+
+ err = rd_kafka_offset_store0(rktp, pos, rd_false /* Don't force */,
+ RD_DO_LOCK);
+
+ rd_kafka_toppar_destroy(rktp);
+
+ return err;
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_offsets_store(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *offsets) {
+ int i;
+ int ok_cnt = 0;
+ rd_kafka_resp_err_t last_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (rk->rk_conf.enable_auto_offset_store)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ for (i = 0; i < offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_fetch_pos_t pos = {rktpar->offset, -1};
+
+ rktp =
+ rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
+ if (!rktp) {
+ rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ last_err = rktpar->err;
+ continue;
+ }
+
+ pos.leader_epoch =
+ rd_kafka_topic_partition_get_leader_epoch(rktpar);
+
+ rktpar->err = rd_kafka_offset_store0(
+ rktp, pos, rd_false /* don't force */, RD_DO_LOCK);
+ rd_kafka_toppar_destroy(rktp);
+
+ if (rktpar->err)
+ last_err = rktpar->err;
+ else
+ ok_cnt++;
+ }
+
+ return offsets->cnt > 0 && ok_cnt == 0 ? last_err
+ : RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+rd_kafka_error_t *rd_kafka_offset_store_message(rd_kafka_message_t *rkmessage) {
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_op_t *rko;
+ rd_kafka_resp_err_t err;
+ rd_kafka_msg_t *rkm = (rd_kafka_msg_t *)rkmessage;
+ rd_kafka_fetch_pos_t pos;
+
+ if (rkmessage->err)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Message object must not have an "
+ "error set");
+
+ if (unlikely(!(rko = rd_kafka_message2rko(rkmessage)) ||
+ !(rktp = rko->rko_rktp)))
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Invalid message object, "
+ "not a consumed message");
+
+ pos.offset = rkmessage->offset + 1;
+ pos.leader_epoch = rkm->rkm_u.consumer.leader_epoch;
+ err = rd_kafka_offset_store0(rktp, pos, rd_false /* Don't force */,
+ RD_DO_LOCK);
+
+ if (err == RD_KAFKA_RESP_ERR__STATE)
+ return rd_kafka_error_new(err, "Partition is not assigned");
+ else if (err)
+ return rd_kafka_error_new(err, "Failed to store offset: %s",
+ rd_kafka_err2str(err));
+
+ return NULL;
+}
+
+
+
+/**
+ * Decommissions the use of an offset file for a toppar.
+ * The file content will not be touched and the file will not be removed.
+ */
+static rd_kafka_resp_err_t rd_kafka_offset_file_term(rd_kafka_toppar_t *rktp) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* Sync offset file if the sync is intervalled (> 0) */
+ if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0) {
+ rd_kafka_offset_file_sync(rktp);
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_sync_tmr, 1 /*lock*/);
+ }
+
+
+ rd_kafka_offset_file_close(rktp);
+
+ rd_free(rktp->rktp_offset_path);
+ rktp->rktp_offset_path = NULL;
+
+ return err;
+}
+
+static rd_kafka_op_res_t rd_kafka_offset_reset_op_cb(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_toppar_t *rktp = rko->rko_rktp;
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_offset_reset(rktp, rko->rko_u.offset_reset.broker_id,
+ rko->rko_u.offset_reset.pos, rko->rko_err, "%s",
+ rko->rko_u.offset_reset.reason);
+ rd_kafka_toppar_unlock(rktp);
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+/**
+ * @brief Take action when the offset for a toppar is unusable (due to an
+ * error, or offset is logical).
+ *
+ * @param rktp the toppar
+ * @param broker_id Originating broker, if any, else RD_KAFKA_NODEID_UA.
+ * @param err_pos a logical offset, or offset corresponding to the error.
+ * @param err the error, or RD_KAFKA_RESP_ERR_NO_ERROR if offset is logical.
+ * @param fmt a reason string for logging.
+ *
+ * @locality any. if not main thread, work will be enqued on main thread.
+ * @locks_required toppar_lock() MUST be held
+ */
+void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
+ int32_t broker_id,
+ rd_kafka_fetch_pos_t err_pos,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ rd_kafka_fetch_pos_t pos = {RD_KAFKA_OFFSET_INVALID, -1};
+ const char *extra = "";
+ char reason[512];
+ va_list ap;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(reason, sizeof(reason), fmt, ap);
+ va_end(ap);
+
+ /* Enqueue op for toppar handler thread if we're on the wrong thread. */
+ if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
+ rd_kafka_op_t *rko =
+ rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB);
+ rko->rko_op_cb = rd_kafka_offset_reset_op_cb;
+ rko->rko_err = err;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ rko->rko_u.offset_reset.broker_id = broker_id;
+ rko->rko_u.offset_reset.pos = err_pos;
+ rko->rko_u.offset_reset.reason = rd_strdup(reason);
+ rd_kafka_q_enq(rktp->rktp_ops, rko);
+ return;
+ }
+
+ if (err_pos.offset == RD_KAFKA_OFFSET_INVALID || err)
+ pos.offset = rktp->rktp_rkt->rkt_conf.auto_offset_reset;
+ else
+ pos.offset = err_pos.offset;
+
+ if (pos.offset == RD_KAFKA_OFFSET_INVALID) {
+ /* Error, auto.offset.reset tells us to error out. */
+ if (broker_id != RD_KAFKA_NODEID_UA)
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, broker_id,
+ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp,
+ err_pos.offset, "%s: %s (broker %" PRId32 ")",
+ reason, rd_kafka_err2str(err), broker_id);
+ else
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, broker_id,
+ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET, 0, NULL, rktp,
+ err_pos.offset, "%s: %s", reason,
+ rd_kafka_err2str(err));
+
+ rd_kafka_toppar_set_fetch_state(rktp,
+ RD_KAFKA_TOPPAR_FETCH_NONE);
+
+ } else if (pos.offset == RD_KAFKA_OFFSET_BEGINNING &&
+ rktp->rktp_lo_offset >= 0) {
+ /* Use cached log start from last Fetch if available.
+ * Note: The cached end offset (rktp_ls_offset) can't be
+ * used here since the End offset is a constantly moving
+ * target as new messages are produced. */
+ extra = "cached BEGINNING offset ";
+ pos.offset = rktp->rktp_lo_offset;
+ pos.leader_epoch = -1;
+ rd_kafka_toppar_next_offset_handle(rktp, pos);
+
+ } else {
+ /* Else query cluster for offset */
+ rktp->rktp_query_pos = pos;
+ rd_kafka_toppar_set_fetch_state(
+ rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
+ }
+
+ /* Offset resets due to error are logged since they might have quite
+ * critical impact. For non-errors, or for auto.offset.reset=error,
+ * the reason is simply debug-logged. */
+ if (!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
+ pos.offset == RD_KAFKA_OFFSET_INVALID)
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: offset reset (at %s, broker %" PRId32
+ ") "
+ "to %s%s: %s: %s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(err_pos), broker_id, extra,
+ rd_kafka_fetch_pos2str(pos), reason, rd_kafka_err2str(err));
+ else
+ rd_kafka_log(
+ rktp->rktp_rkt->rkt_rk, LOG_WARNING, "OFFSET",
+ "%s [%" PRId32 "]: offset reset (at %s, broker %" PRId32
+ ") to %s%s: %s: %s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(err_pos), broker_id, extra,
+ rd_kafka_fetch_pos2str(pos), reason, rd_kafka_err2str(err));
+
+ /* Note: If rktp is not delegated to the leader, then low and high
+ offsets will necessarily be cached from the last FETCH request,
+ and so this offset query will never occur in that case for
+ BEGINNING / END logical offsets. */
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
+ rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_pos,
+ err ? 100 : 0);
+}
+
+
+
+/**
+ * @brief Offset validation retry timer
+ */
+static void rd_kafka_offset_validate_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_toppar_t *rktp = arg;
+
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_offset_validate(rktp, "retrying offset validation");
+ rd_kafka_toppar_unlock(rktp);
+}
+
+
+
+/**
+ * @brief OffsetForLeaderEpochResponse handler that
+ * pushes the matched toppar's to the next state.
+ *
+ * @locality rdkafka main thread
+ */
+static void rd_kafka_toppar_handle_OffsetForLeaderEpoch(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_topic_partition_list_t *parts = NULL;
+ rd_kafka_toppar_t *rktp = opaque;
+ rd_kafka_topic_partition_t *rktpar;
+ int64_t end_offset;
+ int32_t end_offset_leader_epoch;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ rd_kafka_toppar_destroy(rktp); /* Drop refcnt */
+ return;
+ }
+
+ err = rd_kafka_handle_OffsetForLeaderEpoch(rk, rkb, err, rkbuf, request,
+ &parts);
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT)
+ err = RD_KAFKA_RESP_ERR__OUTDATED;
+
+ if (unlikely(!err && parts->cnt == 0))
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ if (!err) {
+ err = (&parts->elems[0])->err;
+ }
+
+ if (err) {
+ int actions;
+
+ rd_rkb_dbg(rkb, FETCH, "OFFSETVALID",
+ "%.*s [%" PRId32
+ "]: OffsetForLeaderEpoch requested failed: %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE) {
+ rd_rkb_dbg(rkb, FETCH, "VALIDATE",
+ "%.*s [%" PRId32
+ "]: offset and epoch validation not "
+ "supported by broker: validation skipped",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+ /* Reset the epoch to -1 since it can't be used with
+ * older brokers. */
+ rktp->rktp_next_fetch_start.leader_epoch = -1;
+ rd_kafka_toppar_set_fetch_state(
+ rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE);
+ goto done;
+
+ } else if (err == RD_KAFKA_RESP_ERR__OUTDATED) {
+ /* Partition state has changed, this response
+ * is outdated. */
+ goto done;
+ }
+
+ actions = rd_kafka_err_action(
+ rkb, err, request, RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH,
+ RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH,
+ RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+ RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE,
+ RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+ RD_KAFKA_ERR_ACTION_END);
+
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
+ /* Metadata refresh is ongoing, so force it */
+ rd_kafka_topic_leader_query0(rk, rktp->rktp_rkt, 1,
+ rd_true /* force */);
+
+ if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+ /* No need for refcnt on rktp for timer opaque
+ * since the timer resides on the rktp and will be
+ * stopped on toppar remove. */
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rktp->rktp_validate_tmr, rd_false,
+ 500 * 1000 /* 500ms */,
+ rd_kafka_offset_validate_tmr_cb, rktp);
+ goto done;
+ }
+
+ if (!(actions & RD_KAFKA_ERR_ACTION_REFRESH)) {
+ /* Permanent error */
+ rd_kafka_offset_reset(
+ rktp, rd_kafka_broker_id(rkb),
+ RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID,
+ rktp->rktp_leader_epoch),
+ RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
+ "Unable to validate offset and epoch: %s",
+ rd_kafka_err2str(err));
+ }
+ goto done;
+ }
+
+
+ rktpar = &parts->elems[0];
+ end_offset = rktpar->offset;
+ end_offset_leader_epoch =
+ rd_kafka_topic_partition_get_leader_epoch(rktpar);
+
+ if (end_offset < 0 || end_offset_leader_epoch < 0) {
+ rd_kafka_offset_reset(
+ rktp, rd_kafka_broker_id(rkb), rktp->rktp_next_fetch_start,
+ RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
+ "No epoch found less or equal to "
+ "%s: broker end offset is %" PRId64
+ " (offset leader epoch %" PRId32
+ ")."
+ " Reset using configured policy.",
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
+ end_offset, end_offset_leader_epoch);
+
+ } else if (end_offset < rktp->rktp_next_fetch_start.offset) {
+
+ if (rktp->rktp_rkt->rkt_conf.auto_offset_reset ==
+ RD_KAFKA_OFFSET_INVALID /* auto.offset.reset=error */) {
+ rd_kafka_offset_reset(
+ rktp, rd_kafka_broker_id(rkb),
+ RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID,
+ rktp->rktp_leader_epoch),
+ RD_KAFKA_RESP_ERR__LOG_TRUNCATION,
+ "Partition log truncation detected at %s: "
+ "broker end offset is %" PRId64
+ " (offset leader epoch %" PRId32
+ "). "
+ "Reset to INVALID.",
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
+ end_offset, end_offset_leader_epoch);
+
+ } else {
+ rd_kafka_toppar_unlock(rktp);
+
+ /* Seek to the updated end offset */
+ rd_kafka_fetch_pos_t fetch_pos =
+ rd_kafka_topic_partition_get_fetch_pos(rktpar);
+ fetch_pos.validated = rd_true;
+
+ rd_kafka_toppar_op_seek(rktp, fetch_pos,
+ RD_KAFKA_NO_REPLYQ);
+
+ rd_kafka_topic_partition_list_destroy(parts);
+ rd_kafka_toppar_destroy(rktp);
+
+ return;
+ }
+
+ } else {
+ rd_rkb_dbg(rkb, FETCH, "OFFSETVALID",
+ "%.*s [%" PRId32
+ "]: offset and epoch validation "
+ "succeeded: broker end offset %" PRId64
+ " (offset leader epoch %" PRId32 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, end_offset,
+ end_offset_leader_epoch);
+
+ rktp->rktp_next_fetch_start.leader_epoch =
+ end_offset_leader_epoch;
+ rd_kafka_toppar_set_fetch_state(rktp,
+ RD_KAFKA_TOPPAR_FETCH_ACTIVE);
+ }
+
+done:
+ rd_kafka_toppar_unlock(rktp);
+
+ if (parts)
+ rd_kafka_topic_partition_list_destroy(parts);
+ rd_kafka_toppar_destroy(rktp);
+}
+
+
+static rd_kafka_op_res_t rd_kafka_offset_validate_op_cb(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_toppar_t *rktp = rko->rko_rktp;
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_offset_validate(rktp, "%s", rko->rko_u.offset_reset.reason);
+ rd_kafka_toppar_unlock(rktp);
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+/**
+ * @brief Validate partition epoch and offset (KIP-320).
+ *
+ * @param rktp the toppar
+ * @param err Optional error code that triggered the validation.
+ * @param fmt a reason string for logging.
+ *
+ * @locality any. if not main thread, work will be enqued on main thread.
+ * @locks_required toppar_lock() MUST be held
+ */
+void rd_kafka_offset_validate(rd_kafka_toppar_t *rktp, const char *fmt, ...) {
+ rd_kafka_topic_partition_list_t *parts;
+ rd_kafka_topic_partition_t *rktpar;
+ char reason[512];
+ va_list ap;
+
+ if (rktp->rktp_rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER)
+ return;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(reason, sizeof(reason), fmt, ap);
+ va_end(ap);
+
+ /* Enqueue op for toppar handler thread if we're on the wrong thread. */
+ if (!thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread)) {
+ /* Reuse OP_OFFSET_RESET type */
+ rd_kafka_op_t *rko =
+ rd_kafka_op_new(RD_KAFKA_OP_OFFSET_RESET | RD_KAFKA_OP_CB);
+ rko->rko_op_cb = rd_kafka_offset_validate_op_cb;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ rko->rko_u.offset_reset.reason = rd_strdup(reason);
+ rd_kafka_q_enq(rktp->rktp_ops, rko);
+ return;
+ }
+
+ if (rktp->rktp_fetch_state != RD_KAFKA_TOPPAR_FETCH_ACTIVE &&
+ rktp->rktp_fetch_state !=
+ RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE",
+ "%.*s [%" PRId32
+ "]: skipping offset "
+ "validation in fetch state %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state]);
+ return;
+ }
+
+
+ if (rktp->rktp_leader_id == -1 || !rktp->rktp_leader ||
+ rktp->rktp_leader->rkb_source == RD_KAFKA_INTERNAL) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE",
+ "%.*s [%" PRId32
+ "]: unable to perform offset "
+ "validation: partition leader not available",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+
+ rd_kafka_toppar_set_fetch_state(rktp,
+ RD_KAFKA_TOPPAR_FETCH_ACTIVE);
+ return;
+ }
+
+ /* If the fetch start position does not have an epoch set then
+ * there is no point in doing validation.
+ * This is the case for epoch-less seek()s or epoch-less
+ * committed offsets. */
+ if (rktp->rktp_next_fetch_start.leader_epoch == -1) {
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, FETCH, "VALIDATE",
+ "%.*s [%" PRId32
+ "]: skipping offset "
+ "validation for %s: no leader epoch set",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start));
+ rd_kafka_toppar_set_fetch_state(rktp,
+ RD_KAFKA_TOPPAR_FETCH_ACTIVE);
+ return;
+ }
+
+ rd_kafka_toppar_set_fetch_state(
+ rktp, RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT);
+
+ /* Construct and send OffsetForLeaderEpochRequest */
+ parts = rd_kafka_topic_partition_list_new(1);
+ rktpar = rd_kafka_topic_partition_list_add(
+ parts, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
+ rd_kafka_topic_partition_set_leader_epoch(
+ rktpar, rktp->rktp_next_fetch_start.leader_epoch);
+ rd_kafka_topic_partition_set_current_leader_epoch(
+ rktpar, rktp->rktp_leader_epoch);
+ rd_kafka_toppar_keep(rktp); /* for request opaque */
+
+ rd_rkb_dbg(rktp->rktp_leader, FETCH, "VALIDATE",
+ "%.*s [%" PRId32
+ "]: querying broker for epoch "
+ "validation of %s: %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start), reason);
+
+ rd_kafka_OffsetForLeaderEpochRequest(
+ rktp->rktp_leader, parts, RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
+ rd_kafka_toppar_handle_OffsetForLeaderEpoch, rktp);
+ rd_kafka_topic_partition_list_destroy(parts);
+}
+
+
+/**
+ * Escape any special characters in filename 'in' and write escaped
+ * string to 'out' (of max size out_size).
+ */
+static char *mk_esc_filename(const char *in, char *out, size_t out_size) {
+ const char *s = in;
+ char *o = out;
+
+ while (*s) {
+ const char *esc;
+ size_t esclen;
+
+ switch (*s) {
+ case '/': /* linux */
+ esc = "%2F";
+ esclen = strlen(esc);
+ break;
+ case ':': /* osx, windows */
+ esc = "%3A";
+ esclen = strlen(esc);
+ break;
+ case '\\': /* windows */
+ esc = "%5C";
+ esclen = strlen(esc);
+ break;
+ default:
+ esc = s;
+ esclen = 1;
+ break;
+ }
+
+ if ((size_t)((o + esclen + 1) - out) >= out_size) {
+ /* No more space in output string, truncate. */
+ break;
+ }
+
+ while (esclen-- > 0)
+ *(o++) = *(esc++);
+
+ s++;
+ }
+
+ *o = '\0';
+ return out;
+}
+
+
+static void rd_kafka_offset_sync_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_toppar_t *rktp = arg;
+ rd_kafka_offset_sync(rktp);
+}
+
+
+/**
+ * Prepare a toppar for using an offset file.
+ *
+ * Locality: rdkafka main thread
+ * Locks: toppar_lock(rktp) must be held
+ */
+static void rd_kafka_offset_file_init(rd_kafka_toppar_t *rktp) {
+ char spath[4096 + 1]; /* larger than escfile to avoid warning */
+ const char *path = rktp->rktp_rkt->rkt_conf.offset_store_path;
+ int64_t offset = RD_KAFKA_OFFSET_INVALID;
+
+ if (rd_kafka_path_is_dir(path)) {
+ char tmpfile[1024];
+ char escfile[4096];
+
+ /* Include group.id in filename if configured. */
+ if (!RD_KAFKAP_STR_IS_NULL(rktp->rktp_rkt->rkt_rk->rk_group_id))
+ rd_snprintf(tmpfile, sizeof(tmpfile),
+ "%s-%" PRId32 "-%.*s.offset",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ RD_KAFKAP_STR_PR(
+ rktp->rktp_rkt->rkt_rk->rk_group_id));
+ else
+ rd_snprintf(tmpfile, sizeof(tmpfile),
+ "%s-%" PRId32 ".offset",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition);
+
+ /* Escape filename to make it safe. */
+ mk_esc_filename(tmpfile, escfile, sizeof(escfile));
+
+ rd_snprintf(spath, sizeof(spath), "%s%s%s", path,
+ path[strlen(path) - 1] == '/' ? "" : "/", escfile);
+
+ path = spath;
+ }
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: using offset file %s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ path);
+ rktp->rktp_offset_path = rd_strdup(path);
+
+
+ /* Set up the offset file sync interval. */
+ if (rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms > 0)
+ rd_kafka_timer_start(
+ &rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_sync_tmr,
+ rktp->rktp_rkt->rkt_conf.offset_store_sync_interval_ms *
+ 1000ll,
+ rd_kafka_offset_sync_tmr_cb, rktp);
+
+ if (rd_kafka_offset_file_open(rktp) != -1) {
+ /* Read offset from offset file. */
+ offset = rd_kafka_offset_file_read(rktp);
+ }
+
+ if (offset != RD_KAFKA_OFFSET_INVALID) {
+ /* Start fetching from offset */
+ rktp->rktp_stored_pos.offset = offset;
+ rktp->rktp_committed_pos.offset = offset;
+ rd_kafka_toppar_next_offset_handle(rktp, rktp->rktp_stored_pos);
+
+ } else {
+ /* Offset was not usable: perform offset reset logic */
+ rktp->rktp_committed_pos.offset = RD_KAFKA_OFFSET_INVALID;
+ rd_kafka_offset_reset(
+ rktp, RD_KAFKA_NODEID_UA,
+ RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_INVALID, -1),
+ RD_KAFKA_RESP_ERR__FS, "non-readable offset file");
+ }
+}
+
+
+
+/**
+ * Terminate broker offset store
+ */
+static rd_kafka_resp_err_t
+rd_kafka_offset_broker_term(rd_kafka_toppar_t *rktp) {
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * Prepare a toppar for using broker offset commit (broker 0.8.2 or
+ * later). When using KafkaConsumer (high-level consumer) this
+ * functionality is disabled in favour of the cgrp commits for the
+ * entire set of subscriptions.
+ */
+static void rd_kafka_offset_broker_init(rd_kafka_toppar_t *rktp) {
+ if (!rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk))
+ return;
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA,
+ RD_KAFKA_FETCH_POS(RD_KAFKA_OFFSET_STORED, -1),
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ "query broker for offsets");
+}
+
+
+/**
+ * Terminates toppar's offset store, this is the finalizing step after
+ * offset_store_stop().
+ *
+ * Locks: rd_kafka_toppar_lock() MUST be held.
+ */
+void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_resp_err_t err2;
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "STORETERM",
+ "%s [%" PRId32 "]: offset store terminating",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
+
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;
+
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_commit_tmr, 1 /*lock*/);
+
+ switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
+ case RD_KAFKA_OFFSET_METHOD_FILE:
+ err2 = rd_kafka_offset_file_term(rktp);
+ break;
+ case RD_KAFKA_OFFSET_METHOD_BROKER:
+ err2 = rd_kafka_offset_broker_term(rktp);
+ break;
+ case RD_KAFKA_OFFSET_METHOD_NONE:
+ err2 = RD_KAFKA_RESP_ERR_NO_ERROR;
+ break;
+ }
+
+ /* Prioritize the input error (probably from commit), fall
+ * back on termination error. */
+ if (!err)
+ err = err2;
+
+ rd_kafka_toppar_fetch_stopped(rktp, err);
+}
+
+
+/**
+ * Stop toppar's offset store, committing the final offsets, etc.
+ *
+ * Returns RD_KAFKA_RESP_ERR_NO_ERROR on success,
+ * RD_KAFKA_RESP_ERR__IN_PROGRESS if the term triggered an
+ * async operation (e.g., broker offset commit), or
+ * any other error in case of immediate failure.
+ *
+ * The offset layer will call rd_kafka_offset_store_term() when
+ * the offset management has been fully stopped for this partition.
+ *
+ * Locks: rd_kafka_toppar_lock() MUST be held.
+ */
+rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_OFFSET_STORE))
+ goto done;
+
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING;
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32
+ "]: stopping offset store "
+ "(stored %s, committed %s, EOF offset %" PRId64 ")",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_stored_pos),
+ rd_kafka_fetch_pos2str(rktp->rktp_committed_pos),
+ rktp->rktp_offsets_fin.eof_offset);
+
+ /* Store end offset for empty partitions */
+ if (rktp->rktp_rkt->rkt_rk->rk_conf.enable_auto_offset_store &&
+ rktp->rktp_stored_pos.offset == RD_KAFKA_OFFSET_INVALID &&
+ rktp->rktp_offsets_fin.eof_offset > 0)
+ rd_kafka_offset_store0(
+ rktp,
+ RD_KAFKA_FETCH_POS(rktp->rktp_offsets_fin.eof_offset,
+ rktp->rktp_leader_epoch),
+ rd_true /* force */, RD_DONT_LOCK);
+
+ /* Commit offset to backing store.
+ * This might be an async operation. */
+ if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
+ rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
+ &rktp->rktp_committed_pos) > 0)
+ err = rd_kafka_offset_commit(rktp, "offset store stop");
+
+ /* If stop is in progress (async commit), return now. */
+ if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
+ return err;
+
+done:
+ /* Stop is done */
+ rd_kafka_offset_store_term(rktp, err);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static void rd_kafka_offset_auto_commit_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_toppar_t *rktp = arg;
+ rd_kafka_offset_commit(rktp, "auto commit timer");
+}
+
+void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_toppar_t *rktp = arg;
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "Topic %s [%" PRId32
+ "]: timed offset query for %s in state %s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_query_pos),
+ rd_kafka_fetch_states[rktp->rktp_fetch_state]);
+ rd_kafka_toppar_offset_request(rktp, rktp->rktp_query_pos, 0);
+ rd_kafka_toppar_unlock(rktp);
+}
+
+
+/**
+ * Initialize toppar's offset store.
+ *
+ * Locality: toppar handler thread
+ */
+void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp) {
+ static const char *store_names[] = {"none", "file", "broker"};
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: using offset store method: %s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ store_names[rktp->rktp_rkt->rkt_conf.offset_store_method]);
+
+ /* The committed offset is unknown at this point. */
+ rktp->rktp_committed_pos.offset = RD_KAFKA_OFFSET_INVALID;
+
+ /* Set up the commit interval (for simple consumer). */
+ if (rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk) &&
+ rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms > 0)
+ rd_kafka_timer_start(
+ &rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_commit_tmr,
+ rktp->rktp_rkt->rkt_conf.auto_commit_interval_ms * 1000ll,
+ rd_kafka_offset_auto_commit_tmr_cb, rktp);
+
+ switch (rktp->rktp_rkt->rkt_conf.offset_store_method) {
+ case RD_KAFKA_OFFSET_METHOD_FILE:
+ rd_kafka_offset_file_init(rktp);
+ break;
+ case RD_KAFKA_OFFSET_METHOD_BROKER:
+ rd_kafka_offset_broker_init(rktp);
+ break;
+ case RD_KAFKA_OFFSET_METHOD_NONE:
+ break;
+ default:
+ /* NOTREACHED */
+ return;
+ }
+
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_OFFSET_STORE;
+}
+
+
+/**
+ * Update toppar app_pos and store_offset (if enabled) to the provided
+ * offset and epoch.
+ */
+void rd_kafka_update_app_pos(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_dolock_t do_lock) {
+
+ if (do_lock)
+ rd_kafka_toppar_lock(rktp);
+
+ rktp->rktp_app_pos = pos;
+ if (rk->rk_conf.enable_auto_offset_store)
+ rd_kafka_offset_store0(rktp, pos,
+ /* force: ignore assignment state */
+ rd_true, RD_DONT_LOCK);
+
+ if (do_lock)
+ rd_kafka_toppar_unlock(rktp);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h
new file mode 100644
index 000000000..7b01c8487
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_offset.h
@@ -0,0 +1,135 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_OFFSET_H_
+#define _RDKAFKA_OFFSET_H_
+
+#include "rdkafka_partition.h"
+
+
+const char *rd_kafka_offset2str(int64_t offset);
+
+
+/**
+ * @brief Stores the offset for the toppar 'rktp'.
+ * The actual commit of the offset to backing store is usually
+ * performed at a later time (time or threshold based).
+ *
+ * For the high-level consumer (assign()), this function will reject absolute
+ * offsets if the partition is not currently assigned, unless \p force is set.
+ * This check was added to avoid a race condition where an application
+ * would call offsets_store() after the partitions had been revoked, forcing
+ * a future auto-committer on the next assignment to commit this old offset and
+ * overwriting whatever newer offset was committed by another consumer.
+ *
+ * The \p force flag is useful for internal calls to offset_store0() which
+ * do not need the protection described above.
+ *
+ *
+ * There is one situation where the \p force flag is troublesome:
+ * If the application is using any of the consumer batching APIs,
+ * e.g., consume_batch() or the event-based consumption, then it's possible
+ * that while the batch is being accumulated or the application is picking off
+ * messages from the event a rebalance occurs (in the background) which revokes
+ * the current assignment. This revokal will remove all queued messages, but
+ * not the ones the application already has accumulated in the event object.
+ * Enforcing assignment for store in this state is tricky with a bunch of
+ * corner cases, so instead we let those places forcibly store the offset, but
+ * then in assign() we reset the stored offset to .._INVALID, just like we do
+ * on revoke.
+ * Illustrated (with fix):
+ * 1. ev = rd_kafka_queue_poll();
+ * 2. background rebalance revoke unassigns the partition and sets the
+ * stored offset to _INVALID.
+ * 3. application calls message_next(ev) which forcibly sets the
+ * stored offset.
+ * 4. background rebalance assigns the partition again, but forcibly sets
+ * the stored offset to .._INVALID to provide a clean state.
+ *
+ * @param pos Offset and leader epoch to set, may be an absolute offset
+ * or .._INVALID.
+ * @param force Forcibly set \p offset regardless of assignment state.
+ * @param do_lock Whether to lock the \p rktp or not (already locked by caller).
+ *
+ * See head of rdkafka_offset.c for more information.
+ *
+ * @returns RD_KAFKA_RESP_ERR__STATE if the partition is not currently assigned,
+ * unless \p force is set.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
+rd_kafka_offset_store0(rd_kafka_toppar_t *rktp,
+ const rd_kafka_fetch_pos_t pos,
+ rd_bool_t force,
+ rd_dolock_t do_lock) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (do_lock)
+ rd_kafka_toppar_lock(rktp);
+
+ if (unlikely(!force && !RD_KAFKA_OFFSET_IS_LOGICAL(pos.offset) &&
+ !(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ASSIGNED) &&
+ !rd_kafka_is_simple_consumer(rktp->rktp_rkt->rkt_rk))) {
+ err = RD_KAFKA_RESP_ERR__STATE;
+ } else {
+ rktp->rktp_stored_pos = pos;
+ }
+
+ if (do_lock)
+ rd_kafka_toppar_unlock(rktp);
+
+ return err;
+}
+
+rd_kafka_resp_err_t
+rd_kafka_offset_store(rd_kafka_topic_t *rkt, int32_t partition, int64_t offset);
+
+rd_kafka_resp_err_t rd_kafka_offset_sync(rd_kafka_toppar_t *rktp);
+
+void rd_kafka_offset_store_term(rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err);
+rd_kafka_resp_err_t rd_kafka_offset_store_stop(rd_kafka_toppar_t *rktp);
+void rd_kafka_offset_store_init(rd_kafka_toppar_t *rktp);
+
+void rd_kafka_offset_reset(rd_kafka_toppar_t *rktp,
+ int32_t broker_id,
+ rd_kafka_fetch_pos_t err_pos,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 5, 6);
+
+void rd_kafka_offset_validate(rd_kafka_toppar_t *rktp, const char *fmt, ...)
+ RD_FORMAT(printf, 2, 3);
+
+void rd_kafka_offset_query_tmr_cb(rd_kafka_timers_t *rkts, void *arg);
+
+void rd_kafka_update_app_pos(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_dolock_t do_lock);
+
+#endif /* _RDKAFKA_OFFSET_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c
new file mode 100644
index 000000000..128b8bb40
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.c
@@ -0,0 +1,928 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+
+#include "rdkafka_int.h"
+#include "rdkafka_op.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_proto.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_error.h"
+
+/* Current number of rd_kafka_op_t */
+rd_atomic32_t rd_kafka_op_cnt;
+
+
+const char *rd_kafka_op2str(rd_kafka_op_type_t type) {
+ int skiplen = 6;
+ static const char *names[RD_KAFKA_OP__END] = {
+ [RD_KAFKA_OP_NONE] = "REPLY:NONE",
+ [RD_KAFKA_OP_FETCH] = "REPLY:FETCH",
+ [RD_KAFKA_OP_ERR] = "REPLY:ERR",
+ [RD_KAFKA_OP_CONSUMER_ERR] = "REPLY:CONSUMER_ERR",
+ [RD_KAFKA_OP_DR] = "REPLY:DR",
+ [RD_KAFKA_OP_STATS] = "REPLY:STATS",
+ [RD_KAFKA_OP_OFFSET_COMMIT] = "REPLY:OFFSET_COMMIT",
+ [RD_KAFKA_OP_NODE_UPDATE] = "REPLY:NODE_UPDATE",
+ [RD_KAFKA_OP_XMIT_BUF] = "REPLY:XMIT_BUF",
+ [RD_KAFKA_OP_RECV_BUF] = "REPLY:RECV_BUF",
+ [RD_KAFKA_OP_XMIT_RETRY] = "REPLY:XMIT_RETRY",
+ [RD_KAFKA_OP_FETCH_START] = "REPLY:FETCH_START",
+ [RD_KAFKA_OP_FETCH_STOP] = "REPLY:FETCH_STOP",
+ [RD_KAFKA_OP_SEEK] = "REPLY:SEEK",
+ [RD_KAFKA_OP_PAUSE] = "REPLY:PAUSE",
+ [RD_KAFKA_OP_OFFSET_FETCH] = "REPLY:OFFSET_FETCH",
+ [RD_KAFKA_OP_PARTITION_JOIN] = "REPLY:PARTITION_JOIN",
+ [RD_KAFKA_OP_PARTITION_LEAVE] = "REPLY:PARTITION_LEAVE",
+ [RD_KAFKA_OP_REBALANCE] = "REPLY:REBALANCE",
+ [RD_KAFKA_OP_TERMINATE] = "REPLY:TERMINATE",
+ [RD_KAFKA_OP_COORD_QUERY] = "REPLY:COORD_QUERY",
+ [RD_KAFKA_OP_SUBSCRIBE] = "REPLY:SUBSCRIBE",
+ [RD_KAFKA_OP_ASSIGN] = "REPLY:ASSIGN",
+ [RD_KAFKA_OP_GET_SUBSCRIPTION] = "REPLY:GET_SUBSCRIPTION",
+ [RD_KAFKA_OP_GET_ASSIGNMENT] = "REPLY:GET_ASSIGNMENT",
+ [RD_KAFKA_OP_THROTTLE] = "REPLY:THROTTLE",
+ [RD_KAFKA_OP_NAME] = "REPLY:NAME",
+ [RD_KAFKA_OP_CG_METADATA] = "REPLY:CG_METADATA",
+ [RD_KAFKA_OP_OFFSET_RESET] = "REPLY:OFFSET_RESET",
+ [RD_KAFKA_OP_METADATA] = "REPLY:METADATA",
+ [RD_KAFKA_OP_LOG] = "REPLY:LOG",
+ [RD_KAFKA_OP_WAKEUP] = "REPLY:WAKEUP",
+ [RD_KAFKA_OP_CREATETOPICS] = "REPLY:CREATETOPICS",
+ [RD_KAFKA_OP_DELETETOPICS] = "REPLY:DELETETOPICS",
+ [RD_KAFKA_OP_CREATEPARTITIONS] = "REPLY:CREATEPARTITIONS",
+ [RD_KAFKA_OP_ALTERCONFIGS] = "REPLY:ALTERCONFIGS",
+ [RD_KAFKA_OP_DESCRIBECONFIGS] = "REPLY:DESCRIBECONFIGS",
+ [RD_KAFKA_OP_DELETERECORDS] = "REPLY:DELETERECORDS",
+ [RD_KAFKA_OP_LISTCONSUMERGROUPS] = "REPLY:LISTCONSUMERGROUPS",
+ [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] =
+ "REPLY:DESCRIBECONSUMERGROUPS",
+ [RD_KAFKA_OP_DELETEGROUPS] = "REPLY:DELETEGROUPS",
+ [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] =
+ "REPLY:DELETECONSUMERGROUPOFFSETS",
+ [RD_KAFKA_OP_CREATEACLS] = "REPLY:CREATEACLS",
+ [RD_KAFKA_OP_DESCRIBEACLS] = "REPLY:DESCRIBEACLS",
+ [RD_KAFKA_OP_DELETEACLS] = "REPLY:DELETEACLS",
+ [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] =
+ "REPLY:ALTERCONSUMERGROUPOFFSETS",
+ [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] =
+ "REPLY:LISTCONSUMERGROUPOFFSETS",
+ [RD_KAFKA_OP_ADMIN_FANOUT] = "REPLY:ADMIN_FANOUT",
+ [RD_KAFKA_OP_ADMIN_RESULT] = "REPLY:ADMIN_RESULT",
+ [RD_KAFKA_OP_PURGE] = "REPLY:PURGE",
+ [RD_KAFKA_OP_CONNECT] = "REPLY:CONNECT",
+ [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = "REPLY:OAUTHBEARER_REFRESH",
+ [RD_KAFKA_OP_MOCK] = "REPLY:MOCK",
+ [RD_KAFKA_OP_BROKER_MONITOR] = "REPLY:BROKER_MONITOR",
+ [RD_KAFKA_OP_TXN] = "REPLY:TXN",
+ [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] =
+ "REPLY:GET_REBALANCE_PROTOCOL",
+ [RD_KAFKA_OP_LEADERS] = "REPLY:LEADERS",
+ [RD_KAFKA_OP_BARRIER] = "REPLY:BARRIER",
+ };
+
+ if (type & RD_KAFKA_OP_REPLY)
+ skiplen = 0;
+
+ rd_assert((names[type & ~RD_KAFKA_OP_FLAGMASK] != NULL) ||
+ !*"add OP type to rd_kafka_op2str()");
+ return names[type & ~RD_KAFKA_OP_FLAGMASK] + skiplen;
+}
+
+
+void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko) {
+ fprintf(fp,
+ "%s((rd_kafka_op_t*)%p)\n"
+ "%s Type: %s (0x%x), Version: %" PRId32 "\n",
+ prefix, rko, prefix, rd_kafka_op2str(rko->rko_type),
+ rko->rko_type, rko->rko_version);
+ if (rko->rko_err)
+ fprintf(fp, "%s Error: %s\n", prefix,
+ rd_kafka_err2str(rko->rko_err));
+ if (rko->rko_replyq.q)
+ fprintf(fp, "%s Replyq %p v%d (%s)\n", prefix,
+ rko->rko_replyq.q, rko->rko_replyq.version,
+#if ENABLE_DEVEL
+ rko->rko_replyq._id
+#else
+ ""
+#endif
+ );
+ if (rko->rko_rktp) {
+ fprintf(fp,
+ "%s ((rd_kafka_toppar_t*)%p) "
+ "%s [%" PRId32 "] v%d\n",
+ prefix, rko->rko_rktp,
+ rko->rko_rktp->rktp_rkt->rkt_topic->str,
+ rko->rko_rktp->rktp_partition,
+ rd_atomic32_get(&rko->rko_rktp->rktp_version));
+ }
+
+ switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) {
+ case RD_KAFKA_OP_FETCH:
+ fprintf(fp, "%s Offset: %" PRId64 "\n", prefix,
+ rko->rko_u.fetch.rkm.rkm_offset);
+ break;
+ case RD_KAFKA_OP_CONSUMER_ERR:
+ fprintf(fp, "%s Offset: %" PRId64 "\n", prefix,
+ rko->rko_u.err.offset);
+ /* FALLTHRU */
+ case RD_KAFKA_OP_ERR:
+ fprintf(fp, "%s Reason: %s\n", prefix, rko->rko_u.err.errstr);
+ break;
+ case RD_KAFKA_OP_DR:
+ fprintf(fp, "%s %" PRId32 " messages on %s\n", prefix,
+ rko->rko_u.dr.msgq.rkmq_msg_cnt,
+ rko->rko_u.dr.rkt ? rko->rko_u.dr.rkt->rkt_topic->str
+ : "(n/a)");
+ break;
+ case RD_KAFKA_OP_OFFSET_COMMIT:
+ fprintf(fp, "%s Callback: %p (opaque %p)\n", prefix,
+ rko->rko_u.offset_commit.cb,
+ rko->rko_u.offset_commit.opaque);
+ fprintf(fp, "%s %d partitions\n", prefix,
+ rko->rko_u.offset_commit.partitions
+ ? rko->rko_u.offset_commit.partitions->cnt
+ : 0);
+ break;
+
+ case RD_KAFKA_OP_LOG:
+ fprintf(fp, "%s Log: %%%d %s: %s\n", prefix,
+ rko->rko_u.log.level, rko->rko_u.log.fac,
+ rko->rko_u.log.str);
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type) {
+ rd_kafka_op_t *rko;
+#define _RD_KAFKA_OP_EMPTY \
+ 1234567 /* Special value to be able to assert \
+ * on default-initialized (0) sizes \
+ * if we forgot to add an op type to \
+ * this list. */
+ static const size_t op2size[RD_KAFKA_OP__END] = {
+ [RD_KAFKA_OP_FETCH] = sizeof(rko->rko_u.fetch),
+ [RD_KAFKA_OP_ERR] = sizeof(rko->rko_u.err),
+ [RD_KAFKA_OP_CONSUMER_ERR] = sizeof(rko->rko_u.err),
+ [RD_KAFKA_OP_DR] = sizeof(rko->rko_u.dr),
+ [RD_KAFKA_OP_STATS] = sizeof(rko->rko_u.stats),
+ [RD_KAFKA_OP_OFFSET_COMMIT] = sizeof(rko->rko_u.offset_commit),
+ [RD_KAFKA_OP_NODE_UPDATE] = sizeof(rko->rko_u.node),
+ [RD_KAFKA_OP_XMIT_BUF] = sizeof(rko->rko_u.xbuf),
+ [RD_KAFKA_OP_RECV_BUF] = sizeof(rko->rko_u.xbuf),
+ [RD_KAFKA_OP_XMIT_RETRY] = sizeof(rko->rko_u.xbuf),
+ [RD_KAFKA_OP_FETCH_START] = sizeof(rko->rko_u.fetch_start),
+ [RD_KAFKA_OP_FETCH_STOP] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_SEEK] = sizeof(rko->rko_u.fetch_start),
+ [RD_KAFKA_OP_PAUSE] = sizeof(rko->rko_u.pause),
+ [RD_KAFKA_OP_OFFSET_FETCH] = sizeof(rko->rko_u.offset_fetch),
+ [RD_KAFKA_OP_PARTITION_JOIN] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_PARTITION_LEAVE] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_REBALANCE] = sizeof(rko->rko_u.rebalance),
+ [RD_KAFKA_OP_TERMINATE] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_COORD_QUERY] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_SUBSCRIBE] = sizeof(rko->rko_u.subscribe),
+ [RD_KAFKA_OP_ASSIGN] = sizeof(rko->rko_u.assign),
+ [RD_KAFKA_OP_GET_SUBSCRIPTION] = sizeof(rko->rko_u.subscribe),
+ [RD_KAFKA_OP_GET_ASSIGNMENT] = sizeof(rko->rko_u.assign),
+ [RD_KAFKA_OP_THROTTLE] = sizeof(rko->rko_u.throttle),
+ [RD_KAFKA_OP_NAME] = sizeof(rko->rko_u.name),
+ [RD_KAFKA_OP_CG_METADATA] = sizeof(rko->rko_u.cg_metadata),
+ [RD_KAFKA_OP_OFFSET_RESET] = sizeof(rko->rko_u.offset_reset),
+ [RD_KAFKA_OP_METADATA] = sizeof(rko->rko_u.metadata),
+ [RD_KAFKA_OP_LOG] = sizeof(rko->rko_u.log),
+ [RD_KAFKA_OP_WAKEUP] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_CREATETOPICS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DELETETOPICS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_CREATEPARTITIONS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_ALTERCONFIGS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DESCRIBECONFIGS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DELETERECORDS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_LISTCONSUMERGROUPS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DESCRIBECONSUMERGROUPS] =
+ sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DELETEGROUPS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS] =
+ sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_CREATEACLS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DESCRIBEACLS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_DELETEACLS] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS] =
+ sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS] =
+ sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_ADMIN_FANOUT] = sizeof(rko->rko_u.admin_request),
+ [RD_KAFKA_OP_ADMIN_RESULT] = sizeof(rko->rko_u.admin_result),
+ [RD_KAFKA_OP_PURGE] = sizeof(rko->rko_u.purge),
+ [RD_KAFKA_OP_CONNECT] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_OAUTHBEARER_REFRESH] = _RD_KAFKA_OP_EMPTY,
+ [RD_KAFKA_OP_MOCK] = sizeof(rko->rko_u.mock),
+ [RD_KAFKA_OP_BROKER_MONITOR] = sizeof(rko->rko_u.broker_monitor),
+ [RD_KAFKA_OP_TXN] = sizeof(rko->rko_u.txn),
+ [RD_KAFKA_OP_GET_REBALANCE_PROTOCOL] =
+ sizeof(rko->rko_u.rebalance_protocol),
+ [RD_KAFKA_OP_LEADERS] = sizeof(rko->rko_u.leaders),
+ [RD_KAFKA_OP_BARRIER] = _RD_KAFKA_OP_EMPTY,
+ };
+ size_t tsize = op2size[type & ~RD_KAFKA_OP_FLAGMASK];
+
+ rd_assert(tsize > 0 || !*"add OP type to rd_kafka_op_new0()");
+ if (tsize == _RD_KAFKA_OP_EMPTY)
+ tsize = 0;
+
+ rko = rd_calloc(1, sizeof(*rko) - sizeof(rko->rko_u) + tsize);
+ rko->rko_type = type;
+
+#if ENABLE_DEVEL
+ rko->rko_source = source;
+ rd_atomic32_add(&rd_kafka_op_cnt, 1);
+#endif
+ return rko;
+}
+
+
+void rd_kafka_op_destroy(rd_kafka_op_t *rko) {
+
+ /* Call ops callback with ERR__DESTROY to let it
+ * clean up its resources. */
+ if ((rko->rko_type & RD_KAFKA_OP_CB) && rko->rko_op_cb) {
+ rd_kafka_op_res_t res;
+ rko->rko_err = RD_KAFKA_RESP_ERR__DESTROY;
+ res = rko->rko_op_cb(rko->rko_rk, NULL, rko);
+ rd_assert(res != RD_KAFKA_OP_RES_YIELD);
+ rd_assert(res != RD_KAFKA_OP_RES_KEEP);
+ }
+
+
+ switch (rko->rko_type & ~RD_KAFKA_OP_FLAGMASK) {
+ case RD_KAFKA_OP_FETCH:
+ rd_kafka_msg_destroy(NULL, &rko->rko_u.fetch.rkm);
+ /* Decrease refcount on rkbuf to eventually rd_free shared buf*/
+ if (rko->rko_u.fetch.rkbuf)
+ rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
+
+ break;
+
+ case RD_KAFKA_OP_OFFSET_FETCH:
+ if (rko->rko_u.offset_fetch.partitions &&
+ rko->rko_u.offset_fetch.do_free)
+ rd_kafka_topic_partition_list_destroy(
+ rko->rko_u.offset_fetch.partitions);
+ break;
+
+ case RD_KAFKA_OP_OFFSET_COMMIT:
+ RD_IF_FREE(rko->rko_u.offset_commit.partitions,
+ rd_kafka_topic_partition_list_destroy);
+ RD_IF_FREE(rko->rko_u.offset_commit.reason, rd_free);
+ break;
+
+ case RD_KAFKA_OP_SUBSCRIBE:
+ case RD_KAFKA_OP_GET_SUBSCRIPTION:
+ RD_IF_FREE(rko->rko_u.subscribe.topics,
+ rd_kafka_topic_partition_list_destroy);
+ break;
+
+ case RD_KAFKA_OP_ASSIGN:
+ case RD_KAFKA_OP_GET_ASSIGNMENT:
+ RD_IF_FREE(rko->rko_u.assign.partitions,
+ rd_kafka_topic_partition_list_destroy);
+ break;
+
+ case RD_KAFKA_OP_REBALANCE:
+ RD_IF_FREE(rko->rko_u.rebalance.partitions,
+ rd_kafka_topic_partition_list_destroy);
+ break;
+
+ case RD_KAFKA_OP_NAME:
+ RD_IF_FREE(rko->rko_u.name.str, rd_free);
+ break;
+
+ case RD_KAFKA_OP_CG_METADATA:
+ RD_IF_FREE(rko->rko_u.cg_metadata,
+ rd_kafka_consumer_group_metadata_destroy);
+ break;
+
+ case RD_KAFKA_OP_ERR:
+ case RD_KAFKA_OP_CONSUMER_ERR:
+ RD_IF_FREE(rko->rko_u.err.errstr, rd_free);
+ rd_kafka_msg_destroy(NULL, &rko->rko_u.err.rkm);
+ break;
+
+ break;
+
+ case RD_KAFKA_OP_THROTTLE:
+ RD_IF_FREE(rko->rko_u.throttle.nodename, rd_free);
+ break;
+
+ case RD_KAFKA_OP_STATS:
+ RD_IF_FREE(rko->rko_u.stats.json, rd_free);
+ break;
+
+ case RD_KAFKA_OP_XMIT_RETRY:
+ case RD_KAFKA_OP_XMIT_BUF:
+ case RD_KAFKA_OP_RECV_BUF:
+ if (rko->rko_u.xbuf.rkbuf)
+ rd_kafka_buf_handle_op(rko, RD_KAFKA_RESP_ERR__DESTROY);
+
+ RD_IF_FREE(rko->rko_u.xbuf.rkbuf, rd_kafka_buf_destroy);
+ break;
+
+ case RD_KAFKA_OP_DR:
+ rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq);
+ if (rko->rko_u.dr.do_purge2)
+ rd_kafka_msgq_purge(rko->rko_rk, &rko->rko_u.dr.msgq2);
+
+ if (rko->rko_u.dr.rkt)
+ rd_kafka_topic_destroy0(rko->rko_u.dr.rkt);
+ break;
+
+ case RD_KAFKA_OP_OFFSET_RESET:
+ RD_IF_FREE(rko->rko_u.offset_reset.reason, rd_free);
+ break;
+
+ case RD_KAFKA_OP_METADATA:
+ RD_IF_FREE(rko->rko_u.metadata.md, rd_kafka_metadata_destroy);
+ break;
+
+ case RD_KAFKA_OP_LOG:
+ rd_free(rko->rko_u.log.str);
+ break;
+
+ case RD_KAFKA_OP_ADMIN_FANOUT:
+ rd_assert(rko->rko_u.admin_request.fanout.outstanding == 0);
+ rd_list_destroy(&rko->rko_u.admin_request.fanout.results);
+ case RD_KAFKA_OP_CREATETOPICS:
+ case RD_KAFKA_OP_DELETETOPICS:
+ case RD_KAFKA_OP_CREATEPARTITIONS:
+ case RD_KAFKA_OP_ALTERCONFIGS:
+ case RD_KAFKA_OP_DESCRIBECONFIGS:
+ case RD_KAFKA_OP_DELETERECORDS:
+ case RD_KAFKA_OP_LISTCONSUMERGROUPS:
+ case RD_KAFKA_OP_DESCRIBECONSUMERGROUPS:
+ case RD_KAFKA_OP_DELETEGROUPS:
+ case RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS:
+ case RD_KAFKA_OP_CREATEACLS:
+ case RD_KAFKA_OP_DESCRIBEACLS:
+ case RD_KAFKA_OP_DELETEACLS:
+ case RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS:
+ case RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS:
+ rd_kafka_replyq_destroy(&rko->rko_u.admin_request.replyq);
+ rd_list_destroy(&rko->rko_u.admin_request.args);
+ if (rko->rko_u.admin_request.options.match_consumer_group_states
+ .u.PTR) {
+ rd_list_destroy(rko->rko_u.admin_request.options
+ .match_consumer_group_states.u.PTR);
+ }
+ rd_assert(!rko->rko_u.admin_request.fanout_parent);
+ RD_IF_FREE(rko->rko_u.admin_request.coordkey, rd_free);
+ break;
+
+ case RD_KAFKA_OP_ADMIN_RESULT:
+ rd_list_destroy(&rko->rko_u.admin_result.args);
+ rd_list_destroy(&rko->rko_u.admin_result.results);
+ RD_IF_FREE(rko->rko_u.admin_result.errstr, rd_free);
+ rd_assert(!rko->rko_u.admin_result.fanout_parent);
+ ;
+ break;
+
+ case RD_KAFKA_OP_MOCK:
+ RD_IF_FREE(rko->rko_u.mock.name, rd_free);
+ RD_IF_FREE(rko->rko_u.mock.str, rd_free);
+ break;
+
+ case RD_KAFKA_OP_BROKER_MONITOR:
+ rd_kafka_broker_destroy(rko->rko_u.broker_monitor.rkb);
+ break;
+
+ case RD_KAFKA_OP_TXN:
+ RD_IF_FREE(rko->rko_u.txn.group_id, rd_free);
+ RD_IF_FREE(rko->rko_u.txn.offsets,
+ rd_kafka_topic_partition_list_destroy);
+ RD_IF_FREE(rko->rko_u.txn.cgmetadata,
+ rd_kafka_consumer_group_metadata_destroy);
+ break;
+
+ case RD_KAFKA_OP_LEADERS:
+ rd_assert(!rko->rko_u.leaders.eonce);
+ rd_assert(!rko->rko_u.leaders.replyq.q);
+ RD_IF_FREE(rko->rko_u.leaders.leaders, rd_list_destroy);
+ RD_IF_FREE(rko->rko_u.leaders.partitions,
+ rd_kafka_topic_partition_list_destroy);
+ break;
+
+ default:
+ break;
+ }
+
+ RD_IF_FREE(rko->rko_rktp, rd_kafka_toppar_destroy);
+
+ RD_IF_FREE(rko->rko_error, rd_kafka_error_destroy);
+
+ rd_kafka_replyq_destroy(&rko->rko_replyq);
+
+#if ENABLE_DEVEL
+ if (rd_atomic32_sub(&rd_kafka_op_cnt, 1) < 0)
+ rd_kafka_assert(NULL, !*"rd_kafka_op_cnt < 0");
+#endif
+
+ rd_free(rko);
+}
+
+
+
+/**
+ * Propagate an error event to the application on a specific queue.
+ */
+void rd_kafka_q_op_err(rd_kafka_q_t *rkq,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ va_list ap;
+ char buf[2048];
+ rd_kafka_op_t *rko;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);
+ rko->rko_err = err;
+ rko->rko_u.err.errstr = rd_strdup(buf);
+
+ rd_kafka_q_enq(rkq, rko);
+}
+
+
+
+/**
+ * @brief Enqueue RD_KAFKA_OP_CONSUMER_ERR on \p rkq.
+ *
+ * @param broker_id Is the relevant broker id, or RD_KAFKA_NODEID_UA (-1)
+ * if not applicable.
+ * @param err Error code.
+ * @param version Queue version barrier, or 0 if not applicable.
+ * @param topic May be NULL.
+ * @param rktp May be NULL. Takes precedence over \p topic.
+ * @param offset RD_KAFKA_OFFSET_INVALID if not applicable.
+ *
+ * @sa rd_kafka_q_op_err()
+ */
+void rd_kafka_consumer_err(rd_kafka_q_t *rkq,
+ int32_t broker_id,
+ rd_kafka_resp_err_t err,
+ int32_t version,
+ const char *topic,
+ rd_kafka_toppar_t *rktp,
+ int64_t offset,
+ const char *fmt,
+ ...) {
+ va_list ap;
+ char buf[2048];
+ rd_kafka_op_t *rko;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_CONSUMER_ERR);
+ rko->rko_version = version;
+ rko->rko_err = err;
+ rko->rko_u.err.offset = offset;
+ rko->rko_u.err.errstr = rd_strdup(buf);
+ rko->rko_u.err.rkm.rkm_broker_id = broker_id;
+
+ if (rktp)
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ else if (topic)
+ rko->rko_u.err.rkm.rkm_rkmessage.rkt =
+ (rd_kafka_topic_t *)rd_kafka_lwtopic_new(rkq->rkq_rk,
+ topic);
+
+
+ rd_kafka_q_enq(rkq, rko);
+}
+
+
+/**
+ * Creates a reply op based on 'rko_orig'.
+ * If 'rko_orig' has rko_op_cb set the reply op will be OR:ed with
+ * RD_KAFKA_OP_CB, else the reply type will be the original rko_type OR:ed
+ * with RD_KAFKA_OP_REPLY.
+ */
+rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(rko_orig->rko_type | RD_KAFKA_OP_REPLY);
+ rd_kafka_op_get_reply_version(rko, rko_orig);
+ rko->rko_err = err;
+ if (rko_orig->rko_rktp)
+ rko->rko_rktp = rd_kafka_toppar_keep(rko_orig->rko_rktp);
+
+ return rko;
+}
+
+
+/**
+ * @brief Create new callback op for type \p type
+ */
+rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk,
+ rd_kafka_op_type_t type,
+ rd_kafka_op_cb_t *cb) {
+ rd_kafka_op_t *rko;
+ rko = rd_kafka_op_new(type | RD_KAFKA_OP_CB);
+ rko->rko_op_cb = cb;
+ rko->rko_rk = rk;
+ return rko;
+}
+
+
+/**
+ * @brief Reply to 'rko' re-using the same rko with rko_err
+ * specified by \p err. rko_error is set to NULL.
+ *
+ * If there is no replyq the rko is destroyed.
+ *
+ * @returns 1 if op was enqueued, else 0 and rko is destroyed.
+ */
+int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err) {
+
+ if (!rko->rko_replyq.q) {
+ rd_kafka_op_destroy(rko);
+ return 0;
+ }
+
+ rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY);
+ rko->rko_err = err;
+ rko->rko_error = NULL;
+
+ return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
+}
+
+
+/**
+ * @brief Reply to 'rko' re-using the same rko with rko_error specified
+ * by \p error (may be NULL) and rko_err set to the corresponding
+ * error code. Assumes ownership of \p error.
+ *
+ * If there is no replyq the rko is destroyed.
+ *
+ * @returns 1 if op was enqueued, else 0 and rko is destroyed.
+ */
+int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error) {
+
+ if (!rko->rko_replyq.q) {
+ RD_IF_FREE(error, rd_kafka_error_destroy);
+ rd_kafka_op_destroy(rko);
+ return 0;
+ }
+
+ rko->rko_type |= (rko->rko_op_cb ? RD_KAFKA_OP_CB : RD_KAFKA_OP_REPLY);
+ rko->rko_err =
+ error ? rd_kafka_error_code(error) : RD_KAFKA_RESP_ERR_NO_ERROR;
+ rko->rko_error = error;
+
+ return rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
+}
+
+
+/**
+ * @brief Send request to queue, wait for response.
+ *
+ * @returns response on success or NULL if destq is disabled.
+ */
+rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq,
+ rd_kafka_q_t *recvq,
+ rd_kafka_op_t *rko,
+ int timeout_ms) {
+ rd_kafka_op_t *reply;
+
+ /* Indicate to destination where to send reply. */
+ rd_kafka_op_set_replyq(rko, recvq, NULL);
+
+ /* Enqueue op */
+ if (!rd_kafka_q_enq(destq, rko))
+ return NULL;
+
+ /* Wait for reply */
+ reply = rd_kafka_q_pop(recvq, rd_timeout_us(timeout_ms), 0);
+
+ /* May be NULL for timeout */
+ return reply;
+}
+
+/**
+ * Send request to queue, wait for response.
+ * Creates a temporary reply queue.
+ */
+rd_kafka_op_t *
+rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms) {
+ rd_kafka_q_t *recvq;
+ rd_kafka_op_t *reply;
+
+ recvq = rd_kafka_q_new(destq->rkq_rk);
+
+ reply = rd_kafka_op_req0(destq, recvq, rko, timeout_ms);
+
+ rd_kafka_q_destroy_owner(recvq);
+
+ return reply;
+}
+
+
+/**
+ * Send simple type-only request to queue, wait for response.
+ */
+rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(type);
+ return rd_kafka_op_req(destq, rko, RD_POLL_INFINITE);
+}
+
+
+/**
+ * Destroys the rko and returns its err.
+ */
+rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ if (rko) {
+ err = rko->rko_err;
+ rd_kafka_op_destroy(rko);
+ }
+ return err;
+}
+
+
+/**
+ * Destroys the rko and returns its error object or NULL if no error.
+ */
+rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko) {
+ if (rko) {
+ rd_kafka_error_t *error = rko->rko_error;
+ rko->rko_error = NULL;
+ rd_kafka_op_destroy(rko);
+ return error;
+ }
+
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Operation timed out");
+}
+
+
+/**
+ * Call op callback
+ */
+rd_kafka_op_res_t
+rd_kafka_op_call(rd_kafka_t *rk, rd_kafka_q_t *rkq, rd_kafka_op_t *rko) {
+ rd_kafka_op_res_t res;
+ rd_assert(rko->rko_op_cb);
+ res = rko->rko_op_cb(rk, rkq, rko);
+ if (unlikely(res == RD_KAFKA_OP_RES_YIELD || rd_kafka_yield_thread))
+ return RD_KAFKA_OP_RES_YIELD;
+ if (res != RD_KAFKA_OP_RES_KEEP)
+ rko->rko_op_cb = NULL;
+ return res;
+}
+
+
+/**
+ * @brief Creates a new RD_KAFKA_OP_FETCH op representing a
+ * control message. The rkm_flags property is set to
+ * RD_KAFKA_MSG_F_CONTROL.
+ */
+rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp,
+ int32_t version,
+ rd_kafka_buf_t *rkbuf,
+ int64_t offset) {
+ rd_kafka_msg_t *rkm;
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new_fetch_msg(&rkm, rktp, version, rkbuf, offset, 0,
+ NULL, 0, NULL);
+
+ rkm->rkm_flags |= RD_KAFKA_MSG_F_CONTROL;
+
+ return rko;
+}
+
+/**
+ * @brief Creates a new RD_KAFKA_OP_FETCH op and sets up the
+ * embedded message according to the parameters.
+ *
+ * @param rkmp will be set to the embedded rkm in the rko (for convenience)
+ * @param offset may be updated later if relative offset.
+ */
+rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp,
+ rd_kafka_toppar_t *rktp,
+ int32_t version,
+ rd_kafka_buf_t *rkbuf,
+ int64_t offset,
+ size_t key_len,
+ const void *key,
+ size_t val_len,
+ const void *val) {
+ rd_kafka_msg_t *rkm;
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH);
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ rko->rko_version = version;
+ rkm = &rko->rko_u.fetch.rkm;
+ *rkmp = rkm;
+
+ /* Since all the ops share the same payload buffer
+ * a refcnt is used on the rkbuf that makes sure all
+ * consume_cb() will have been
+ * called for each of these ops before the rkbuf
+ * and its memory backing buffers are freed. */
+ rko->rko_u.fetch.rkbuf = rkbuf;
+ rd_kafka_buf_keep(rkbuf);
+
+ rkm->rkm_offset = offset;
+
+ rkm->rkm_key = (void *)key;
+ rkm->rkm_key_len = key_len;
+
+ rkm->rkm_payload = (void *)val;
+ rkm->rkm_len = val_len;
+ rko->rko_len = (int32_t)rkm->rkm_len;
+
+ rkm->rkm_partition = rktp->rktp_partition;
+
+ /* Persistence status is always PERSISTED for consumed messages
+ * since we managed to read the message. */
+ rkm->rkm_status = RD_KAFKA_MSG_STATUS_PERSISTED;
+
+ return rko;
+}
+
+
+/**
+ * Enqueue ERR__THROTTLE op, if desired.
+ */
+void rd_kafka_op_throttle_time(rd_kafka_broker_t *rkb,
+ rd_kafka_q_t *rkq,
+ int throttle_time) {
+ rd_kafka_op_t *rko;
+
+ if (unlikely(throttle_time > 0))
+ rd_avg_add(&rkb->rkb_avg_throttle, throttle_time);
+
+ /* We send throttle events when:
+ * - throttle_time > 0
+ * - throttle_time == 0 and last throttle_time > 0
+ */
+ if (!rkb->rkb_rk->rk_conf.throttle_cb ||
+ (!throttle_time &&
+ !rd_atomic32_get(&rkb->rkb_rk->rk_last_throttle)))
+ return;
+
+ rd_atomic32_set(&rkb->rkb_rk->rk_last_throttle, throttle_time);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_THROTTLE);
+ rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_HIGH);
+ rko->rko_u.throttle.nodename = rd_strdup(rkb->rkb_nodename);
+ rko->rko_u.throttle.nodeid = rkb->rkb_nodeid;
+ rko->rko_u.throttle.throttle_time = throttle_time;
+ rd_kafka_q_enq(rkq, rko);
+}
+
+
+/**
+ * @brief Handle standard op types.
+ */
+rd_kafka_op_res_t rd_kafka_op_handle_std(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ int cb_type) {
+ if (cb_type == RD_KAFKA_Q_CB_FORCE_RETURN)
+ return RD_KAFKA_OP_RES_PASS;
+ else if (unlikely(rd_kafka_op_is_ctrl_msg(rko))) {
+ /* Control messages must not be exposed to the application
+ * but we need to store their offsets. */
+ rd_kafka_fetch_op_app_prepare(rk, rko);
+ return RD_KAFKA_OP_RES_HANDLED;
+ } else if (cb_type != RD_KAFKA_Q_CB_EVENT &&
+ rko->rko_type & RD_KAFKA_OP_CB)
+ return rd_kafka_op_call(rk, rkq, rko);
+ else if (rko->rko_type == RD_KAFKA_OP_RECV_BUF) /* Handle Response */
+ rd_kafka_buf_handle_op(rko, rko->rko_err);
+ else if (cb_type != RD_KAFKA_Q_CB_RETURN &&
+ rko->rko_type & RD_KAFKA_OP_REPLY &&
+ rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED; /* dest queue was
+ * probably disabled. */
+ else
+ return RD_KAFKA_OP_RES_PASS;
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Attempt to handle op using its queue's serve callback,
+ * or the passed callback, or op_handle_std(), else do nothing.
+ *
+ * @param rkq is \p rko's queue (which it was unlinked from) with rkq_lock
+ * being held. Callback may re-enqueue the op on this queue
+ * and return YIELD.
+ *
+ * @returns HANDLED if op was handled (and destroyed), PASS if not,
+ * or YIELD if op was handled (maybe destroyed or re-enqueued)
+ * and caller must propagate yield upwards (cancel and return).
+ */
+rd_kafka_op_res_t rd_kafka_op_handle(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque,
+ rd_kafka_q_serve_cb_t *callback) {
+ rd_kafka_op_res_t res;
+
+ if (rko->rko_serve) {
+ callback = rko->rko_serve;
+ opaque = rko->rko_serve_opaque;
+ rko->rko_serve = NULL;
+ rko->rko_serve_opaque = NULL;
+ }
+
+ res = rd_kafka_op_handle_std(rk, rkq, rko, cb_type);
+ if (res == RD_KAFKA_OP_RES_KEEP) {
+ /* Op was handled but must not be destroyed. */
+ return res;
+ }
+ if (res == RD_KAFKA_OP_RES_HANDLED) {
+ rd_kafka_op_destroy(rko);
+ return res;
+ } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD))
+ return res;
+
+ if (callback)
+ res = callback(rk, rkq, rko, cb_type, opaque);
+
+ return res;
+}
+
+
+/**
+ * @brief Prepare passing message to application.
+ * This must be called just prior to passing/returning a consumed
+ * message to the application.
+ *
+ * Performs:
+ * - Store offset for fetched message + 1.
+ * - Updates the application offset (rktp_app_offset).
+ *
+ * @locks rktp_lock and rk_lock MUST NOT be held
+ */
+void rd_kafka_fetch_op_app_prepare(rd_kafka_t *rk, rd_kafka_op_t *rko) {
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_fetch_pos_t pos;
+
+ if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH || rko->rko_err))
+ return;
+
+ rktp = rko->rko_rktp;
+
+ if (unlikely(!rk))
+ rk = rktp->rktp_rkt->rkt_rk;
+
+ pos.offset = rko->rko_u.fetch.rkm.rkm_rkmessage.offset + 1;
+ pos.leader_epoch = rko->rko_u.fetch.rkm.rkm_u.consumer.leader_epoch;
+
+ rd_kafka_update_app_pos(rk, rktp, pos, RD_DO_LOCK);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h
new file mode 100644
index 000000000..57c07491a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_op.h
@@ -0,0 +1,778 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_OP_H_
+#define _RDKAFKA_OP_H_
+
+
+#include "rdkafka_msg.h"
+#include "rdkafka_timer.h"
+#include "rdkafka_admin.h"
+
+
+/* Forward declarations */
+typedef struct rd_kafka_q_s rd_kafka_q_t;
+typedef struct rd_kafka_toppar_s rd_kafka_toppar_t;
+typedef struct rd_kafka_op_s rd_kafka_op_t;
+
+/* One-off reply queue + reply version.
+ * All APIs that take a rd_kafka_replyq_t makes a copy of the
+ * struct as-is and grabs hold of the existing .q refcount.
+ * Think of replyq as a (Q,VERSION) tuple. */
+typedef struct rd_kafka_replyq_s {
+ rd_kafka_q_t *q;
+ int32_t version;
+#if ENABLE_DEVEL
+ char *_id; /* Devel id used for debugging reference leaks.
+ * Is a strdup() of the caller's function name,
+ * which makes for easy debugging with valgrind. */
+#endif
+} rd_kafka_replyq_t;
+
+
+
+/**
+ * Flags used by:
+ * - rd_kafka_op_t.rko_flags
+ * - rd_kafka_buf_t.rkbuf_flags
+ */
+#define RD_KAFKA_OP_F_FREE 0x1 /* rd_free payload when done with it */
+#define RD_KAFKA_OP_F_NO_RESPONSE 0x2 /* rkbuf: Not expecting a response */
+#define RD_KAFKA_OP_F_CRC 0x4 /* rkbuf: Perform CRC calculation */
+#define RD_KAFKA_OP_F_BLOCKING 0x8 /* rkbuf: blocking protocol request */
+#define RD_KAFKA_OP_F_REPROCESS 0x10 /* cgrp: Reprocess at a later time. */
+#define RD_KAFKA_OP_F_SENT 0x20 /* rkbuf: request sent on wire */
+#define RD_KAFKA_OP_F_FLEXVER \
+ 0x40 /* rkbuf: flexible protocol version \
+ * (KIP-482) */
+#define RD_KAFKA_OP_F_NEED_MAKE \
+ 0x80 /* rkbuf: request content has not \
+ * been made yet, the make \
+ * callback will be triggered \
+ * to construct the request \
+ * right before it is sent. */
+#define RD_KAFKA_OP_F_FORCE_CB \
+ 0x100 /* rko: force callback even if \
+ * op type is eventable. */
+
+typedef enum {
+ RD_KAFKA_OP_NONE, /* No specific type, use OP_CB */
+ RD_KAFKA_OP_FETCH, /* Kafka thread -> Application */
+ RD_KAFKA_OP_ERR, /* Kafka thread -> Application */
+ RD_KAFKA_OP_CONSUMER_ERR, /* Kafka thread -> Application */
+ RD_KAFKA_OP_DR, /* Kafka thread -> Application
+ * Produce message delivery report */
+ RD_KAFKA_OP_STATS, /* Kafka thread -> Application */
+
+ RD_KAFKA_OP_OFFSET_COMMIT, /* any -> toppar's Broker thread */
+ RD_KAFKA_OP_NODE_UPDATE, /* any -> Broker thread: node update */
+
+ RD_KAFKA_OP_XMIT_BUF, /* transmit buffer: any -> broker thread */
+ RD_KAFKA_OP_RECV_BUF, /* received response buffer: broker thr -> any */
+ RD_KAFKA_OP_XMIT_RETRY, /* retry buffer xmit: any -> broker thread */
+ RD_KAFKA_OP_FETCH_START, /* Application -> toppar's handler thread */
+ RD_KAFKA_OP_FETCH_STOP, /* Application -> toppar's handler thread */
+ RD_KAFKA_OP_SEEK, /* Application -> toppar's handler thread */
+ RD_KAFKA_OP_PAUSE, /* Application -> toppar's handler thread */
+ RD_KAFKA_OP_OFFSET_FETCH, /* Broker -> broker thread: fetch offsets
+ * for topic. */
+
+ RD_KAFKA_OP_PARTITION_JOIN, /* * -> cgrp op: add toppar to cgrp
+ * * -> broker op: add toppar to broker */
+ RD_KAFKA_OP_PARTITION_LEAVE, /* * -> cgrp op: remove toppar from cgrp
+ * * -> broker op: remove toppar from rkb*/
+ RD_KAFKA_OP_REBALANCE, /* broker thread -> app:
+ * group rebalance */
+ RD_KAFKA_OP_TERMINATE, /* For generic use */
+ RD_KAFKA_OP_COORD_QUERY, /* Query for coordinator */
+ RD_KAFKA_OP_SUBSCRIBE, /* New subscription */
+ RD_KAFKA_OP_ASSIGN, /* New assignment */
+ RD_KAFKA_OP_GET_SUBSCRIPTION, /* Get current subscription.
+ * Reuses u.subscribe */
+ RD_KAFKA_OP_GET_ASSIGNMENT, /* Get current assignment.
+ * Reuses u.assign */
+ RD_KAFKA_OP_THROTTLE, /* Throttle info */
+ RD_KAFKA_OP_NAME, /* Request name */
+ RD_KAFKA_OP_CG_METADATA, /**< Request consumer metadata */
+ RD_KAFKA_OP_OFFSET_RESET, /* Offset reset */
+ RD_KAFKA_OP_METADATA, /* Metadata response */
+ RD_KAFKA_OP_LOG, /* Log */
+ RD_KAFKA_OP_WAKEUP, /* Wake-up signaling */
+ RD_KAFKA_OP_CREATETOPICS, /**< Admin: CreateTopics: u.admin_request*/
+ RD_KAFKA_OP_DELETETOPICS, /**< Admin: DeleteTopics: u.admin_request*/
+ RD_KAFKA_OP_CREATEPARTITIONS, /**< Admin: CreatePartitions:
+ * u.admin_request*/
+ RD_KAFKA_OP_ALTERCONFIGS, /**< Admin: AlterConfigs: u.admin_request*/
+ RD_KAFKA_OP_DESCRIBECONFIGS, /**< Admin: DescribeConfigs:
+ * u.admin_request*/
+ RD_KAFKA_OP_DELETERECORDS, /**< Admin: DeleteRecords:
+ * u.admin_request*/
+ RD_KAFKA_OP_LISTCONSUMERGROUPS, /**< Admin:
+ * ListConsumerGroups
+ * u.admin_request */
+ RD_KAFKA_OP_DESCRIBECONSUMERGROUPS, /**< Admin:
+ * DescribeConsumerGroups
+ * u.admin_request */
+ RD_KAFKA_OP_DELETEGROUPS, /**< Admin: DeleteGroups: u.admin_request*/
+ RD_KAFKA_OP_DELETECONSUMERGROUPOFFSETS, /**< Admin:
+ * DeleteConsumerGroupOffsets
+ * u.admin_request */
+ RD_KAFKA_OP_CREATEACLS, /**< Admin: CreateAcls: u.admin_request*/
+ RD_KAFKA_OP_DESCRIBEACLS, /**< Admin: DescribeAcls: u.admin_request*/
+ RD_KAFKA_OP_DELETEACLS, /**< Admin: DeleteAcls: u.admin_request*/
+ RD_KAFKA_OP_ALTERCONSUMERGROUPOFFSETS, /**< Admin:
+ * AlterConsumerGroupOffsets
+ * u.admin_request */
+ RD_KAFKA_OP_LISTCONSUMERGROUPOFFSETS, /**< Admin:
+ * ListConsumerGroupOffsets
+ * u.admin_request */
+ RD_KAFKA_OP_ADMIN_FANOUT, /**< Admin: fanout request */
+ RD_KAFKA_OP_ADMIN_RESULT, /**< Admin API .._result_t */
+ RD_KAFKA_OP_PURGE, /**< Purge queues */
+ RD_KAFKA_OP_CONNECT, /**< Connect (to broker) */
+ RD_KAFKA_OP_OAUTHBEARER_REFRESH, /**< Refresh OAUTHBEARER token */
+ RD_KAFKA_OP_MOCK, /**< Mock cluster command */
+ RD_KAFKA_OP_BROKER_MONITOR, /**< Broker state change */
+ RD_KAFKA_OP_TXN, /**< Transaction command */
+ RD_KAFKA_OP_GET_REBALANCE_PROTOCOL, /**< Get rebalance protocol */
+ RD_KAFKA_OP_LEADERS, /**< Partition leader query */
+ RD_KAFKA_OP_BARRIER, /**< Version barrier bump */
+ RD_KAFKA_OP__END
+} rd_kafka_op_type_t;
+
+/* Flags used with op_type_t */
+#define RD_KAFKA_OP_CB (int)(1 << 29) /* Callback op. */
+#define RD_KAFKA_OP_REPLY (int)(1 << 30) /* Reply op. */
+#define RD_KAFKA_OP_FLAGMASK (RD_KAFKA_OP_CB | RD_KAFKA_OP_REPLY)
+
+
+/**
+ * @brief Op/queue priority levels.
+ * @remark Since priority levels alter the FIFO order, pay extra attention
+ * to preserve ordering as deemed necessary.
+ * @remark Priority should only be set on ops destined for application
+ * facing queues (rk_rep, rkcg_q, etc).
+ */
+typedef enum {
+ RD_KAFKA_PRIO_NORMAL = 0, /* Normal bulk, messages, DRs, etc. */
+ RD_KAFKA_PRIO_MEDIUM, /* Prioritize in front of bulk,
+ * still at some scale. e.g. logs, .. */
+ RD_KAFKA_PRIO_HIGH, /* Small scale high priority */
+ RD_KAFKA_PRIO_FLASH /* Micro scale, immediate delivery. */
+} rd_kafka_prio_t;
+
+
+/**
+ * @brief Op handler result
+ *
+ * @remark When returning YIELD from a handler the handler will
+ * need to have made sure to either re-enqueue the op or destroy it
+ * since the caller will not touch the op anymore.
+ */
+typedef enum {
+ RD_KAFKA_OP_RES_PASS, /* Not handled, pass to caller */
+ RD_KAFKA_OP_RES_HANDLED, /* Op was handled (through callbacks) */
+ RD_KAFKA_OP_RES_KEEP, /* Op was handled (through callbacks)
+ * but must not be destroyed by op_handle().
+ * It is NOT PERMITTED to return RES_KEEP
+ * from a callback handling a ERR__DESTROY
+ * event. */
+ RD_KAFKA_OP_RES_YIELD /* Callback called yield */
+} rd_kafka_op_res_t;
+
+
+/**
+ * @brief Queue serve callback call type
+ */
+typedef enum {
+ RD_KAFKA_Q_CB_INVALID, /* dont use */
+ RD_KAFKA_Q_CB_CALLBACK, /* trigger callback based on op */
+ RD_KAFKA_Q_CB_RETURN, /* return op rather than trigger callback
+ * (if possible)*/
+ RD_KAFKA_Q_CB_FORCE_RETURN, /* return op, regardless of callback. */
+ RD_KAFKA_Q_CB_EVENT /* like _Q_CB_RETURN but return event_t:ed op */
+} rd_kafka_q_cb_type_t;
+
+/**
+ * @brief Queue serve callback
+ * @remark See rd_kafka_op_res_t docs for return semantics.
+ */
+typedef rd_kafka_op_res_t(rd_kafka_q_serve_cb_t)(rd_kafka_t *rk,
+ struct rd_kafka_q_s *rkq,
+ struct rd_kafka_op_s *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque)
+ RD_WARN_UNUSED_RESULT;
+
+/**
+ * @brief Enumerates the assign op sub-types.
+ */
+typedef enum {
+ RD_KAFKA_ASSIGN_METHOD_ASSIGN, /**< Absolute assign/unassign */
+ RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN, /**< Incremental assign */
+ RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN /**< Incremental unassign */
+} rd_kafka_assign_method_t;
+
+/**
+ * @brief Op callback type
+ */
+typedef rd_kafka_op_res_t(rd_kafka_op_cb_t)(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ struct rd_kafka_op_s *rko)
+ RD_WARN_UNUSED_RESULT;
+
+/* Forward declaration */
+struct rd_kafka_admin_worker_cbs;
+struct rd_kafka_admin_fanout_worker_cbs;
+
+
+#define RD_KAFKA_OP_TYPE_ASSERT(rko, type) \
+ rd_assert(((rko)->rko_type & ~RD_KAFKA_OP_FLAGMASK) == (type))
+
+struct rd_kafka_op_s {
+ TAILQ_ENTRY(rd_kafka_op_s) rko_link;
+
+ rd_kafka_op_type_t rko_type; /* Internal op type */
+ rd_kafka_event_type_t rko_evtype;
+ int rko_flags; /* See RD_KAFKA_OP_F_... above */
+ int32_t rko_version;
+ rd_kafka_resp_err_t rko_err;
+ rd_kafka_error_t *rko_error;
+ int32_t rko_len; /* Depends on type, typically the
+ * message length. */
+ rd_kafka_prio_t rko_prio; /**< In-queue priority.
+ * Higher value means higher prio*/
+
+ rd_kafka_toppar_t *rko_rktp;
+
+ /*
+ * Generic fields
+ */
+
+ /* Indicates request: enqueue reply on rko_replyq.q with .version.
+ * .q is refcounted. */
+ rd_kafka_replyq_t rko_replyq;
+
+ /* Original queue's op serve callback and opaque, if any.
+ * Mainly used for forwarded queues to use the original queue's
+ * serve function from the forwarded position. */
+ rd_kafka_q_serve_cb_t *rko_serve;
+ void *rko_serve_opaque;
+
+ rd_kafka_t *rko_rk;
+
+#if ENABLE_DEVEL
+ const char *rko_source; /**< Where op was created */
+#endif
+
+ /* RD_KAFKA_OP_CB */
+ rd_kafka_op_cb_t *rko_op_cb;
+
+ union {
+ struct {
+ rd_kafka_buf_t *rkbuf;
+ rd_kafka_msg_t rkm;
+ int evidx;
+ } fetch;
+
+ struct {
+ rd_kafka_topic_partition_list_t *partitions;
+ /** Require stable (txn-commited) offsets */
+ rd_bool_t require_stable_offsets;
+ int do_free; /* free .partitions on destroy() */
+ } offset_fetch;
+
+ struct {
+ rd_kafka_topic_partition_list_t *partitions;
+ void (*cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque);
+ void *opaque;
+ int silent_empty; /**< Fail silently if there are no
+ * offsets to commit. */
+ rd_ts_t ts_timeout;
+ char *reason;
+ } offset_commit;
+
+ struct {
+ rd_kafka_topic_partition_list_t *topics;
+ } subscribe; /* also used for GET_SUBSCRIPTION */
+
+ struct {
+ rd_kafka_topic_partition_list_t *partitions;
+ rd_kafka_assign_method_t method;
+ } assign; /* also used for GET_ASSIGNMENT */
+
+ struct {
+ rd_kafka_topic_partition_list_t *partitions;
+ } rebalance;
+
+ struct {
+ const char *str;
+ } rebalance_protocol;
+
+ struct {
+ char *str;
+ } name;
+
+ rd_kafka_consumer_group_metadata_t *cg_metadata;
+
+ struct {
+ int64_t offset;
+ char *errstr;
+ rd_kafka_msg_t rkm;
+ rd_kafka_topic_t *rkt;
+ int fatal; /**< This was a ERR__FATAL error that has
+ * been translated to the fatal error
+ * code. */
+ } err; /* used for ERR and CONSUMER_ERR */
+
+ struct {
+ int throttle_time;
+ int32_t nodeid;
+ char *nodename;
+ } throttle;
+
+ struct {
+ char *json;
+ size_t json_len;
+ } stats;
+
+ struct {
+ rd_kafka_buf_t *rkbuf;
+ } xbuf; /* XMIT_BUF and RECV_BUF */
+
+ /* RD_KAFKA_OP_METADATA */
+ struct {
+ rd_kafka_metadata_t *md;
+ int force; /* force request regardless of outstanding
+ * metadata requests. */
+ } metadata;
+
+ struct {
+ rd_kafka_topic_t *rkt;
+ rd_kafka_msgq_t msgq;
+ rd_kafka_msgq_t msgq2;
+ int do_purge2;
+ } dr;
+
+ struct {
+ int32_t nodeid;
+ char nodename[RD_KAFKA_NODENAME_SIZE];
+ } node;
+
+ struct {
+ rd_kafka_fetch_pos_t pos;
+ int32_t broker_id; /**< Originating broker, or -1 */
+ char *reason;
+ } offset_reset;
+
+ struct {
+ rd_kafka_fetch_pos_t pos;
+ struct rd_kafka_cgrp_s *rkcg;
+ } fetch_start; /* reused for SEEK */
+
+ struct {
+ int pause;
+ int flag;
+ } pause;
+
+ struct {
+ char fac[64];
+ int level;
+ char *str;
+ int ctx;
+ } log;
+
+ struct {
+ rd_kafka_AdminOptions_t options; /**< Copy of user's
+ * options */
+ rd_ts_t abs_timeout; /**< Absolute timeout
+ * for this request. */
+ rd_kafka_timer_t tmr; /**< Timeout timer */
+ struct rd_kafka_enq_once_s *eonce; /**< Enqueue op
+ * only once,
+ * used to
+ * (re)trigger
+ * the request op
+ * upon broker state
+ * changes while
+ * waiting for the
+ * controller, or
+ * due to .tmr
+ * timeout. */
+ rd_list_t
+ args; /**< Type depends on request, e.g.
+ * rd_kafka_NewTopic_t for CreateTopics
+ */
+
+ rd_kafka_buf_t *reply_buf; /**< Protocol reply,
+ * temporary reference not
+ * owned by this rko */
+
+ /**< Worker callbacks, see rdkafka_admin.c */
+ struct rd_kafka_admin_worker_cbs *cbs;
+
+ /** Worker state */
+ enum { RD_KAFKA_ADMIN_STATE_INIT,
+ RD_KAFKA_ADMIN_STATE_WAIT_BROKER,
+ RD_KAFKA_ADMIN_STATE_WAIT_CONTROLLER,
+ RD_KAFKA_ADMIN_STATE_WAIT_FANOUTS,
+ RD_KAFKA_ADMIN_STATE_CONSTRUCT_REQUEST,
+ RD_KAFKA_ADMIN_STATE_WAIT_RESPONSE,
+ RD_KAFKA_ADMIN_STATE_WAIT_BROKER_LIST,
+ } state;
+
+ int32_t broker_id; /**< Requested broker id to
+ * communicate with.
+ * Used for AlterConfigs, et.al,
+ * that needs to speak to a
+ * specific broker rather than
+ * the controller.
+ * See RD_KAFKA_ADMIN_TARGET_..
+ * for special values (coordinator,
+ * fanout, etc).
+ */
+ /** The type of coordinator to look up */
+ rd_kafka_coordtype_t coordtype;
+ /** Which coordinator to look up */
+ char *coordkey;
+
+ /** Application's reply queue */
+ rd_kafka_replyq_t replyq;
+ rd_kafka_event_type_t reply_event_type;
+
+ /** A collection of fanout child ops. */
+ struct {
+ /** The type of request being fanned out.
+ * This is used for the ADMIN_RESULT. */
+ rd_kafka_op_type_t reqtype;
+
+ /** Worker callbacks, see rdkafka_admin.c */
+ struct rd_kafka_admin_fanout_worker_cbs *cbs;
+
+ /** Number of outstanding requests remaining to
+ * wait for. */
+ int outstanding;
+
+ /** Incremental results from fanouts.
+ * This list is pre-allocated to the number
+ * of input objects and can thus be set
+ * by index to retain original ordering. */
+ rd_list_t results;
+
+ /** Reply event type */
+ rd_kafka_event_type_t reply_event_type;
+
+ } fanout;
+
+ /** A reference to the parent ADMIN_FANOUT op that
+ * spawned this op, if applicable. NULL otherwise. */
+ struct rd_kafka_op_s *fanout_parent;
+
+ } admin_request;
+
+ struct {
+ rd_kafka_op_type_t reqtype; /**< Request op type,
+ * used for logging. */
+
+ rd_list_t args; /**< Args moved from the request op
+ * when the result op is created.
+ *
+ * Type depends on request.
+ */
+
+ char *errstr; /**< Error string, if rko_err
+ * is set, else NULL. */
+
+ rd_list_t results; /**< Type depends on request type:
+ *
+ * (rd_kafka_topic_result_t *):
+ * CreateTopics, DeleteTopics,
+ * CreatePartitions.
+ *
+ * (rd_kafka_ConfigResource_t *):
+ * AlterConfigs, DescribeConfigs
+ */
+
+ void *opaque; /**< Application's opaque as set by
+ * rd_kafka_AdminOptions_set_opaque
+ */
+
+ /** A reference to the parent ADMIN_FANOUT op that
+ * spawned this op, if applicable. NULL otherwise. */
+ struct rd_kafka_op_s *fanout_parent;
+ } admin_result;
+
+ struct {
+ int flags; /**< purge_flags from rd_kafka_purge() */
+ } purge;
+
+ /**< Mock cluster command */
+ struct {
+ enum { RD_KAFKA_MOCK_CMD_TOPIC_SET_ERROR,
+ RD_KAFKA_MOCK_CMD_TOPIC_CREATE,
+ RD_KAFKA_MOCK_CMD_PART_SET_LEADER,
+ RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER,
+ RD_KAFKA_MOCK_CMD_PART_SET_FOLLOWER_WMARKS,
+ RD_KAFKA_MOCK_CMD_BROKER_SET_UPDOWN,
+ RD_KAFKA_MOCK_CMD_BROKER_SET_RTT,
+ RD_KAFKA_MOCK_CMD_BROKER_SET_RACK,
+ RD_KAFKA_MOCK_CMD_COORD_SET,
+ RD_KAFKA_MOCK_CMD_APIVERSION_SET,
+ } cmd;
+
+ rd_kafka_resp_err_t err; /**< Error for:
+ * TOPIC_SET_ERROR */
+ char *name; /**< For:
+ * TOPIC_SET_ERROR
+ * TOPIC_CREATE
+ * PART_SET_FOLLOWER
+ * PART_SET_FOLLOWER_WMARKS
+ * BROKER_SET_RACK
+ * COORD_SET (key_type) */
+ char *str; /**< For:
+ * COORD_SET (key) */
+ int32_t partition; /**< For:
+ * PART_SET_FOLLOWER
+ * PART_SET_FOLLOWER_WMARKS
+ * PART_SET_LEADER
+ * APIVERSION_SET (ApiKey)
+ */
+ int32_t broker_id; /**< For:
+ * PART_SET_FOLLOWER
+ * PART_SET_LEADER
+ * BROKER_SET_UPDOWN
+ * BROKER_SET_RACK
+ * COORD_SET */
+ int64_t lo; /**< Low offset, for:
+ * TOPIC_CREATE (part cnt)
+ * PART_SET_FOLLOWER_WMARKS
+ * BROKER_SET_UPDOWN
+ * APIVERSION_SET (minver)
+ * BROKER_SET_RTT
+ */
+ int64_t hi; /**< High offset, for:
+ * TOPIC_CREATE (repl fact)
+ * PART_SET_FOLLOWER_WMARKS
+ * APIVERSION_SET (maxver)
+ */
+ } mock;
+
+ struct {
+ struct rd_kafka_broker_s *rkb; /**< Broker who's state
+ * changed. */
+ /**< Callback to trigger on the op handler's thread. */
+ void (*cb)(struct rd_kafka_broker_s *rkb);
+ } broker_monitor;
+
+ struct {
+ /** Consumer group metadata for send_offsets_to.. */
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ /** Consumer group id for AddOffsetsTo.. */
+ char *group_id;
+ int timeout_ms; /**< Operation timeout */
+ rd_ts_t abs_timeout; /**< Absolute time */
+ /**< Offsets to commit */
+ rd_kafka_topic_partition_list_t *offsets;
+ } txn;
+
+ struct {
+ /* This struct serves two purposes, the fields
+ * with "Request:" are used for the async workers state
+ * while the "Reply:" fields is a separate reply
+ * rko that is enqueued for the caller upon
+ * completion or failure. */
+
+ /** Request: Partitions to query.
+ * Reply: Queried partitions with .err field set. */
+ rd_kafka_topic_partition_list_t *partitions;
+
+ /** Request: Absolute timeout */
+ rd_ts_t ts_timeout;
+
+ /** Request: Metadata query timer */
+ rd_kafka_timer_t query_tmr;
+
+ /** Request: Timeout timer */
+ rd_kafka_timer_t timeout_tmr;
+
+ /** Request: Enqueue op only once, used to (re)trigger
+ * metadata cache lookups, topic refresh, timeout. */
+ struct rd_kafka_enq_once_s *eonce;
+
+ /** Request: Caller's replyq */
+ rd_kafka_replyq_t replyq;
+
+ /** Request: Number of metadata queries made. */
+ int query_cnt;
+
+ /** Reply: Leaders (result)
+ * (rd_kafka_partition_leader*) */
+ rd_list_t *leaders;
+
+ /** Reply: Callback on completion (or failure) */
+ rd_kafka_op_cb_t *cb;
+
+ /** Reply: Callback opaque */
+ void *opaque;
+
+ } leaders;
+
+ } rko_u;
+};
+
+TAILQ_HEAD(rd_kafka_op_head_s, rd_kafka_op_s);
+
+
+
+const char *rd_kafka_op2str(rd_kafka_op_type_t type);
+void rd_kafka_op_destroy(rd_kafka_op_t *rko);
+rd_kafka_op_t *rd_kafka_op_new0(const char *source, rd_kafka_op_type_t type);
+#if ENABLE_DEVEL
+#define _STRINGIFYX(A) #A
+#define _STRINGIFY(A) _STRINGIFYX(A)
+#define rd_kafka_op_new(type) \
+ rd_kafka_op_new0(__FILE__ ":" _STRINGIFY(__LINE__), type)
+#else
+#define rd_kafka_op_new(type) rd_kafka_op_new0(NULL, type)
+#endif
+rd_kafka_op_t *rd_kafka_op_new_reply(rd_kafka_op_t *rko_orig,
+ rd_kafka_resp_err_t err);
+rd_kafka_op_t *rd_kafka_op_new_cb(rd_kafka_t *rk,
+ rd_kafka_op_type_t type,
+ rd_kafka_op_cb_t *cb);
+int rd_kafka_op_reply(rd_kafka_op_t *rko, rd_kafka_resp_err_t err);
+int rd_kafka_op_error_reply(rd_kafka_op_t *rko, rd_kafka_error_t *error);
+
+#define rd_kafka_op_set_prio(rko, prio) ((rko)->rko_prio = prio)
+
+#define rd_kafka_op_err(rk, err, ...) \
+ do { \
+ if (!((rk)->rk_conf.enabled_events & RD_KAFKA_EVENT_ERROR)) { \
+ rd_kafka_log(rk, LOG_ERR, "ERROR", __VA_ARGS__); \
+ break; \
+ } \
+ rd_kafka_q_op_err((rk)->rk_rep, err, __VA_ARGS__); \
+ } while (0)
+
+void rd_kafka_q_op_err(rd_kafka_q_t *rkq,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 3, 4);
+void rd_kafka_consumer_err(rd_kafka_q_t *rkq,
+ int32_t broker_id,
+ rd_kafka_resp_err_t err,
+ int32_t version,
+ const char *topic,
+ rd_kafka_toppar_t *rktp,
+ int64_t offset,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 8, 9);
+rd_kafka_op_t *rd_kafka_op_req0(rd_kafka_q_t *destq,
+ rd_kafka_q_t *recvq,
+ rd_kafka_op_t *rko,
+ int timeout_ms);
+rd_kafka_op_t *
+rd_kafka_op_req(rd_kafka_q_t *destq, rd_kafka_op_t *rko, int timeout_ms);
+rd_kafka_op_t *rd_kafka_op_req2(rd_kafka_q_t *destq, rd_kafka_op_type_t type);
+rd_kafka_resp_err_t rd_kafka_op_err_destroy(rd_kafka_op_t *rko);
+rd_kafka_error_t *rd_kafka_op_error_destroy(rd_kafka_op_t *rko);
+
+rd_kafka_op_res_t rd_kafka_op_call(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) RD_WARN_UNUSED_RESULT;
+
+rd_kafka_op_t *rd_kafka_op_new_fetch_msg(rd_kafka_msg_t **rkmp,
+ rd_kafka_toppar_t *rktp,
+ int32_t version,
+ rd_kafka_buf_t *rkbuf,
+ int64_t offset,
+ size_t key_len,
+ const void *key,
+ size_t val_len,
+ const void *val);
+
+rd_kafka_op_t *rd_kafka_op_new_ctrl_msg(rd_kafka_toppar_t *rktp,
+ int32_t version,
+ rd_kafka_buf_t *rkbuf,
+ int64_t offset);
+
+void rd_kafka_op_throttle_time(struct rd_kafka_broker_s *rkb,
+ rd_kafka_q_t *rkq,
+ int throttle_time);
+
+
+rd_kafka_op_res_t
+rd_kafka_op_handle(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque,
+ rd_kafka_q_serve_cb_t *callback) RD_WARN_UNUSED_RESULT;
+
+
+extern rd_atomic32_t rd_kafka_op_cnt;
+
+void rd_kafka_op_print(FILE *fp, const char *prefix, rd_kafka_op_t *rko);
+
+void rd_kafka_fetch_op_app_prepare(rd_kafka_t *rk, rd_kafka_op_t *rko);
+
+
+#define rd_kafka_op_is_ctrl_msg(rko) \
+ ((rko)->rko_type == RD_KAFKA_OP_FETCH && !(rko)->rko_err && \
+ ((rko)->rko_u.fetch.rkm.rkm_flags & RD_KAFKA_MSG_F_CONTROL))
+
+
+
+/**
+ * @returns true if the rko's replyq is valid and the
+ * rko's rktp version (if any) is not outdated.
+ */
+#define rd_kafka_op_replyq_is_valid(RKO) \
+ (rd_kafka_replyq_is_valid(&(RKO)->rko_replyq) && \
+ !rd_kafka_op_version_outdated((RKO), 0))
+
+
+
+/**
+ * @returns the rko for a consumer message (RD_KAFKA_OP_FETCH).
+ */
+static RD_UNUSED rd_kafka_op_t *
+rd_kafka_message2rko(rd_kafka_message_t *rkmessage) {
+ rd_kafka_op_t *rko = rkmessage->_private;
+
+ if (!rko || rko->rko_type != RD_KAFKA_OP_FETCH)
+ return NULL;
+
+ return rko;
+}
+
+
+
+#endif /* _RDKAFKA_OP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c
new file mode 100644
index 000000000..46d2fb3ed
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.c
@@ -0,0 +1,4301 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_request.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_fetcher.h"
+#include "rdregex.h"
+#include "rdports.h" /* rd_qsort_r() */
+
+#include "rdunittest.h"
+
+const char *rd_kafka_fetch_states[] = {"none", "stopping",
+ "stopped", "offset-query",
+ "offset-wait", "validate-epoch-wait",
+ "active"};
+
+
+static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque);
+
+static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp,
+ int backoff_ms,
+ const char *reason);
+
+
+static RD_INLINE int32_t
+rd_kafka_toppar_version_new_barrier0(rd_kafka_toppar_t *rktp,
+ const char *func,
+ int line) {
+ int32_t version = rd_atomic32_add(&rktp->rktp_version, 1);
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BARRIER",
+ "%s [%" PRId32 "]: %s:%d: new version barrier v%" PRId32,
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition, func,
+ line, version);
+ return version;
+}
+
+#define rd_kafka_toppar_version_new_barrier(rktp) \
+ rd_kafka_toppar_version_new_barrier0(rktp, __FUNCTION__, __LINE__)
+
+
+/**
+ * Toppar based OffsetResponse handling.
+ * This is used for updating the low water mark for consumer lag.
+ */
+static void rd_kafka_toppar_lag_handle_Offset(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_toppar_t *rktp = opaque;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_topic_partition_t *rktpar;
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+
+ /* Parse and return Offset */
+ err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request, offsets,
+ NULL);
+
+ if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
+ rd_kafka_topic_partition_list_destroy(offsets);
+ return; /* Retrying */
+ }
+
+ if (!err && !(rktpar = rd_kafka_topic_partition_list_find(
+ offsets, rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition)))
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ if (!err && !rktpar->err) {
+ rd_kafka_toppar_lock(rktp);
+ rktp->rktp_lo_offset = rktpar->offset;
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ rktp->rktp_wait_consumer_lag_resp = 0;
+
+ rd_kafka_toppar_destroy(rktp); /* from request.opaque */
+}
+
+
+
+/**
+ * Request information from broker to keep track of consumer lag.
+ *
+ * @locality toppar handle thread
+ * @locks none
+ */
+static void rd_kafka_toppar_consumer_lag_req(rd_kafka_toppar_t *rktp) {
+ rd_kafka_topic_partition_list_t *partitions;
+ rd_kafka_topic_partition_t *rktpar;
+
+ if (rktp->rktp_wait_consumer_lag_resp)
+ return; /* Previous request not finished yet */
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Offset requests can only be sent to the leader replica.
+ *
+ * Note: If rktp is delegated to a preferred replica, it is
+ * certain that FETCH >= v5 and so rktp_lo_offset will be
+ * updated via LogStartOffset in the FETCH response.
+ */
+ if (!rktp->rktp_leader || (rktp->rktp_leader != rktp->rktp_broker)) {
+ rd_kafka_toppar_unlock(rktp);
+ return;
+ }
+
+ /* Also don't send a timed log start offset request if leader
+ * broker supports FETCH >= v5, since this will be set when
+ * doing fetch requests.
+ */
+ if (rd_kafka_broker_ApiVersion_supported(
+ rktp->rktp_broker, RD_KAFKAP_Fetch, 0, 5, NULL) == 5) {
+ rd_kafka_toppar_unlock(rktp);
+ return;
+ }
+
+ rktp->rktp_wait_consumer_lag_resp = 1;
+
+ partitions = rd_kafka_topic_partition_list_new(1);
+ rktpar = rd_kafka_topic_partition_list_add(
+ partitions, rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
+ rktpar->offset = RD_KAFKA_OFFSET_BEGINNING;
+ rd_kafka_topic_partition_set_current_leader_epoch(
+ rktpar, rktp->rktp_leader_epoch);
+
+ /* Ask for oldest offset. The newest offset is automatically
+ * propagated in FetchResponse.HighwaterMark. */
+ rd_kafka_ListOffsetsRequest(
+ rktp->rktp_broker, partitions, RD_KAFKA_REPLYQ(rktp->rktp_ops, 0),
+ rd_kafka_toppar_lag_handle_Offset, rd_kafka_toppar_keep(rktp));
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_topic_partition_list_destroy(partitions);
+}
+
+
+
+/**
+ * Request earliest offset for a partition
+ *
+ * Locality: toppar handler thread
+ */
+static void rd_kafka_toppar_consumer_lag_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_toppar_t *rktp = arg;
+ rd_kafka_toppar_consumer_lag_req(rktp);
+}
+
+/**
+ * @brief Update rktp_op_version.
+ * Enqueue an RD_KAFKA_OP_BARRIER type of operation
+ * when the op_version is updated.
+ *
+ * @locks_required rd_kafka_toppar_lock() must be held.
+ * @locality Toppar handler thread
+ */
+void rd_kafka_toppar_op_version_bump(rd_kafka_toppar_t *rktp, int32_t version) {
+ rd_kafka_op_t *rko;
+
+ rktp->rktp_op_version = version;
+ rko = rd_kafka_op_new(RD_KAFKA_OP_BARRIER);
+ rko->rko_version = version;
+ rko->rko_prio = RD_KAFKA_PRIO_FLASH;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ rd_kafka_q_enq(rktp->rktp_fetchq, rko);
+}
+
+
+/**
+ * Add new partition to topic.
+ *
+ * Locks: rd_kafka_topic_wrlock() must be held.
+ * Locks: rd_kafka_wrlock() must be held.
+ */
+rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ const char *func,
+ int line) {
+ rd_kafka_toppar_t *rktp;
+
+ rktp = rd_calloc(1, sizeof(*rktp));
+
+ rktp->rktp_partition = partition;
+ rktp->rktp_rkt = rkt;
+ rktp->rktp_leader_id = -1;
+ rktp->rktp_broker_id = -1;
+ rktp->rktp_leader_epoch = -1;
+ rd_interval_init(&rktp->rktp_lease_intvl);
+ rd_interval_init(&rktp->rktp_new_lease_intvl);
+ rd_interval_init(&rktp->rktp_new_lease_log_intvl);
+ rd_interval_init(&rktp->rktp_metadata_intvl);
+ /* Mark partition as unknown (does not exist) until we see the
+ * partition in topic metadata. */
+ if (partition != RD_KAFKA_PARTITION_UA)
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
+ rktp->rktp_fetch_state = RD_KAFKA_TOPPAR_FETCH_NONE;
+ rktp->rktp_fetch_msg_max_bytes =
+ rkt->rkt_rk->rk_conf.fetch_msg_max_bytes;
+ rktp->rktp_offset_fp = NULL;
+ rd_kafka_offset_stats_reset(&rktp->rktp_offsets);
+ rd_kafka_offset_stats_reset(&rktp->rktp_offsets_fin);
+ rktp->rktp_ls_offset = RD_KAFKA_OFFSET_INVALID;
+ rktp->rktp_hi_offset = RD_KAFKA_OFFSET_INVALID;
+ rktp->rktp_lo_offset = RD_KAFKA_OFFSET_INVALID;
+ rd_kafka_fetch_pos_init(&rktp->rktp_query_pos);
+ rd_kafka_fetch_pos_init(&rktp->rktp_next_fetch_start);
+ rd_kafka_fetch_pos_init(&rktp->rktp_last_next_fetch_start);
+ rd_kafka_fetch_pos_init(&rktp->rktp_app_pos);
+ rd_kafka_fetch_pos_init(&rktp->rktp_stored_pos);
+ rd_kafka_fetch_pos_init(&rktp->rktp_committing_pos);
+ rd_kafka_fetch_pos_init(&rktp->rktp_committed_pos);
+ rd_kafka_msgq_init(&rktp->rktp_msgq);
+ rd_kafka_msgq_init(&rktp->rktp_xmit_msgq);
+ mtx_init(&rktp->rktp_lock, mtx_plain);
+
+ rd_refcnt_init(&rktp->rktp_refcnt, 0);
+ rktp->rktp_fetchq = rd_kafka_q_new(rkt->rkt_rk);
+ rktp->rktp_ops = rd_kafka_q_new(rkt->rkt_rk);
+ rktp->rktp_ops->rkq_serve = rd_kafka_toppar_op_serve;
+ rktp->rktp_ops->rkq_opaque = rktp;
+ rd_atomic32_init(&rktp->rktp_version, 1);
+ rktp->rktp_op_version = rd_atomic32_get(&rktp->rktp_version);
+
+ rd_atomic32_init(&rktp->rktp_msgs_inflight, 0);
+ rd_kafka_pid_reset(&rktp->rktp_eos.pid);
+
+ /* Consumer: If statistics is available we query the log start offset
+ * of each partition.
+ * Since the oldest offset only moves on log retention, we cap this
+ * value on the low end to a reasonable value to avoid flooding
+ * the brokers with OffsetRequests when our statistics interval is low.
+ * FIXME: Use a global timer to collect offsets for all partitions
+ * FIXME: This timer is superfulous for FETCH >= v5 because the log
+ * start offset is included in fetch responses.
+ * */
+ if (rktp->rktp_rkt->rkt_rk->rk_conf.stats_interval_ms > 0 &&
+ rkt->rkt_rk->rk_type == RD_KAFKA_CONSUMER &&
+ rktp->rktp_partition != RD_KAFKA_PARTITION_UA) {
+ int intvl = rkt->rkt_rk->rk_conf.stats_interval_ms;
+ if (intvl < 10 * 1000 /* 10s */)
+ intvl = 10 * 1000;
+ rd_kafka_timer_start(
+ &rkt->rkt_rk->rk_timers, &rktp->rktp_consumer_lag_tmr,
+ intvl * 1000ll, rd_kafka_toppar_consumer_lag_tmr_cb, rktp);
+ }
+
+ rktp->rktp_rkt = rd_kafka_topic_keep(rkt);
+
+ rd_kafka_q_fwd_set(rktp->rktp_ops, rkt->rkt_rk->rk_ops);
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPPARNEW",
+ "NEW %s [%" PRId32 "] %p refcnt %p (at %s:%d)",
+ rkt->rkt_topic->str, rktp->rktp_partition, rktp,
+ &rktp->rktp_refcnt, func, line);
+
+ return rd_kafka_toppar_keep(rktp);
+}
+
+
+
+/**
+ * Removes a toppar from its duties, global lists, etc.
+ *
+ * Locks: rd_kafka_toppar_lock() MUST be held
+ */
+static void rd_kafka_toppar_remove(rd_kafka_toppar_t *rktp) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARREMOVE",
+ "Removing toppar %s [%" PRId32 "] %p",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rktp);
+
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_validate_tmr, 1 /*lock*/);
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_query_tmr, 1 /*lock*/);
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_consumer_lag_tmr, 1 /*lock*/);
+
+ rd_kafka_q_fwd_set(rktp->rktp_ops, NULL);
+}
+
+
+/**
+ * Final destructor for partition.
+ */
+void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp) {
+
+ rd_kafka_toppar_remove(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESTROY",
+ "%s [%" PRId32 "]: %p DESTROY_FINAL",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rktp);
+
+ /* Clear queues */
+ rd_kafka_assert(rktp->rktp_rkt->rkt_rk,
+ rd_kafka_msgq_len(&rktp->rktp_xmit_msgq) == 0);
+ rd_kafka_dr_msgq(rktp->rktp_rkt, &rktp->rktp_msgq,
+ RD_KAFKA_RESP_ERR__DESTROY);
+ rd_kafka_q_destroy_owner(rktp->rktp_fetchq);
+ rd_kafka_q_destroy_owner(rktp->rktp_ops);
+
+ rd_kafka_replyq_destroy(&rktp->rktp_replyq);
+
+ rd_kafka_topic_destroy0(rktp->rktp_rkt);
+
+ mtx_destroy(&rktp->rktp_lock);
+
+ if (rktp->rktp_leader)
+ rd_kafka_broker_destroy(rktp->rktp_leader);
+
+ rd_refcnt_destroy(&rktp->rktp_refcnt);
+
+ rd_free(rktp);
+}
+
+
+/**
+ * Set toppar fetching state.
+ *
+ * @locality any
+ * @locks_required rd_kafka_toppar_lock() MUST be held.
+ */
+void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state) {
+ rd_kafka_assert(NULL,
+ thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread));
+
+ if ((int)rktp->rktp_fetch_state == fetch_state)
+ return;
+
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC, "PARTSTATE",
+ "Partition %.*s [%" PRId32 "] changed fetch state %s -> %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ rd_kafka_fetch_states[fetch_state]);
+
+ rktp->rktp_fetch_state = fetch_state;
+
+ if (fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE)
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, CONSUMER | RD_KAFKA_DBG_TOPIC,
+ "FETCH",
+ "Partition %.*s [%" PRId32 "] start fetching at %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start));
+}
+
+
+/**
+ * Returns the appropriate toppar for a given rkt and partition.
+ * The returned toppar has increased refcnt and must be unreffed by calling
+ * rd_kafka_toppar_destroy().
+ * May return NULL.
+ *
+ * If 'ua_on_miss' is true the UA (unassigned) toppar is returned if
+ * 'partition' was not known locally, else NULL is returned.
+ *
+ * Locks: Caller must hold rd_kafka_topic_*lock()
+ */
+rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func,
+ int line,
+ const rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int ua_on_miss) {
+ rd_kafka_toppar_t *rktp;
+
+ if (partition >= 0 && partition < rkt->rkt_partition_cnt)
+ rktp = rkt->rkt_p[partition];
+ else if (partition == RD_KAFKA_PARTITION_UA || ua_on_miss)
+ rktp = rkt->rkt_ua;
+ else
+ return NULL;
+
+ if (rktp)
+ return rd_kafka_toppar_keep_fl(func, line, rktp);
+
+ return NULL;
+}
+
+
+/**
+ * Same as rd_kafka_toppar_get() but no need for locking and
+ * looks up the topic first.
+ *
+ * Locality: any
+ * Locks: none
+ */
+rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int ua_on_miss,
+ int create_on_miss) {
+ rd_kafka_topic_t *rkt;
+ rd_kafka_toppar_t *rktp;
+
+ rd_kafka_wrlock(rk);
+
+ /* Find or create topic */
+ if (unlikely(!(rkt = rd_kafka_topic_find(rk, topic, 0 /*no-lock*/)))) {
+ if (!create_on_miss) {
+ rd_kafka_wrunlock(rk);
+ return NULL;
+ }
+ rkt = rd_kafka_topic_new0(rk, topic, NULL, NULL, 0 /*no-lock*/);
+ if (!rkt) {
+ rd_kafka_wrunlock(rk);
+ rd_kafka_log(rk, LOG_ERR, "TOPIC",
+ "Failed to create local topic \"%s\": %s",
+ topic, rd_strerror(errno));
+ return NULL;
+ }
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_topic_wrlock(rkt);
+ rktp = rd_kafka_toppar_desired_add(rkt, partition);
+ rd_kafka_topic_wrunlock(rkt);
+
+ rd_kafka_topic_destroy0(rkt);
+
+ return rktp;
+}
+
+
+/**
+ * Returns a toppar if it is available in the cluster.
+ * '*errp' is set to the error-code if lookup fails.
+ *
+ * Locks: topic_*lock() MUST be held
+ */
+rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int ua_on_miss,
+ rd_kafka_resp_err_t *errp) {
+ rd_kafka_toppar_t *rktp;
+
+ switch (rkt->rkt_state) {
+ case RD_KAFKA_TOPIC_S_UNKNOWN:
+ /* No metadata received from cluster yet.
+ * Put message in UA partition and re-run partitioner when
+ * cluster comes up. */
+ partition = RD_KAFKA_PARTITION_UA;
+ break;
+
+ case RD_KAFKA_TOPIC_S_NOTEXISTS:
+ /* Topic not found in cluster.
+ * Fail message immediately. */
+ *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ return NULL;
+
+ case RD_KAFKA_TOPIC_S_ERROR:
+ /* Permanent topic error. */
+ *errp = rkt->rkt_err;
+ return NULL;
+
+ case RD_KAFKA_TOPIC_S_EXISTS:
+ /* Topic exists in cluster. */
+
+ /* Topic exists but has no partitions.
+ * This is usually an transient state following the
+ * auto-creation of a topic. */
+ if (unlikely(rkt->rkt_partition_cnt == 0)) {
+ partition = RD_KAFKA_PARTITION_UA;
+ break;
+ }
+
+ /* Check that partition exists. */
+ if (partition >= rkt->rkt_partition_cnt) {
+ *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ return NULL;
+ }
+ break;
+
+ default:
+ rd_kafka_assert(rkt->rkt_rk, !*"NOTREACHED");
+ break;
+ }
+
+ /* Get new partition */
+ rktp = rd_kafka_toppar_get(rkt, partition, 0);
+
+ if (unlikely(!rktp)) {
+ /* Unknown topic or partition */
+ if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS)
+ *errp = RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC;
+ else
+ *errp = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ return NULL;
+ }
+
+ return rktp;
+}
+
+
+/**
+ * Looks for partition 'i' in topic 'rkt's desired list.
+ *
+ * The desired partition list is the list of partitions that are desired
+ * (e.g., by the consumer) but not yet seen on a broker.
+ * As soon as the partition is seen on a broker the toppar is moved from
+ * the desired list and onto the normal rkt_p array.
+ * When the partition on the broker goes away a desired partition is put
+ * back on the desired list.
+ *
+ * Locks: rd_kafka_topic_*lock() must be held.
+ * Note: 'rktp' refcount is increased.
+ */
+
+rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt,
+ int32_t partition) {
+ rd_kafka_toppar_t *rktp;
+ int i;
+
+ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) {
+ if (rktp->rktp_partition == partition)
+ return rd_kafka_toppar_keep(rktp);
+ }
+
+ return NULL;
+}
+
+
+/**
+ * Link toppar on desired list.
+ *
+ * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
+ */
+void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp) {
+
+ if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP)
+ return; /* Already linked */
+
+ rd_kafka_toppar_keep(rktp);
+ rd_list_add(&rktp->rktp_rkt->rkt_desp, rktp);
+ rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl);
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_ON_DESP;
+}
+
+/**
+ * Unlink toppar from desired list.
+ *
+ * Locks: rd_kafka_topic_wrlock() and toppar_lock() must be held.
+ */
+void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp) {
+ if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_ON_DESP))
+ return; /* Not linked */
+
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_ON_DESP;
+ rd_list_remove(&rktp->rktp_rkt->rkt_desp, rktp);
+ rd_interval_reset(&rktp->rktp_rkt->rkt_desp_refresh_intvl);
+ rd_kafka_toppar_destroy(rktp);
+}
+
+
+/**
+ * @brief If rktp is not already desired:
+ * - mark as DESIRED|~REMOVE
+ * - add to desired list if unknown
+ *
+ * @remark toppar_lock() MUST be held
+ */
+void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp) {
+ if ((rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
+ return;
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED",
+ "%s [%" PRId32 "]: marking as DESIRED",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
+
+ /* If toppar was marked for removal this is no longer
+ * the case since the partition is now desired. */
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_REMOVE;
+
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_DESIRED;
+
+ if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESIRED",
+ "%s [%" PRId32 "]: adding to DESIRED list",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition);
+ rd_kafka_toppar_desired_link(rktp);
+ }
+}
+
+
+/**
+ * Adds 'partition' as a desired partition to topic 'rkt', or updates
+ * an existing partition to be desired.
+ *
+ * Locks: rd_kafka_topic_wrlock() must be held.
+ */
+rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt,
+ int32_t partition) {
+ rd_kafka_toppar_t *rktp;
+
+ rktp = rd_kafka_toppar_get(rkt, partition, 0 /*no_ua_on_miss*/);
+
+ if (!rktp)
+ rktp = rd_kafka_toppar_desired_get(rkt, partition);
+
+ if (!rktp)
+ rktp = rd_kafka_toppar_new(rkt, partition);
+
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_toppar_desired_add0(rktp);
+ rd_kafka_toppar_unlock(rktp);
+
+ return rktp; /* Callers refcount */
+}
+
+
+
+/**
+ * Unmarks an 'rktp' as desired.
+ *
+ * Locks: rd_kafka_topic_wrlock() and rd_kafka_toppar_lock() MUST be held.
+ */
+void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp) {
+
+ if (!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED))
+ return;
+
+ rktp->rktp_flags &= ~RD_KAFKA_TOPPAR_F_DESIRED;
+ rd_kafka_toppar_desired_unlink(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "DESP",
+ "Removing (un)desired topic %s [%" PRId32 "]",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition);
+
+ if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_UNKNOWN) {
+ /* If this partition does not exist in the cluster
+ * and is no longer desired, remove it. */
+ rd_kafka_toppar_broker_leave_for_remove(rktp);
+ }
+}
+
+
+
+/**
+ * Append message at tail of 'rktp' message queue.
+ */
+void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp,
+ rd_kafka_msg_t *rkm,
+ rd_ts_t now) {
+ rd_kafka_q_t *wakeup_q = NULL;
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (!rkm->rkm_u.producer.msgid &&
+ rktp->rktp_partition != RD_KAFKA_PARTITION_UA)
+ rkm->rkm_u.producer.msgid = ++rktp->rktp_msgid;
+
+ if (rktp->rktp_partition == RD_KAFKA_PARTITION_UA ||
+ rktp->rktp_rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO) {
+ /* No need for enq_sorted(), this is the oldest message. */
+ rd_kafka_msgq_enq(&rktp->rktp_msgq, rkm);
+ } else {
+ rd_kafka_msgq_enq_sorted(rktp->rktp_rkt, &rktp->rktp_msgq, rkm);
+ }
+
+ if (unlikely(rktp->rktp_partition != RD_KAFKA_PARTITION_UA &&
+ rd_kafka_msgq_may_wakeup(&rktp->rktp_msgq, now) &&
+ (wakeup_q = rktp->rktp_msgq_wakeup_q))) {
+ /* Wake-up broker thread */
+ rktp->rktp_msgq.rkmq_wakeup.signalled = rd_true;
+ rd_kafka_q_keep(wakeup_q);
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+
+ if (unlikely(wakeup_q != NULL)) {
+ rd_kafka_q_yield(wakeup_q);
+ rd_kafka_q_destroy(wakeup_q);
+ }
+}
+
+
+/**
+ * @brief Insert \p srcq before \p insert_before in \p destq.
+ *
+ * If \p srcq and \p destq overlaps only part of the \p srcq will be inserted.
+ *
+ * Upon return \p srcq will contain any remaining messages that require
+ * another insert position in \p destq.
+ */
+static void rd_kafka_msgq_insert_msgq_before(rd_kafka_msgq_t *destq,
+ rd_kafka_msg_t *insert_before,
+ rd_kafka_msgq_t *srcq,
+ int (*cmp)(const void *a,
+ const void *b)) {
+ rd_kafka_msg_t *slast;
+ rd_kafka_msgq_t tmpq;
+
+ if (!insert_before) {
+ /* Append all of srcq to destq */
+ rd_kafka_msgq_concat(destq, srcq);
+ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
+ return;
+ }
+
+ slast = rd_kafka_msgq_last(srcq);
+ rd_dassert(slast);
+
+ if (cmp(slast, insert_before) > 0) {
+ rd_kafka_msg_t *new_sfirst;
+ int cnt;
+ int64_t bytes;
+
+ /* destq insert_before resides somewhere between
+ * srcq.first and srcq.last, find the first message in
+ * srcq that is > insert_before and split srcq into
+ * a left part that contains the messages to insert before
+ * insert_before, and a right part that will need another
+ * insert position. */
+
+ new_sfirst = rd_kafka_msgq_find_pos(srcq, NULL, insert_before,
+ cmp, &cnt, &bytes);
+ rd_assert(new_sfirst);
+
+ /* split srcq into two parts using the divider message */
+ rd_kafka_msgq_split(srcq, &tmpq, new_sfirst, cnt, bytes);
+
+ rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
+ rd_kafka_msgq_verify_order(NULL, &tmpq, 0, rd_false);
+ } else {
+ rd_kafka_msgq_init(&tmpq);
+ }
+
+ /* srcq now contains messages up to the first message in destq,
+ * insert srcq at insert_before in destq. */
+ rd_dassert(!TAILQ_EMPTY(&destq->rkmq_msgs));
+ rd_dassert(!TAILQ_EMPTY(&srcq->rkmq_msgs));
+ TAILQ_INSERT_LIST_BEFORE(&destq->rkmq_msgs, insert_before,
+ &srcq->rkmq_msgs, rd_kafka_msgs_head_s,
+ rd_kafka_msg_t *, rkm_link);
+ destq->rkmq_msg_cnt += srcq->rkmq_msg_cnt;
+ destq->rkmq_msg_bytes += srcq->rkmq_msg_bytes;
+ srcq->rkmq_msg_cnt = 0;
+ srcq->rkmq_msg_bytes = 0;
+
+ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
+ rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
+
+ /* tmpq contains the remaining messages in srcq, move it over. */
+ rd_kafka_msgq_move(srcq, &tmpq);
+
+ rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
+}
+
+
+/**
+ * @brief Insert all messages from \p srcq into \p destq in their sorted
+ * position (using \p cmp)
+ */
+void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq,
+ rd_kafka_msgq_t *srcq,
+ int (*cmp)(const void *a, const void *b)) {
+ rd_kafka_msg_t *sfirst, *dlast, *start_pos = NULL;
+
+ if (unlikely(RD_KAFKA_MSGQ_EMPTY(srcq))) {
+ /* srcq is empty */
+ return;
+ }
+
+ if (unlikely(RD_KAFKA_MSGQ_EMPTY(destq))) {
+ /* destq is empty, simply move the srcq. */
+ rd_kafka_msgq_move(destq, srcq);
+ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
+ return;
+ }
+
+ /* Optimize insertion by bulk-moving messages in place.
+ * We know that:
+ * - destq is sorted but might not be continous (1,2,3,7)
+ * - srcq is sorted but might not be continous (4,5,6,8)
+ * - there migt be (multiple) overlaps between the two, e.g:
+ * destq = (1,2,3,7), srcq = (4,5,6,8)
+ * - there may be millions of messages.
+ */
+
+ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
+ rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
+
+ dlast = rd_kafka_msgq_last(destq);
+ sfirst = rd_kafka_msgq_first(srcq);
+
+ /* Most common case, all of srcq goes after destq */
+ if (likely(cmp(dlast, sfirst) < 0)) {
+ rd_kafka_msgq_concat(destq, srcq);
+
+ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
+
+ rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq));
+ return;
+ }
+
+ /* Insert messages from srcq into destq in non-overlapping
+ * chunks until srcq is exhausted. */
+ while (likely(sfirst != NULL)) {
+ rd_kafka_msg_t *insert_before;
+
+ /* Get insert position in destq of first element in srcq */
+ insert_before = rd_kafka_msgq_find_pos(destq, start_pos, sfirst,
+ cmp, NULL, NULL);
+
+ /* Insert as much of srcq as possible at insert_before */
+ rd_kafka_msgq_insert_msgq_before(destq, insert_before, srcq,
+ cmp);
+
+ /* Remember the current destq position so the next find_pos()
+ * does not have to re-scan destq and what was
+ * added from srcq. */
+ start_pos = insert_before;
+
+ /* For next iteration */
+ sfirst = rd_kafka_msgq_first(srcq);
+
+ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
+ rd_kafka_msgq_verify_order(NULL, srcq, 0, rd_false);
+ }
+
+ rd_kafka_msgq_verify_order(NULL, destq, 0, rd_false);
+
+ rd_assert(RD_KAFKA_MSGQ_EMPTY(srcq));
+}
+
+
+/**
+ * @brief Inserts messages from \p srcq according to their sorted position
+ * into \p destq, filtering out messages that can not be retried.
+ *
+ * @param incr_retry Increment retry count for messages.
+ * @param max_retries Maximum retries allowed per message.
+ * @param backoff Absolute retry backoff for retried messages.
+ *
+ * @returns 0 if all messages were retried, or 1 if some messages
+ * could not be retried.
+ */
+int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq,
+ rd_kafka_msgq_t *srcq,
+ int incr_retry,
+ int max_retries,
+ rd_ts_t backoff,
+ rd_kafka_msg_status_t status,
+ int (*cmp)(const void *a, const void *b)) {
+ rd_kafka_msgq_t retryable = RD_KAFKA_MSGQ_INITIALIZER(retryable);
+ rd_kafka_msg_t *rkm, *tmp;
+
+ /* Scan through messages to see which ones are eligible for retry,
+ * move the retryable ones to temporary queue and
+ * set backoff time for first message and optionally
+ * increase retry count for each message.
+ * Sorted insert is not necessary since the original order
+ * srcq order is maintained. */
+ TAILQ_FOREACH_SAFE(rkm, &srcq->rkmq_msgs, rkm_link, tmp) {
+ if (rkm->rkm_u.producer.retries + incr_retry > max_retries)
+ continue;
+
+ rd_kafka_msgq_deq(srcq, rkm, 1);
+ rd_kafka_msgq_enq(&retryable, rkm);
+
+ rkm->rkm_u.producer.ts_backoff = backoff;
+ rkm->rkm_u.producer.retries += incr_retry;
+
+ /* Don't downgrade a message from any form of PERSISTED
+ * to NOT_PERSISTED, since the original cause of indicating
+ * PERSISTED can't be changed.
+ * E.g., a previous ack or in-flight timeout. */
+ if (likely(!(status == RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
+ rkm->rkm_status !=
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED)))
+ rkm->rkm_status = status;
+ }
+
+ /* No messages are retryable */
+ if (RD_KAFKA_MSGQ_EMPTY(&retryable))
+ return 0;
+
+ /* Insert retryable list at sorted position */
+ rd_kafka_msgq_insert_msgq(destq, &retryable, cmp);
+
+ return 1;
+}
+
+/**
+ * @brief Inserts messages from \p rkmq according to their sorted position
+ * into the partition's message queue.
+ *
+ * @param incr_retry Increment retry count for messages.
+ * @param status Set status on each message.
+ *
+ * @returns 0 if all messages were retried, or 1 if some messages
+ * could not be retried.
+ *
+ * @locality Broker thread (but not necessarily the leader broker thread)
+ */
+
+int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq,
+ int incr_retry,
+ rd_kafka_msg_status_t status) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ rd_ts_t backoff = rd_clock() + (rk->rk_conf.retry_backoff_ms * 1000);
+ int r;
+
+ if (rd_kafka_terminating(rk))
+ return 1;
+
+ rd_kafka_toppar_lock(rktp);
+ r = rd_kafka_retry_msgq(&rktp->rktp_msgq, rkmq, incr_retry,
+ rk->rk_conf.max_retries, backoff, status,
+ rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+ rd_kafka_toppar_unlock(rktp);
+
+ return r;
+}
+
+/**
+ * @brief Insert sorted message list \p rkmq at sorted position in \p rktp 's
+ * message queue. The queues must not overlap.
+ * @remark \p rkmq will be cleared.
+ */
+void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq) {
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_msgq_insert_msgq(&rktp->rktp_msgq, rkmq,
+ rktp->rktp_rkt->rkt_conf.msg_order_cmp);
+ rd_kafka_toppar_unlock(rktp);
+}
+
+
+
+/**
+ * Helper method for purging queues when removing a toppar.
+ * Locks: rd_kafka_toppar_lock() MUST be held
+ */
+void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp) {
+ rd_kafka_q_disable(rktp->rktp_fetchq);
+ rd_kafka_q_purge(rktp->rktp_fetchq);
+ rd_kafka_q_disable(rktp->rktp_ops);
+ rd_kafka_q_purge(rktp->rktp_ops);
+}
+
+
+/**
+ * @brief Migrate rktp from (optional) \p old_rkb to (optional) \p new_rkb,
+ * but at least one is required to be non-NULL.
+ *
+ * This is an async operation.
+ *
+ * @locks rd_kafka_toppar_lock() MUST be held
+ */
+static void rd_kafka_toppar_broker_migrate(rd_kafka_toppar_t *rktp,
+ rd_kafka_broker_t *old_rkb,
+ rd_kafka_broker_t *new_rkb) {
+ rd_kafka_op_t *rko;
+ rd_kafka_broker_t *dest_rkb;
+ int had_next_broker = rktp->rktp_next_broker ? 1 : 0;
+
+ rd_assert(old_rkb || new_rkb);
+
+ /* Update next broker */
+ if (new_rkb)
+ rd_kafka_broker_keep(new_rkb);
+ if (rktp->rktp_next_broker)
+ rd_kafka_broker_destroy(rktp->rktp_next_broker);
+ rktp->rktp_next_broker = new_rkb;
+
+ /* If next_broker is set it means there is already an async
+ * migration op going on and we should not send a new one
+ * but simply change the next_broker (which we did above). */
+ if (had_next_broker)
+ return;
+
+ /* Revert from offset-wait state back to offset-query
+ * prior to leaving the broker to avoid stalling
+ * on the new broker waiting for a offset reply from
+ * this old broker (that might not come and thus need
+ * to time out..slowly) */
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT)
+ rd_kafka_toppar_offset_retry(rktp, 500,
+ "migrating to new broker");
+
+ if (old_rkb) {
+ /* If there is an existing broker for this toppar we let it
+ * first handle its own leave and then trigger the join for
+ * the next broker, if any. */
+ rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
+ dest_rkb = old_rkb;
+ } else {
+ /* No existing broker, send join op directly to new broker. */
+ rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_JOIN);
+ dest_rkb = new_rkb;
+ }
+
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
+ "Migrating topic %.*s [%" PRId32
+ "] %p from %s to %s "
+ "(sending %s to %s)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
+ rktp, old_rkb ? rd_kafka_broker_name(old_rkb) : "(none)",
+ new_rkb ? rd_kafka_broker_name(new_rkb) : "(none)",
+ rd_kafka_op2str(rko->rko_type), rd_kafka_broker_name(dest_rkb));
+
+ rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
+}
+
+
+/**
+ * Async toppar leave from broker.
+ * Only use this when partitions are to be removed.
+ *
+ * Locks: rd_kafka_toppar_lock() MUST be held
+ */
+void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp) {
+ rd_kafka_op_t *rko;
+ rd_kafka_broker_t *dest_rkb;
+
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_REMOVE;
+
+ if (rktp->rktp_next_broker)
+ dest_rkb = rktp->rktp_next_broker;
+ else if (rktp->rktp_broker)
+ dest_rkb = rktp->rktp_broker;
+ else {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "TOPPARDEL",
+ "%.*s [%" PRId32
+ "] %p not handled by any broker: "
+ "not sending LEAVE for remove",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rktp);
+ return;
+ }
+
+
+ /* Revert from offset-wait state back to offset-query
+ * prior to leaving the broker to avoid stalling
+ * on the new broker waiting for a offset reply from
+ * this old broker (that might not come and thus need
+ * to time out..slowly) */
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT)
+ rd_kafka_toppar_set_fetch_state(
+ rktp, RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_PARTITION_LEAVE);
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC, "BRKMIGR",
+ "%.*s [%" PRId32 "] %p sending final LEAVE for removal by %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
+ rktp, rd_kafka_broker_name(dest_rkb));
+
+ rd_kafka_q_enq(dest_rkb->rkb_ops, rko);
+}
+
+
+/**
+ * @brief Delegates toppar 'rktp' to broker 'rkb'. 'rkb' may be NULL to
+ * undelegate broker.
+ *
+ * @locks Caller must have rd_kafka_toppar_lock(rktp) held.
+ */
+void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp,
+ rd_kafka_broker_t *rkb) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ int internal_fallback = 0;
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
+ "%s [%" PRId32
+ "]: delegate to broker %s "
+ "(rktp %p, term %d, ref %d)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rkb ? rkb->rkb_name : "(none)", rktp,
+ rd_kafka_terminating(rk),
+ rd_refcnt_get(&rktp->rktp_refcnt));
+
+ /* Undelegated toppars are delgated to the internal
+ * broker for bookkeeping. */
+ if (!rkb && !rd_kafka_terminating(rk)) {
+ rkb = rd_kafka_broker_internal(rk);
+ internal_fallback = 1;
+ }
+
+ if (rktp->rktp_broker == rkb && !rktp->rktp_next_broker) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
+ "%.*s [%" PRId32
+ "]: not updating broker: "
+ "already on correct broker %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rkb ? rd_kafka_broker_name(rkb) : "(none)");
+
+ if (internal_fallback)
+ rd_kafka_broker_destroy(rkb);
+ return;
+ }
+
+ if (rktp->rktp_broker)
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
+ "%.*s [%" PRId32
+ "]: no longer delegated to "
+ "broker %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_broker_name(rktp->rktp_broker));
+
+
+ if (rkb) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
+ "%.*s [%" PRId32
+ "]: delegating to broker %s "
+ "for partition with %i messages "
+ "(%" PRIu64 " bytes) queued",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_broker_name(rkb),
+ rktp->rktp_msgq.rkmq_msg_cnt,
+ rktp->rktp_msgq.rkmq_msg_bytes);
+
+
+ } else {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BRKDELGT",
+ "%.*s [%" PRId32 "]: no broker delegated",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+ }
+
+ if (rktp->rktp_broker || rkb)
+ rd_kafka_toppar_broker_migrate(rktp, rktp->rktp_broker, rkb);
+
+ if (internal_fallback)
+ rd_kafka_broker_destroy(rkb);
+}
+
+
+
+void rd_kafka_toppar_offset_commit_result(
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets) {
+ if (err)
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq,
+ /* FIXME: propagate broker_id */
+ RD_KAFKA_NODEID_UA, err, 0 /* FIXME:VERSION*/, NULL, rktp,
+ RD_KAFKA_OFFSET_INVALID, "Offset commit failed: %s",
+ rd_kafka_err2str(err));
+
+ rd_kafka_toppar_lock(rktp);
+ if (!err)
+ rktp->rktp_committed_pos =
+ rd_kafka_topic_partition_get_fetch_pos(&offsets->elems[0]);
+
+ /* When stopping toppars:
+ * Final commit is now done (or failed), propagate. */
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING)
+ rd_kafka_toppar_fetch_stopped(rktp, err);
+
+ rd_kafka_toppar_unlock(rktp);
+}
+
+
+
+/**
+ * Handle the next offset to consume for a toppar.
+ * This is used during initial setup when trying to figure out what
+ * offset to start consuming from.
+ *
+ * Locality: toppar handler thread.
+ * Locks: toppar_lock(rktp) must be held
+ */
+void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t next_pos) {
+
+ if (RD_KAFKA_OFFSET_IS_LOGICAL(next_pos.offset)) {
+ /* Offset storage returned logical offset (e.g. "end"),
+ * look it up. */
+
+ /* Save next offset, even if logical, so that e.g.,
+ * assign(BEGINNING) survives a pause+resume, etc.
+ * See issue #2105. */
+ rd_kafka_toppar_set_next_fetch_position(rktp, next_pos);
+
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, next_pos,
+ RD_KAFKA_RESP_ERR_NO_ERROR, "update");
+ return;
+ }
+
+ /* Adjust by TAIL count if, if wanted */
+ if (rktp->rktp_query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
+ int64_t orig_offset = next_pos.offset;
+ int64_t tail_cnt = llabs(rktp->rktp_query_pos.offset -
+ RD_KAFKA_OFFSET_TAIL_BASE);
+
+ if (tail_cnt > next_pos.offset)
+ next_pos.offset = 0;
+ else
+ next_pos.offset -= tail_cnt;
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "OffsetReply for topic %s [%" PRId32
+ "]: "
+ "offset %" PRId64
+ ": adjusting for "
+ "OFFSET_TAIL(%" PRId64 "): effective %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, orig_offset, tail_cnt,
+ rd_kafka_fetch_pos2str(next_pos));
+ }
+
+ rd_kafka_toppar_set_next_fetch_position(rktp, next_pos);
+
+ rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_ACTIVE);
+
+ /* Wake-up broker thread which might be idling on IO */
+ if (rktp->rktp_broker)
+ rd_kafka_broker_wakeup(rktp->rktp_broker, "ready to fetch");
+}
+
+
+
+/**
+ * Fetch committed offset for a single partition. (simple consumer)
+ *
+ * Locality: toppar thread
+ */
+void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp,
+ rd_kafka_replyq_t replyq) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ rd_kafka_topic_partition_list_t *part;
+ rd_kafka_op_t *rko;
+
+ rd_kafka_dbg(rk, TOPIC, "OFFSETREQ",
+ "Partition %.*s [%" PRId32
+ "]: querying cgrp for "
+ "committed offset (opv %d)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, replyq.version);
+
+ part = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add0(__FUNCTION__, __LINE__, part,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, rktp, NULL);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH);
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ rko->rko_replyq = replyq;
+
+ rko->rko_u.offset_fetch.partitions = part;
+ rko->rko_u.offset_fetch.require_stable_offsets =
+ rk->rk_conf.isolation_level == RD_KAFKA_READ_COMMITTED;
+ rko->rko_u.offset_fetch.do_free = 1;
+
+ rd_kafka_q_enq(rktp->rktp_cgrp->rkcg_ops, rko);
+}
+
+
+
+/**
+ * Toppar based OffsetResponse handling.
+ * This is used for finding the next offset to Fetch.
+ *
+ * Locality: toppar handler thread
+ */
+static void rd_kafka_toppar_handle_Offset(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_toppar_t *rktp = opaque;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_topic_partition_t *rktpar;
+ int actions = 0;
+
+ rd_kafka_toppar_lock(rktp);
+ /* Drop reply from previous partition leader */
+ if (err != RD_KAFKA_RESP_ERR__DESTROY && rktp->rktp_broker != rkb)
+ err = RD_KAFKA_RESP_ERR__OUTDATED;
+ rd_kafka_toppar_unlock(rktp);
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+
+ rd_rkb_dbg(rkb, TOPIC, "OFFSET",
+ "Offset reply for "
+ "topic %.*s [%" PRId32 "] (v%d vs v%d)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, request->rkbuf_replyq.version,
+ rktp->rktp_op_version);
+
+ rd_dassert(request->rkbuf_replyq.version > 0);
+ if (err != RD_KAFKA_RESP_ERR__DESTROY &&
+ rd_kafka_buf_version_outdated(request, rktp->rktp_op_version)) {
+ /* Outdated request response, ignore. */
+ err = RD_KAFKA_RESP_ERR__OUTDATED;
+ }
+
+ /* Parse and return Offset */
+ if (err != RD_KAFKA_RESP_ERR__OUTDATED)
+ err = rd_kafka_handle_ListOffsets(rk, rkb, err, rkbuf, request,
+ offsets, &actions);
+
+ if (!err && !(rktpar = rd_kafka_topic_partition_list_find(
+ offsets, rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition))) {
+ /* Requested partition not found in response */
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ }
+
+ if (err) {
+ rd_rkb_dbg(rkb, TOPIC, "OFFSET",
+ "Offset reply error for "
+ "topic %.*s [%" PRId32 "] (v%d, %s): %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, request->rkbuf_replyq.version,
+ rd_kafka_err2str(err),
+ rd_kafka_actions2str(actions));
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY ||
+ err == RD_KAFKA_RESP_ERR__OUTDATED) {
+ /* Termination or outdated, quick cleanup. */
+
+ if (err == RD_KAFKA_RESP_ERR__OUTDATED) {
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_toppar_offset_retry(
+ rktp, 500, "outdated offset response");
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ /* from request.opaque */
+ rd_kafka_toppar_destroy(rktp);
+ return;
+
+ } else if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS)
+ return; /* Retry in progress */
+
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (!(actions & (RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_REFRESH))) {
+ /* Permanent error. Trigger auto.offset.reset policy
+ * and signal error back to application. */
+
+ rd_kafka_offset_reset(rktp, rkb->rkb_nodeid,
+ rktp->rktp_query_pos, err,
+ "failed to query logical offset");
+
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, rkb->rkb_nodeid, err, 0, NULL,
+ rktp,
+ (rktp->rktp_query_pos.offset <=
+ RD_KAFKA_OFFSET_TAIL_BASE
+ ? rktp->rktp_query_pos.offset -
+ RD_KAFKA_OFFSET_TAIL_BASE
+ : rktp->rktp_query_pos.offset),
+ "Failed to query logical offset %s: %s",
+ rd_kafka_offset2str(rktp->rktp_query_pos.offset),
+ rd_kafka_err2str(err));
+
+ } else {
+ /* Temporary error. Schedule retry. */
+ char tmp[256];
+
+ rd_snprintf(
+ tmp, sizeof(tmp),
+ "failed to query logical offset %s: %s",
+ rd_kafka_offset2str(rktp->rktp_query_pos.offset),
+ rd_kafka_err2str(err));
+
+ rd_kafka_toppar_offset_retry(rktp, 500, tmp);
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp); /* from request.opaque */
+ return;
+ }
+
+
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "Offset %s request for %.*s [%" PRId32
+ "] "
+ "returned offset %s (%" PRId64 ") leader epoch %" PRId32,
+ rd_kafka_offset2str(rktp->rktp_query_pos.offset),
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_offset2str(rktpar->offset),
+ rktpar->offset,
+ rd_kafka_topic_partition_get_leader_epoch(rktpar));
+
+
+ rd_kafka_toppar_next_offset_handle(
+ rktp, RD_KAFKA_FETCH_POS(
+ rktpar->offset,
+ rd_kafka_topic_partition_get_leader_epoch(rktpar)));
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ rd_kafka_toppar_destroy(rktp); /* from request.opaque */
+}
+
+
+/**
+ * @brief An Offset fetch failed (for whatever reason) in
+ * the RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT state:
+ * set the state back to FETCH_OFFSET_QUERY and start the
+ * offset_query_tmr to trigger a new request eventually.
+ *
+ * @locality toppar handler thread
+ * @locks toppar_lock() MUST be held
+ */
+static void rd_kafka_toppar_offset_retry(rd_kafka_toppar_t *rktp,
+ int backoff_ms,
+ const char *reason) {
+ rd_ts_t tmr_next;
+ int restart_tmr;
+
+ /* (Re)start timer if not started or the current timeout
+ * is larger than \p backoff_ms. */
+ tmr_next = rd_kafka_timer_next(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_query_tmr, 1);
+
+ restart_tmr =
+ (tmr_next == -1 || tmr_next > rd_clock() + (backoff_ms * 1000ll));
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%s [%" PRId32 "]: %s: %s for %s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ reason,
+ restart_tmr ? "(re)starting offset query timer"
+ : "offset query timer already scheduled",
+ rd_kafka_fetch_pos2str(rktp->rktp_query_pos));
+
+ rd_kafka_toppar_set_fetch_state(rktp,
+ RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY);
+
+ if (restart_tmr)
+ rd_kafka_timer_start(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_query_tmr,
+ backoff_ms * 1000ll,
+ rd_kafka_offset_query_tmr_cb, rktp);
+}
+
+
+
+/**
+ * Send OffsetRequest for toppar.
+ *
+ * If \p backoff_ms is non-zero only the query timer is started,
+ * otherwise a query is triggered directly.
+ *
+ * Locality: toppar handler thread
+ * Locks: toppar_lock() must be held
+ */
+void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t query_pos,
+ int backoff_ms) {
+ rd_kafka_broker_t *rkb;
+
+ rd_kafka_assert(NULL,
+ thrd_is_current(rktp->rktp_rkt->rkt_rk->rk_thread));
+
+ rkb = rktp->rktp_broker;
+
+ if (!backoff_ms && (!rkb || rkb->rkb_source == RD_KAFKA_INTERNAL))
+ backoff_ms = 500;
+
+ if (backoff_ms) {
+ rd_kafka_toppar_offset_retry(
+ rktp, backoff_ms,
+ !rkb ? "no current leader for partition" : "backoff");
+ return;
+ }
+
+
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_query_tmr, 1 /*lock*/);
+
+
+ if (query_pos.offset == RD_KAFKA_OFFSET_STORED &&
+ rktp->rktp_rkt->rkt_conf.offset_store_method ==
+ RD_KAFKA_OFFSET_METHOD_BROKER) {
+ /*
+ * Get stored offset from broker based storage:
+ * ask cgrp manager for offsets
+ */
+ rd_kafka_toppar_offset_fetch(
+ rktp,
+ RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version));
+
+ } else {
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_topic_partition_t *rktpar;
+
+ /*
+ * Look up logical offset (end,beginning,tail,..)
+ */
+
+ rd_rkb_dbg(rkb, TOPIC, "OFFREQ",
+ "Partition %.*s [%" PRId32
+ "]: querying for logical "
+ "offset %s (opv %d)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_offset2str(query_pos.offset),
+ rktp->rktp_op_version);
+
+ rd_kafka_toppar_keep(rktp); /* refcnt for OffsetRequest opaque*/
+
+ if (query_pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE)
+ query_pos.offset = RD_KAFKA_OFFSET_END;
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition);
+ rd_kafka_topic_partition_set_from_fetch_pos(rktpar, query_pos);
+ rd_kafka_topic_partition_set_current_leader_epoch(
+ rktpar, rktp->rktp_leader_epoch);
+
+ rd_kafka_ListOffsetsRequest(
+ rkb, offsets,
+ RD_KAFKA_REPLYQ(rktp->rktp_ops, rktp->rktp_op_version),
+ rd_kafka_toppar_handle_Offset, rktp);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+ }
+
+ rd_kafka_toppar_set_fetch_state(rktp,
+ RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT);
+}
+
+
+/**
+ * Start fetching toppar.
+ *
+ * Locality: toppar handler thread
+ * Locks: none
+ */
+static void rd_kafka_toppar_fetch_start(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_kafka_op_t *rko_orig) {
+ rd_kafka_cgrp_t *rkcg = rko_orig->rko_u.fetch_start.rkcg;
+ rd_kafka_resp_err_t err = 0;
+ int32_t version = rko_orig->rko_version;
+
+ rd_kafka_toppar_lock(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
+ "Start fetch for %.*s [%" PRId32
+ "] in "
+ "state %s at %s (v%" PRId32 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ rd_kafka_fetch_pos2str(pos), version);
+
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
+ err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
+ rd_kafka_toppar_unlock(rktp);
+ goto err_reply;
+ }
+
+ rd_kafka_toppar_op_version_bump(rktp, version);
+
+ if (rkcg) {
+ rd_kafka_assert(rktp->rktp_rkt->rkt_rk, !rktp->rktp_cgrp);
+ /* Attach toppar to cgrp */
+ rktp->rktp_cgrp = rkcg;
+ rd_kafka_cgrp_op(rkcg, rktp, RD_KAFKA_NO_REPLYQ,
+ RD_KAFKA_OP_PARTITION_JOIN, 0);
+ }
+
+
+ if (pos.offset == RD_KAFKA_OFFSET_BEGINNING ||
+ pos.offset == RD_KAFKA_OFFSET_END ||
+ pos.offset <= RD_KAFKA_OFFSET_TAIL_BASE) {
+ rd_kafka_toppar_next_offset_handle(rktp, pos);
+
+ } else if (pos.offset == RD_KAFKA_OFFSET_STORED) {
+ rd_kafka_offset_store_init(rktp);
+
+ } else if (pos.offset == RD_KAFKA_OFFSET_INVALID) {
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos,
+ RD_KAFKA_RESP_ERR__NO_OFFSET,
+ "no previously committed offset "
+ "available");
+
+ } else {
+ rd_kafka_toppar_set_next_fetch_position(rktp, pos);
+
+ rd_kafka_toppar_set_fetch_state(rktp,
+ RD_KAFKA_TOPPAR_FETCH_ACTIVE);
+
+ /* Wake-up broker thread which might be idling on IO */
+ if (rktp->rktp_broker)
+ rd_kafka_broker_wakeup(rktp->rktp_broker,
+ "fetch start");
+ }
+
+ rktp->rktp_offsets_fin.eof_offset = RD_KAFKA_OFFSET_INVALID;
+
+ rd_kafka_toppar_unlock(rktp);
+
+ /* Signal back to caller thread that start has commenced, or err */
+err_reply:
+ if (rko_orig->rko_replyq.q) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_FETCH_START);
+
+ rko->rko_err = err;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
+ }
+}
+
+
+
+/**
+ * Mark toppar's fetch state as stopped (all decommissioning is done,
+ * offsets are stored, etc).
+ *
+ * Locality: toppar handler thread
+ * Locks: toppar_lock(rktp) MUST be held
+ */
+void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err) {
+
+
+ rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPED);
+
+ rktp->rktp_app_pos.offset = RD_KAFKA_OFFSET_INVALID;
+ rktp->rktp_app_pos.leader_epoch = -1;
+
+ if (rktp->rktp_cgrp) {
+ /* Detach toppar from cgrp */
+ rd_kafka_cgrp_op(rktp->rktp_cgrp, rktp, RD_KAFKA_NO_REPLYQ,
+ RD_KAFKA_OP_PARTITION_LEAVE, 0);
+ rktp->rktp_cgrp = NULL;
+ }
+
+ /* Signal back to application thread that stop is done. */
+ if (rktp->rktp_replyq.q) {
+ rd_kafka_op_t *rko;
+ rko =
+ rd_kafka_op_new(RD_KAFKA_OP_FETCH_STOP | RD_KAFKA_OP_REPLY);
+ rko->rko_err = err;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_replyq_enq(&rktp->rktp_replyq, rko, 0);
+ }
+}
+
+
+/**
+ * Stop toppar fetcher.
+ * This is usually an async operation.
+ *
+ * Locality: toppar handler thread
+ */
+void rd_kafka_toppar_fetch_stop(rd_kafka_toppar_t *rktp,
+ rd_kafka_op_t *rko_orig) {
+ int32_t version = rko_orig->rko_version;
+
+ rd_kafka_toppar_lock(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
+ "Stopping fetch for %.*s [%" PRId32 "] in state %s (v%d)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state], version);
+
+ rd_kafka_toppar_op_version_bump(rktp, version);
+
+ /* Abort pending offset lookups. */
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_query_tmr, 1 /*lock*/);
+
+ /* Clear out the forwarding queue. */
+ rd_kafka_q_fwd_set(rktp->rktp_fetchq, NULL);
+
+ /* Assign the future replyq to propagate stop results. */
+ rd_kafka_assert(rktp->rktp_rkt->rkt_rk, rktp->rktp_replyq.q == NULL);
+ rktp->rktp_replyq = rko_orig->rko_replyq;
+ rd_kafka_replyq_clear(&rko_orig->rko_replyq);
+
+ rd_kafka_toppar_set_fetch_state(rktp, RD_KAFKA_TOPPAR_FETCH_STOPPING);
+
+ /* Stop offset store (possibly async).
+ * NOTE: will call .._stopped() if store finishes immediately,
+ * so no more operations after this call! */
+ rd_kafka_offset_store_stop(rktp);
+
+ rd_kafka_toppar_unlock(rktp);
+}
+
+
+/**
+ * Update a toppars offset.
+ * The toppar must have been previously FETCH_START:ed
+ *
+ * Locality: toppar handler thread
+ */
+void rd_kafka_toppar_seek(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_kafka_op_t *rko_orig) {
+ rd_kafka_resp_err_t err = 0;
+ int32_t version = rko_orig->rko_version;
+
+ rd_kafka_toppar_lock(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "FETCH",
+ "Seek %.*s [%" PRId32 "] to %s in state %s (v%" PRId32 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
+ rd_kafka_fetch_states[rktp->rktp_fetch_state], version);
+
+
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_STOPPING) {
+ err = RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
+ goto err_reply;
+ } else if (!RD_KAFKA_TOPPAR_FETCH_IS_STARTED(rktp->rktp_fetch_state)) {
+ err = RD_KAFKA_RESP_ERR__STATE;
+ goto err_reply;
+ } else if (pos.offset == RD_KAFKA_OFFSET_STORED) {
+ err = RD_KAFKA_RESP_ERR__INVALID_ARG;
+ goto err_reply;
+ }
+
+ rd_kafka_toppar_op_version_bump(rktp, version);
+
+ /* Reset app offsets since seek()ing is analogue to a (re)assign(),
+ * and we want to avoid using the current app offset on resume()
+ * following a seek (#3567). */
+ rktp->rktp_app_pos.offset = RD_KAFKA_OFFSET_INVALID;
+ rktp->rktp_app_pos.leader_epoch = -1;
+
+ /* Abort pending offset lookups. */
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_query_tmr, 1 /*lock*/);
+
+ if (pos.offset <= 0 || pos.validated) {
+ rd_kafka_toppar_next_offset_handle(rktp, pos);
+ } else {
+ rd_kafka_toppar_set_fetch_state(
+ rktp, RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT);
+ rd_kafka_toppar_set_next_fetch_position(rktp, pos);
+ rd_kafka_offset_validate(rktp, "seek");
+ }
+
+ /* Signal back to caller thread that seek has commenced, or err */
+err_reply:
+ rd_kafka_toppar_unlock(rktp);
+
+ if (rko_orig->rko_replyq.q) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_SEEK | RD_KAFKA_OP_REPLY);
+
+ rko->rko_err = err;
+ rko->rko_u.fetch_start.pos = rko_orig->rko_u.fetch_start.pos;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_replyq_enq(&rko_orig->rko_replyq, rko, 0);
+ }
+}
+
+
+/**
+ * @brief Pause/resume toppar.
+ *
+ * This is the internal handler of the pause/resume op.
+ *
+ * @locality toppar's handler thread
+ */
+static void rd_kafka_toppar_pause_resume(rd_kafka_toppar_t *rktp,
+ rd_kafka_op_t *rko_orig) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ int pause = rko_orig->rko_u.pause.pause;
+ int flag = rko_orig->rko_u.pause.flag;
+ int32_t version = rko_orig->rko_version;
+
+ rd_kafka_toppar_lock(rktp);
+
+ rd_kafka_toppar_op_version_bump(rktp, version);
+
+ if (!pause && (rktp->rktp_flags & flag) != flag) {
+ rd_kafka_dbg(rk, TOPIC, "RESUME",
+ "Not resuming %s [%" PRId32
+ "]: "
+ "partition is not paused by %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ (flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "application"
+ : "library"));
+ rd_kafka_toppar_unlock(rktp);
+ return;
+ }
+
+ if (pause) {
+ /* Pause partition by setting either
+ * RD_KAFKA_TOPPAR_F_APP_PAUSE or
+ * RD_KAFKA_TOPPAR_F_LIB_PAUSE */
+ rktp->rktp_flags |= flag;
+
+ if (rk->rk_type == RD_KAFKA_CONSUMER) {
+ /* Save offset of last consumed message+1 as the
+ * next message to fetch on resume. */
+ if (rktp->rktp_app_pos.offset !=
+ RD_KAFKA_OFFSET_INVALID)
+ rd_kafka_toppar_set_next_fetch_position(
+ rktp, rktp->rktp_app_pos);
+
+ rd_kafka_dbg(
+ rk, TOPIC, pause ? "PAUSE" : "RESUME",
+ "%s %s [%" PRId32 "]: at %s (state %s, v%d)",
+ pause ? "Pause" : "Resume",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ version);
+ } else {
+ rd_kafka_dbg(
+ rk, TOPIC, pause ? "PAUSE" : "RESUME",
+ "%s %s [%" PRId32 "] (state %s, v%d)",
+ pause ? "Pause" : "Resume",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ version);
+ }
+
+ } else {
+ /* Unset the RD_KAFKA_TOPPAR_F_APP_PAUSE or
+ * RD_KAFKA_TOPPAR_F_LIB_PAUSE flag */
+ rktp->rktp_flags &= ~flag;
+
+ if (rk->rk_type == RD_KAFKA_CONSUMER) {
+ rd_kafka_dbg(
+ rk, TOPIC, pause ? "PAUSE" : "RESUME",
+ "%s %s [%" PRId32 "]: at %s (state %s, v%d)",
+ rktp->rktp_fetch_state ==
+ RD_KAFKA_TOPPAR_FETCH_ACTIVE
+ ? "Resuming"
+ : "Not resuming stopped",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_fetch_pos2str(rktp->rktp_next_fetch_start),
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ version);
+
+ /* If the resuming offset is logical we
+ * need to trigger a seek (that performs the
+ * logical->absolute lookup logic) to get
+ * things going.
+ * Typical case is when a partition is paused
+ * before anything has been consumed by app
+ * yet thus having rktp_app_offset=INVALID. */
+ if (!RD_KAFKA_TOPPAR_IS_PAUSED(rktp) &&
+ (rktp->rktp_fetch_state ==
+ RD_KAFKA_TOPPAR_FETCH_ACTIVE ||
+ rktp->rktp_fetch_state ==
+ RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT) &&
+ rktp->rktp_next_fetch_start.offset ==
+ RD_KAFKA_OFFSET_INVALID)
+ rd_kafka_toppar_next_offset_handle(
+ rktp, rktp->rktp_next_fetch_start);
+
+ } else
+ rd_kafka_dbg(
+ rk, TOPIC, pause ? "PAUSE" : "RESUME",
+ "%s %s [%" PRId32 "] (state %s, v%d)",
+ pause ? "Pause" : "Resume",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ version);
+ }
+ rd_kafka_toppar_unlock(rktp);
+
+ if (pause && rk->rk_type == RD_KAFKA_CONSUMER) {
+ /* Flush partition's fetch queue */
+ rd_kafka_q_purge_toppar_version(rktp->rktp_fetchq, rktp,
+ rko_orig->rko_version);
+ }
+}
+
+
+
+/**
+ * @brief Serve a toppar in a consumer broker thread.
+ * This is considered the fast path and should be minimal,
+ * mostly focusing on fetch related mechanisms.
+ *
+ * @returns the partition's Fetch backoff timestamp, or 0 if no backoff.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp) {
+ return rd_kafka_toppar_fetch_decide(rktp, rkb, 0);
+}
+
+
+
+/**
+ * @brief Serve a toppar op
+ *
+ * @param rktp may be NULL for certain ops (OP_RECV_BUF)
+ *
+ * Will send an empty reply op if the request rko has a replyq set,
+ * providing synchronous operation.
+ *
+ * @locality toppar handler thread
+ */
+static rd_kafka_op_res_t rd_kafka_toppar_op_serve(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_cb_type_t cb_type,
+ void *opaque) {
+ rd_kafka_toppar_t *rktp = NULL;
+ int outdated = 0;
+
+ if (rko->rko_rktp)
+ rktp = rko->rko_rktp;
+
+ if (rktp) {
+ outdated =
+ rd_kafka_op_version_outdated(rko, rktp->rktp_op_version);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OP",
+ "%.*s [%" PRId32
+ "] received %sop %s "
+ "(v%" PRId32 ") in fetch-state %s (opv%d)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, outdated ? "outdated " : "",
+ rd_kafka_op2str(rko->rko_type), rko->rko_version,
+ rd_kafka_fetch_states[rktp->rktp_fetch_state],
+ rktp->rktp_op_version);
+
+ if (outdated) {
+#if ENABLE_DEVEL
+ rd_kafka_op_print(stdout, "PART_OUTDATED", rko);
+#endif
+ rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__OUTDATED);
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+ }
+
+ switch ((int)rko->rko_type) {
+ case RD_KAFKA_OP_FETCH_START:
+ rd_kafka_toppar_fetch_start(rktp, rko->rko_u.fetch_start.pos,
+ rko);
+ break;
+
+ case RD_KAFKA_OP_FETCH_STOP:
+ rd_kafka_toppar_fetch_stop(rktp, rko);
+ break;
+
+ case RD_KAFKA_OP_SEEK:
+ rd_kafka_toppar_seek(rktp, rko->rko_u.fetch_start.pos, rko);
+ break;
+
+ case RD_KAFKA_OP_PAUSE:
+ rd_kafka_toppar_pause_resume(rktp, rko);
+ break;
+
+ case RD_KAFKA_OP_OFFSET_COMMIT | RD_KAFKA_OP_REPLY:
+ rd_kafka_assert(NULL, rko->rko_u.offset_commit.cb);
+ rko->rko_u.offset_commit.cb(rk, rko->rko_err,
+ rko->rko_u.offset_commit.partitions,
+ rko->rko_u.offset_commit.opaque);
+ break;
+
+ case RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY: {
+ /* OffsetFetch reply */
+ rd_kafka_topic_partition_list_t *offsets =
+ rko->rko_u.offset_fetch.partitions;
+ rd_kafka_fetch_pos_t pos = {RD_KAFKA_OFFSET_INVALID, -1};
+
+ rktp = rd_kafka_topic_partition_get_toppar(
+ rk, &offsets->elems[0], rd_true /*create-on-miss*/);
+
+ if (!rko->rko_err) {
+ /* Request succeeded but per-partition might have failed
+ */
+ rko->rko_err = offsets->elems[0].err;
+ pos = rd_kafka_topic_partition_get_fetch_pos(
+ &offsets->elems[0]);
+ }
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+ rko->rko_u.offset_fetch.partitions = NULL;
+
+ rd_kafka_timer_stop(&rktp->rktp_rkt->rkt_rk->rk_timers,
+ &rktp->rktp_offset_query_tmr, 1 /*lock*/);
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (rko->rko_err) {
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "Failed to fetch offset for "
+ "%.*s [%" PRId32 "]: %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_err2str(rko->rko_err));
+
+ /* Keep on querying until we succeed. */
+ rd_kafka_toppar_offset_retry(rktp, 500,
+ "failed to fetch offsets");
+ rd_kafka_toppar_unlock(rktp);
+
+
+ /* Propagate error to application */
+ if (rko->rko_err != RD_KAFKA_RESP_ERR__WAIT_COORD &&
+ rko->rko_err !=
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
+ rd_kafka_consumer_err(
+ rktp->rktp_fetchq, RD_KAFKA_NODEID_UA,
+ rko->rko_err, 0, NULL, rktp,
+ RD_KAFKA_OFFSET_INVALID,
+ "Failed to fetch "
+ "offsets from brokers: %s",
+ rd_kafka_err2str(rko->rko_err));
+
+ /* Refcount from get_toppar() */
+ rd_kafka_toppar_destroy(rktp);
+
+ break;
+ }
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "OFFSET",
+ "%.*s [%" PRId32 "]: OffsetFetch returned %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_fetch_pos2str(pos));
+
+ if (pos.offset > 0)
+ rktp->rktp_committed_pos = pos;
+
+ if (pos.offset >= 0)
+ rd_kafka_toppar_next_offset_handle(rktp, pos);
+ else
+ rd_kafka_offset_reset(rktp, RD_KAFKA_NODEID_UA, pos,
+ RD_KAFKA_RESP_ERR__NO_OFFSET,
+ "no previously committed offset "
+ "available");
+ rd_kafka_toppar_unlock(rktp);
+
+ /* Refcount from get_toppar() */
+ rd_kafka_toppar_destroy(rktp);
+ } break;
+
+ default:
+ rd_kafka_assert(NULL, !*"unknown type");
+ break;
+ }
+
+ rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR_NO_ERROR);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+
+/**
+ * Send command op to toppar (handled by toppar's thread).
+ *
+ * Locality: any thread
+ */
+static void rd_kafka_toppar_op0(rd_kafka_toppar_t *rktp,
+ rd_kafka_op_t *rko,
+ rd_kafka_replyq_t replyq) {
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+ rko->rko_replyq = replyq;
+
+ rd_kafka_q_enq(rktp->rktp_ops, rko);
+}
+
+
+/**
+ * Send command op to toppar (handled by toppar's thread).
+ *
+ * Locality: any thread
+ */
+static void rd_kafka_toppar_op(rd_kafka_toppar_t *rktp,
+ rd_kafka_op_type_t type,
+ int32_t version,
+ rd_kafka_fetch_pos_t pos,
+ rd_kafka_cgrp_t *rkcg,
+ rd_kafka_replyq_t replyq) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new(type);
+ rko->rko_version = version;
+ if (type == RD_KAFKA_OP_FETCH_START || type == RD_KAFKA_OP_SEEK) {
+ if (rkcg)
+ rko->rko_u.fetch_start.rkcg = rkcg;
+ rko->rko_u.fetch_start.pos = pos;
+ }
+
+ rd_kafka_toppar_op0(rktp, rko, replyq);
+}
+
+
+
+/**
+ * Start consuming partition (async operation).
+ * 'offset' is the initial offset
+ * 'fwdq' is an optional queue to forward messages to, if this is NULL
+ * then messages will be enqueued on rktp_fetchq.
+ * 'replyq' is an optional queue for handling the consume_start ack.
+ *
+ * This is the thread-safe interface that can be called from any thread.
+ */
+rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_kafka_q_t *fwdq,
+ rd_kafka_replyq_t replyq) {
+ int32_t version;
+
+ rd_kafka_q_lock(rktp->rktp_fetchq);
+ if (fwdq && !(rktp->rktp_fetchq->rkq_flags & RD_KAFKA_Q_F_FWD_APP))
+ rd_kafka_q_fwd_set0(rktp->rktp_fetchq, fwdq, 0, /* no do_lock */
+ 0 /* no fwd_app */);
+ rd_kafka_q_unlock(rktp->rktp_fetchq);
+
+ /* Bump version barrier. */
+ version = rd_kafka_toppar_version_new_barrier(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
+ "Start consuming %.*s [%" PRId32 "] at %s (v%" PRId32 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
+ version);
+
+ rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_START, version, pos,
+ rktp->rktp_rkt->rkt_rk->rk_cgrp, replyq);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * Stop consuming partition (async operatoin)
+ * This is thread-safe interface that can be called from any thread.
+ *
+ * Locality: any thread
+ */
+rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp,
+ rd_kafka_replyq_t replyq) {
+ int32_t version;
+
+ /* Bump version barrier. */
+ version = rd_kafka_toppar_version_new_barrier(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
+ "Stop consuming %.*s [%" PRId32 "] (v%" PRId32 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, version);
+
+ rd_kafka_toppar_op(rktp, RD_KAFKA_OP_FETCH_STOP, version,
+ RD_KAFKA_FETCH_POS(-1, -1), NULL, replyq);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Set/Seek offset of a consumed partition (async operation).
+ *
+ * @param offset is the target offset.
+ * @param leader_epoch is the partition leader epoch, or -1.
+ * @param replyq is an optional queue for handling the ack.
+ *
+ * This is the thread-safe interface that can be called from any thread.
+ */
+rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_kafka_replyq_t replyq) {
+ int32_t version;
+
+ /* Bump version barrier. */
+ version = rd_kafka_toppar_version_new_barrier(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "CONSUMER",
+ "Seek %.*s [%" PRId32 "] to %s (v%" PRId32 ")",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_fetch_pos2str(pos),
+ version);
+
+ rd_kafka_toppar_op(rktp, RD_KAFKA_OP_SEEK, version, pos, NULL, replyq);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Pause/resume partition (async operation).
+ *
+ * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
+ * depending on if the app paused or librdkafka.
+ * @param pause is 1 for pausing or 0 for resuming.
+ *
+ * @locality any
+ */
+rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp,
+ int pause,
+ int flag,
+ rd_kafka_replyq_t replyq) {
+ int32_t version;
+ rd_kafka_op_t *rko;
+
+ /* Bump version barrier. */
+ version = rd_kafka_toppar_version_new_barrier(rktp);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, pause ? "PAUSE" : "RESUME",
+ "%s %.*s [%" PRId32 "] (v%" PRId32 ")",
+ pause ? "Pause" : "Resume",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, version);
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_PAUSE);
+ rko->rko_version = version;
+ rko->rko_u.pause.pause = pause;
+ rko->rko_u.pause.flag = flag;
+
+ rd_kafka_toppar_op0(rktp, rko, replyq);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Pause a toppar (asynchronous).
+ *
+ * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
+ * depending on if the app paused or librdkafka.
+ *
+ * @locality any
+ * @locks none needed
+ */
+void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag) {
+ rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag,
+ RD_KAFKA_NO_REPLYQ);
+}
+
+/**
+ * @brief Resume a toppar (asynchronous).
+ *
+ * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
+ * depending on if the app paused or librdkafka.
+ *
+ * @locality any
+ * @locks none needed
+ */
+void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag) {
+ rd_kafka_toppar_op_pause_resume(rktp, 1 /*pause*/, flag,
+ RD_KAFKA_NO_REPLYQ);
+}
+
+
+
+/**
+ * @brief Pause or resume a list of partitions.
+ *
+ * @param flag is either RD_KAFKA_TOPPAR_F_APP_PAUSE or .._F_LIB_PAUSE
+ * depending on if the app paused or librdkafka.
+ * @param pause true for pausing, false for resuming.
+ * @param async RD_SYNC to wait for background thread to handle op,
+ * RD_ASYNC for asynchronous operation.
+ *
+ * @locality any
+ *
+ * @remark This is an asynchronous call, the actual pause/resume is performed
+ * by toppar_pause() in the toppar's handler thread.
+ */
+rd_kafka_resp_err_t
+rd_kafka_toppars_pause_resume(rd_kafka_t *rk,
+ rd_bool_t pause,
+ rd_async_t async,
+ int flag,
+ rd_kafka_topic_partition_list_t *partitions) {
+ int i;
+ int waitcnt = 0;
+ rd_kafka_q_t *tmpq = NULL;
+
+ if (!async)
+ tmpq = rd_kafka_q_new(rk);
+
+ rd_kafka_dbg(
+ rk, TOPIC, pause ? "PAUSE" : "RESUME", "%s %s %d partition(s)",
+ flag & RD_KAFKA_TOPPAR_F_APP_PAUSE ? "Application" : "Library",
+ pause ? "pausing" : "resuming", partitions->cnt);
+
+ for (i = 0; i < partitions->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &partitions->elems[i];
+ rd_kafka_toppar_t *rktp;
+
+ rktp =
+ rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
+ if (!rktp) {
+ rd_kafka_dbg(rk, TOPIC, pause ? "PAUSE" : "RESUME",
+ "%s %s [%" PRId32
+ "]: skipped: "
+ "unknown partition",
+ pause ? "Pause" : "Resume", rktpar->topic,
+ rktpar->partition);
+
+ rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ continue;
+ }
+
+ rd_kafka_toppar_op_pause_resume(rktp, pause, flag,
+ RD_KAFKA_REPLYQ(tmpq, 0));
+
+ if (!async)
+ waitcnt++;
+
+ rd_kafka_toppar_destroy(rktp);
+
+ rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ if (!async) {
+ while (waitcnt-- > 0)
+ rd_kafka_q_wait_result(tmpq, RD_POLL_INFINITE);
+
+ rd_kafka_q_destroy_owner(tmpq);
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * Propagate error for toppar
+ */
+void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err,
+ const char *reason) {
+ rd_kafka_op_t *rko;
+ char buf[512];
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_ERR);
+ rko->rko_err = err;
+ rko->rko_rktp = rd_kafka_toppar_keep(rktp);
+
+ rd_snprintf(buf, sizeof(buf), "%.*s [%" PRId32 "]: %s (%s)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, reason, rd_kafka_err2str(err));
+
+ rko->rko_u.err.errstr = rd_strdup(buf);
+
+ rd_kafka_q_enq(rktp->rktp_fetchq, rko);
+}
+
+
+
+/**
+ * Returns the currently delegated broker for this toppar.
+ * If \p proper_broker is set NULL will be returned if current handler
+ * is not a proper broker (INTERNAL broker).
+ *
+ * The returned broker has an increased refcount.
+ *
+ * Locks: none
+ */
+rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp,
+ int proper_broker) {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_toppar_lock(rktp);
+ rkb = rktp->rktp_broker;
+ if (rkb) {
+ if (proper_broker && rkb->rkb_source == RD_KAFKA_INTERNAL)
+ rkb = NULL;
+ else
+ rd_kafka_broker_keep(rkb);
+ }
+ rd_kafka_toppar_unlock(rktp);
+
+ return rkb;
+}
+
+
+/**
+ * @brief Take action when partition broker becomes unavailable.
+ * This should be called when requests fail with
+ * NOT_LEADER_FOR.. or similar error codes, e.g. ProduceRequest.
+ *
+ * @locks none
+ * @locality any
+ */
+void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp,
+ const char *reason,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_topic_t *rkt = rktp->rktp_rkt;
+
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "BROKERUA",
+ "%s [%" PRId32 "]: broker unavailable: %s: %s",
+ rkt->rkt_topic->str, rktp->rktp_partition, reason,
+ rd_kafka_err2str(err));
+
+ rd_kafka_topic_wrlock(rkt);
+ rkt->rkt_flags |= RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
+ rd_kafka_topic_wrunlock(rkt);
+
+ rd_kafka_topic_fast_leader_query(rkt->rkt_rk);
+}
+
+
+const char *
+rd_kafka_topic_partition_topic(const rd_kafka_topic_partition_t *rktpar) {
+ const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
+ return rktp->rktp_rkt->rkt_topic->str;
+}
+
+int32_t
+rd_kafka_topic_partition_partition(const rd_kafka_topic_partition_t *rktpar) {
+ const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
+ return rktp->rktp_partition;
+}
+
+void rd_kafka_topic_partition_get(const rd_kafka_topic_partition_t *rktpar,
+ const char **name,
+ int32_t *partition) {
+ const rd_kafka_toppar_t *rktp = (const rd_kafka_toppar_t *)rktpar;
+ *name = rktp->rktp_rkt->rkt_topic->str;
+ *partition = rktp->rktp_partition;
+}
+
+
+
+/**
+ *
+ * rd_kafka_topic_partition_t lists
+ * Fixed-size non-growable list of partitions for propagation to application.
+ *
+ */
+
+
+static void
+rd_kafka_topic_partition_list_grow(rd_kafka_topic_partition_list_t *rktparlist,
+ int add_size) {
+ if (add_size < rktparlist->size)
+ add_size = RD_MAX(rktparlist->size, 32);
+
+ rktparlist->size += add_size;
+ rktparlist->elems = rd_realloc(
+ rktparlist->elems, sizeof(*rktparlist->elems) * rktparlist->size);
+}
+
+
+/**
+ * @brief Initialize a list for fitting \p size partitions.
+ */
+void rd_kafka_topic_partition_list_init(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int size) {
+ memset(rktparlist, 0, sizeof(*rktparlist));
+
+ if (size > 0)
+ rd_kafka_topic_partition_list_grow(rktparlist, size);
+}
+
+
+/**
+ * Create a list for fitting 'size' topic_partitions (rktp).
+ */
+rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_new(int size) {
+ rd_kafka_topic_partition_list_t *rktparlist;
+
+ rktparlist = rd_calloc(1, sizeof(*rktparlist));
+
+ if (size > 0)
+ rd_kafka_topic_partition_list_grow(rktparlist, size);
+
+ return rktparlist;
+}
+
+
+
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic,
+ int32_t partition) {
+ rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));
+
+ rktpar->topic = rd_strdup(topic);
+ rktpar->partition = partition;
+
+ return rktpar;
+}
+
+/**
+ * @brief Update \p dst with info from \p src.
+ */
+static void
+rd_kafka_topic_partition_update(rd_kafka_topic_partition_t *dst,
+ const rd_kafka_topic_partition_t *src) {
+ const rd_kafka_topic_partition_private_t *srcpriv;
+ rd_kafka_topic_partition_private_t *dstpriv;
+
+ rd_dassert(!strcmp(dst->topic, src->topic));
+ rd_dassert(dst->partition == src->partition);
+ rd_dassert(dst != src);
+
+ dst->offset = src->offset;
+ dst->opaque = src->opaque;
+ dst->err = src->err;
+
+ if (src->metadata_size > 0) {
+ dst->metadata = rd_malloc(src->metadata_size);
+ dst->metadata_size = src->metadata_size;
+ ;
+ memcpy(dst->metadata, src->metadata, dst->metadata_size);
+ }
+
+ if ((srcpriv = src->_private)) {
+ dstpriv = rd_kafka_topic_partition_get_private(dst);
+ if (srcpriv->rktp && !dstpriv->rktp)
+ dstpriv->rktp = rd_kafka_toppar_keep(srcpriv->rktp);
+
+ rd_assert(dstpriv->rktp == srcpriv->rktp);
+
+ dstpriv->leader_epoch = srcpriv->leader_epoch;
+
+ } else if ((dstpriv = dst->_private)) {
+ /* No private object in source, reset the leader epoch. */
+ dstpriv->leader_epoch = -1;
+ }
+}
+
+
+rd_kafka_topic_partition_t *
+rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src) {
+ rd_kafka_topic_partition_t *dst =
+ rd_kafka_topic_partition_new(src->topic, src->partition);
+
+ rd_kafka_topic_partition_update(dst, src);
+
+ return dst;
+}
+
+
+/** Same as above but with generic void* signature */
+void *rd_kafka_topic_partition_copy_void(const void *src) {
+ return rd_kafka_topic_partition_copy(src);
+}
+
+
+rd_kafka_topic_partition_t *
+rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp) {
+ rd_kafka_topic_partition_t *rktpar = rd_calloc(1, sizeof(*rktpar));
+
+ rktpar->topic = RD_KAFKAP_STR_DUP(rktp->rktp_rkt->rkt_topic);
+ rktpar->partition = rktp->rktp_partition;
+
+ return rktpar;
+}
+
+/**
+ * @brief Destroy a partition private glue object.
+ */
+static void rd_kafka_topic_partition_private_destroy(
+ rd_kafka_topic_partition_private_t *parpriv) {
+ if (parpriv->rktp)
+ rd_kafka_toppar_destroy(parpriv->rktp);
+ rd_free(parpriv);
+}
+
+static void
+rd_kafka_topic_partition_destroy0(rd_kafka_topic_partition_t *rktpar,
+ int do_free) {
+ if (rktpar->topic)
+ rd_free(rktpar->topic);
+ if (rktpar->metadata)
+ rd_free(rktpar->metadata);
+ if (rktpar->_private)
+ rd_kafka_topic_partition_private_destroy(
+ (rd_kafka_topic_partition_private_t *)rktpar->_private);
+
+ if (do_free)
+ rd_free(rktpar);
+}
+
+
+int32_t rd_kafka_topic_partition_get_leader_epoch(
+ const rd_kafka_topic_partition_t *rktpar) {
+ const rd_kafka_topic_partition_private_t *parpriv;
+
+ if (!(parpriv = rktpar->_private))
+ return -1;
+
+ return parpriv->leader_epoch;
+}
+
+void rd_kafka_topic_partition_set_leader_epoch(
+ rd_kafka_topic_partition_t *rktpar,
+ int32_t leader_epoch) {
+ rd_kafka_topic_partition_private_t *parpriv;
+
+ /* Avoid allocating private_t if clearing the epoch */
+ if (leader_epoch == -1 && !rktpar->_private)
+ return;
+
+ parpriv = rd_kafka_topic_partition_get_private(rktpar);
+
+ parpriv->leader_epoch = leader_epoch;
+}
+
+int32_t rd_kafka_topic_partition_get_current_leader_epoch(
+ const rd_kafka_topic_partition_t *rktpar) {
+ const rd_kafka_topic_partition_private_t *parpriv;
+
+ if (!(parpriv = rktpar->_private))
+ return -1;
+
+ return parpriv->current_leader_epoch;
+}
+
+void rd_kafka_topic_partition_set_current_leader_epoch(
+ rd_kafka_topic_partition_t *rktpar,
+ int32_t current_leader_epoch) {
+ rd_kafka_topic_partition_private_t *parpriv;
+
+ /* Avoid allocating private_t if clearing the epoch */
+ if (current_leader_epoch == -1 && !rktpar->_private)
+ return;
+
+ parpriv = rd_kafka_topic_partition_get_private(rktpar);
+
+ parpriv->current_leader_epoch = current_leader_epoch;
+}
+
+/**
+ * @brief Set offset and leader epoch from a fetchpos.
+ */
+void rd_kafka_topic_partition_set_from_fetch_pos(
+ rd_kafka_topic_partition_t *rktpar,
+ const rd_kafka_fetch_pos_t fetchpos) {
+ rktpar->offset = fetchpos.offset;
+ rd_kafka_topic_partition_set_leader_epoch(rktpar,
+ fetchpos.leader_epoch);
+}
+
+/**
+ * @brief Destroy all partitions in list.
+ *
+ * @remark The allocated size of the list will not shrink.
+ */
+void rd_kafka_topic_partition_list_clear(
+ rd_kafka_topic_partition_list_t *rktparlist) {
+ int i;
+
+ for (i = 0; i < rktparlist->cnt; i++)
+ rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0);
+
+ rktparlist->cnt = 0;
+}
+
+
+void rd_kafka_topic_partition_destroy_free(void *ptr) {
+ rd_kafka_topic_partition_destroy0(ptr, rd_true /*do_free*/);
+}
+
+void rd_kafka_topic_partition_destroy(rd_kafka_topic_partition_t *rktpar) {
+ rd_kafka_topic_partition_destroy0(rktpar, 1);
+}
+
+
+/**
+ * Destroys a list previously created with .._list_new() and drops
+ * any references to contained toppars.
+ */
+void rd_kafka_topic_partition_list_destroy(
+ rd_kafka_topic_partition_list_t *rktparlist) {
+ int i;
+
+ for (i = 0; i < rktparlist->cnt; i++)
+ rd_kafka_topic_partition_destroy0(&rktparlist->elems[i], 0);
+
+ if (rktparlist->elems)
+ rd_free(rktparlist->elems);
+
+ rd_free(rktparlist);
+}
+
+
+/**
+ * @brief Wrapper for rd_kafka_topic_partition_list_destroy() that
+ * matches the standard free(void *) signature, for callback use.
+ */
+void rd_kafka_topic_partition_list_destroy_free(void *ptr) {
+ rd_kafka_topic_partition_list_destroy(
+ (rd_kafka_topic_partition_list_t *)ptr);
+}
+
+
+/**
+ * @brief Add a partition to an rktpar list.
+ * The list must have enough room to fit it.
+ *
+ * @param rktp Optional partition object that will be stored on the
+ * ._private object (with refcount increased).
+ *
+ * @returns a pointer to the added element.
+ */
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0(
+ const char *func,
+ int line,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition,
+ rd_kafka_toppar_t *rktp,
+ const rd_kafka_topic_partition_private_t *parpriv) {
+ rd_kafka_topic_partition_t *rktpar;
+ if (rktparlist->cnt == rktparlist->size)
+ rd_kafka_topic_partition_list_grow(rktparlist, 1);
+ rd_kafka_assert(NULL, rktparlist->cnt < rktparlist->size);
+
+ rktpar = &rktparlist->elems[rktparlist->cnt++];
+ memset(rktpar, 0, sizeof(*rktpar));
+ rktpar->topic = rd_strdup(topic);
+ rktpar->partition = partition;
+ rktpar->offset = RD_KAFKA_OFFSET_INVALID;
+
+ if (parpriv) {
+ rd_kafka_topic_partition_private_t *parpriv_copy =
+ rd_kafka_topic_partition_get_private(rktpar);
+ if (parpriv->rktp) {
+ parpriv_copy->rktp =
+ rd_kafka_toppar_keep_fl(func, line, parpriv->rktp);
+ }
+ parpriv_copy->leader_epoch = parpriv->leader_epoch;
+ parpriv_copy->current_leader_epoch = parpriv->leader_epoch;
+ } else if (rktp) {
+ rd_kafka_topic_partition_private_t *parpriv_copy =
+ rd_kafka_topic_partition_get_private(rktpar);
+ parpriv_copy->rktp = rd_kafka_toppar_keep_fl(func, line, rktp);
+ }
+
+ return rktpar;
+}
+
+
+rd_kafka_topic_partition_t *
+rd_kafka_topic_partition_list_add(rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition) {
+ return rd_kafka_topic_partition_list_add0(
+ __FUNCTION__, __LINE__, rktparlist, topic, partition, NULL, NULL);
+}
+
+
+/**
+ * Adds a consecutive list of partitions to a list
+ */
+void rd_kafka_topic_partition_list_add_range(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t start,
+ int32_t stop) {
+
+ for (; start <= stop; start++)
+ rd_kafka_topic_partition_list_add(rktparlist, topic, start);
+}
+
+
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition) {
+ rd_kafka_topic_partition_t *rktpar;
+
+ if ((rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic,
+ partition)))
+ return rktpar;
+
+ return rd_kafka_topic_partition_list_add(rktparlist, topic, partition);
+}
+
+
+
+/**
+ * @brief Creates a copy of \p rktpar and adds it to \p rktparlist
+ */
+void rd_kafka_topic_partition_list_add_copy(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const rd_kafka_topic_partition_t *rktpar) {
+ rd_kafka_topic_partition_t *dst;
+
+ dst = rd_kafka_topic_partition_list_add0(
+ __FUNCTION__, __LINE__, rktparlist, rktpar->topic,
+ rktpar->partition, NULL, rktpar->_private);
+ rd_kafka_topic_partition_update(dst, rktpar);
+}
+
+
+
+/**
+ * Create and return a copy of list 'src'
+ */
+rd_kafka_topic_partition_list_t *
+rd_kafka_topic_partition_list_copy(const rd_kafka_topic_partition_list_t *src) {
+ rd_kafka_topic_partition_list_t *dst;
+ int i;
+
+ dst = rd_kafka_topic_partition_list_new(src->size);
+
+ for (i = 0; i < src->cnt; i++)
+ rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]);
+ return dst;
+}
+
+/**
+ * @brief Same as rd_kafka_topic_partition_list_copy() but suitable for
+ * rd_list_copy(). The \p opaque is ignored.
+ */
+void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque) {
+ return rd_kafka_topic_partition_list_copy(src);
+}
+
+/**
+ * @brief Append copies of all elements in \p src to \p dst.
+ * No duplicate-checks are performed.
+ */
+void rd_kafka_topic_partition_list_add_list(
+ rd_kafka_topic_partition_list_t *dst,
+ const rd_kafka_topic_partition_list_t *src) {
+ int i;
+
+ if (src->cnt == 0)
+ return;
+
+ if (dst->size < dst->cnt + src->cnt)
+ rd_kafka_topic_partition_list_grow(dst, src->cnt);
+
+ for (i = 0; i < src->cnt; i++)
+ rd_kafka_topic_partition_list_add_copy(dst, &src->elems[i]);
+}
+
+
+/**
+ * @brief Compare two partition lists using partition comparator \p cmp.
+ *
+ * @warning This is an O(Na*Nb) operation.
+ */
+int rd_kafka_topic_partition_list_cmp(const void *_a,
+ const void *_b,
+ int (*cmp)(const void *, const void *)) {
+ const rd_kafka_topic_partition_list_t *a = _a, *b = _b;
+ int r;
+ int i;
+
+ r = a->cnt - b->cnt;
+ if (r || a->cnt == 0)
+ return r;
+
+ /* Since the lists may not be sorted we need to scan all of B
+ * for each element in A.
+ * FIXME: If the list sizes are larger than X we could create a
+ * temporary hash map instead. */
+ for (i = 0; i < a->cnt; i++) {
+ int j;
+
+ for (j = 0; j < b->cnt; j++) {
+ r = cmp(&a->elems[i], &b->elems[j]);
+ if (!r)
+ break;
+ }
+
+ if (j == b->cnt)
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief Ensures the \p rktpar has a toppar set in _private.
+ *
+ * @returns the toppar object (or possibly NULL if \p create_on_miss is true)
+ * WITHOUT refcnt increased.
+ */
+rd_kafka_toppar_t *
+rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk,
+ rd_kafka_topic_partition_t *rktpar,
+ rd_bool_t create_on_miss) {
+ rd_kafka_topic_partition_private_t *parpriv;
+
+ parpriv = rd_kafka_topic_partition_get_private(rktpar);
+
+ if (!parpriv->rktp)
+ parpriv->rktp = rd_kafka_toppar_get2(
+ rk, rktpar->topic, rktpar->partition,
+ 0 /* not ua on miss */, create_on_miss);
+
+ return parpriv->rktp;
+}
+
+
+int rd_kafka_topic_partition_cmp(const void *_a, const void *_b) {
+ const rd_kafka_topic_partition_t *a = _a;
+ const rd_kafka_topic_partition_t *b = _b;
+ int r = strcmp(a->topic, b->topic);
+ if (r)
+ return r;
+ else
+ return RD_CMP(a->partition, b->partition);
+}
+
+/** @brief Compare only the topic */
+int rd_kafka_topic_partition_cmp_topic(const void *_a, const void *_b) {
+ const rd_kafka_topic_partition_t *a = _a;
+ const rd_kafka_topic_partition_t *b = _b;
+ return strcmp(a->topic, b->topic);
+}
+
+static int rd_kafka_topic_partition_cmp_opaque(const void *_a,
+ const void *_b,
+ void *opaque) {
+ return rd_kafka_topic_partition_cmp(_a, _b);
+}
+
+/** @returns a hash of the topic and partition */
+unsigned int rd_kafka_topic_partition_hash(const void *_a) {
+ const rd_kafka_topic_partition_t *a = _a;
+ int r = 31 * 17 + a->partition;
+ return 31 * r + rd_string_hash(a->topic, -1);
+}
+
+
+
+/**
+ * @brief Search 'rktparlist' for 'topic' and 'partition'.
+ * @returns the elems[] index or -1 on miss.
+ */
+static int rd_kafka_topic_partition_list_find0(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition,
+ int (*cmp)(const void *, const void *)) {
+ rd_kafka_topic_partition_t skel;
+ int i;
+
+ skel.topic = (char *)topic;
+ skel.partition = partition;
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ if (!cmp(&skel, &rktparlist->elems[i]))
+ return i;
+ }
+
+ return -1;
+}
+
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition) {
+ int i = rd_kafka_topic_partition_list_find0(
+ rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
+ if (i == -1)
+ return NULL;
+ else
+ return &rktparlist->elems[i];
+}
+
+
+int rd_kafka_topic_partition_list_find_idx(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition) {
+ return rd_kafka_topic_partition_list_find0(
+ rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
+}
+
+
+/**
+ * @returns the first element that matches \p topic, regardless of partition.
+ */
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic) {
+ int i = rd_kafka_topic_partition_list_find0(
+ rktparlist, topic, RD_KAFKA_PARTITION_UA,
+ rd_kafka_topic_partition_cmp_topic);
+ if (i == -1)
+ return NULL;
+ else
+ return &rktparlist->elems[i];
+}
+
+
+int rd_kafka_topic_partition_list_del_by_idx(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int idx) {
+ if (unlikely(idx < 0 || idx >= rktparlist->cnt))
+ return 0;
+
+ rd_kafka_topic_partition_destroy0(&rktparlist->elems[idx], 0);
+ memmove(&rktparlist->elems[idx], &rktparlist->elems[idx + 1],
+ (rktparlist->cnt - idx - 1) * sizeof(rktparlist->elems[idx]));
+ rktparlist->cnt--;
+
+ return 1;
+}
+
+
+int rd_kafka_topic_partition_list_del(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition) {
+ int i = rd_kafka_topic_partition_list_find0(
+ rktparlist, topic, partition, rd_kafka_topic_partition_cmp);
+ if (i == -1)
+ return 0;
+
+ return rd_kafka_topic_partition_list_del_by_idx(rktparlist, i);
+}
+
+
+
+/**
+ * Returns true if 'topic' matches the 'rktpar', else false.
+ * On match, if rktpar is a regex pattern then 'matched_by_regex' is set to 1.
+ */
+int rd_kafka_topic_partition_match(rd_kafka_t *rk,
+ const rd_kafka_group_member_t *rkgm,
+ const rd_kafka_topic_partition_t *rktpar,
+ const char *topic,
+ int *matched_by_regex) {
+ int ret = 0;
+
+ if (*rktpar->topic == '^') {
+ char errstr[128];
+
+ ret = rd_regex_match(rktpar->topic, topic, errstr,
+ sizeof(errstr));
+ if (ret == -1) {
+ rd_kafka_dbg(rk, CGRP, "SUBMATCH",
+ "Invalid regex for member "
+ "\"%.*s\" subscription \"%s\": %s",
+ RD_KAFKAP_STR_PR(rkgm->rkgm_member_id),
+ rktpar->topic, errstr);
+ return 0;
+ }
+
+ if (ret && matched_by_regex)
+ *matched_by_regex = 1;
+
+ } else if (!strcmp(rktpar->topic, topic)) {
+
+ if (matched_by_regex)
+ *matched_by_regex = 0;
+
+ ret = 1;
+ }
+
+ return ret;
+}
+
+
+
+void rd_kafka_topic_partition_list_sort(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int (*cmp)(const void *, const void *, void *),
+ void *opaque) {
+
+ if (!cmp)
+ cmp = rd_kafka_topic_partition_cmp_opaque;
+
+ rd_qsort_r(rktparlist->elems, rktparlist->cnt,
+ sizeof(*rktparlist->elems), cmp, opaque);
+}
+
+
+void rd_kafka_topic_partition_list_sort_by_topic(
+ rd_kafka_topic_partition_list_t *rktparlist) {
+ rd_kafka_topic_partition_list_sort(
+ rktparlist, rd_kafka_topic_partition_cmp_opaque, NULL);
+}
+
+rd_kafka_resp_err_t rd_kafka_topic_partition_list_set_offset(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition,
+ int64_t offset) {
+ rd_kafka_topic_partition_t *rktpar;
+
+ if (!(rktpar = rd_kafka_topic_partition_list_find(rktparlist, topic,
+ partition)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ rktpar->offset = offset;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Reset all offsets to the provided value.
+ */
+void rd_kafka_topic_partition_list_reset_offsets(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int64_t offset) {
+
+ int i;
+ for (i = 0; i < rktparlist->cnt; i++)
+ rktparlist->elems[i].offset = offset;
+}
+
+
+/**
+ * Set offset values in partition list based on toppar's last stored offset.
+ *
+ * from_rktp - true: set rktp's last stored offset, false: set def_value
+ * unless a concrete offset is set.
+ * is_commit: indicates that set offset is to be committed (for debug log)
+ *
+ * Returns the number of valid non-logical offsets (>=0).
+ */
+int rd_kafka_topic_partition_list_set_offsets(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int from_rktp,
+ int64_t def_value,
+ int is_commit) {
+ int i;
+ int valid_cnt = 0;
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
+ const char *verb = "setting";
+ char preamble[128];
+
+ *preamble = '\0'; /* Avoid warning */
+
+ if (from_rktp) {
+ rd_kafka_toppar_t *rktp =
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
+ rd_true);
+ rd_kafka_toppar_lock(rktp);
+
+ if (rk->rk_conf.debug &
+ (RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_TOPIC))
+ rd_snprintf(preamble, sizeof(preamble),
+ "stored %s, committed %s: ",
+ rd_kafka_fetch_pos2str(
+ rktp->rktp_stored_pos),
+ rd_kafka_fetch_pos2str(
+ rktp->rktp_committed_pos));
+
+ if (rd_kafka_fetch_pos_cmp(&rktp->rktp_stored_pos,
+ &rktp->rktp_committed_pos) >
+ 0) {
+ verb = "setting stored";
+ rd_kafka_topic_partition_set_from_fetch_pos(
+ rktpar, rktp->rktp_stored_pos);
+ } else {
+ rktpar->offset = RD_KAFKA_OFFSET_INVALID;
+ }
+ rd_kafka_toppar_unlock(rktp);
+ } else {
+ if (RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset)) {
+ verb = "setting default";
+ rktpar->offset = def_value;
+ rd_kafka_topic_partition_set_leader_epoch(
+ rktpar, -1);
+ } else
+ verb = "keeping";
+ }
+
+ if (is_commit && rktpar->offset == RD_KAFKA_OFFSET_INVALID)
+ rd_kafka_dbg(rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET",
+ "Topic %s [%" PRId32
+ "]: "
+ "%snot including in commit",
+ rktpar->topic, rktpar->partition,
+ preamble);
+ else
+ rd_kafka_dbg(
+ rk, CGRP | RD_KAFKA_DBG_TOPIC, "OFFSET",
+ "Topic %s [%" PRId32
+ "]: "
+ "%s%s offset %s (leader epoch %" PRId32 ") %s",
+ rktpar->topic, rktpar->partition, preamble, verb,
+ rd_kafka_offset2str(rktpar->offset),
+ rd_kafka_topic_partition_get_leader_epoch(rktpar),
+ is_commit ? " for commit" : "");
+
+ if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktpar->offset))
+ valid_cnt++;
+ }
+
+ return valid_cnt;
+}
+
+
+/**
+ * @returns the number of partitions with absolute (non-logical) offsets set.
+ */
+int rd_kafka_topic_partition_list_count_abs_offsets(
+ const rd_kafka_topic_partition_list_t *rktparlist) {
+ int i;
+ int valid_cnt = 0;
+
+ for (i = 0; i < rktparlist->cnt; i++)
+ if (!RD_KAFKA_OFFSET_IS_LOGICAL(rktparlist->elems[i].offset))
+ valid_cnt++;
+
+ return valid_cnt;
+}
+
+
+/**
+ * @brief Update _private (toppar) field to point to valid rktp
+ * for each parition.
+ *
+ * @param create_on_miss Create partition (and topic_t object) if necessary.
+ */
+void rd_kafka_topic_partition_list_update_toppars(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_bool_t create_on_miss) {
+ int i;
+ for (i = 0; i < rktparlist->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
+
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar,
+ create_on_miss);
+ }
+}
+
+
+/**
+ * @brief Populate \p leaders with the leaders+partitions for the partitions in
+ * \p rktparlist. Duplicates are suppressed.
+ *
+ * If no leader is found for a partition that element's \c .err will
+ * be set to RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE.
+ *
+ * If the partition does not exist \c .err will be set to
+ * RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION.
+ *
+ * @param rktparlist The partitions to look up leaders for, the .err field
+ * will be set according to outcome, e.g., ERR_NO_ERROR,
+ * ERR_UNKNOWN_TOPIC_OR_PART, etc.
+ * @param leaders rd_list_t of allocated (struct rd_kafka_partition_leader *)
+ * @param query_topics (optional) rd_list of strdupped (char *)
+ * @param query_unknown Add unknown topics to \p query_topics.
+ * @param eonce (optional) For triggering asynchronously on cache change
+ * in case not all leaders are known now.
+ *
+ * @remark This is based on the current topic_t and partition state
+ * which may lag behind the last metadata update due to internal
+ * threading and also the fact that no topic_t may have been created.
+ *
+ * @param leaders rd_list_t of type (struct rd_kafka_partition_leader *)
+ *
+ * @returns true if all partitions have leaders, else false.
+ *
+ * @sa rd_kafka_topic_partition_list_get_leaders_by_metadata
+ *
+ * @locks rd_kafka_*lock() MUST NOT be held
+ */
+static rd_bool_t rd_kafka_topic_partition_list_get_leaders(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *leaders,
+ rd_list_t *query_topics,
+ rd_bool_t query_unknown,
+ rd_kafka_enq_once_t *eonce) {
+ rd_bool_t complete;
+ int cnt = 0;
+ int i;
+
+ if (eonce)
+ rd_kafka_wrlock(rk);
+ else
+ rd_kafka_rdlock(rk);
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
+ rd_kafka_topic_partition_t *rktpar2;
+ rd_kafka_broker_t *rkb = NULL;
+ struct rd_kafka_partition_leader leader_skel;
+ struct rd_kafka_partition_leader *leader;
+ const rd_kafka_metadata_topic_t *mtopic;
+ const rd_kafka_metadata_partition_t *mpart;
+ rd_bool_t topic_wait_cache;
+
+ rd_kafka_metadata_cache_topic_partition_get(
+ rk, &mtopic, &mpart, rktpar->topic, rktpar->partition,
+ 0 /*negative entries too*/);
+
+ topic_wait_cache =
+ !mtopic ||
+ RD_KAFKA_METADATA_CACHE_ERR_IS_TEMPORARY(mtopic->err);
+
+ if (!topic_wait_cache && mtopic &&
+ mtopic->err != RD_KAFKA_RESP_ERR_NO_ERROR &&
+ mtopic->err != RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE) {
+ /* Topic permanently errored */
+ rktpar->err = mtopic->err;
+ continue;
+ }
+
+ if (mtopic && !mpart && mtopic->partition_cnt > 0) {
+ /* Topic exists but partition doesnt.
+ * This is a permanent error. */
+ rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ continue;
+ }
+
+ if (mpart &&
+ (mpart->leader == -1 ||
+ !(rkb = rd_kafka_broker_find_by_nodeid0(
+ rk, mpart->leader, -1 /*any state*/, rd_false)))) {
+ /* Partition has no (valid) leader.
+ * This is a permanent error. */
+ rktpar->err =
+ mtopic->err
+ ? mtopic->err
+ : RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE;
+ continue;
+ }
+
+ if (topic_wait_cache || !rkb) {
+ /* Topic unknown or no current leader for partition,
+ * add topic to query list. */
+ rktpar->err = RD_KAFKA_RESP_ERR__IN_PROGRESS;
+ if (query_topics &&
+ !rd_list_find(query_topics, rktpar->topic,
+ (void *)strcmp))
+ rd_list_add(query_topics,
+ rd_strdup(rktpar->topic));
+ continue;
+ }
+
+ /* Leader exists, add to leader list. */
+
+ rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ memset(&leader_skel, 0, sizeof(leader_skel));
+ leader_skel.rkb = rkb;
+
+ leader = rd_list_find(leaders, &leader_skel,
+ rd_kafka_partition_leader_cmp);
+
+ if (!leader) {
+ leader = rd_kafka_partition_leader_new(rkb);
+ rd_list_add(leaders, leader);
+ }
+
+ rktpar2 = rd_kafka_topic_partition_list_find(
+ leader->partitions, rktpar->topic, rktpar->partition);
+ if (rktpar2) {
+ /* Already exists in partitions list, just update. */
+ rd_kafka_topic_partition_update(rktpar2, rktpar);
+ } else {
+ /* Make a copy of rktpar and add to partitions list */
+ rd_kafka_topic_partition_list_add_copy(
+ leader->partitions, rktpar);
+ }
+
+ rktpar->err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rd_kafka_broker_destroy(rkb); /* loose refcount */
+ cnt++;
+ }
+
+ complete = cnt == rktparlist->cnt;
+
+ if (!complete && eonce)
+ /* Add eonce to cache observers */
+ rd_kafka_metadata_cache_wait_state_change_async(rk, eonce);
+
+ if (eonce)
+ rd_kafka_wrunlock(rk);
+ else
+ rd_kafka_rdunlock(rk);
+
+ return complete;
+}
+
+
+/**
+ * @brief Timer timeout callback for query_leaders_async rko's eonce object.
+ */
+static void
+rd_kafka_partition_leader_query_eonce_timeout_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_enq_once_t *eonce = arg;
+ rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "timeout timer");
+}
+
+
+/**
+ * @brief Query timer callback for query_leaders_async rko's eonce object.
+ */
+static void
+rd_kafka_partition_leader_query_eonce_timer_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_enq_once_t *eonce = arg;
+ rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR_NO_ERROR,
+ "query timer");
+}
+
+
+/**
+ * @brief Query metadata cache for partition leaders, or trigger metadata
+ * refresh if leaders not known.
+ *
+ * @locks_required none
+ * @locality any
+ */
+static rd_kafka_op_res_t
+rd_kafka_topic_partition_list_query_leaders_async_worker(rd_kafka_op_t *rko) {
+ rd_kafka_t *rk = rko->rko_rk;
+ rd_list_t query_topics, *leaders = NULL;
+ rd_kafka_op_t *reply;
+
+ RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_LEADERS);
+
+ if (rko->rko_err)
+ goto reply; /* Timeout or ERR__DESTROY */
+
+ /* Since we're iterating over get_leaders() until all partition leaders
+ * are known we need to re-enable the eonce to be triggered again (which
+ * is not necessary the first time we get here, but there
+ * is no harm doing it then either). */
+ rd_kafka_enq_once_reenable(rko->rko_u.leaders.eonce, rko,
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0));
+
+ /* Look up the leaders in the metadata cache, if not all leaders
+ * are known the eonce is registered for metadata cache changes
+ * which will cause our function to be called
+ * again on (any) metadata cache change.
+ *
+ * When we are called again we perform the cache lookup again and
+ * hopefully get all leaders, otherwise defer a new async wait.
+ * Repeat until success or timeout. */
+
+ rd_list_init(&query_topics, 4 + rko->rko_u.leaders.partitions->cnt / 2,
+ rd_free);
+
+ leaders = rd_list_new(1 + rko->rko_u.leaders.partitions->cnt / 2,
+ rd_kafka_partition_leader_destroy_free);
+
+ if (rd_kafka_topic_partition_list_get_leaders(
+ rk, rko->rko_u.leaders.partitions, leaders, &query_topics,
+ /* Add unknown topics to query_topics only on the
+ * first query, after that we consider them permanently
+ * non-existent */
+ rko->rko_u.leaders.query_cnt == 0, rko->rko_u.leaders.eonce)) {
+ /* All leaders now known (or failed), reply to caller */
+ rd_list_destroy(&query_topics);
+ goto reply;
+ }
+
+ if (rd_list_empty(&query_topics)) {
+ /* Not all leaders known but no topics left to query,
+ * reply to caller. */
+ rd_list_destroy(&query_topics);
+ goto reply;
+ }
+
+ /* Need to refresh topic metadata, but at most every interval. */
+ if (!rd_kafka_timer_is_started(&rk->rk_timers,
+ &rko->rko_u.leaders.query_tmr)) {
+
+ rko->rko_u.leaders.query_cnt++;
+
+ /* Add query interval timer. */
+ rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce,
+ "query timer");
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rko->rko_u.leaders.query_tmr, rd_true,
+ 3 * 1000 * 1000 /* 3s */,
+ rd_kafka_partition_leader_query_eonce_timer_cb,
+ rko->rko_u.leaders.eonce);
+
+ /* Request metadata refresh */
+ rd_kafka_metadata_refresh_topics(
+ rk, NULL, &query_topics, rd_true /*force*/,
+ rd_false /*!allow_auto_create*/, rd_false /*!cgrp_update*/,
+ "query partition leaders");
+ }
+
+ rd_list_destroy(leaders);
+ rd_list_destroy(&query_topics);
+
+ /* Wait for next eonce trigger */
+ return RD_KAFKA_OP_RES_KEEP; /* rko is still used */
+
+reply:
+ /* Decommission worker state and reply to caller */
+
+ if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.query_tmr,
+ RD_DO_LOCK))
+ rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce,
+ "query timer");
+ if (rd_kafka_timer_stop(&rk->rk_timers, &rko->rko_u.leaders.timeout_tmr,
+ RD_DO_LOCK))
+ rd_kafka_enq_once_del_source(rko->rko_u.leaders.eonce,
+ "timeout timer");
+
+ if (rko->rko_u.leaders.eonce) {
+ rd_kafka_enq_once_disable(rko->rko_u.leaders.eonce);
+ rko->rko_u.leaders.eonce = NULL;
+ }
+
+ /* No leaders found, set a request-level error */
+ if (leaders && rd_list_cnt(leaders) == 0) {
+ if (!rko->rko_err)
+ rko->rko_err = RD_KAFKA_RESP_ERR__NOENT;
+ rd_list_destroy(leaders);
+ leaders = NULL;
+ }
+
+ /* Create and enqueue reply rko */
+ if (rko->rko_u.leaders.replyq.q) {
+ reply = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_LEADERS,
+ rko->rko_u.leaders.cb);
+ rd_kafka_op_get_reply_version(reply, rko);
+ reply->rko_err = rko->rko_err;
+ reply->rko_u.leaders.partitions =
+ rko->rko_u.leaders.partitions; /* Transfer ownership for
+ * partition list that
+ * now contains
+ * per-partition errors*/
+ rko->rko_u.leaders.partitions = NULL;
+ reply->rko_u.leaders.leaders = leaders; /* Possibly NULL */
+ reply->rko_u.leaders.opaque = rko->rko_u.leaders.opaque;
+
+ rd_kafka_replyq_enq(&rko->rko_u.leaders.replyq, reply, 0);
+ }
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+static rd_kafka_op_res_t
+rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb(
+ rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ return rd_kafka_topic_partition_list_query_leaders_async_worker(rko);
+}
+
+/**
+ * @brief Async variant of rd_kafka_topic_partition_list_query_leaders().
+ *
+ * The reply rko op will contain:
+ * - .leaders which is a list of leaders and their partitions, this may be
+ * NULL for overall errors (such as no leaders are found), or a
+ * partial or complete list of leaders.
+ * - .partitions which is a copy of the input list of partitions with the
+ * .err field set to the outcome of the leader query, typically ERR_NO_ERROR
+ * or ERR_UNKNOWN_TOPIC_OR_PART.
+ *
+ * @locks_acquired rd_kafka_*lock()
+ *
+ * @remark rd_kafka_*lock() MUST NOT be held
+ */
+void rd_kafka_topic_partition_list_query_leaders_async(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ int timeout_ms,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_op_cb_t *cb,
+ void *opaque) {
+ rd_kafka_op_t *rko;
+
+ rd_assert(rktparlist && rktparlist->cnt > 0);
+ rd_assert(replyq.q);
+
+ rko = rd_kafka_op_new_cb(
+ rk, RD_KAFKA_OP_LEADERS,
+ rd_kafka_topic_partition_list_query_leaders_async_worker_op_cb);
+ rko->rko_u.leaders.replyq = replyq;
+ rko->rko_u.leaders.partitions =
+ rd_kafka_topic_partition_list_copy(rktparlist);
+ rko->rko_u.leaders.ts_timeout = rd_timeout_init(timeout_ms);
+ rko->rko_u.leaders.cb = cb;
+ rko->rko_u.leaders.opaque = opaque;
+
+ /* Create an eonce to be triggered either by metadata cache update
+ * (from refresh_topics()), query interval, or timeout. */
+ rko->rko_u.leaders.eonce =
+ rd_kafka_enq_once_new(rko, RD_KAFKA_REPLYQ(rk->rk_ops, 0));
+
+ rd_kafka_enq_once_add_source(rko->rko_u.leaders.eonce, "timeout timer");
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rko->rko_u.leaders.timeout_tmr, rd_true,
+ rd_timeout_remains_us(rko->rko_u.leaders.ts_timeout),
+ rd_kafka_partition_leader_query_eonce_timeout_cb,
+ rko->rko_u.leaders.eonce);
+
+ if (rd_kafka_topic_partition_list_query_leaders_async_worker(rko) ==
+ RD_KAFKA_OP_RES_HANDLED)
+ rd_kafka_op_destroy(rko); /* Reply queue already disabled */
+}
+
+
+/**
+ * @brief Get leaders for all partitions in \p rktparlist, querying metadata
+ * if needed.
+ *
+ * @param leaders is a pre-initialized (empty) list which will be populated
+ * with the leader brokers and their partitions
+ * (struct rd_kafka_partition_leader *)
+ *
+ * @remark Will not trigger topic auto creation (unless configured).
+ *
+ * @returns an error code on error.
+ *
+ * @locks rd_kafka_*lock() MUST NOT be held
+ */
+rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *leaders,
+ int timeout_ms) {
+ rd_ts_t ts_end = rd_timeout_init(timeout_ms);
+ rd_ts_t ts_query = 0;
+ rd_ts_t now;
+ int query_cnt = 0;
+ int i = 0;
+
+ /* Get all the partition leaders, try multiple times:
+ * if there are no leaders after the first run fire off a leader
+ * query and wait for broker state update before trying again,
+ * keep trying and re-querying at increasing intervals until
+ * success or timeout. */
+ do {
+ rd_list_t query_topics;
+ int query_intvl;
+
+ rd_list_init(&query_topics, rktparlist->cnt, rd_free);
+
+ rd_kafka_topic_partition_list_get_leaders(
+ rk, rktparlist, leaders, &query_topics,
+ /* Add unknown topics to query_topics only on the
+ * first query, after that we consider them
+ * permanently non-existent */
+ query_cnt == 0, NULL);
+
+ if (rd_list_empty(&query_topics)) {
+ /* No remaining topics to query: leader-list complete.*/
+ rd_list_destroy(&query_topics);
+
+ /* No leader(s) for partitions means all partitions
+ * are unknown. */
+ if (rd_list_empty(leaders))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ now = rd_clock();
+
+ /*
+ * Missing leader for some partitions
+ */
+ query_intvl = (i + 1) * 100; /* add 100ms per iteration */
+ if (query_intvl > 2 * 1000)
+ query_intvl = 2 * 1000; /* Cap to 2s */
+
+ if (now >= ts_query + (query_intvl * 1000)) {
+ /* Query metadata for missing leaders,
+ * possibly creating the topic. */
+ rd_kafka_metadata_refresh_topics(
+ rk, NULL, &query_topics, rd_true /*force*/,
+ rd_false /*!allow_auto_create*/,
+ rd_false /*!cgrp_update*/,
+ "query partition leaders");
+ ts_query = now;
+ query_cnt++;
+
+ } else {
+ /* Wait for broker ids to be updated from
+ * metadata refresh above. */
+ int wait_ms =
+ rd_timeout_remains_limit(ts_end, query_intvl);
+ rd_kafka_metadata_cache_wait_change(rk, wait_ms);
+ }
+
+ rd_list_destroy(&query_topics);
+
+ i++;
+ } while (ts_end == RD_POLL_INFINITE ||
+ now < ts_end); /* now is deliberately outdated here
+ * since wait_change() will block.
+ * This gives us one more chance to spin thru*/
+
+ if (rd_atomic32_get(&rk->rk_broker_up_cnt) == 0)
+ return RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN;
+
+ return RD_KAFKA_RESP_ERR__TIMED_OUT;
+}
+
+
+/**
+ * @brief Populate \p rkts with the rd_kafka_topic_t objects for the
+ * partitions in. Duplicates are suppressed.
+ *
+ * @returns the number of topics added.
+ */
+int rd_kafka_topic_partition_list_get_topics(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *rkts) {
+ int cnt = 0;
+
+ int i;
+ for (i = 0; i < rktparlist->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &rktparlist->elems[i];
+ rd_kafka_toppar_t *rktp;
+
+ rktp =
+ rd_kafka_topic_partition_get_toppar(rk, rktpar, rd_false);
+ if (!rktp) {
+ rktpar->err = RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION;
+ continue;
+ }
+
+ if (!rd_list_find(rkts, rktp->rktp_rkt,
+ rd_kafka_topic_cmp_rkt)) {
+ rd_list_add(rkts, rd_kafka_topic_keep(rktp->rktp_rkt));
+ cnt++;
+ }
+
+ rd_kafka_toppar_destroy(rktp);
+ }
+
+ return cnt;
+}
+
+
+/**
+ * @brief Populate \p topics with the strdupped topic names in \p rktparlist.
+ * Duplicates are suppressed.
+ *
+ * @param include_regex: include regex topics
+ *
+ * @returns the number of topics added.
+ */
+int rd_kafka_topic_partition_list_get_topic_names(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *topics,
+ int include_regex) {
+ int cnt = 0;
+ int i;
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rktparlist->elems[i];
+
+ if (!include_regex && *rktpar->topic == '^')
+ continue;
+
+ if (!rd_list_find(topics, rktpar->topic, (void *)strcmp)) {
+ rd_list_add(topics, rd_strdup(rktpar->topic));
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+
+/**
+ * @brief Create a copy of \p rktparlist only containing the partitions
+ * matched by \p match function.
+ *
+ * \p match shall return 1 for match, else 0.
+ *
+ * @returns a new list
+ */
+rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ int (*match)(const void *elem, const void *opaque),
+ void *opaque) {
+ rd_kafka_topic_partition_list_t *newlist;
+ int i;
+
+ newlist = rd_kafka_topic_partition_list_new(0);
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rktparlist->elems[i];
+
+ if (!match(rktpar, opaque))
+ continue;
+
+ rd_kafka_topic_partition_list_add_copy(newlist, rktpar);
+ }
+
+ return newlist;
+}
+
+void rd_kafka_topic_partition_list_log(
+ rd_kafka_t *rk,
+ const char *fac,
+ int dbg,
+ const rd_kafka_topic_partition_list_t *rktparlist) {
+ int i;
+
+ rd_kafka_dbg(rk, NONE | dbg, fac,
+ "List with %d partition(s):", rktparlist->cnt);
+ for (i = 0; i < rktparlist->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rktparlist->elems[i];
+ rd_kafka_dbg(rk, NONE | dbg, fac,
+ " %s [%" PRId32 "] offset %s%s%s", rktpar->topic,
+ rktpar->partition,
+ rd_kafka_offset2str(rktpar->offset),
+ rktpar->err ? ": error: " : "",
+ rktpar->err ? rd_kafka_err2str(rktpar->err) : "");
+ }
+}
+
+/**
+ * @returns a comma-separated list of partitions.
+ */
+const char *rd_kafka_topic_partition_list_str(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ char *dest,
+ size_t dest_size,
+ int fmt_flags) {
+ int i;
+ size_t of = 0;
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rktparlist->elems[i];
+ char errstr[128];
+ char offsetstr[32];
+ int r;
+
+ if (!rktpar->err && (fmt_flags & RD_KAFKA_FMT_F_ONLY_ERR))
+ continue;
+
+ if (rktpar->err && !(fmt_flags & RD_KAFKA_FMT_F_NO_ERR))
+ rd_snprintf(errstr, sizeof(errstr), "(%s)",
+ rd_kafka_err2str(rktpar->err));
+ else
+ errstr[0] = '\0';
+
+ if (rktpar->offset != RD_KAFKA_OFFSET_INVALID)
+ rd_snprintf(offsetstr, sizeof(offsetstr), "@%" PRId64,
+ rktpar->offset);
+ else
+ offsetstr[0] = '\0';
+
+ r = rd_snprintf(&dest[of], dest_size - of,
+ "%s"
+ "%s[%" PRId32
+ "]"
+ "%s"
+ "%s",
+ of == 0 ? "" : ", ", rktpar->topic,
+ rktpar->partition, offsetstr, errstr);
+
+ if ((size_t)r >= dest_size - of) {
+ rd_snprintf(&dest[dest_size - 4], 4, "...");
+ break;
+ }
+
+ of += r;
+ }
+
+ return dest;
+}
+
+
+
+/**
+ * @brief Update \p dst with info from \p src.
+ *
+ * Fields updated:
+ * - metadata
+ * - metadata_size
+ * - offset
+ * - offset leader epoch
+ * - err
+ *
+ * Will only update partitions that are in both dst and src, other partitions
+ * will remain unchanged.
+ */
+void rd_kafka_topic_partition_list_update(
+ rd_kafka_topic_partition_list_t *dst,
+ const rd_kafka_topic_partition_list_t *src) {
+ int i;
+
+ for (i = 0; i < dst->cnt; i++) {
+ rd_kafka_topic_partition_t *d = &dst->elems[i];
+ rd_kafka_topic_partition_t *s;
+ rd_kafka_topic_partition_private_t *s_priv, *d_priv;
+
+ if (!(s = rd_kafka_topic_partition_list_find(
+ (rd_kafka_topic_partition_list_t *)src, d->topic,
+ d->partition)))
+ continue;
+
+ d->offset = s->offset;
+ d->err = s->err;
+ if (d->metadata) {
+ rd_free(d->metadata);
+ d->metadata = NULL;
+ d->metadata_size = 0;
+ }
+ if (s->metadata_size > 0) {
+ d->metadata = rd_malloc(s->metadata_size);
+ d->metadata_size = s->metadata_size;
+ memcpy((void *)d->metadata, s->metadata,
+ s->metadata_size);
+ }
+
+ s_priv = rd_kafka_topic_partition_get_private(s);
+ d_priv = rd_kafka_topic_partition_get_private(d);
+ d_priv->leader_epoch = s_priv->leader_epoch;
+ }
+}
+
+
+/**
+ * @returns the sum of \p cb called for each element.
+ */
+size_t rd_kafka_topic_partition_list_sum(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque),
+ void *opaque) {
+ int i;
+ size_t sum = 0;
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rktparlist->elems[i];
+ sum += cb(rktpar, opaque);
+ }
+
+ return sum;
+}
+
+
+/**
+ * @returns rd_true if there are duplicate topic/partitions in the list,
+ * rd_false if not.
+ *
+ * @remarks sorts the elements of the list.
+ */
+rd_bool_t rd_kafka_topic_partition_list_has_duplicates(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_bool_t ignore_partition) {
+
+ int i;
+
+ if (rktparlist->cnt <= 1)
+ return rd_false;
+
+ rd_kafka_topic_partition_list_sort_by_topic(rktparlist);
+
+ for (i = 1; i < rktparlist->cnt; i++) {
+ const rd_kafka_topic_partition_t *p1 =
+ &rktparlist->elems[i - 1];
+ const rd_kafka_topic_partition_t *p2 = &rktparlist->elems[i];
+
+ if (((p1->partition == p2->partition) || ignore_partition) &&
+ !strcmp(p1->topic, p2->topic)) {
+ return rd_true;
+ }
+ }
+
+ return rd_false;
+}
+
+
+/**
+ * @brief Set \c .err field \p err on all partitions in list.
+ */
+void rd_kafka_topic_partition_list_set_err(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_kafka_resp_err_t err) {
+ int i;
+
+ for (i = 0; i < rktparlist->cnt; i++)
+ rktparlist->elems[i].err = err;
+}
+
+/**
+ * @brief Get the first set error in the partition list.
+ */
+rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err(
+ const rd_kafka_topic_partition_list_t *rktparlist) {
+ int i;
+
+ for (i = 0; i < rktparlist->cnt; i++)
+ if (rktparlist->elems[i].err)
+ return rktparlist->elems[i].err;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @returns the number of wildcard/regex topics
+ */
+int rd_kafka_topic_partition_list_regex_cnt(
+ const rd_kafka_topic_partition_list_t *rktparlist) {
+ int i;
+ int cnt = 0;
+
+ for (i = 0; i < rktparlist->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &rktparlist->elems[i];
+ cnt += *rktpar->topic == '^';
+ }
+ return cnt;
+}
+
+
+/**
+ * @brief Reset base sequence for this toppar.
+ *
+ * See rd_kafka_toppar_pid_change() below.
+ *
+ * @warning Toppar must be completely drained.
+ *
+ * @locality toppar handler thread
+ * @locks toppar_lock MUST be held.
+ */
+static void rd_kafka_toppar_reset_base_msgid(rd_kafka_toppar_t *rktp,
+ uint64_t new_base_msgid) {
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "RESETSEQ",
+ "%.*s [%" PRId32
+ "] "
+ "resetting epoch base seq from %" PRIu64 " to %" PRIu64,
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic), rktp->rktp_partition,
+ rktp->rktp_eos.epoch_base_msgid, new_base_msgid);
+
+ rktp->rktp_eos.next_ack_seq = 0;
+ rktp->rktp_eos.next_err_seq = 0;
+ rktp->rktp_eos.epoch_base_msgid = new_base_msgid;
+}
+
+
+/**
+ * @brief Update/change the Producer ID for this toppar.
+ *
+ * Must only be called when pid is different from the current toppar pid.
+ *
+ * The epoch base sequence will be set to \p base_msgid, which must be the
+ * first message in the partition
+ * queue. However, if there are outstanding messages in-flight to the broker
+ * we will need to wait for these ProduceRequests to finish (most likely
+ * with failure) and have their messages re-enqueued to maintain original order.
+ * In this case the pid will not be updated and this function should be
+ * called again when there are no outstanding messages.
+ *
+ * @remark This function must only be called when rktp_xmitq is non-empty.
+ *
+ * @returns 1 if a new pid was set, else 0.
+ *
+ * @locality toppar handler thread
+ * @locks none
+ */
+int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp,
+ rd_kafka_pid_t pid,
+ uint64_t base_msgid) {
+ int inflight = rd_atomic32_get(&rktp->rktp_msgs_inflight);
+
+ if (unlikely(inflight > 0)) {
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID",
+ "%.*s [%" PRId32
+ "] will not change %s -> %s yet: "
+ "%d message(s) still in-flight from current "
+ "epoch",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid),
+ rd_kafka_pid2str(pid), inflight);
+ return 0;
+ }
+
+ rd_assert(base_msgid != 0 &&
+ *"BUG: pid_change() must only be called with "
+ "non-empty xmitq");
+
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_EOS, "NEWPID",
+ "%.*s [%" PRId32
+ "] changed %s -> %s "
+ "with base MsgId %" PRIu64,
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_pid2str(rktp->rktp_eos.pid),
+ rd_kafka_pid2str(pid), base_msgid);
+
+ rktp->rktp_eos.pid = pid;
+ rd_kafka_toppar_reset_base_msgid(rktp, base_msgid);
+
+ rd_kafka_toppar_unlock(rktp);
+
+ return 1;
+}
+
+
+/**
+ * @brief Purge messages in partition queues.
+ * Delivery reports will be enqueued for all purged messages, the error
+ * code is set to RD_KAFKA_RESP_ERR__PURGE_QUEUE.
+ *
+ * @param include_xmit_msgq If executing from the rktp's current broker handler
+ * thread, also include the xmit message queue.
+ *
+ * @warning Only to be used with the producer.
+ *
+ * @returns the number of messages purged
+ *
+ * @locality any thread.
+ * @locks_acquired rd_kafka_toppar_lock()
+ * @locks_required none
+ */
+int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp,
+ int purge_flags,
+ rd_bool_t include_xmit_msgq) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+ rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
+ int cnt;
+
+ rd_assert(rk->rk_type == RD_KAFKA_PRODUCER);
+
+ rd_kafka_dbg(rk, TOPIC, "PURGE",
+ "%s [%" PRId32
+ "]: purging queues "
+ "(purge_flags 0x%x, %s xmit_msgq)",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ purge_flags, include_xmit_msgq ? "include" : "exclude");
+
+ if (!(purge_flags & RD_KAFKA_PURGE_F_QUEUE))
+ return 0;
+
+ if (include_xmit_msgq) {
+ /* xmit_msgq is owned by the toppar handler thread
+ * (broker thread) and requires no locking. */
+ rd_assert(rktp->rktp_broker);
+ rd_assert(thrd_is_current(rktp->rktp_broker->rkb_thread));
+ rd_kafka_msgq_concat(&rkmq, &rktp->rktp_xmit_msgq);
+ }
+
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_msgq_concat(&rkmq, &rktp->rktp_msgq);
+ cnt = rd_kafka_msgq_len(&rkmq);
+
+ if (cnt > 0 && purge_flags & RD_KAFKA_PURGE_F_ABORT_TXN) {
+ /* All messages in-queue are purged
+ * on abort_transaction(). Since these messages
+ * will not be produced (retried) we need to adjust the
+ * idempotence epoch's base msgid to skip the messages. */
+ rktp->rktp_eos.epoch_base_msgid += cnt;
+ rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_EOS, "ADVBASE",
+ "%.*s [%" PRId32
+ "] "
+ "advancing epoch base msgid to %" PRIu64
+ " due to %d message(s) in aborted transaction",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rktp->rktp_eos.epoch_base_msgid, cnt);
+ }
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_dr_msgq(rktp->rktp_rkt, &rkmq, RD_KAFKA_RESP_ERR__PURGE_QUEUE);
+
+ return cnt;
+}
+
+
+/**
+ * @brief Purge queues for the unassigned toppars of all known topics.
+ *
+ * @locality application thread
+ * @locks none
+ */
+void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk) {
+ rd_kafka_topic_t *rkt;
+ int msg_cnt = 0, part_cnt = 0;
+
+ rd_kafka_rdlock(rk);
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ rd_kafka_toppar_t *rktp;
+ int r;
+
+ rd_kafka_topic_rdlock(rkt);
+ rktp = rkt->rkt_ua;
+ if (rktp)
+ rd_kafka_toppar_keep(rktp);
+ rd_kafka_topic_rdunlock(rkt);
+
+ if (unlikely(!rktp))
+ continue;
+
+
+ rd_kafka_toppar_lock(rktp);
+
+ r = rd_kafka_msgq_len(&rktp->rktp_msgq);
+ rd_kafka_dr_msgq(rkt, &rktp->rktp_msgq,
+ RD_KAFKA_RESP_ERR__PURGE_QUEUE);
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_toppar_destroy(rktp);
+
+ if (r > 0) {
+ msg_cnt += r;
+ part_cnt++;
+ }
+ }
+ rd_kafka_rdunlock(rk);
+
+ rd_kafka_dbg(rk, QUEUE | RD_KAFKA_DBG_TOPIC, "PURGEQ",
+ "Purged %i message(s) from %d UA-partition(s)", msg_cnt,
+ part_cnt);
+}
+
+
+void rd_kafka_partition_leader_destroy_free(void *ptr) {
+ struct rd_kafka_partition_leader *leader = ptr;
+ rd_kafka_partition_leader_destroy(leader);
+}
+
+
+const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos) {
+ static RD_TLS char ret[2][64];
+ static int idx;
+
+ idx = (idx + 1) % 2;
+
+ rd_snprintf(
+ ret[idx], sizeof(ret[idx]), "offset %s (leader epoch %" PRId32 ")",
+ rd_kafka_offset2str(fetchpos.offset), fetchpos.leader_epoch);
+
+ return ret[idx];
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h
new file mode 100644
index 000000000..a1f1f47cd
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_partition.h
@@ -0,0 +1,1058 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_PARTITION_H_
+#define _RDKAFKA_PARTITION_H_
+
+#include "rdkafka_topic.h"
+#include "rdkafka_cgrp.h"
+#include "rdkafka_broker.h"
+
+extern const char *rd_kafka_fetch_states[];
+
+
+/**
+ * @brief Offset statistics
+ */
+struct offset_stats {
+ rd_kafka_fetch_pos_t fetch_pos; /**< Next offset to fetch */
+ int64_t eof_offset; /**< Last offset we reported EOF for */
+};
+
+/**
+ * @brief Reset offset_stats struct to default values
+ */
+static RD_UNUSED void rd_kafka_offset_stats_reset(struct offset_stats *offs) {
+ offs->fetch_pos.offset = 0;
+ offs->fetch_pos.leader_epoch = -1;
+ offs->eof_offset = RD_KAFKA_OFFSET_INVALID;
+}
+
+
+/**
+ * @brief Store information about a partition error for future use.
+ */
+struct rd_kafka_toppar_err {
+ rd_kafka_resp_err_t err; /**< Error code */
+ int actions; /**< Request actions */
+ rd_ts_t ts; /**< Timestamp */
+ uint64_t base_msgid; /**< First msg msgid */
+ int32_t base_seq; /**< Idempodent Producer:
+ * first msg sequence */
+ int32_t last_seq; /**< Idempotent Producer:
+ * last msg sequence */
+};
+
+
+
+/**
+ * @brief Fetchpos comparator, leader epoch has precedence.
+ */
+static RD_UNUSED RD_INLINE int
+rd_kafka_fetch_pos_cmp(const rd_kafka_fetch_pos_t *a,
+ const rd_kafka_fetch_pos_t *b) {
+ if (a->leader_epoch < b->leader_epoch)
+ return -1;
+ else if (a->leader_epoch > b->leader_epoch)
+ return 1;
+ else if (a->offset < b->offset)
+ return -1;
+ else if (a->offset > b->offset)
+ return 1;
+ else
+ return 0;
+}
+
+
+static RD_UNUSED RD_INLINE void
+rd_kafka_fetch_pos_init(rd_kafka_fetch_pos_t *fetchpos) {
+ fetchpos->offset = RD_KAFKA_OFFSET_INVALID;
+ fetchpos->leader_epoch = -1;
+}
+
+const char *rd_kafka_fetch_pos2str(const rd_kafka_fetch_pos_t fetchpos);
+
+static RD_UNUSED RD_INLINE rd_kafka_fetch_pos_t
+rd_kafka_fetch_pos_make(int64_t offset,
+ int32_t leader_epoch,
+ rd_bool_t validated) {
+ rd_kafka_fetch_pos_t fetchpos = {offset, leader_epoch, validated};
+ return fetchpos;
+}
+
+#ifdef RD_HAS_STATEMENT_EXPRESSIONS
+#define RD_KAFKA_FETCH_POS0(offset, leader_epoch, validated) \
+ ({ \
+ rd_kafka_fetch_pos_t _fetchpos = {offset, leader_epoch, \
+ validated}; \
+ _fetchpos; \
+ })
+#else
+#define RD_KAFKA_FETCH_POS0(offset, leader_epoch, validated) \
+ rd_kafka_fetch_pos_make(offset, leader_epoch, validated)
+#endif
+
+#define RD_KAFKA_FETCH_POS(offset, leader_epoch) \
+ RD_KAFKA_FETCH_POS0(offset, leader_epoch, rd_false)
+
+
+
+typedef TAILQ_HEAD(rd_kafka_toppar_tqhead_s,
+ rd_kafka_toppar_s) rd_kafka_toppar_tqhead_t;
+
+/**
+ * Topic + Partition combination
+ */
+struct rd_kafka_toppar_s { /* rd_kafka_toppar_t */
+ TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rklink; /* rd_kafka_t link */
+ TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rkblink; /* rd_kafka_broker_t link*/
+ CIRCLEQ_ENTRY(rd_kafka_toppar_s)
+ rktp_activelink; /* rkb_active_toppars */
+ TAILQ_ENTRY(rd_kafka_toppar_s) rktp_rktlink; /* rd_kafka_topic_t link*/
+ TAILQ_ENTRY(rd_kafka_toppar_s) rktp_cgrplink; /* rd_kafka_cgrp_t link */
+ TAILQ_ENTRY(rd_kafka_toppar_s)
+ rktp_txnlink; /**< rd_kafka_t.rk_eos.
+ * txn_pend_rktps
+ * or txn_rktps */
+ rd_kafka_topic_t *rktp_rkt; /**< This toppar's topic object */
+ int32_t rktp_partition;
+ // LOCK: toppar_lock() + topic_wrlock()
+ // LOCK: .. in partition_available()
+ int32_t rktp_leader_id; /**< Current leader id.
+ * This is updated directly
+ * from metadata. */
+ int32_t rktp_broker_id; /**< Current broker id. */
+ rd_kafka_broker_t *rktp_leader; /**< Current leader broker.
+ * This updated simultaneously
+ * with rktp_leader_id. */
+ rd_kafka_broker_t *rktp_broker; /**< Current preferred broker
+ * (usually the leader).
+ * This updated asynchronously
+ * by issuing JOIN op to
+ * broker thread, so be careful
+ * in using this since it
+ * may lag. */
+ rd_kafka_broker_t *rktp_next_broker; /**< Next preferred broker after
+ * async migration op. */
+ rd_refcnt_t rktp_refcnt;
+ mtx_t rktp_lock;
+
+ // LOCK: toppar_lock. toppar_insert_msg(), concat_msgq()
+ // LOCK: toppar_lock. toppar_enq_msg(), deq_msg(), toppar_retry_msgq()
+ rd_kafka_q_t *rktp_msgq_wakeup_q; /**< Wake-up queue */
+ rd_kafka_msgq_t rktp_msgq; /* application->rdkafka queue.
+ * protected by rktp_lock */
+ rd_kafka_msgq_t rktp_xmit_msgq; /* internal broker xmit queue.
+ * local to broker thread. */
+
+ int rktp_fetch; /* On rkb_active_toppars list */
+
+ /* Consumer */
+ rd_kafka_q_t *rktp_fetchq; /* Queue of fetched messages
+ * from broker.
+ * Broker thread -> App */
+ rd_kafka_q_t *rktp_ops; /* * -> Main thread */
+
+ rd_atomic32_t rktp_msgs_inflight; /**< Current number of
+ * messages in-flight to/from
+ * the broker. */
+
+ uint64_t rktp_msgid; /**< Current/last message id.
+ * Each message enqueued on a
+ * non-UA partition will get a
+ * partition-unique sequencial
+ * number assigned.
+ * This number is used to
+ * re-enqueue the message
+ * on resends but making sure
+ * the input ordering is still
+ * maintained, and used by
+ * the idempotent producer.
+ * Starts at 1.
+ * Protected by toppar_lock */
+ struct {
+ rd_kafka_pid_t pid; /**< Partition's last known
+ * Producer Id and epoch.
+ * Protected by toppar lock.
+ * Only updated in toppar
+ * handler thread. */
+ uint64_t acked_msgid; /**< Highest acknowledged message.
+ * Protected by toppar lock. */
+ uint64_t epoch_base_msgid; /**< This Producer epoch's
+ * base msgid.
+ * When a new epoch is
+ * acquired, or on transaction
+ * abort, the base_seq is set to
+ * the current rktp_msgid so that
+ * sub-sequent produce
+ * requests will have
+ * a sequence number series
+ * starting at 0.
+ * Protected by toppar_lock */
+ int32_t next_ack_seq; /**< Next expected ack sequence.
+ * Protected by toppar lock. */
+ int32_t next_err_seq; /**< Next expected error sequence.
+ * Used when draining outstanding
+ * issues.
+ * This value will be the same
+ * as next_ack_seq until a
+ * drainable error occurs,
+ * in which case it
+ * will advance past next_ack_seq.
+ * next_ack_seq can never be larger
+ * than next_err_seq.
+ * Protected by toppar lock. */
+ rd_bool_t wait_drain; /**< All inflight requests must
+ * be drained/finish before
+ * resuming producing.
+ * This is set to true
+ * when a leader change
+ * happens so that the
+ * in-flight messages for the
+ * old brokers finish before
+ * the new broker starts sending.
+ * This as a step to ensure
+ * consistency.
+ * Only accessed from toppar
+ * handler thread. */
+ } rktp_eos;
+
+ /**
+ * rktp version barriers
+ *
+ * rktp_version is the application/controller side's
+ * authoritative version, it depicts the most up to date state.
+ * This is what q_filter() matches an rko_version to.
+ *
+ * rktp_op_version is the last/current received state handled
+ * by the toppar in the broker thread. It is updated to rktp_version
+ * when receiving a new op.
+ *
+ * rktp_fetch_version is the current fetcher decision version.
+ * It is used in fetch_decide() to see if the fetch decision
+ * needs to be updated by comparing to rktp_op_version.
+ *
+ * Example:
+ * App thread : Send OP_START (v1 bump): rktp_version=1
+ * Broker thread: Recv OP_START (v1): rktp_op_version=1
+ * Broker thread: fetch_decide() detects that
+ * rktp_op_version != rktp_fetch_version and
+ * sets rktp_fetch_version=1.
+ * Broker thread: next Fetch request has it's tver state set to
+ * rktp_fetch_verison (v1).
+ *
+ * App thread : Send OP_SEEK (v2 bump): rktp_version=2
+ * Broker thread: Recv OP_SEEK (v2): rktp_op_version=2
+ * Broker thread: Recv IO FetchResponse with tver=1,
+ * when enqueued on rktp_fetchq they're discarded
+ * due to old version (tver<rktp_version).
+ * Broker thread: fetch_decide() detects version change and
+ * sets rktp_fetch_version=2.
+ * Broker thread: next Fetch request has tver=2
+ * Broker thread: Recv IO FetchResponse with tver=2 which
+ * is same as rktp_version so message is forwarded
+ * to app.
+ */
+ rd_atomic32_t rktp_version; /* Latest op version.
+ * Authoritative (app thread)*/
+ int32_t rktp_op_version; /* Op version of curr command
+ * state from.
+ * (broker thread) */
+ int32_t rktp_fetch_version; /* Op version of curr fetch.
+ (broker thread) */
+
+ enum { RD_KAFKA_TOPPAR_FETCH_NONE = 0,
+ RD_KAFKA_TOPPAR_FETCH_STOPPING,
+ RD_KAFKA_TOPPAR_FETCH_STOPPED,
+ RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY,
+ RD_KAFKA_TOPPAR_FETCH_OFFSET_WAIT,
+ RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT,
+ RD_KAFKA_TOPPAR_FETCH_ACTIVE,
+ } rktp_fetch_state; /* Broker thread's state */
+
+#define RD_KAFKA_TOPPAR_FETCH_IS_STARTED(fetch_state) \
+ ((fetch_state) >= RD_KAFKA_TOPPAR_FETCH_OFFSET_QUERY)
+
+ int32_t rktp_leader_epoch; /**< Last known partition leader epoch,
+ * or -1. */
+
+ int32_t rktp_fetch_msg_max_bytes; /* Max number of bytes to
+ * fetch.
+ * Locality: broker thread
+ */
+
+ rd_ts_t rktp_ts_fetch_backoff; /* Back off fetcher for
+ * this partition until this
+ * absolute timestamp
+ * expires. */
+
+ /** Offset to query broker for. */
+ rd_kafka_fetch_pos_t rktp_query_pos;
+
+ /** Next fetch start position.
+ * This is set up start, seek, resume, etc, to tell
+ * the fetcher where to start fetching.
+ * It is not updated for each fetch, see
+ * rktp_offsets.fetch_pos for that.
+ * @locality toppar thread */
+ rd_kafka_fetch_pos_t rktp_next_fetch_start;
+
+ /** The previous next fetch position.
+ * @locality toppar thread */
+ rd_kafka_fetch_pos_t rktp_last_next_fetch_start;
+
+ /** Application's position.
+ * This is the latest offset delivered to application + 1.
+ * It is reset to INVALID_OFFSET when partition is
+ * unassigned/stopped/seeked. */
+ rd_kafka_fetch_pos_t rktp_app_pos;
+
+ /** Last stored offset, but maybe not yet committed. */
+ rd_kafka_fetch_pos_t rktp_stored_pos;
+
+ /** Offset currently being committed */
+ rd_kafka_fetch_pos_t rktp_committing_pos;
+
+ /** Last (known) committed offset */
+ rd_kafka_fetch_pos_t rktp_committed_pos;
+
+ rd_ts_t rktp_ts_committed_offset; /**< Timestamp of last commit */
+
+ struct offset_stats rktp_offsets; /* Current offsets.
+ * Locality: broker thread*/
+ struct offset_stats rktp_offsets_fin; /* Finalized offset for stats.
+ * Updated periodically
+ * by broker thread.
+ * Locks: toppar_lock */
+
+ int64_t rktp_ls_offset; /**< Current last stable offset
+ * Locks: toppar_lock */
+ int64_t rktp_hi_offset; /* Current high watermark offset.
+ * Locks: toppar_lock */
+ int64_t rktp_lo_offset; /* Current broker low offset.
+ * This is outside of the stats
+ * struct due to this field
+ * being populated by the
+ * toppar thread rather than
+ * the broker thread.
+ * Locality: toppar thread
+ * Locks: toppar_lock */
+
+ rd_ts_t rktp_ts_offset_lag;
+
+ char *rktp_offset_path; /* Path to offset file */
+ FILE *rktp_offset_fp; /* Offset file pointer */
+
+ rd_kafka_resp_err_t rktp_last_error; /**< Last Fetch error.
+ * Used for suppressing
+ * reoccuring errors.
+ * @locality broker thread */
+
+ rd_kafka_cgrp_t *rktp_cgrp; /* Belongs to this cgrp */
+
+ rd_bool_t rktp_started; /**< Fetcher is instructured to
+ * start.
+ * This is used by cgrp to keep
+ * track of whether the toppar has
+ * been started or not. */
+
+ rd_kafka_replyq_t rktp_replyq; /* Current replyq+version
+ * for propagating
+ * major operations, e.g.,
+ * FETCH_STOP. */
+ // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_DESIRED
+ // LOCK: toppar_lock(). RD_KAFKA_TOPPAR_F_UNKNOWN
+ int rktp_flags;
+#define RD_KAFKA_TOPPAR_F_DESIRED \
+ 0x1 /* This partition is desired \
+ * by a consumer. */
+#define RD_KAFKA_TOPPAR_F_UNKNOWN \
+ 0x2 /* Topic is not yet or no longer \
+ * seen on a broker. */
+#define RD_KAFKA_TOPPAR_F_OFFSET_STORE 0x4 /* Offset store is active */
+#define RD_KAFKA_TOPPAR_F_OFFSET_STORE_STOPPING \
+ 0x8 /* Offset store stopping \
+ */
+#define RD_KAFKA_TOPPAR_F_APP_PAUSE 0x10 /* App pause()d consumption */
+#define RD_KAFKA_TOPPAR_F_LIB_PAUSE 0x20 /* librdkafka paused consumption */
+#define RD_KAFKA_TOPPAR_F_REMOVE 0x40 /* partition removed from cluster */
+#define RD_KAFKA_TOPPAR_F_LEADER_ERR \
+ 0x80 /* Operation failed: \
+ * leader might be missing. \
+ * Typically set from \
+ * ProduceResponse failure. */
+#define RD_KAFKA_TOPPAR_F_PEND_TXN \
+ 0x100 /* Partition is pending being added \
+ * to a producer transaction. */
+#define RD_KAFKA_TOPPAR_F_IN_TXN \
+ 0x200 /* Partition is part of \
+ * a producer transaction. */
+#define RD_KAFKA_TOPPAR_F_ON_DESP 0x400 /**< On rkt_desp list */
+#define RD_KAFKA_TOPPAR_F_ON_CGRP 0x800 /**< On rkcg_toppars list */
+#define RD_KAFKA_TOPPAR_F_ON_RKB 0x1000 /**< On rkb_toppars list */
+#define RD_KAFKA_TOPPAR_F_ASSIGNED \
+ 0x2000 /**< Toppar is part of the consumer \
+ * assignment. */
+
+ /*
+ * Timers
+ */
+ rd_kafka_timer_t rktp_offset_query_tmr; /* Offset query timer */
+ rd_kafka_timer_t rktp_offset_commit_tmr; /* Offset commit timer */
+ rd_kafka_timer_t rktp_offset_sync_tmr; /* Offset file sync timer */
+ rd_kafka_timer_t rktp_consumer_lag_tmr; /* Consumer lag monitoring
+ * timer */
+ rd_kafka_timer_t rktp_validate_tmr; /**< Offset and epoch
+ * validation retry timer */
+
+ rd_interval_t rktp_lease_intvl; /**< Preferred replica lease
+ * period */
+ rd_interval_t rktp_new_lease_intvl; /**< Controls max frequency
+ * at which a new preferred
+ * replica lease can be
+ * created for a toppar.
+ */
+ rd_interval_t rktp_new_lease_log_intvl; /**< .. and how often
+ * we log about it. */
+ rd_interval_t rktp_metadata_intvl; /**< Controls max frequency
+ * of metadata requests
+ * in preferred replica
+ * handler.
+ */
+
+ int rktp_wait_consumer_lag_resp; /* Waiting for consumer lag
+ * response. */
+
+ struct rd_kafka_toppar_err rktp_last_err; /**< Last produce error */
+
+
+ struct {
+ rd_atomic64_t tx_msgs; /**< Producer: sent messages */
+ rd_atomic64_t tx_msg_bytes; /**< .. bytes */
+ rd_atomic64_t rx_msgs; /**< Consumer: received messages */
+ rd_atomic64_t rx_msg_bytes; /**< .. bytes */
+ rd_atomic64_t producer_enq_msgs; /**< Producer: enqueued msgs */
+ rd_atomic64_t rx_ver_drops; /**< Consumer: outdated message
+ * drops. */
+ } rktp_c;
+};
+
+/**
+ * @struct This is a separately allocated glue object used in
+ * rd_kafka_topic_partition_t._private to allow referencing both
+ * an rktp and/or a leader epoch. Both are optional.
+ * The rktp, if non-NULL, owns a refcount.
+ *
+ * This glue object is not always set in ._private, but allocated on demand
+ * as necessary.
+ */
+typedef struct rd_kafka_topic_partition_private_s {
+ /** Reference to a toppar. Optional, may be NULL. */
+ rd_kafka_toppar_t *rktp;
+ /** Current Leader epoch, if known, else -1.
+ * this is set when the API needs to send the last epoch known
+ * by the client. */
+ int32_t current_leader_epoch;
+ /** Leader epoch if known, else -1. */
+ int32_t leader_epoch;
+} rd_kafka_topic_partition_private_t;
+
+
+/**
+ * Check if toppar is paused (consumer).
+ * Locks: toppar_lock() MUST be held.
+ */
+#define RD_KAFKA_TOPPAR_IS_PAUSED(rktp) \
+ ((rktp)->rktp_flags & \
+ (RD_KAFKA_TOPPAR_F_APP_PAUSE | RD_KAFKA_TOPPAR_F_LIB_PAUSE))
+
+
+
+/**
+ * @brief Increase refcount and return rktp object.
+ */
+#define rd_kafka_toppar_keep(RKTP) \
+ rd_kafka_toppar_keep0(__FUNCTION__, __LINE__, RKTP)
+
+#define rd_kafka_toppar_keep_fl(FUNC, LINE, RKTP) \
+ rd_kafka_toppar_keep0(FUNC, LINE, RKTP)
+
+static RD_UNUSED RD_INLINE rd_kafka_toppar_t *
+rd_kafka_toppar_keep0(const char *func, int line, rd_kafka_toppar_t *rktp) {
+ rd_refcnt_add_fl(func, line, &rktp->rktp_refcnt);
+ return rktp;
+}
+
+void rd_kafka_toppar_destroy_final(rd_kafka_toppar_t *rktp);
+
+#define rd_kafka_toppar_destroy(RKTP) \
+ do { \
+ rd_kafka_toppar_t *_RKTP = (RKTP); \
+ if (unlikely(rd_refcnt_sub(&_RKTP->rktp_refcnt) == 0)) \
+ rd_kafka_toppar_destroy_final(_RKTP); \
+ } while (0)
+
+
+
+#define rd_kafka_toppar_lock(rktp) mtx_lock(&(rktp)->rktp_lock)
+#define rd_kafka_toppar_unlock(rktp) mtx_unlock(&(rktp)->rktp_lock)
+
+static const char *
+rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) RD_UNUSED;
+static const char *rd_kafka_toppar_name(const rd_kafka_toppar_t *rktp) {
+ static RD_TLS char ret[256];
+
+ rd_snprintf(ret, sizeof(ret), "%.*s [%" PRId32 "]",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+
+ return ret;
+}
+rd_kafka_toppar_t *rd_kafka_toppar_new0(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ const char *func,
+ int line);
+#define rd_kafka_toppar_new(rkt, partition) \
+ rd_kafka_toppar_new0(rkt, partition, __FUNCTION__, __LINE__)
+void rd_kafka_toppar_purge_and_disable_queues(rd_kafka_toppar_t *rktp);
+void rd_kafka_toppar_set_fetch_state(rd_kafka_toppar_t *rktp, int fetch_state);
+void rd_kafka_toppar_insert_msg(rd_kafka_toppar_t *rktp, rd_kafka_msg_t *rkm);
+void rd_kafka_toppar_enq_msg(rd_kafka_toppar_t *rktp,
+ rd_kafka_msg_t *rkm,
+ rd_ts_t now);
+int rd_kafka_retry_msgq(rd_kafka_msgq_t *destq,
+ rd_kafka_msgq_t *srcq,
+ int incr_retry,
+ int max_retries,
+ rd_ts_t backoff,
+ rd_kafka_msg_status_t status,
+ int (*cmp)(const void *a, const void *b));
+void rd_kafka_msgq_insert_msgq(rd_kafka_msgq_t *destq,
+ rd_kafka_msgq_t *srcq,
+ int (*cmp)(const void *a, const void *b));
+int rd_kafka_toppar_retry_msgq(rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq,
+ int incr_retry,
+ rd_kafka_msg_status_t status);
+void rd_kafka_toppar_insert_msgq(rd_kafka_toppar_t *rktp,
+ rd_kafka_msgq_t *rkmq);
+void rd_kafka_toppar_enq_error(rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err,
+ const char *reason);
+rd_kafka_toppar_t *rd_kafka_toppar_get0(const char *func,
+ int line,
+ const rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int ua_on_miss);
+#define rd_kafka_toppar_get(rkt, partition, ua_on_miss) \
+ rd_kafka_toppar_get0(__FUNCTION__, __LINE__, rkt, partition, ua_on_miss)
+rd_kafka_toppar_t *rd_kafka_toppar_get2(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int ua_on_miss,
+ int create_on_miss);
+rd_kafka_toppar_t *rd_kafka_toppar_get_avail(const rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int ua_on_miss,
+ rd_kafka_resp_err_t *errp);
+
+rd_kafka_toppar_t *rd_kafka_toppar_desired_get(rd_kafka_topic_t *rkt,
+ int32_t partition);
+void rd_kafka_toppar_desired_add0(rd_kafka_toppar_t *rktp);
+rd_kafka_toppar_t *rd_kafka_toppar_desired_add(rd_kafka_topic_t *rkt,
+ int32_t partition);
+void rd_kafka_toppar_desired_link(rd_kafka_toppar_t *rktp);
+void rd_kafka_toppar_desired_unlink(rd_kafka_toppar_t *rktp);
+void rd_kafka_toppar_desired_del(rd_kafka_toppar_t *rktp);
+
+void rd_kafka_toppar_next_offset_handle(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t next_pos);
+
+void rd_kafka_toppar_broker_delegate(rd_kafka_toppar_t *rktp,
+ rd_kafka_broker_t *rkb);
+
+
+rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_start(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_kafka_q_t *fwdq,
+ rd_kafka_replyq_t replyq);
+
+rd_kafka_resp_err_t rd_kafka_toppar_op_fetch_stop(rd_kafka_toppar_t *rktp,
+ rd_kafka_replyq_t replyq);
+
+rd_kafka_resp_err_t rd_kafka_toppar_op_seek(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t pos,
+ rd_kafka_replyq_t replyq);
+
+rd_kafka_resp_err_t
+rd_kafka_toppar_op_pause(rd_kafka_toppar_t *rktp, int pause, int flag);
+
+void rd_kafka_toppar_fetch_stopped(rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err);
+
+
+
+rd_ts_t rd_kafka_broker_consumer_toppar_serve(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp);
+
+
+void rd_kafka_toppar_offset_fetch(rd_kafka_toppar_t *rktp,
+ rd_kafka_replyq_t replyq);
+
+void rd_kafka_toppar_offset_request(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t query_pos,
+ int backoff_ms);
+
+int rd_kafka_toppar_purge_queues(rd_kafka_toppar_t *rktp,
+ int purge_flags,
+ rd_bool_t include_xmit_msgq);
+
+rd_kafka_broker_t *rd_kafka_toppar_broker(rd_kafka_toppar_t *rktp,
+ int proper_broker);
+void rd_kafka_toppar_leader_unavailable(rd_kafka_toppar_t *rktp,
+ const char *reason,
+ rd_kafka_resp_err_t err);
+
+void rd_kafka_toppar_pause(rd_kafka_toppar_t *rktp, int flag);
+void rd_kafka_toppar_resume(rd_kafka_toppar_t *rktp, int flag);
+
+rd_kafka_resp_err_t rd_kafka_toppar_op_pause_resume(rd_kafka_toppar_t *rktp,
+ int pause,
+ int flag,
+ rd_kafka_replyq_t replyq);
+rd_kafka_resp_err_t
+rd_kafka_toppars_pause_resume(rd_kafka_t *rk,
+ rd_bool_t pause,
+ rd_async_t async,
+ int flag,
+ rd_kafka_topic_partition_list_t *partitions);
+
+
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_new(const char *topic,
+ int32_t partition);
+void rd_kafka_topic_partition_destroy_free(void *ptr);
+rd_kafka_topic_partition_t *
+rd_kafka_topic_partition_copy(const rd_kafka_topic_partition_t *src);
+void *rd_kafka_topic_partition_copy_void(const void *src);
+void rd_kafka_topic_partition_destroy_free(void *ptr);
+rd_kafka_topic_partition_t *
+rd_kafka_topic_partition_new_from_rktp(rd_kafka_toppar_t *rktp);
+
+void rd_kafka_topic_partition_list_init(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int size);
+void rd_kafka_topic_partition_list_destroy_free(void *ptr);
+
+void rd_kafka_topic_partition_list_clear(
+ rd_kafka_topic_partition_list_t *rktparlist);
+
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_add0(
+ const char *func,
+ int line,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition,
+ rd_kafka_toppar_t *rktp,
+ const rd_kafka_topic_partition_private_t *parpriv);
+
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_upsert(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition);
+
+void rd_kafka_topic_partition_list_add_copy(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ const rd_kafka_topic_partition_t *rktpar);
+
+
+void rd_kafka_topic_partition_list_add_list(
+ rd_kafka_topic_partition_list_t *dst,
+ const rd_kafka_topic_partition_list_t *src);
+
+/**
+ * Traverse rd_kafka_topic_partition_list_t.
+ *
+ * @warning \p TPLIST modifications are not allowed.
+ */
+#define RD_KAFKA_TPLIST_FOREACH(RKTPAR, TPLIST) \
+ for (RKTPAR = &(TPLIST)->elems[0]; \
+ (RKTPAR) < &(TPLIST)->elems[(TPLIST)->cnt]; RKTPAR++)
+
+/**
+ * Traverse rd_kafka_topic_partition_list_t.
+ *
+ * @warning \p TPLIST modifications are not allowed, but removal of the
+ * current \p RKTPAR element is allowed.
+ */
+#define RD_KAFKA_TPLIST_FOREACH_REVERSE(RKTPAR, TPLIST) \
+ for (RKTPAR = &(TPLIST)->elems[(TPLIST)->cnt - 1]; \
+ (RKTPAR) >= &(TPLIST)->elems[0]; RKTPAR--)
+
+int rd_kafka_topic_partition_match(rd_kafka_t *rk,
+ const rd_kafka_group_member_t *rkgm,
+ const rd_kafka_topic_partition_t *rktpar,
+ const char *topic,
+ int *matched_by_regex);
+
+
+int rd_kafka_topic_partition_cmp(const void *_a, const void *_b);
+unsigned int rd_kafka_topic_partition_hash(const void *a);
+
+int rd_kafka_topic_partition_list_find_idx(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic,
+ int32_t partition);
+rd_kafka_topic_partition_t *rd_kafka_topic_partition_list_find_topic(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ const char *topic);
+
+void rd_kafka_topic_partition_list_sort_by_topic(
+ rd_kafka_topic_partition_list_t *rktparlist);
+
+void rd_kafka_topic_partition_list_reset_offsets(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int64_t offset);
+
+int rd_kafka_topic_partition_list_set_offsets(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ int from_rktp,
+ int64_t def_value,
+ int is_commit);
+
+int rd_kafka_topic_partition_list_count_abs_offsets(
+ const rd_kafka_topic_partition_list_t *rktparlist);
+
+int rd_kafka_topic_partition_list_cmp(const void *_a,
+ const void *_b,
+ int (*cmp)(const void *, const void *));
+
+/**
+ * @returns (and creates if necessary) the ._private glue object.
+ */
+static RD_UNUSED RD_INLINE rd_kafka_topic_partition_private_t *
+rd_kafka_topic_partition_get_private(rd_kafka_topic_partition_t *rktpar) {
+ rd_kafka_topic_partition_private_t *parpriv;
+
+ if (!(parpriv = rktpar->_private)) {
+ parpriv = rd_calloc(1, sizeof(*parpriv));
+ parpriv->leader_epoch = -1;
+ rktpar->_private = parpriv;
+ }
+
+ return parpriv;
+}
+
+
+/**
+ * @returns the partition leader current epoch, if relevant and known,
+ * else -1.
+ *
+ * @param rktpar Partition object.
+ *
+ * @remark See KIP-320 for more information.
+ */
+int32_t rd_kafka_topic_partition_get_current_leader_epoch(
+ const rd_kafka_topic_partition_t *rktpar);
+
+
+/**
+ * @brief Sets the partition leader current epoch (use -1 to clear).
+ *
+ * @param rktpar Partition object.
+ * @param leader_epoch Partition leader current epoch, use -1 to reset.
+ *
+ * @remark See KIP-320 for more information.
+ */
+void rd_kafka_topic_partition_set_current_leader_epoch(
+ rd_kafka_topic_partition_t *rktpar,
+ int32_t leader_epoch);
+
+
+/**
+ * @returns the partition's rktp if set (no refcnt increase), else NULL.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_toppar_t *
+rd_kafka_topic_partition_toppar(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_t *rktpar) {
+ const rd_kafka_topic_partition_private_t *parpriv;
+
+ if ((parpriv = rktpar->_private))
+ return parpriv->rktp;
+
+ return NULL;
+}
+
+rd_kafka_toppar_t *
+rd_kafka_topic_partition_ensure_toppar(rd_kafka_t *rk,
+ rd_kafka_topic_partition_t *rktpar,
+ rd_bool_t create_on_miss);
+
+/**
+ * @returns (and sets if necessary) the \p rktpar's ._private.
+ * @remark a new reference is returned.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_toppar_t *
+rd_kafka_topic_partition_get_toppar(rd_kafka_t *rk,
+ rd_kafka_topic_partition_t *rktpar,
+ rd_bool_t create_on_miss) {
+ rd_kafka_toppar_t *rktp;
+
+ rktp =
+ rd_kafka_topic_partition_ensure_toppar(rk, rktpar, create_on_miss);
+
+ if (rktp)
+ rd_kafka_toppar_keep(rktp);
+
+ return rktp;
+}
+
+
+
+void rd_kafka_topic_partition_list_update_toppars(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_bool_t create_on_miss);
+
+
+void rd_kafka_topic_partition_list_query_leaders_async(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ int timeout_ms,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_op_cb_t *cb,
+ void *opaque);
+
+rd_kafka_resp_err_t rd_kafka_topic_partition_list_query_leaders(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *leaders,
+ int timeout_ms);
+
+int rd_kafka_topic_partition_list_get_topics(
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *rkts);
+
+int rd_kafka_topic_partition_list_get_topic_names(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ rd_list_t *topics,
+ int include_regex);
+
+void rd_kafka_topic_partition_list_log(
+ rd_kafka_t *rk,
+ const char *fac,
+ int dbg,
+ const rd_kafka_topic_partition_list_t *rktparlist);
+
+#define RD_KAFKA_FMT_F_OFFSET 0x1 /* Print offset */
+#define RD_KAFKA_FMT_F_ONLY_ERR 0x2 /* Only include errored entries */
+#define RD_KAFKA_FMT_F_NO_ERR 0x4 /* Dont print error string */
+const char *rd_kafka_topic_partition_list_str(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ char *dest,
+ size_t dest_size,
+ int fmt_flags);
+
+void rd_kafka_topic_partition_list_update(
+ rd_kafka_topic_partition_list_t *dst,
+ const rd_kafka_topic_partition_list_t *src);
+
+int rd_kafka_topic_partition_leader_cmp(const void *_a, const void *_b);
+
+void rd_kafka_topic_partition_set_from_fetch_pos(
+ rd_kafka_topic_partition_t *rktpar,
+ const rd_kafka_fetch_pos_t fetchpos);
+
+static RD_UNUSED rd_kafka_fetch_pos_t rd_kafka_topic_partition_get_fetch_pos(
+ const rd_kafka_topic_partition_t *rktpar) {
+ rd_kafka_fetch_pos_t fetchpos = {
+ rktpar->offset, rd_kafka_topic_partition_get_leader_epoch(rktpar)};
+
+ return fetchpos;
+}
+
+
+/**
+ * @brief Match function that returns true if partition has a valid offset.
+ */
+static RD_UNUSED int
+rd_kafka_topic_partition_match_valid_offset(const void *elem,
+ const void *opaque) {
+ const rd_kafka_topic_partition_t *rktpar = elem;
+ return rktpar->offset >= 0;
+}
+
+rd_kafka_topic_partition_list_t *rd_kafka_topic_partition_list_match(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ int (*match)(const void *elem, const void *opaque),
+ void *opaque);
+
+size_t rd_kafka_topic_partition_list_sum(
+ const rd_kafka_topic_partition_list_t *rktparlist,
+ size_t (*cb)(const rd_kafka_topic_partition_t *rktpar, void *opaque),
+ void *opaque);
+
+rd_bool_t rd_kafka_topic_partition_list_has_duplicates(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_bool_t ignore_partition);
+
+void rd_kafka_topic_partition_list_set_err(
+ rd_kafka_topic_partition_list_t *rktparlist,
+ rd_kafka_resp_err_t err);
+
+rd_kafka_resp_err_t rd_kafka_topic_partition_list_get_err(
+ const rd_kafka_topic_partition_list_t *rktparlist);
+
+int rd_kafka_topic_partition_list_regex_cnt(
+ const rd_kafka_topic_partition_list_t *rktparlist);
+
+void *rd_kafka_topic_partition_list_copy_opaque(const void *src, void *opaque);
+
+/**
+ * @brief Toppar + Op version tuple used for mapping Fetched partitions
+ * back to their fetch versions.
+ */
+struct rd_kafka_toppar_ver {
+ rd_kafka_toppar_t *rktp;
+ int32_t version;
+};
+
+
+/**
+ * @brief Toppar + Op version comparator.
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_toppar_ver_cmp(const void *_a,
+ const void *_b) {
+ const struct rd_kafka_toppar_ver *a = _a, *b = _b;
+ const rd_kafka_toppar_t *rktp_a = a->rktp;
+ const rd_kafka_toppar_t *rktp_b = b->rktp;
+ int r;
+
+ if (rktp_a->rktp_rkt != rktp_b->rktp_rkt &&
+ (r = rd_kafkap_str_cmp(rktp_a->rktp_rkt->rkt_topic,
+ rktp_b->rktp_rkt->rkt_topic)))
+ return r;
+
+ return RD_CMP(rktp_a->rktp_partition, rktp_b->rktp_partition);
+}
+
+/**
+ * @brief Frees up resources for \p tver but not the \p tver itself.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_toppar_ver_destroy(struct rd_kafka_toppar_ver *tver) {
+ rd_kafka_toppar_destroy(tver->rktp);
+}
+
+
+/**
+ * @returns 1 if rko version is outdated, else 0.
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_op_version_outdated(rd_kafka_op_t *rko,
+ int version) {
+ if (!rko->rko_version)
+ return 0;
+
+ if (version)
+ return rko->rko_version < version;
+
+ if (rko->rko_rktp)
+ return rko->rko_version <
+ rd_atomic32_get(&rko->rko_rktp->rktp_version);
+ return 0;
+}
+
+void rd_kafka_toppar_offset_commit_result(
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets);
+
+void rd_kafka_toppar_broker_leave_for_remove(rd_kafka_toppar_t *rktp);
+
+
+/**
+ * @brief Represents a leader and the partitions it is leader for.
+ */
+struct rd_kafka_partition_leader {
+ rd_kafka_broker_t *rkb;
+ rd_kafka_topic_partition_list_t *partitions;
+};
+
+static RD_UNUSED void
+rd_kafka_partition_leader_destroy(struct rd_kafka_partition_leader *leader) {
+ rd_kafka_broker_destroy(leader->rkb);
+ rd_kafka_topic_partition_list_destroy(leader->partitions);
+ rd_free(leader);
+}
+
+void rd_kafka_partition_leader_destroy_free(void *ptr);
+
+static RD_UNUSED struct rd_kafka_partition_leader *
+rd_kafka_partition_leader_new(rd_kafka_broker_t *rkb) {
+ struct rd_kafka_partition_leader *leader = rd_malloc(sizeof(*leader));
+ leader->rkb = rkb;
+ rd_kafka_broker_keep(rkb);
+ leader->partitions = rd_kafka_topic_partition_list_new(0);
+ return leader;
+}
+
+static RD_UNUSED int rd_kafka_partition_leader_cmp(const void *_a,
+ const void *_b) {
+ const struct rd_kafka_partition_leader *a = _a, *b = _b;
+ return rd_kafka_broker_cmp(a->rkb, b->rkb);
+}
+
+
+int rd_kafka_toppar_pid_change(rd_kafka_toppar_t *rktp,
+ rd_kafka_pid_t pid,
+ uint64_t base_msgid);
+
+int rd_kafka_toppar_handle_purge_queues(rd_kafka_toppar_t *rktp,
+ rd_kafka_broker_t *rkb,
+ int purge_flags);
+void rd_kafka_purge_ua_toppar_queues(rd_kafka_t *rk);
+
+static RD_UNUSED int rd_kafka_toppar_topic_cmp(const void *_a, const void *_b) {
+ const rd_kafka_toppar_t *a = _a, *b = _b;
+ return strcmp(a->rktp_rkt->rkt_topic->str, b->rktp_rkt->rkt_topic->str);
+}
+
+
+/**
+ * @brief Set's the partitions next fetch position, i.e., the next offset
+ * to start fetching from.
+ *
+ * @locks_required rd_kafka_toppar_lock(rktp) MUST be held.
+ */
+static RD_UNUSED RD_INLINE void
+rd_kafka_toppar_set_next_fetch_position(rd_kafka_toppar_t *rktp,
+ rd_kafka_fetch_pos_t next_pos) {
+ rktp->rktp_next_fetch_start = next_pos;
+}
+
+#endif /* _RDKAFKA_PARTITION_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c
new file mode 100644
index 000000000..dfe3ef03e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.c
@@ -0,0 +1,228 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_pattern.h"
+
+void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist,
+ rd_kafka_pattern_t *rkpat) {
+ TAILQ_REMOVE(&plist->rkpl_head, rkpat, rkpat_link);
+ rd_regex_destroy(rkpat->rkpat_re);
+ rd_free(rkpat->rkpat_orig);
+ rd_free(rkpat);
+}
+
+void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist,
+ rd_kafka_pattern_t *rkpat) {
+ TAILQ_INSERT_TAIL(&plist->rkpl_head, rkpat, rkpat_link);
+}
+
+rd_kafka_pattern_t *
+rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size) {
+ rd_kafka_pattern_t *rkpat;
+
+ rkpat = rd_calloc(1, sizeof(*rkpat));
+
+ /* Verify and precompile pattern */
+ if (!(rkpat->rkpat_re = rd_regex_comp(pattern, errstr, errstr_size))) {
+ rd_free(rkpat);
+ return NULL;
+ }
+
+ rkpat->rkpat_orig = rd_strdup(pattern);
+
+ return rkpat;
+}
+
+
+
+int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str) {
+ rd_kafka_pattern_t *rkpat;
+
+ TAILQ_FOREACH(rkpat, &plist->rkpl_head, rkpat_link) {
+ if (rd_regex_exec(rkpat->rkpat_re, str))
+ return 1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Append pattern to list.
+ */
+int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist,
+ const char *pattern,
+ char *errstr,
+ int errstr_size) {
+ rd_kafka_pattern_t *rkpat;
+ rkpat = rd_kafka_pattern_new(pattern, errstr, errstr_size);
+ if (!rkpat)
+ return -1;
+
+ rd_kafka_pattern_add(plist, rkpat);
+ return 0;
+}
+
+/**
+ * Remove matching patterns.
+ * Returns the number of removed patterns.
+ */
+int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist,
+ const char *pattern) {
+ rd_kafka_pattern_t *rkpat, *rkpat_tmp;
+ int cnt = 0;
+
+ TAILQ_FOREACH_SAFE(rkpat, &plist->rkpl_head, rkpat_link, rkpat_tmp) {
+ if (!strcmp(rkpat->rkpat_orig, pattern)) {
+ rd_kafka_pattern_destroy(plist, rkpat);
+ cnt++;
+ }
+ }
+ return cnt;
+}
+
+/**
+ * Parse a patternlist and populate a list with it.
+ */
+static int rd_kafka_pattern_list_parse(rd_kafka_pattern_list_t *plist,
+ const char *patternlist,
+ char *errstr,
+ size_t errstr_size) {
+ char *s;
+ rd_strdupa(&s, patternlist);
+
+ while (s && *s) {
+ char *t = s;
+ char re_errstr[256];
+
+ /* Find separator */
+ while ((t = strchr(t, ','))) {
+ if (t > s && *(t - 1) == ',') {
+ /* separator was escaped,
+ remove escape and scan again. */
+ memmove(t - 1, t, strlen(t) + 1);
+ t++;
+ } else {
+ *t = '\0';
+ t++;
+ break;
+ }
+ }
+
+ if (rd_kafka_pattern_list_append(plist, s, re_errstr,
+ sizeof(re_errstr)) == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to parse pattern \"%s\": "
+ "%s",
+ s, re_errstr);
+ rd_kafka_pattern_list_clear(plist);
+ return -1;
+ }
+
+ s = t;
+ }
+
+ return 0;
+}
+
+
+/**
+ * Clear a pattern list.
+ */
+void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist) {
+ rd_kafka_pattern_t *rkpat;
+
+ while ((rkpat = TAILQ_FIRST(&plist->rkpl_head)))
+ rd_kafka_pattern_destroy(plist, rkpat);
+
+ if (plist->rkpl_orig) {
+ rd_free(plist->rkpl_orig);
+ plist->rkpl_orig = NULL;
+ }
+}
+
+
+/**
+ * Free a pattern list previously created with list_new()
+ */
+void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist) {
+ rd_kafka_pattern_list_clear(plist);
+ rd_free(plist);
+}
+
+/**
+ * Initialize a pattern list, optionally populating it with the
+ * comma-separated patterns in 'patternlist'.
+ */
+int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist,
+ const char *patternlist,
+ char *errstr,
+ size_t errstr_size) {
+ TAILQ_INIT(&plist->rkpl_head);
+ if (patternlist) {
+ if (rd_kafka_pattern_list_parse(plist, patternlist, errstr,
+ errstr_size) == -1)
+ return -1;
+ plist->rkpl_orig = rd_strdup(patternlist);
+ } else
+ plist->rkpl_orig = NULL;
+
+ return 0;
+}
+
+
+/**
+ * Allocate and initialize a new list.
+ */
+rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist,
+ char *errstr,
+ int errstr_size) {
+ rd_kafka_pattern_list_t *plist;
+
+ plist = rd_calloc(1, sizeof(*plist));
+
+ if (rd_kafka_pattern_list_init(plist, patternlist, errstr,
+ errstr_size) == -1) {
+ rd_free(plist);
+ return NULL;
+ }
+
+ return plist;
+}
+
+
+/**
+ * Make a copy of a pattern list.
+ */
+rd_kafka_pattern_list_t *
+rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src) {
+ char errstr[16];
+ return rd_kafka_pattern_list_new(src->rkpl_orig, errstr,
+ sizeof(errstr));
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h
new file mode 100644
index 000000000..88d183cd3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_pattern.h
@@ -0,0 +1,70 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_PATTERN_H_
+#define _RDKAFKA_PATTERN_H_
+
+#include "rdregex.h"
+
+typedef struct rd_kafka_pattern_s {
+ TAILQ_ENTRY(rd_kafka_pattern_s) rkpat_link;
+
+ rd_regex_t *rkpat_re; /* Compiled regex */
+ char *rkpat_orig; /* Original pattern */
+} rd_kafka_pattern_t;
+
+typedef struct rd_kafka_pattern_list_s {
+ TAILQ_HEAD(, rd_kafka_pattern_s) rkpl_head;
+ char *rkpl_orig;
+} rd_kafka_pattern_list_t;
+
+void rd_kafka_pattern_destroy(rd_kafka_pattern_list_t *plist,
+ rd_kafka_pattern_t *rkpat);
+void rd_kafka_pattern_add(rd_kafka_pattern_list_t *plist,
+ rd_kafka_pattern_t *rkpat);
+rd_kafka_pattern_t *
+rd_kafka_pattern_new(const char *pattern, char *errstr, int errstr_size);
+int rd_kafka_pattern_match(rd_kafka_pattern_list_t *plist, const char *str);
+int rd_kafka_pattern_list_append(rd_kafka_pattern_list_t *plist,
+ const char *pattern,
+ char *errstr,
+ int errstr_size);
+int rd_kafka_pattern_list_remove(rd_kafka_pattern_list_t *plist,
+ const char *pattern);
+void rd_kafka_pattern_list_clear(rd_kafka_pattern_list_t *plist);
+void rd_kafka_pattern_list_destroy(rd_kafka_pattern_list_t *plist);
+int rd_kafka_pattern_list_init(rd_kafka_pattern_list_t *plist,
+ const char *patternlist,
+ char *errstr,
+ size_t errstr_size);
+rd_kafka_pattern_list_t *rd_kafka_pattern_list_new(const char *patternlist,
+ char *errstr,
+ int errstr_size);
+rd_kafka_pattern_list_t *
+rd_kafka_pattern_list_copy(rd_kafka_pattern_list_t *src);
+
+#endif /* _RDKAFKA_PATTERN_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c
new file mode 100644
index 000000000..f58bc5060
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.c
@@ -0,0 +1,213 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_plugin.h"
+#include "rddl.h"
+
+
+typedef struct rd_kafka_plugin_s {
+ char *rkplug_path; /* Library path */
+ rd_kafka_t *rkplug_rk; /* Backpointer to the rk handle */
+ void *rkplug_handle; /* dlopen (or similar) handle */
+ void *rkplug_opaque; /* Plugin's opaque */
+
+} rd_kafka_plugin_t;
+
+
+/**
+ * @brief Plugin path comparator
+ */
+static int rd_kafka_plugin_cmp(const void *_a, const void *_b) {
+ const rd_kafka_plugin_t *a = _a, *b = _b;
+
+ return strcmp(a->rkplug_path, b->rkplug_path);
+}
+
+
+/**
+ * @brief Add plugin (by library path) and calls its conf_init() constructor
+ *
+ * @returns an error code on error.
+ * @remark duplicate plugins are silently ignored.
+ *
+ * @remark Libraries are refcounted and thus not unloaded until all
+ * plugins referencing the library have been destroyed.
+ * (dlopen() and LoadLibrary() does this for us)
+ */
+static rd_kafka_resp_err_t rd_kafka_plugin_new(rd_kafka_conf_t *conf,
+ const char *path,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_plugin_t *rkplug;
+ const rd_kafka_plugin_t skel = {.rkplug_path = (char *)path};
+ rd_kafka_plugin_f_conf_init_t *conf_init;
+ rd_kafka_resp_err_t err;
+ void *handle;
+ void *plug_opaque = NULL;
+
+ /* Avoid duplicates */
+ if (rd_list_find(&conf->plugins, &skel, rd_kafka_plugin_cmp)) {
+ rd_snprintf(errstr, errstr_size, "Ignoring duplicate plugin %s",
+ path);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Loading plugin \"%s\"", path);
+
+ /* Attempt to load library */
+ if (!(handle = rd_dl_open(path, errstr, errstr_size))) {
+ rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
+ "Failed to load plugin \"%s\": %s", path, errstr);
+ return RD_KAFKA_RESP_ERR__FS;
+ }
+
+ /* Find conf_init() function */
+ if (!(conf_init =
+ rd_dl_sym(handle, "conf_init", errstr, errstr_size))) {
+ rd_dl_close(handle);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ /* Call conf_init() */
+ rd_kafka_dbg0(conf, PLUGIN, "PLUGINIT",
+ "Calling plugin \"%s\" conf_init()", path);
+
+ if ((err = conf_init(conf, &plug_opaque, errstr, errstr_size))) {
+ rd_dl_close(handle);
+ return err;
+ }
+
+ rkplug = rd_calloc(1, sizeof(*rkplug));
+ rkplug->rkplug_path = rd_strdup(path);
+ rkplug->rkplug_handle = handle;
+ rkplug->rkplug_opaque = plug_opaque;
+
+ rd_list_add(&conf->plugins, rkplug);
+
+ rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD", "Plugin \"%s\" loaded", path);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Free the plugin, any conf_destroy() interceptors will have been
+ * called prior to this call.
+ * @remark plugin is not removed from any list (caller's responsibility)
+ * @remark this relies on the actual library loader to refcount libraries,
+ * especially in the config copy case.
+ * This is true for POSIX dlopen() and Win32 LoadLibrary().
+ * @locality application thread
+ */
+static void rd_kafka_plugin_destroy(rd_kafka_plugin_t *rkplug) {
+ rd_dl_close(rkplug->rkplug_handle);
+ rd_free(rkplug->rkplug_path);
+ rd_free(rkplug);
+}
+
+
+
+/**
+ * @brief Initialize all configured plugins.
+ *
+ * @remark Any previously loaded plugins will be unloaded.
+ *
+ * @returns the error code of the first failing plugin.
+ * @locality application thread calling rd_kafka_new().
+ */
+static rd_kafka_conf_res_t rd_kafka_plugins_conf_set0(rd_kafka_conf_t *conf,
+ const char *paths,
+ char *errstr,
+ size_t errstr_size) {
+ char *s;
+
+ rd_list_destroy(&conf->plugins);
+ rd_list_init(&conf->plugins, 0, (void *)&rd_kafka_plugin_destroy);
+
+ if (!paths || !*paths)
+ return RD_KAFKA_CONF_OK;
+
+ /* Split paths by ; */
+ rd_strdupa(&s, paths);
+
+ rd_kafka_dbg0(conf, PLUGIN, "PLUGLOAD",
+ "Loading plugins from conf object %p: \"%s\"", conf,
+ paths);
+
+ while (s && *s) {
+ char *path = s;
+ char *t;
+ rd_kafka_resp_err_t err;
+
+ if ((t = strchr(s, ';'))) {
+ *t = '\0';
+ s = t + 1;
+ } else {
+ s = NULL;
+ }
+
+ if ((err = rd_kafka_plugin_new(conf, path, errstr,
+ errstr_size))) {
+ /* Failed to load plugin */
+ size_t elen = errstr_size > 0 ? strlen(errstr) : 0;
+
+ /* See if there is room for appending the
+ * plugin path to the error message. */
+ if (elen + strlen("(plugin )") + strlen(path) <
+ errstr_size)
+ rd_snprintf(errstr + elen, errstr_size - elen,
+ " (plugin %s)", path);
+
+ rd_list_destroy(&conf->plugins);
+ return RD_KAFKA_CONF_INVALID;
+ }
+ }
+
+ return RD_KAFKA_CONF_OK;
+}
+
+
+/**
+ * @brief Conf setter for "plugin.library.paths"
+ */
+rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope,
+ void *pconf,
+ const char *name,
+ const char *value,
+ void *dstptr,
+ rd_kafka_conf_set_mode_t set_mode,
+ char *errstr,
+ size_t errstr_size) {
+
+ assert(scope == _RK_GLOBAL);
+ return rd_kafka_plugins_conf_set0(
+ (rd_kafka_conf_t *)pconf,
+ set_mode == _RK_CONF_PROP_SET_DEL ? NULL : value, errstr,
+ errstr_size);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h
new file mode 100644
index 000000000..1783d5f53
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_plugin.h
@@ -0,0 +1,41 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_PLUGIN_H
+#define _RDKAFKA_PLUGIN_H
+
+rd_kafka_conf_res_t rd_kafka_plugins_conf_set(int scope,
+ void *conf,
+ const char *name,
+ const char *value,
+ void *dstptr,
+ rd_kafka_conf_set_mode_t set_mode,
+ char *errstr,
+ size_t errstr_size);
+
+#endif /* _RDKAFKA_PLUGIN_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h
new file mode 100644
index 000000000..396765857
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_proto.h
@@ -0,0 +1,655 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_PROTO_H_
+#define _RDKAFKA_PROTO_H_
+
+
+#include "rdendian.h"
+#include "rdvarint.h"
+
+/* Protocol defines */
+#include "rdkafka_protocol.h"
+
+
+
+/** Default generic retry count for failed requests.
+ * This may be overriden for specific request types. */
+#define RD_KAFKA_REQUEST_DEFAULT_RETRIES 2
+
+/** Max (practically infinite) retry count */
+#define RD_KAFKA_REQUEST_MAX_RETRIES INT_MAX
+
+/** Do not retry request */
+#define RD_KAFKA_REQUEST_NO_RETRIES 0
+
+
+/**
+ * Request types
+ */
+struct rd_kafkap_reqhdr {
+ int32_t Size;
+ int16_t ApiKey;
+ int16_t ApiVersion;
+ int32_t CorrId;
+ /* ClientId follows */
+};
+
+#define RD_KAFKAP_REQHDR_SIZE (4 + 2 + 2 + 4)
+#define RD_KAFKAP_RESHDR_SIZE (4 + 4)
+
+/**
+ * Response header
+ */
+struct rd_kafkap_reshdr {
+ int32_t Size;
+ int32_t CorrId;
+};
+
+
+/**
+ * Request type v1 (flexible version)
+ *
+ * i32 Size
+ * i16 ApiKey
+ * i16 ApiVersion
+ * i32 CorrId
+ * string ClientId (2-byte encoding, not compact string)
+ * uvarint Tags
+ * <Request payload>
+ * uvarint EndTags
+ *
+ * Any struct-type (non-primitive or array type) field in the request payload
+ * must also have a trailing tags list, this goes for structs in arrays as well.
+ */
+
+/**
+ * @brief Protocol request type (ApiKey) to name/string.
+ *
+ * Generate updates to this list with generate_proto.sh.
+ */
+static RD_UNUSED const char *rd_kafka_ApiKey2str(int16_t ApiKey) {
+ static const char *names[] = {
+ [RD_KAFKAP_Produce] = "Produce",
+ [RD_KAFKAP_Fetch] = "Fetch",
+ [RD_KAFKAP_ListOffsets] = "ListOffsets",
+ [RD_KAFKAP_Metadata] = "Metadata",
+ [RD_KAFKAP_LeaderAndIsr] = "LeaderAndIsr",
+ [RD_KAFKAP_StopReplica] = "StopReplica",
+ [RD_KAFKAP_UpdateMetadata] = "UpdateMetadata",
+ [RD_KAFKAP_ControlledShutdown] = "ControlledShutdown",
+ [RD_KAFKAP_OffsetCommit] = "OffsetCommit",
+ [RD_KAFKAP_OffsetFetch] = "OffsetFetch",
+ [RD_KAFKAP_FindCoordinator] = "FindCoordinator",
+ [RD_KAFKAP_JoinGroup] = "JoinGroup",
+ [RD_KAFKAP_Heartbeat] = "Heartbeat",
+ [RD_KAFKAP_LeaveGroup] = "LeaveGroup",
+ [RD_KAFKAP_SyncGroup] = "SyncGroup",
+ [RD_KAFKAP_DescribeGroups] = "DescribeGroups",
+ [RD_KAFKAP_ListGroups] = "ListGroups",
+ [RD_KAFKAP_SaslHandshake] = "SaslHandshake",
+ [RD_KAFKAP_ApiVersion] = "ApiVersion",
+ [RD_KAFKAP_CreateTopics] = "CreateTopics",
+ [RD_KAFKAP_DeleteTopics] = "DeleteTopics",
+ [RD_KAFKAP_DeleteRecords] = "DeleteRecords",
+ [RD_KAFKAP_InitProducerId] = "InitProducerId",
+ [RD_KAFKAP_OffsetForLeaderEpoch] = "OffsetForLeaderEpoch",
+ [RD_KAFKAP_AddPartitionsToTxn] = "AddPartitionsToTxn",
+ [RD_KAFKAP_AddOffsetsToTxn] = "AddOffsetsToTxn",
+ [RD_KAFKAP_EndTxn] = "EndTxn",
+ [RD_KAFKAP_WriteTxnMarkers] = "WriteTxnMarkers",
+ [RD_KAFKAP_TxnOffsetCommit] = "TxnOffsetCommit",
+ [RD_KAFKAP_DescribeAcls] = "DescribeAcls",
+ [RD_KAFKAP_CreateAcls] = "CreateAcls",
+ [RD_KAFKAP_DeleteAcls] = "DeleteAcls",
+ [RD_KAFKAP_DescribeConfigs] = "DescribeConfigs",
+ [RD_KAFKAP_AlterConfigs] = "AlterConfigs",
+ [RD_KAFKAP_AlterReplicaLogDirs] = "AlterReplicaLogDirs",
+ [RD_KAFKAP_DescribeLogDirs] = "DescribeLogDirs",
+ [RD_KAFKAP_SaslAuthenticate] = "SaslAuthenticate",
+ [RD_KAFKAP_CreatePartitions] = "CreatePartitions",
+ [RD_KAFKAP_CreateDelegationToken] = "CreateDelegationToken",
+ [RD_KAFKAP_RenewDelegationToken] = "RenewDelegationToken",
+ [RD_KAFKAP_ExpireDelegationToken] = "ExpireDelegationToken",
+ [RD_KAFKAP_DescribeDelegationToken] = "DescribeDelegationToken",
+ [RD_KAFKAP_DeleteGroups] = "DeleteGroups",
+ [RD_KAFKAP_ElectLeaders] = "ElectLeadersRequest",
+ [RD_KAFKAP_IncrementalAlterConfigs] =
+ "IncrementalAlterConfigsRequest",
+ [RD_KAFKAP_AlterPartitionReassignments] =
+ "AlterPartitionReassignmentsRequest",
+ [RD_KAFKAP_ListPartitionReassignments] =
+ "ListPartitionReassignmentsRequest",
+ [RD_KAFKAP_OffsetDelete] = "OffsetDeleteRequest",
+ [RD_KAFKAP_DescribeClientQuotas] = "DescribeClientQuotasRequest",
+ [RD_KAFKAP_AlterClientQuotas] = "AlterClientQuotasRequest",
+ [RD_KAFKAP_DescribeUserScramCredentials] =
+ "DescribeUserScramCredentialsRequest",
+ [RD_KAFKAP_AlterUserScramCredentials] =
+ "AlterUserScramCredentialsRequest",
+ [RD_KAFKAP_Vote] = "VoteRequest",
+ [RD_KAFKAP_BeginQuorumEpoch] = "BeginQuorumEpochRequest",
+ [RD_KAFKAP_EndQuorumEpoch] = "EndQuorumEpochRequest",
+ [RD_KAFKAP_DescribeQuorum] = "DescribeQuorumRequest",
+ [RD_KAFKAP_AlterIsr] = "AlterIsrRequest",
+ [RD_KAFKAP_UpdateFeatures] = "UpdateFeaturesRequest",
+ [RD_KAFKAP_Envelope] = "EnvelopeRequest",
+ [RD_KAFKAP_FetchSnapshot] = "FetchSnapshot",
+ [RD_KAFKAP_DescribeCluster] = "DescribeCluster",
+ [RD_KAFKAP_DescribeProducers] = "DescribeProducers",
+ [RD_KAFKAP_BrokerHeartbeat] = "BrokerHeartbeat",
+ [RD_KAFKAP_UnregisterBroker] = "UnregisterBroker",
+ [RD_KAFKAP_DescribeTransactions] = "DescribeTransactions",
+ [RD_KAFKAP_ListTransactions] = "ListTransactions",
+ [RD_KAFKAP_AllocateProducerIds] = "AllocateProducerIds",
+ };
+ static RD_TLS char ret[64];
+
+ if (ApiKey < 0 || ApiKey >= (int)RD_ARRAYSIZE(names) ||
+ !names[ApiKey]) {
+ rd_snprintf(ret, sizeof(ret), "Unknown-%hd?", ApiKey);
+ return ret;
+ }
+
+ return names[ApiKey];
+}
+
+
+
+/**
+ * @brief ApiKey version support tuple.
+ */
+struct rd_kafka_ApiVersion {
+ int16_t ApiKey;
+ int16_t MinVer;
+ int16_t MaxVer;
+};
+
+/**
+ * @brief ApiVersion.ApiKey comparator.
+ */
+static RD_UNUSED int rd_kafka_ApiVersion_key_cmp(const void *_a,
+ const void *_b) {
+ const struct rd_kafka_ApiVersion *a =
+ (const struct rd_kafka_ApiVersion *)_a;
+ const struct rd_kafka_ApiVersion *b =
+ (const struct rd_kafka_ApiVersion *)_b;
+ return RD_CMP(a->ApiKey, b->ApiKey);
+}
+
+
+
+typedef enum {
+ RD_KAFKA_READ_UNCOMMITTED = 0,
+ RD_KAFKA_READ_COMMITTED = 1
+} rd_kafka_isolation_level_t;
+
+
+
+#define RD_KAFKA_CTRL_MSG_ABORT 0
+#define RD_KAFKA_CTRL_MSG_COMMIT 1
+
+
+/**
+ * @enum Coordinator type, used with FindCoordinatorRequest
+ */
+typedef enum rd_kafka_coordtype_t {
+ RD_KAFKA_COORD_GROUP = 0,
+ RD_KAFKA_COORD_TXN = 1
+} rd_kafka_coordtype_t;
+
+
+/**
+ *
+ * Kafka protocol string representation prefixed with a convenience header
+ *
+ * Serialized format:
+ * { uint16, data.. }
+ *
+ */
+typedef struct rd_kafkap_str_s {
+ /* convenience header (aligned access, host endian) */
+ int len; /* Kafka string length (-1=NULL, 0=empty, >0=string) */
+ const char *str; /* points into data[] or other memory,
+ * not NULL-terminated */
+} rd_kafkap_str_t;
+
+
+#define RD_KAFKAP_STR_LEN_NULL -1
+#define RD_KAFKAP_STR_IS_NULL(kstr) ((kstr)->len == RD_KAFKAP_STR_LEN_NULL)
+
+/* Returns the length of the string of a kafka protocol string representation */
+#define RD_KAFKAP_STR_LEN0(len) ((len) == RD_KAFKAP_STR_LEN_NULL ? 0 : (len))
+#define RD_KAFKAP_STR_LEN(kstr) RD_KAFKAP_STR_LEN0((kstr)->len)
+
+/* Returns the actual size of a kafka protocol string representation. */
+#define RD_KAFKAP_STR_SIZE0(len) (2 + RD_KAFKAP_STR_LEN0(len))
+#define RD_KAFKAP_STR_SIZE(kstr) RD_KAFKAP_STR_SIZE0((kstr)->len)
+
+
+/** @returns true if kstr is pre-serialized through .._new() */
+#define RD_KAFKAP_STR_IS_SERIALIZED(kstr) \
+ (((const char *)((kstr) + 1)) + 2 == (const char *)((kstr)->str))
+
+/* Serialized Kafka string: only works for _new() kstrs.
+ * Check with RD_KAFKAP_STR_IS_SERIALIZED */
+#define RD_KAFKAP_STR_SER(kstr) ((kstr) + 1)
+
+/* Macro suitable for "%.*s" printing. */
+#define RD_KAFKAP_STR_PR(kstr) \
+ (int)((kstr)->len == RD_KAFKAP_STR_LEN_NULL ? 0 : (kstr)->len), \
+ (kstr)->str
+
+/* strndupa() a Kafka string */
+#define RD_KAFKAP_STR_DUPA(destptr, kstr) \
+ rd_strndupa((destptr), (kstr)->str, RD_KAFKAP_STR_LEN(kstr))
+
+/* strndup() a Kafka string */
+#define RD_KAFKAP_STR_DUP(kstr) rd_strndup((kstr)->str, RD_KAFKAP_STR_LEN(kstr))
+
+#define RD_KAFKAP_STR_INITIALIZER \
+ { .len = RD_KAFKAP_STR_LEN_NULL, .str = NULL }
+
+/**
+ * Frees a Kafka string previously allocated with `rd_kafkap_str_new()`
+ */
+static RD_UNUSED void rd_kafkap_str_destroy(rd_kafkap_str_t *kstr) {
+ rd_free(kstr);
+}
+
+
+
+/**
+ * Allocate a new Kafka string and make a copy of 'str'.
+ * If 'len' is -1 the length will be calculated.
+ * Supports Kafka NULL strings.
+ * Nul-terminates the string, but the trailing \0 is not part of
+ * the serialized string.
+ */
+static RD_INLINE RD_UNUSED rd_kafkap_str_t *rd_kafkap_str_new(const char *str,
+ int len) {
+ rd_kafkap_str_t *kstr;
+ int16_t klen;
+
+ if (!str)
+ len = RD_KAFKAP_STR_LEN_NULL;
+ else if (len == -1)
+ len = (int)strlen(str);
+
+ kstr = (rd_kafkap_str_t *)rd_malloc(
+ sizeof(*kstr) + 2 + (len == RD_KAFKAP_STR_LEN_NULL ? 0 : len + 1));
+ kstr->len = len;
+
+ /* Serialised format: 16-bit string length */
+ klen = htobe16(len);
+ memcpy(kstr + 1, &klen, 2);
+
+ /* Pre-Serialised format: non null-terminated string */
+ if (len == RD_KAFKAP_STR_LEN_NULL)
+ kstr->str = NULL;
+ else {
+ kstr->str = ((const char *)(kstr + 1)) + 2;
+ memcpy((void *)kstr->str, str, len);
+ ((char *)kstr->str)[len] = '\0';
+ }
+
+ return kstr;
+}
+
+
+/**
+ * Makes a copy of `src`. The copy will be fully allocated and should
+ * be freed with rd_kafka_pstr_destroy()
+ */
+static RD_INLINE RD_UNUSED rd_kafkap_str_t *
+rd_kafkap_str_copy(const rd_kafkap_str_t *src) {
+ return rd_kafkap_str_new(src->str, src->len);
+}
+
+static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp(const rd_kafkap_str_t *a,
+ const rd_kafkap_str_t *b) {
+ int minlen = RD_MIN(a->len, b->len);
+ int r = memcmp(a->str, b->str, minlen);
+ if (r)
+ return r;
+ else
+ return RD_CMP(a->len, b->len);
+}
+
+static RD_INLINE RD_UNUSED int rd_kafkap_str_cmp_str(const rd_kafkap_str_t *a,
+ const char *str) {
+ int len = (int)strlen(str);
+ int minlen = RD_MIN(a->len, len);
+ int r = memcmp(a->str, str, minlen);
+ if (r)
+ return r;
+ else
+ return RD_CMP(a->len, len);
+}
+
+static RD_INLINE RD_UNUSED int
+rd_kafkap_str_cmp_str2(const char *str, const rd_kafkap_str_t *b) {
+ int len = (int)strlen(str);
+ int minlen = RD_MIN(b->len, len);
+ int r = memcmp(str, b->str, minlen);
+ if (r)
+ return r;
+ else
+ return RD_CMP(len, b->len);
+}
+
+
+
+/**
+ *
+ * Kafka protocol bytes array representation prefixed with a convenience header
+ *
+ * Serialized format:
+ * { uint32, data.. }
+ *
+ */
+typedef struct rd_kafkap_bytes_s {
+ /* convenience header (aligned access, host endian) */
+ int32_t len; /* Kafka bytes length (-1=NULL, 0=empty, >0=data) */
+ const void *data; /* points just past the struct, or other memory,
+ * not NULL-terminated */
+ const char _data[1]; /* Bytes following struct when new()ed */
+} rd_kafkap_bytes_t;
+
+
+#define RD_KAFKAP_BYTES_LEN_NULL -1
+#define RD_KAFKAP_BYTES_IS_NULL(kbytes) \
+ ((kbytes)->len == RD_KAFKAP_BYTES_LEN_NULL)
+
+/* Returns the length of the bytes of a kafka protocol bytes representation */
+#define RD_KAFKAP_BYTES_LEN0(len) \
+ ((len) == RD_KAFKAP_BYTES_LEN_NULL ? 0 : (len))
+#define RD_KAFKAP_BYTES_LEN(kbytes) RD_KAFKAP_BYTES_LEN0((kbytes)->len)
+
+/* Returns the actual size of a kafka protocol bytes representation. */
+#define RD_KAFKAP_BYTES_SIZE0(len) (4 + RD_KAFKAP_BYTES_LEN0(len))
+#define RD_KAFKAP_BYTES_SIZE(kbytes) RD_KAFKAP_BYTES_SIZE0((kbytes)->len)
+
+/** @returns true if kbyes is pre-serialized through .._new() */
+#define RD_KAFKAP_BYTES_IS_SERIALIZED(kstr) \
+ (((const char *)((kbytes) + 1)) + 2 == (const char *)((kbytes)->data))
+
+/* Serialized Kafka bytes: only works for _new() kbytes */
+#define RD_KAFKAP_BYTES_SER(kbytes) ((kbytes) + 1)
+
+
+/**
+ * Frees a Kafka bytes previously allocated with `rd_kafkap_bytes_new()`
+ */
+static RD_UNUSED void rd_kafkap_bytes_destroy(rd_kafkap_bytes_t *kbytes) {
+ rd_free(kbytes);
+}
+
+
+/**
+ * @brief Allocate a new Kafka bytes and make a copy of 'bytes'.
+ * If \p len > 0 but \p bytes is NULL no copying is performed by
+ * the bytes structure will be allocated to fit \p size bytes.
+ *
+ * Supports:
+ * - Kafka NULL bytes (bytes==NULL,len==0),
+ * - Empty bytes (bytes!=NULL,len==0)
+ * - Copy data (bytes!=NULL,len>0)
+ * - No-copy, just alloc (bytes==NULL,len>0)
+ */
+static RD_INLINE RD_UNUSED rd_kafkap_bytes_t *
+rd_kafkap_bytes_new(const char *bytes, int32_t len) {
+ rd_kafkap_bytes_t *kbytes;
+ int32_t klen;
+
+ if (!bytes && !len)
+ len = RD_KAFKAP_BYTES_LEN_NULL;
+
+ kbytes = (rd_kafkap_bytes_t *)rd_malloc(
+ sizeof(*kbytes) + 4 + (len == RD_KAFKAP_BYTES_LEN_NULL ? 0 : len));
+ kbytes->len = len;
+
+ klen = htobe32(len);
+ memcpy((void *)(kbytes + 1), &klen, 4);
+
+ if (len == RD_KAFKAP_BYTES_LEN_NULL)
+ kbytes->data = NULL;
+ else {
+ kbytes->data = ((const char *)(kbytes + 1)) + 4;
+ if (bytes)
+ memcpy((void *)kbytes->data, bytes, len);
+ }
+
+ return kbytes;
+}
+
+
+/**
+ * Makes a copy of `src`. The copy will be fully allocated and should
+ * be freed with rd_kafkap_bytes_destroy()
+ */
+static RD_INLINE RD_UNUSED rd_kafkap_bytes_t *
+rd_kafkap_bytes_copy(const rd_kafkap_bytes_t *src) {
+ return rd_kafkap_bytes_new((const char *)src->data, src->len);
+}
+
+
+static RD_INLINE RD_UNUSED int rd_kafkap_bytes_cmp(const rd_kafkap_bytes_t *a,
+ const rd_kafkap_bytes_t *b) {
+ int minlen = RD_MIN(a->len, b->len);
+ int r = memcmp(a->data, b->data, minlen);
+ if (r)
+ return r;
+ else
+ return RD_CMP(a->len, b->len);
+}
+
+static RD_INLINE RD_UNUSED int
+rd_kafkap_bytes_cmp_data(const rd_kafkap_bytes_t *a,
+ const char *data,
+ int len) {
+ int minlen = RD_MIN(a->len, len);
+ int r = memcmp(a->data, data, minlen);
+ if (r)
+ return r;
+ else
+ return RD_CMP(a->len, len);
+}
+
+
+
+typedef struct rd_kafka_buf_s rd_kafka_buf_t;
+
+
+#define RD_KAFKA_NODENAME_SIZE 256
+
+
+
+/**
+ * @brief Message overheads (worst-case)
+ */
+
+/**
+ * MsgVersion v0..v1
+ */
+/* Offset + MessageSize */
+#define RD_KAFKAP_MESSAGESET_V0_HDR_SIZE (8 + 4)
+/* CRC + Magic + Attr + KeyLen + ValueLen */
+#define RD_KAFKAP_MESSAGE_V0_HDR_SIZE (4 + 1 + 1 + 4 + 4)
+/* CRC + Magic + Attr + Timestamp + KeyLen + ValueLen */
+#define RD_KAFKAP_MESSAGE_V1_HDR_SIZE (4 + 1 + 1 + 8 + 4 + 4)
+/* Maximum per-message overhead */
+#define RD_KAFKAP_MESSAGE_V0_OVERHEAD \
+ (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V0_HDR_SIZE)
+#define RD_KAFKAP_MESSAGE_V1_OVERHEAD \
+ (RD_KAFKAP_MESSAGESET_V0_HDR_SIZE + RD_KAFKAP_MESSAGE_V1_HDR_SIZE)
+
+/**
+ * MsgVersion v2
+ */
+#define RD_KAFKAP_MESSAGE_V2_MAX_OVERHEAD \
+ ( /* Length (varint) */ \
+ RD_UVARINT_ENC_SIZEOF(int32_t) + /* Attributes */ \
+ 1 + /* TimestampDelta (varint) */ \
+ RD_UVARINT_ENC_SIZEOF(int64_t) + /* OffsetDelta (varint) */ \
+ RD_UVARINT_ENC_SIZEOF(int32_t) + /* KeyLen (varint) */ \
+ RD_UVARINT_ENC_SIZEOF(int32_t) + /* ValueLen (varint) */ \
+ RD_UVARINT_ENC_SIZEOF(int32_t) + /* HeaderCnt (varint): */ \
+ RD_UVARINT_ENC_SIZEOF(int32_t))
+
+#define RD_KAFKAP_MESSAGE_V2_MIN_OVERHEAD \
+ ( /* Length (varint) */ \
+ RD_UVARINT_ENC_SIZE_0() + /* Attributes */ \
+ 1 + /* TimestampDelta (varint) */ \
+ RD_UVARINT_ENC_SIZE_0() + /* OffsetDelta (varint) */ \
+ RD_UVARINT_ENC_SIZE_0() + /* KeyLen (varint) */ \
+ RD_UVARINT_ENC_SIZE_0() + /* ValueLen (varint) */ \
+ RD_UVARINT_ENC_SIZE_0() + /* HeaderCnt (varint): */ \
+ RD_UVARINT_ENC_SIZE_0())
+
+
+/**
+ * @brief MessageSets are not explicitly versioned but depends on the
+ * Produce/Fetch API version and the encompassed Message versions.
+ * We use the Message version (MsgVersion, aka MagicByte) to describe
+ * the MessageSet version, that is, MsgVersion <= 1 uses the old
+ * MessageSet version (v0?) while MsgVersion 2 uses MessageSet version v2
+ */
+
+/* Old MessageSet header: none */
+#define RD_KAFKAP_MSGSET_V0_SIZE 0
+
+/* MessageSet v2 header */
+#define RD_KAFKAP_MSGSET_V2_SIZE \
+ (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4 + 4)
+
+/* Byte offsets for MessageSet fields */
+#define RD_KAFKAP_MSGSET_V2_OF_Length (8)
+#define RD_KAFKAP_MSGSET_V2_OF_MagicByte (8 + 4 + 4)
+#define RD_KAFKAP_MSGSET_V2_OF_CRC (8 + 4 + 4 + 1)
+#define RD_KAFKAP_MSGSET_V2_OF_Attributes (8 + 4 + 4 + 1 + 4)
+#define RD_KAFKAP_MSGSET_V2_OF_LastOffsetDelta (8 + 4 + 4 + 1 + 4 + 2)
+#define RD_KAFKAP_MSGSET_V2_OF_BaseTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4)
+#define RD_KAFKAP_MSGSET_V2_OF_MaxTimestamp (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8)
+#define RD_KAFKAP_MSGSET_V2_OF_ProducerId (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8)
+#define RD_KAFKAP_MSGSET_V2_OF_ProducerEpoch \
+ (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8)
+#define RD_KAFKAP_MSGSET_V2_OF_BaseSequence \
+ (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2)
+#define RD_KAFKAP_MSGSET_V2_OF_RecordCount \
+ (8 + 4 + 4 + 1 + 4 + 2 + 4 + 8 + 8 + 8 + 2 + 4)
+
+
+
+/**
+ * @name Producer ID and Epoch for the Idempotent Producer
+ * @{
+ *
+ */
+
+/**
+ * @brief Producer ID and Epoch
+ */
+typedef struct rd_kafka_pid_s {
+ int64_t id; /**< Producer Id */
+ int16_t epoch; /**< Producer Epoch */
+} rd_kafka_pid_t;
+
+#define RD_KAFKA_PID_INITIALIZER \
+ { -1, -1 }
+
+/**
+ * @returns true if \p PID is valid
+ */
+#define rd_kafka_pid_valid(PID) ((PID).id != -1)
+
+/**
+ * @brief Check two pids for equality
+ */
+static RD_UNUSED RD_INLINE int rd_kafka_pid_eq(const rd_kafka_pid_t a,
+ const rd_kafka_pid_t b) {
+ return a.id == b.id && a.epoch == b.epoch;
+}
+
+/**
+ * @brief Pid+epoch comparator
+ */
+static RD_UNUSED int rd_kafka_pid_cmp(const void *_a, const void *_b) {
+ const rd_kafka_pid_t *a = _a, *b = _b;
+
+ if (a->id < b->id)
+ return -1;
+ else if (a->id > b->id)
+ return 1;
+
+ return (int)a->epoch - (int)b->epoch;
+}
+
+
+/**
+ * @returns the string representation of a PID in a thread-safe
+ * static buffer.
+ */
+static RD_UNUSED const char *rd_kafka_pid2str(const rd_kafka_pid_t pid) {
+ static RD_TLS char buf[2][64];
+ static RD_TLS int i;
+
+ if (!rd_kafka_pid_valid(pid))
+ return "PID{Invalid}";
+
+ i = (i + 1) % 2;
+
+ rd_snprintf(buf[i], sizeof(buf[i]), "PID{Id:%" PRId64 ",Epoch:%hd}",
+ pid.id, pid.epoch);
+
+ return buf[i];
+}
+
+/**
+ * @brief Reset the PID to invalid/init state
+ */
+static RD_UNUSED RD_INLINE void rd_kafka_pid_reset(rd_kafka_pid_t *pid) {
+ pid->id = -1;
+ pid->epoch = -1;
+}
+
+
+/**
+ * @brief Bump the epoch of a valid PID
+ */
+static RD_UNUSED RD_INLINE rd_kafka_pid_t
+rd_kafka_pid_bump(const rd_kafka_pid_t old) {
+ rd_kafka_pid_t new_pid = {
+ old.id, (int16_t)(((int)old.epoch + 1) & (int)INT16_MAX)};
+ return new_pid;
+}
+
+/**@}*/
+
+
+#endif /* _RDKAFKA_PROTO_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h
new file mode 100644
index 000000000..60c099986
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_protocol.h
@@ -0,0 +1,120 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_PROTOCOL_H_
+#define _RDKAFKA_PROTOCOL_H_
+
+/**
+ * Kafka protocol defines.
+ *
+ * The separation from rdkafka_proto.h is to provide the protocol defines
+ * to C and C++ test code in tests/.
+ */
+
+#define RD_KAFKA_PORT 9092
+#define RD_KAFKA_PORT_STR "9092"
+
+
+/**
+ * Request types
+ *
+ * Generate updates to this list with generate_proto.sh.
+ */
+#define RD_KAFKAP_None -1
+#define RD_KAFKAP_Produce 0
+#define RD_KAFKAP_Fetch 1
+#define RD_KAFKAP_ListOffsets 2
+#define RD_KAFKAP_Metadata 3
+#define RD_KAFKAP_LeaderAndIsr 4
+#define RD_KAFKAP_StopReplica 5
+#define RD_KAFKAP_UpdateMetadata 6
+#define RD_KAFKAP_ControlledShutdown 7
+#define RD_KAFKAP_OffsetCommit 8
+#define RD_KAFKAP_OffsetFetch 9
+#define RD_KAFKAP_FindCoordinator 10
+#define RD_KAFKAP_JoinGroup 11
+#define RD_KAFKAP_Heartbeat 12
+#define RD_KAFKAP_LeaveGroup 13
+#define RD_KAFKAP_SyncGroup 14
+#define RD_KAFKAP_DescribeGroups 15
+#define RD_KAFKAP_ListGroups 16
+#define RD_KAFKAP_SaslHandshake 17
+#define RD_KAFKAP_ApiVersion 18
+#define RD_KAFKAP_CreateTopics 19
+#define RD_KAFKAP_DeleteTopics 20
+#define RD_KAFKAP_DeleteRecords 21
+#define RD_KAFKAP_InitProducerId 22
+#define RD_KAFKAP_OffsetForLeaderEpoch 23
+#define RD_KAFKAP_AddPartitionsToTxn 24
+#define RD_KAFKAP_AddOffsetsToTxn 25
+#define RD_KAFKAP_EndTxn 26
+#define RD_KAFKAP_WriteTxnMarkers 27
+#define RD_KAFKAP_TxnOffsetCommit 28
+#define RD_KAFKAP_DescribeAcls 29
+#define RD_KAFKAP_CreateAcls 30
+#define RD_KAFKAP_DeleteAcls 31
+#define RD_KAFKAP_DescribeConfigs 32
+#define RD_KAFKAP_AlterConfigs 33
+#define RD_KAFKAP_AlterReplicaLogDirs 34
+#define RD_KAFKAP_DescribeLogDirs 35
+#define RD_KAFKAP_SaslAuthenticate 36
+#define RD_KAFKAP_CreatePartitions 37
+#define RD_KAFKAP_CreateDelegationToken 38
+#define RD_KAFKAP_RenewDelegationToken 39
+#define RD_KAFKAP_ExpireDelegationToken 40
+#define RD_KAFKAP_DescribeDelegationToken 41
+#define RD_KAFKAP_DeleteGroups 42
+#define RD_KAFKAP_ElectLeaders 43
+#define RD_KAFKAP_IncrementalAlterConfigs 44
+#define RD_KAFKAP_AlterPartitionReassignments 45
+#define RD_KAFKAP_ListPartitionReassignments 46
+#define RD_KAFKAP_OffsetDelete 47
+#define RD_KAFKAP_DescribeClientQuotas 48
+#define RD_KAFKAP_AlterClientQuotas 49
+#define RD_KAFKAP_DescribeUserScramCredentials 50
+#define RD_KAFKAP_AlterUserScramCredentials 51
+#define RD_KAFKAP_Vote 52
+#define RD_KAFKAP_BeginQuorumEpoch 53
+#define RD_KAFKAP_EndQuorumEpoch 54
+#define RD_KAFKAP_DescribeQuorum 55
+#define RD_KAFKAP_AlterIsr 56
+#define RD_KAFKAP_UpdateFeatures 57
+#define RD_KAFKAP_Envelope 58
+#define RD_KAFKAP_FetchSnapshot 59
+#define RD_KAFKAP_DescribeCluster 60
+#define RD_KAFKAP_DescribeProducers 61
+#define RD_KAFKAP_BrokerHeartbeat 63
+#define RD_KAFKAP_UnregisterBroker 64
+#define RD_KAFKAP_DescribeTransactions 65
+#define RD_KAFKAP_ListTransactions 66
+#define RD_KAFKAP_AllocateProducerIds 67
+
+#define RD_KAFKAP__NUM 68
+
+
+#endif /* _RDKAFKA_PROTOCOL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c
new file mode 100644
index 000000000..57fce36b8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.c
@@ -0,0 +1,1085 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_interceptor.h"
+
+int RD_TLS rd_kafka_yield_thread = 0;
+
+void rd_kafka_yield(rd_kafka_t *rk) {
+ rd_kafka_yield_thread = 1;
+}
+
+
+/**
+ * @brief Check and reset yield flag.
+ * @returns rd_true if caller should yield, otherwise rd_false.
+ * @remarks rkq_lock MUST be held
+ */
+static RD_INLINE rd_bool_t rd_kafka_q_check_yield(rd_kafka_q_t *rkq) {
+ if (!(rkq->rkq_flags & RD_KAFKA_Q_F_YIELD))
+ return rd_false;
+
+ rkq->rkq_flags &= ~RD_KAFKA_Q_F_YIELD;
+ return rd_true;
+}
+/**
+ * Destroy a queue. refcnt must be at zero.
+ */
+void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq) {
+
+ mtx_lock(&rkq->rkq_lock);
+ if (unlikely(rkq->rkq_qio != NULL)) {
+ rd_free(rkq->rkq_qio);
+ rkq->rkq_qio = NULL;
+ }
+ /* Queue must have been disabled prior to final destruction,
+ * this is to catch the case where the queue owner/poll does not
+ * use rd_kafka_q_destroy_owner(). */
+ rd_dassert(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY));
+ rd_kafka_q_disable0(rkq, 0 /*no-lock*/); /* for the non-devel case */
+ rd_kafka_q_fwd_set0(rkq, NULL, 0 /*no-lock*/, 0 /*no-fwd-app*/);
+ rd_kafka_q_purge0(rkq, 0 /*no-lock*/);
+ assert(!rkq->rkq_fwdq);
+ mtx_unlock(&rkq->rkq_lock);
+ mtx_destroy(&rkq->rkq_lock);
+ cnd_destroy(&rkq->rkq_cond);
+
+ if (rkq->rkq_flags & RD_KAFKA_Q_F_ALLOCATED)
+ rd_free(rkq);
+}
+
+
+
+/**
+ * Initialize a queue.
+ */
+void rd_kafka_q_init0(rd_kafka_q_t *rkq,
+ rd_kafka_t *rk,
+ const char *func,
+ int line) {
+ rd_kafka_q_reset(rkq);
+ rkq->rkq_fwdq = NULL;
+ rkq->rkq_refcnt = 1;
+ rkq->rkq_flags = RD_KAFKA_Q_F_READY;
+ rkq->rkq_rk = rk;
+ rkq->rkq_qio = NULL;
+ rkq->rkq_serve = NULL;
+ rkq->rkq_opaque = NULL;
+ mtx_init(&rkq->rkq_lock, mtx_plain);
+ cnd_init(&rkq->rkq_cond);
+#if ENABLE_DEVEL
+ rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line);
+#else
+ rkq->rkq_name = func;
+#endif
+}
+
+
+/**
+ * Allocate a new queue and initialize it.
+ */
+rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, const char *func, int line) {
+ rd_kafka_q_t *rkq = rd_malloc(sizeof(*rkq));
+ rd_kafka_q_init(rkq, rk);
+ rkq->rkq_flags |= RD_KAFKA_Q_F_ALLOCATED;
+#if ENABLE_DEVEL
+ rd_snprintf(rkq->rkq_name, sizeof(rkq->rkq_name), "%s:%d", func, line);
+#else
+ rkq->rkq_name = func;
+#endif
+ return rkq;
+}
+
+/**
+ * Set/clear forward queue.
+ * Queue forwarding enables message routing inside rdkafka.
+ * Typical use is to re-route all fetched messages for all partitions
+ * to one single queue.
+ *
+ * All access to rkq_fwdq are protected by rkq_lock.
+ */
+void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq,
+ rd_kafka_q_t *destq,
+ int do_lock,
+ int fwd_app) {
+ if (unlikely(srcq == destq))
+ return;
+
+ if (do_lock)
+ mtx_lock(&srcq->rkq_lock);
+ if (fwd_app)
+ srcq->rkq_flags |= RD_KAFKA_Q_F_FWD_APP;
+ if (srcq->rkq_fwdq) {
+ rd_kafka_q_destroy(srcq->rkq_fwdq);
+ srcq->rkq_fwdq = NULL;
+ }
+ if (destq) {
+ rd_kafka_q_keep(destq);
+
+ /* If rkq has ops in queue, append them to fwdq's queue.
+ * This is an irreversible operation. */
+ if (srcq->rkq_qlen > 0) {
+ rd_dassert(destq->rkq_flags & RD_KAFKA_Q_F_READY);
+ rd_kafka_q_concat(destq, srcq);
+ }
+
+ srcq->rkq_fwdq = destq;
+ }
+ if (do_lock)
+ mtx_unlock(&srcq->rkq_lock);
+}
+
+/**
+ * Purge all entries from a queue.
+ */
+int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock) {
+ rd_kafka_op_t *rko, *next;
+ TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
+ rd_kafka_q_t *fwdq;
+ int cnt = 0;
+
+ if (do_lock)
+ mtx_lock(&rkq->rkq_lock);
+
+ if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+ cnt = rd_kafka_q_purge(fwdq);
+ rd_kafka_q_destroy(fwdq);
+ return cnt;
+ }
+
+ /* Move ops queue to tmpq to avoid lock-order issue
+ * by locks taken from rd_kafka_op_destroy(). */
+ TAILQ_MOVE(&tmpq, &rkq->rkq_q, rko_link);
+
+ rd_kafka_q_mark_served(rkq);
+
+ /* Zero out queue */
+ rd_kafka_q_reset(rkq);
+
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+
+ /* Destroy the ops */
+ next = TAILQ_FIRST(&tmpq);
+ while ((rko = next)) {
+ next = TAILQ_NEXT(next, rko_link);
+ rd_kafka_op_destroy(rko);
+ cnt++;
+ }
+
+ return cnt;
+}
+
+
+/**
+ * Purge all entries from a queue with a rktp version smaller than `version`
+ * This shaves off the head of the queue, up until the first rko with
+ * a non-matching rktp or version.
+ */
+void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq,
+ rd_kafka_toppar_t *rktp,
+ int version) {
+ rd_kafka_op_t *rko, *next;
+ TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
+ int32_t cnt = 0;
+ int64_t size = 0;
+ rd_kafka_q_t *fwdq;
+
+ mtx_lock(&rkq->rkq_lock);
+
+ if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ mtx_unlock(&rkq->rkq_lock);
+ rd_kafka_q_purge_toppar_version(fwdq, rktp, version);
+ rd_kafka_q_destroy(fwdq);
+ return;
+ }
+
+ /* Move ops to temporary queue and then destroy them from there
+ * without locks to avoid lock-ordering problems in op_destroy() */
+ while ((rko = TAILQ_FIRST(&rkq->rkq_q)) && rko->rko_rktp &&
+ rko->rko_rktp == rktp && rko->rko_version < version) {
+ TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
+ TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
+ cnt++;
+ size += rko->rko_len;
+ }
+
+ rd_kafka_q_mark_served(rkq);
+
+ rkq->rkq_qlen -= cnt;
+ rkq->rkq_qsize -= size;
+ mtx_unlock(&rkq->rkq_lock);
+
+ next = TAILQ_FIRST(&tmpq);
+ while ((rko = next)) {
+ next = TAILQ_NEXT(next, rko_link);
+ rd_kafka_op_destroy(rko);
+ }
+}
+
+
+/**
+ * Move 'cnt' entries from 'srcq' to 'dstq'.
+ * If 'cnt' == -1 all entries will be moved.
+ * Returns the number of entries moved.
+ */
+int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq,
+ rd_kafka_q_t *srcq,
+ int cnt,
+ int do_locks) {
+ rd_kafka_op_t *rko;
+ int mcnt = 0;
+
+ if (do_locks) {
+ mtx_lock(&srcq->rkq_lock);
+ mtx_lock(&dstq->rkq_lock);
+ }
+
+ if (!dstq->rkq_fwdq && !srcq->rkq_fwdq) {
+ if (cnt > 0 && dstq->rkq_qlen == 0)
+ rd_kafka_q_io_event(dstq);
+
+ /* Optimization, if 'cnt' is equal/larger than all
+ * items of 'srcq' we can move the entire queue. */
+ if (cnt == -1 || cnt >= (int)srcq->rkq_qlen) {
+ mcnt = srcq->rkq_qlen;
+ rd_kafka_q_concat0(dstq, srcq, 0 /*no-lock*/);
+ } else {
+ while (mcnt < cnt &&
+ (rko = TAILQ_FIRST(&srcq->rkq_q))) {
+ TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
+ if (likely(!rko->rko_prio))
+ TAILQ_INSERT_TAIL(&dstq->rkq_q, rko,
+ rko_link);
+ else
+ TAILQ_INSERT_SORTED(
+ &dstq->rkq_q, rko, rd_kafka_op_t *,
+ rko_link, rd_kafka_op_cmp_prio);
+
+ srcq->rkq_qlen--;
+ dstq->rkq_qlen++;
+ srcq->rkq_qsize -= rko->rko_len;
+ dstq->rkq_qsize += rko->rko_len;
+ mcnt++;
+ }
+ }
+
+ rd_kafka_q_mark_served(srcq);
+
+ } else
+ mcnt = rd_kafka_q_move_cnt(
+ dstq->rkq_fwdq ? dstq->rkq_fwdq : dstq,
+ srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq, cnt, do_locks);
+
+ if (do_locks) {
+ mtx_unlock(&dstq->rkq_lock);
+ mtx_unlock(&srcq->rkq_lock);
+ }
+
+ return mcnt;
+}
+
+
+/**
+ * Filters out outdated ops.
+ */
+static RD_INLINE rd_kafka_op_t *
+rd_kafka_op_filter(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int version) {
+ if (unlikely(!rko))
+ return NULL;
+
+ if (unlikely(rd_kafka_op_version_outdated(rko, version))) {
+ rd_kafka_q_deq0(rkq, rko);
+ rd_kafka_op_destroy(rko);
+ return NULL;
+ }
+
+ return rko;
+}
+
+
+
+/**
+ * Pop an op from a queue.
+ *
+ * Locality: any thread.
+ */
+
+
+/**
+ * Serve q like rd_kafka_q_serve() until an op is found that can be returned
+ * as an event to the application.
+ *
+ * @returns the first event:able op, or NULL on timeout.
+ *
+ * Locality: any thread
+ */
+rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq,
+ rd_ts_t timeout_us,
+ int32_t version,
+ rd_kafka_q_cb_type_t cb_type,
+ rd_kafka_q_serve_cb_t *callback,
+ void *opaque) {
+ rd_kafka_op_t *rko;
+ rd_kafka_q_t *fwdq;
+
+ rd_dassert(cb_type);
+
+ mtx_lock(&rkq->rkq_lock);
+
+ rd_kafka_yield_thread = 0;
+ if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ struct timespec timeout_tspec;
+
+ rd_timeout_init_timespec_us(&timeout_tspec, timeout_us);
+
+ while (1) {
+ rd_kafka_op_res_t res;
+ /* Keep track of current lock status to avoid
+ * unnecessary lock flapping in all the cases below. */
+ rd_bool_t is_locked = rd_true;
+
+ /* Filter out outdated ops */
+ retry:
+ while ((rko = TAILQ_FIRST(&rkq->rkq_q)) &&
+ !(rko = rd_kafka_op_filter(rkq, rko, version)))
+ ;
+
+ rd_kafka_q_mark_served(rkq);
+
+ if (rko) {
+ /* Proper versioned op */
+ rd_kafka_q_deq0(rkq, rko);
+
+ /* Let op_handle() operate without lock
+ * held to allow re-enqueuing, etc. */
+ mtx_unlock(&rkq->rkq_lock);
+ is_locked = rd_false;
+
+ /* Ops with callbacks are considered handled
+ * and we move on to the next op, if any.
+ * Ops w/o callbacks are returned immediately */
+ res = rd_kafka_op_handle(rkq->rkq_rk, rkq, rko,
+ cb_type, opaque,
+ callback);
+
+ if (res == RD_KAFKA_OP_RES_HANDLED ||
+ res == RD_KAFKA_OP_RES_KEEP) {
+ mtx_lock(&rkq->rkq_lock);
+ is_locked = rd_true;
+ goto retry; /* Next op */
+ } else if (unlikely(res ==
+ RD_KAFKA_OP_RES_YIELD)) {
+ /* Callback yielded, unroll */
+ return NULL;
+ } else
+ break; /* Proper op, handle below. */
+ }
+
+ if (unlikely(rd_kafka_q_check_yield(rkq))) {
+ if (is_locked)
+ mtx_unlock(&rkq->rkq_lock);
+ return NULL;
+ }
+
+ if (!is_locked)
+ mtx_lock(&rkq->rkq_lock);
+
+ if (cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock,
+ &timeout_tspec) != thrd_success) {
+ mtx_unlock(&rkq->rkq_lock);
+ return NULL;
+ }
+ }
+
+ } else {
+ /* Since the q_pop may block we need to release the parent
+ * queue's lock. */
+ mtx_unlock(&rkq->rkq_lock);
+ rko = rd_kafka_q_pop_serve(fwdq, timeout_us, version, cb_type,
+ callback, opaque);
+ rd_kafka_q_destroy(fwdq);
+ }
+
+
+ return rko;
+}
+
+rd_kafka_op_t *
+rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version) {
+ return rd_kafka_q_pop_serve(rkq, timeout_us, version,
+ RD_KAFKA_Q_CB_RETURN, NULL, NULL);
+}
+
+
+/**
+ * Pop all available ops from a queue and call the provided
+ * callback for each op.
+ * `max_cnt` limits the number of ops served, 0 = no limit.
+ *
+ * Returns the number of ops served.
+ *
+ * Locality: any thread.
+ */
+int rd_kafka_q_serve(rd_kafka_q_t *rkq,
+ int timeout_ms,
+ int max_cnt,
+ rd_kafka_q_cb_type_t cb_type,
+ rd_kafka_q_serve_cb_t *callback,
+ void *opaque) {
+ rd_kafka_t *rk = rkq->rkq_rk;
+ rd_kafka_op_t *rko;
+ rd_kafka_q_t localq;
+ rd_kafka_q_t *fwdq;
+ int cnt = 0;
+ struct timespec timeout_tspec;
+
+ rd_dassert(cb_type);
+
+ mtx_lock(&rkq->rkq_lock);
+
+ rd_dassert(TAILQ_EMPTY(&rkq->rkq_q) || rkq->rkq_qlen > 0);
+ if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ int ret;
+ /* Since the q_pop may block we need to release the parent
+ * queue's lock. */
+ mtx_unlock(&rkq->rkq_lock);
+ ret = rd_kafka_q_serve(fwdq, timeout_ms, max_cnt, cb_type,
+ callback, opaque);
+ rd_kafka_q_destroy(fwdq);
+ return ret;
+ }
+
+ rd_timeout_init_timespec(&timeout_tspec, timeout_ms);
+
+ /* Wait for op */
+ while (!(rko = TAILQ_FIRST(&rkq->rkq_q)) &&
+ !rd_kafka_q_check_yield(rkq) &&
+ cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock,
+ &timeout_tspec) == thrd_success)
+ ;
+
+ rd_kafka_q_mark_served(rkq);
+
+ if (!rko) {
+ mtx_unlock(&rkq->rkq_lock);
+ return 0;
+ }
+
+ /* Move the first `max_cnt` ops. */
+ rd_kafka_q_init(&localq, rkq->rkq_rk);
+ rd_kafka_q_move_cnt(&localq, rkq, max_cnt == 0 ? -1 /*all*/ : max_cnt,
+ 0 /*no-locks*/);
+
+ mtx_unlock(&rkq->rkq_lock);
+
+ rd_kafka_yield_thread = 0;
+
+ /* Call callback for each op */
+ while ((rko = TAILQ_FIRST(&localq.rkq_q))) {
+ rd_kafka_op_res_t res;
+
+ rd_kafka_q_deq0(&localq, rko);
+ res = rd_kafka_op_handle(rk, &localq, rko, cb_type, opaque,
+ callback);
+ /* op must have been handled */
+ rd_kafka_assert(NULL, res != RD_KAFKA_OP_RES_PASS);
+ cnt++;
+
+ if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
+ rd_kafka_yield_thread)) {
+ /* Callback called rd_kafka_yield(), we must
+ * stop our callback dispatching and put the
+ * ops in localq back on the original queue head. */
+ if (!TAILQ_EMPTY(&localq.rkq_q))
+ rd_kafka_q_prepend(rkq, &localq);
+ break;
+ }
+ }
+
+ rd_kafka_q_destroy_owner(&localq);
+
+ return cnt;
+}
+
+/**
+ * @brief Filter out and destroy outdated messages.
+ *
+ * @returns Returns the number of valid messages.
+ *
+ * @locality Any thread.
+ */
+static size_t
+rd_kafka_purge_outdated_messages(rd_kafka_toppar_t *rktp,
+ int32_t version,
+ rd_kafka_message_t **rkmessages,
+ size_t cnt,
+ struct rd_kafka_op_tailq *ctrl_msg_q) {
+ size_t valid_count = 0;
+ size_t i;
+ rd_kafka_op_t *rko, *next;
+
+ for (i = 0; i < cnt; i++) {
+ rko = rkmessages[i]->_private;
+ if (rko->rko_rktp == rktp &&
+ rd_kafka_op_version_outdated(rko, version)) {
+ /* This also destroys the corresponding rkmessage. */
+ rd_kafka_op_destroy(rko);
+ } else if (i > valid_count) {
+ rkmessages[valid_count++] = rkmessages[i];
+ } else {
+ valid_count++;
+ }
+ }
+
+ /* Discard outdated control msgs ops */
+ next = TAILQ_FIRST(ctrl_msg_q);
+ while (next) {
+ rko = next;
+ next = TAILQ_NEXT(rko, rko_link);
+ if (rko->rko_rktp == rktp &&
+ rd_kafka_op_version_outdated(rko, version)) {
+ TAILQ_REMOVE(ctrl_msg_q, rko, rko_link);
+ rd_kafka_op_destroy(rko);
+ }
+ }
+
+ return valid_count;
+}
+
+
+/**
+ * Populate 'rkmessages' array with messages from 'rkq'.
+ * If 'auto_commit' is set, each message's offset will be committed
+ * to the offset store for that toppar.
+ *
+ * Returns the number of messages added.
+ */
+
+int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq,
+ int timeout_ms,
+ rd_kafka_message_t **rkmessages,
+ size_t rkmessages_size) {
+ unsigned int cnt = 0;
+ TAILQ_HEAD(, rd_kafka_op_s) tmpq = TAILQ_HEAD_INITIALIZER(tmpq);
+ struct rd_kafka_op_tailq ctrl_msg_q =
+ TAILQ_HEAD_INITIALIZER(ctrl_msg_q);
+ rd_kafka_op_t *rko, *next;
+ rd_kafka_t *rk = rkq->rkq_rk;
+ rd_kafka_q_t *fwdq;
+ struct timespec timeout_tspec;
+ int i;
+
+ mtx_lock(&rkq->rkq_lock);
+ if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ /* Since the q_pop may block we need to release the parent
+ * queue's lock. */
+ mtx_unlock(&rkq->rkq_lock);
+ cnt = rd_kafka_q_serve_rkmessages(fwdq, timeout_ms, rkmessages,
+ rkmessages_size);
+ rd_kafka_q_destroy(fwdq);
+ return cnt;
+ }
+ mtx_unlock(&rkq->rkq_lock);
+
+ if (timeout_ms)
+ rd_kafka_app_poll_blocking(rk);
+
+ rd_timeout_init_timespec(&timeout_tspec, timeout_ms);
+
+ rd_kafka_yield_thread = 0;
+ while (cnt < rkmessages_size) {
+ rd_kafka_op_res_t res;
+
+ mtx_lock(&rkq->rkq_lock);
+
+ while (!(rko = TAILQ_FIRST(&rkq->rkq_q)) &&
+ !rd_kafka_q_check_yield(rkq) &&
+ cnd_timedwait_abs(&rkq->rkq_cond, &rkq->rkq_lock,
+ &timeout_tspec) == thrd_success)
+ ;
+
+ rd_kafka_q_mark_served(rkq);
+
+ if (!rko) {
+ mtx_unlock(&rkq->rkq_lock);
+ break; /* Timed out */
+ }
+
+ rd_kafka_q_deq0(rkq, rko);
+
+ mtx_unlock(&rkq->rkq_lock);
+
+ if (unlikely(rko->rko_type == RD_KAFKA_OP_BARRIER)) {
+ cnt = (unsigned int)rd_kafka_purge_outdated_messages(
+ rko->rko_rktp, rko->rko_version, rkmessages, cnt,
+ &ctrl_msg_q);
+ rd_kafka_op_destroy(rko);
+ continue;
+ }
+
+ if (rd_kafka_op_version_outdated(rko, 0)) {
+ /* Outdated op, put on discard queue */
+ TAILQ_INSERT_TAIL(&tmpq, rko, rko_link);
+ continue;
+ }
+
+ /* Serve non-FETCH callbacks */
+ res =
+ rd_kafka_poll_cb(rk, rkq, rko, RD_KAFKA_Q_CB_RETURN, NULL);
+ if (res == RD_KAFKA_OP_RES_KEEP ||
+ res == RD_KAFKA_OP_RES_HANDLED) {
+ /* Callback served, rko is destroyed (if HANDLED). */
+ continue;
+ } else if (unlikely(res == RD_KAFKA_OP_RES_YIELD ||
+ rd_kafka_yield_thread)) {
+ /* Yield. */
+ break;
+ }
+ rd_dassert(res == RD_KAFKA_OP_RES_PASS);
+
+ /* If this is a control messages, don't return message to
+ * application. Add it to a tmp queue from where we can store
+ * the offset and destroy the op */
+ if (unlikely(rd_kafka_op_is_ctrl_msg(rko))) {
+ TAILQ_INSERT_TAIL(&ctrl_msg_q, rko, rko_link);
+ continue;
+ }
+
+ /* Get rkmessage from rko and append to array. */
+ rkmessages[cnt++] = rd_kafka_message_get(rko);
+ }
+
+ for (i = cnt - 1; i >= 0; i--) {
+ rko = (rd_kafka_op_t *)rkmessages[i]->_private;
+ rd_kafka_toppar_t *rktp = rko->rko_rktp;
+ int64_t offset = rkmessages[i]->offset + 1;
+ if (unlikely(rktp->rktp_app_pos.offset < offset))
+ rd_kafka_update_app_pos(
+ rk, rktp,
+ RD_KAFKA_FETCH_POS(
+ offset,
+ rd_kafka_message_leader_epoch(rkmessages[i])),
+ RD_DO_LOCK);
+ }
+
+ /* Discard non-desired and already handled ops */
+ next = TAILQ_FIRST(&tmpq);
+ while (next) {
+ rko = next;
+ next = TAILQ_NEXT(next, rko_link);
+ rd_kafka_op_destroy(rko);
+ }
+
+ /* Discard ctrl msgs */
+ next = TAILQ_FIRST(&ctrl_msg_q);
+ while (next) {
+ rko = next;
+ next = TAILQ_NEXT(next, rko_link);
+ rd_kafka_toppar_t *rktp = rko->rko_rktp;
+ int64_t offset = rko->rko_u.fetch.rkm.rkm_rkmessage.offset + 1;
+ if (rktp->rktp_app_pos.offset < offset)
+ rd_kafka_update_app_pos(
+ rk, rktp,
+ RD_KAFKA_FETCH_POS(
+ offset,
+ rd_kafka_message_leader_epoch(
+ &rko->rko_u.fetch.rkm.rkm_rkmessage)),
+ RD_DO_LOCK);
+ rd_kafka_op_destroy(rko);
+ }
+
+ rd_kafka_app_polled(rk);
+
+ return cnt;
+}
+
+
+
+void rd_kafka_queue_destroy(rd_kafka_queue_t *rkqu) {
+ if (rkqu->rkqu_is_owner)
+ rd_kafka_q_destroy_owner(rkqu->rkqu_q);
+ else
+ rd_kafka_q_destroy(rkqu->rkqu_q);
+ rd_free(rkqu);
+}
+
+rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq) {
+ rd_kafka_queue_t *rkqu;
+
+ rkqu = rd_calloc(1, sizeof(*rkqu));
+
+ rkqu->rkqu_q = rkq;
+ rd_kafka_q_keep(rkq);
+
+ rkqu->rkqu_rk = rk;
+
+ return rkqu;
+}
+
+
+rd_kafka_queue_t *rd_kafka_queue_new(rd_kafka_t *rk) {
+ rd_kafka_q_t *rkq;
+ rd_kafka_queue_t *rkqu;
+
+ rkq = rd_kafka_q_new(rk);
+ rkqu = rd_kafka_queue_new0(rk, rkq);
+ rd_kafka_q_destroy(rkq); /* Loose refcount from q_new, one is held
+ * by queue_new0 */
+ rkqu->rkqu_is_owner = 1;
+ return rkqu;
+}
+
+
+rd_kafka_queue_t *rd_kafka_queue_get_main(rd_kafka_t *rk) {
+ return rd_kafka_queue_new0(rk, rk->rk_rep);
+}
+
+
+rd_kafka_queue_t *rd_kafka_queue_get_consumer(rd_kafka_t *rk) {
+ if (!rk->rk_cgrp)
+ return NULL;
+ return rd_kafka_queue_new0(rk, rk->rk_cgrp->rkcg_q);
+}
+
+rd_kafka_queue_t *rd_kafka_queue_get_partition(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition) {
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_queue_t *result;
+
+ if (rk->rk_type == RD_KAFKA_PRODUCER)
+ return NULL;
+
+ rktp = rd_kafka_toppar_get2(rk, topic, partition, 0, /* no ua_on_miss */
+ 1 /* create_on_miss */);
+
+ if (!rktp)
+ return NULL;
+
+ result = rd_kafka_queue_new0(rk, rktp->rktp_fetchq);
+ rd_kafka_toppar_destroy(rktp);
+
+ return result;
+}
+
+rd_kafka_queue_t *rd_kafka_queue_get_background(rd_kafka_t *rk) {
+ rd_kafka_queue_t *rkqu;
+
+ rd_kafka_wrlock(rk);
+ if (!rk->rk_background.q) {
+ char errstr[256];
+
+ if (rd_kafka_background_thread_create(rk, errstr,
+ sizeof(errstr))) {
+ rd_kafka_log(rk, LOG_ERR, "BACKGROUND",
+ "Failed to create background thread: %s",
+ errstr);
+ rd_kafka_wrunlock(rk);
+ return NULL;
+ }
+ }
+
+ rkqu = rd_kafka_queue_new0(rk, rk->rk_background.q);
+ rd_kafka_wrunlock(rk);
+ return rkqu;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_set_log_queue(rd_kafka_t *rk,
+ rd_kafka_queue_t *rkqu) {
+ rd_kafka_q_t *rkq;
+
+ if (!rk->rk_logq)
+ return RD_KAFKA_RESP_ERR__NOT_CONFIGURED;
+
+ if (!rkqu)
+ rkq = rk->rk_rep;
+ else
+ rkq = rkqu->rkqu_q;
+ rd_kafka_q_fwd_set(rk->rk_logq, rkq);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+void rd_kafka_queue_forward(rd_kafka_queue_t *src, rd_kafka_queue_t *dst) {
+ rd_kafka_q_fwd_set0(src->rkqu_q, dst ? dst->rkqu_q : NULL,
+ 1, /* do_lock */
+ 1 /* fwd_app */);
+}
+
+
+size_t rd_kafka_queue_length(rd_kafka_queue_t *rkqu) {
+ return (size_t)rd_kafka_q_len(rkqu->rkqu_q);
+}
+
+/**
+ * @brief Enable or disable(fd==-1) fd-based wake-ups for queue
+ */
+void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq,
+ rd_socket_t fd,
+ const void *payload,
+ size_t size) {
+ struct rd_kafka_q_io *qio = NULL;
+
+ if (fd != -1) {
+ qio = rd_malloc(sizeof(*qio) + size);
+ qio->fd = fd;
+ qio->size = size;
+ qio->payload = (void *)(qio + 1);
+ qio->sent = rd_false;
+ qio->event_cb = NULL;
+ qio->event_cb_opaque = NULL;
+ memcpy(qio->payload, payload, size);
+ }
+
+ mtx_lock(&rkq->rkq_lock);
+ if (rkq->rkq_qio) {
+ rd_free(rkq->rkq_qio);
+ rkq->rkq_qio = NULL;
+ }
+
+ if (fd != -1) {
+ rkq->rkq_qio = qio;
+ }
+
+ mtx_unlock(&rkq->rkq_lock);
+}
+
+void rd_kafka_queue_io_event_enable(rd_kafka_queue_t *rkqu,
+ int fd,
+ const void *payload,
+ size_t size) {
+ rd_kafka_q_io_event_enable(rkqu->rkqu_q, fd, payload, size);
+}
+
+
+void rd_kafka_queue_yield(rd_kafka_queue_t *rkqu) {
+ rd_kafka_q_yield(rkqu->rkqu_q);
+}
+
+
+/**
+ * @brief Enable or disable(event_cb==NULL) callback-based wake-ups for queue
+ */
+void rd_kafka_q_cb_event_enable(rd_kafka_q_t *rkq,
+ void (*event_cb)(rd_kafka_t *rk, void *opaque),
+ void *opaque) {
+ struct rd_kafka_q_io *qio = NULL;
+
+ if (event_cb) {
+ qio = rd_malloc(sizeof(*qio));
+ qio->fd = -1;
+ qio->size = 0;
+ qio->payload = NULL;
+ qio->event_cb = event_cb;
+ qio->event_cb_opaque = opaque;
+ }
+
+ mtx_lock(&rkq->rkq_lock);
+ if (rkq->rkq_qio) {
+ rd_free(rkq->rkq_qio);
+ rkq->rkq_qio = NULL;
+ }
+
+ if (event_cb) {
+ rkq->rkq_qio = qio;
+ }
+
+ mtx_unlock(&rkq->rkq_lock);
+}
+
+void rd_kafka_queue_cb_event_enable(rd_kafka_queue_t *rkqu,
+ void (*event_cb)(rd_kafka_t *rk,
+ void *opaque),
+ void *opaque) {
+ rd_kafka_q_cb_event_enable(rkqu->rkqu_q, event_cb, opaque);
+}
+
+
+/**
+ * Helper: wait for single op on 'rkq', and return its error,
+ * or .._TIMED_OUT on timeout.
+ */
+rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms) {
+ rd_kafka_op_t *rko;
+ rd_kafka_resp_err_t err;
+
+ rko = rd_kafka_q_pop(rkq, rd_timeout_us(timeout_ms), 0);
+ if (!rko)
+ err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ else {
+ err = rko->rko_err;
+ rd_kafka_op_destroy(rko);
+ }
+
+ return err;
+}
+
+
+/**
+ * Apply \p callback on each op in queue.
+ * If the callback wishes to remove the rko it must do so using
+ * using rd_kafka_op_deq0().
+ *
+ * @returns the sum of \p callback() return values.
+ * @remark rkq will be locked, callers should take care not to
+ * interact with \p rkq through other means from the callback to avoid
+ * deadlocks.
+ */
+int rd_kafka_q_apply(rd_kafka_q_t *rkq,
+ int (*callback)(rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ void *opaque),
+ void *opaque) {
+ rd_kafka_op_t *rko, *next;
+ rd_kafka_q_t *fwdq;
+ int cnt = 0;
+
+ mtx_lock(&rkq->rkq_lock);
+ if ((fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ mtx_unlock(&rkq->rkq_lock);
+ cnt = rd_kafka_q_apply(fwdq, callback, opaque);
+ rd_kafka_q_destroy(fwdq);
+ return cnt;
+ }
+
+ next = TAILQ_FIRST(&rkq->rkq_q);
+ while ((rko = next)) {
+ next = TAILQ_NEXT(next, rko_link);
+ cnt += callback(rkq, rko, opaque);
+ }
+
+ rd_kafka_q_mark_served(rkq);
+
+ mtx_unlock(&rkq->rkq_lock);
+
+ return cnt;
+}
+
+/**
+ * @brief Convert relative to absolute offsets and also purge any messages
+ * that are older than \p min_offset.
+ * @remark Error ops with ERR__NOT_IMPLEMENTED will not be purged since
+ * they are used to indicate unknnown compression codecs and compressed
+ * messagesets may have a starting offset lower than what we requested.
+ * @remark \p rkq locking is not performed (caller's responsibility)
+ * @remark Must NOT be used on fwdq.
+ */
+void rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq,
+ int64_t min_offset,
+ int64_t base_offset) {
+ rd_kafka_op_t *rko, *next;
+ int adj_len = 0;
+ int64_t adj_size = 0;
+
+ rd_kafka_assert(NULL, !rkq->rkq_fwdq);
+
+ next = TAILQ_FIRST(&rkq->rkq_q);
+ while ((rko = next)) {
+ next = TAILQ_NEXT(next, rko_link);
+
+ if (unlikely(rko->rko_type != RD_KAFKA_OP_FETCH))
+ continue;
+
+ rko->rko_u.fetch.rkm.rkm_offset += base_offset;
+
+ if (rko->rko_u.fetch.rkm.rkm_offset < min_offset &&
+ rko->rko_err != RD_KAFKA_RESP_ERR__NOT_IMPLEMENTED) {
+ adj_len++;
+ adj_size += rko->rko_len;
+ TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
+ rd_kafka_op_destroy(rko);
+ continue;
+ }
+ }
+
+
+ rkq->rkq_qlen -= adj_len;
+ rkq->rkq_qsize -= adj_size;
+}
+
+
+/**
+ * @brief Print information and contents of queue
+ */
+void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq) {
+ mtx_lock(&rkq->rkq_lock);
+ fprintf(fp,
+ "Queue %p \"%s\" (refcnt %d, flags 0x%x, %d ops, "
+ "%" PRId64 " bytes)\n",
+ rkq, rkq->rkq_name, rkq->rkq_refcnt, rkq->rkq_flags,
+ rkq->rkq_qlen, rkq->rkq_qsize);
+
+ if (rkq->rkq_qio)
+ fprintf(fp, " QIO fd %d\n", (int)rkq->rkq_qio->fd);
+ if (rkq->rkq_serve)
+ fprintf(fp, " Serve callback %p, opaque %p\n", rkq->rkq_serve,
+ rkq->rkq_opaque);
+
+ if (rkq->rkq_fwdq) {
+ fprintf(fp, " Forwarded ->\n");
+ rd_kafka_q_dump(fp, rkq->rkq_fwdq);
+ } else {
+ rd_kafka_op_t *rko;
+
+ if (!TAILQ_EMPTY(&rkq->rkq_q))
+ fprintf(fp, " Queued ops:\n");
+ TAILQ_FOREACH(rko, &rkq->rkq_q, rko_link) {
+ fprintf(fp,
+ " %p %s (v%" PRId32
+ ", flags 0x%x, "
+ "prio %d, len %" PRId32
+ ", source %s, "
+ "replyq %p)\n",
+ rko, rd_kafka_op2str(rko->rko_type),
+ rko->rko_version, rko->rko_flags, rko->rko_prio,
+ rko->rko_len,
+#if ENABLE_DEVEL
+ rko->rko_source
+#else
+ "-"
+#endif
+ ,
+ rko->rko_replyq.q);
+ }
+ }
+
+ mtx_unlock(&rkq->rkq_lock);
+}
+
+
+void rd_kafka_enq_once_trigger_destroy(void *ptr) {
+ rd_kafka_enq_once_t *eonce = ptr;
+
+ rd_kafka_enq_once_trigger(eonce, RD_KAFKA_RESP_ERR__DESTROY, "destroy");
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h
new file mode 100644
index 000000000..0d50f5870
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_queue.h
@@ -0,0 +1,1171 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_QUEUE_H_
+#define _RDKAFKA_QUEUE_H_
+
+#include "rdkafka_op.h"
+#include "rdkafka_int.h"
+
+#ifdef _WIN32
+#include <io.h> /* for _write() */
+#endif
+
+/** @brief Queueing strategy */
+#define RD_KAFKA_QUEUE_FIFO 0
+#define RD_KAFKA_QUEUE_LIFO 1
+
+TAILQ_HEAD(rd_kafka_op_tailq, rd_kafka_op_s);
+
+/**
+ * @struct Queue for rd_kafka_op_t*.
+ *
+ * @remark All readers of the queue must call rd_kafka_q_mark_served()
+ * after reading the queue (while still holding the queue lock) to
+ * clear the wakeup-sent flag.
+ */
+struct rd_kafka_q_s {
+ mtx_t rkq_lock;
+ cnd_t rkq_cond;
+ struct rd_kafka_q_s *rkq_fwdq; /* Forwarded/Routed queue.
+ * Used in place of this queue
+ * for all operations. */
+
+ struct rd_kafka_op_tailq rkq_q; /* TAILQ_HEAD(, rd_kafka_op_s) */
+ int rkq_qlen; /* Number of entries in queue */
+ int64_t rkq_qsize; /* Size of all entries in queue */
+ int rkq_refcnt;
+ int rkq_flags;
+#define RD_KAFKA_Q_F_ALLOCATED 0x1 /* Allocated: rd_free on destroy */
+#define RD_KAFKA_Q_F_READY \
+ 0x2 /* Queue is ready to be used. \
+ * Flag is cleared on destroy */
+#define RD_KAFKA_Q_F_FWD_APP \
+ 0x4 /* Queue is being forwarded by a call \
+ * to rd_kafka_queue_forward. */
+#define RD_KAFKA_Q_F_YIELD \
+ 0x8 /* Have waiters return even if \
+ * no rko was enqueued. \
+ * This is used to wake up a waiter \
+ * by triggering the cond-var \
+ * but without having to enqueue \
+ * an op. */
+
+ rd_kafka_t *rkq_rk;
+ struct rd_kafka_q_io *rkq_qio; /* FD-based application signalling */
+
+ /* Op serve callback (optional).
+ * Mainly used for forwarded queues to use the original queue's
+ * serve function from the forwarded position.
+ * Shall return 1 if op was handled, else 0. */
+ rd_kafka_q_serve_cb_t *rkq_serve;
+ void *rkq_opaque;
+
+#if ENABLE_DEVEL
+ char rkq_name[64]; /* Debugging: queue name (FUNC:LINE) */
+#else
+ const char *rkq_name; /* Debugging: queue name (FUNC) */
+#endif
+};
+
+
+/* Application signalling state holder. */
+struct rd_kafka_q_io {
+ /* For FD-based signalling */
+ rd_socket_t fd;
+ void *payload;
+ size_t size;
+ rd_bool_t sent; /**< Wake-up has been sent.
+ * This field is reset to false by the queue
+ * reader, allowing a new wake-up to be sent by a
+ * subsequent writer. */
+ /* For callback-based signalling */
+ void (*event_cb)(rd_kafka_t *rk, void *opaque);
+ void *event_cb_opaque;
+};
+
+
+
+/**
+ * @return true if queue is ready/enabled, else false.
+ * @remark queue luck must be held by caller (if applicable)
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_q_ready(rd_kafka_q_t *rkq) {
+ return rkq->rkq_flags & RD_KAFKA_Q_F_READY;
+}
+
+
+
+void rd_kafka_q_init0(rd_kafka_q_t *rkq,
+ rd_kafka_t *rk,
+ const char *func,
+ int line);
+#define rd_kafka_q_init(rkq, rk) \
+ rd_kafka_q_init0(rkq, rk, __FUNCTION__, __LINE__)
+rd_kafka_q_t *rd_kafka_q_new0(rd_kafka_t *rk, const char *func, int line);
+#define rd_kafka_q_new(rk) rd_kafka_q_new0(rk, __FUNCTION__, __LINE__)
+void rd_kafka_q_destroy_final(rd_kafka_q_t *rkq);
+
+#define rd_kafka_q_lock(rkqu) mtx_lock(&(rkqu)->rkq_lock)
+#define rd_kafka_q_unlock(rkqu) mtx_unlock(&(rkqu)->rkq_lock)
+
+static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_keep(rd_kafka_q_t *rkq) {
+ mtx_lock(&rkq->rkq_lock);
+ rkq->rkq_refcnt++;
+ mtx_unlock(&rkq->rkq_lock);
+ return rkq;
+}
+
+static RD_INLINE RD_UNUSED rd_kafka_q_t *
+rd_kafka_q_keep_nolock(rd_kafka_q_t *rkq) {
+ rkq->rkq_refcnt++;
+ return rkq;
+}
+
+
+/**
+ * @returns the queue's name (used for debugging)
+ */
+static RD_INLINE RD_UNUSED const char *rd_kafka_q_name(rd_kafka_q_t *rkq) {
+ return rkq->rkq_name;
+}
+
+/**
+ * @returns the final destination queue name (after forwarding)
+ * @remark rkq MUST NOT be locked
+ */
+static RD_INLINE RD_UNUSED const char *rd_kafka_q_dest_name(rd_kafka_q_t *rkq) {
+ const char *ret;
+ mtx_lock(&rkq->rkq_lock);
+ if (rkq->rkq_fwdq)
+ ret = rd_kafka_q_dest_name(rkq->rkq_fwdq);
+ else
+ ret = rd_kafka_q_name(rkq);
+ mtx_unlock(&rkq->rkq_lock);
+ return ret;
+}
+
+/**
+ * @brief Disable a queue.
+ * Attempting to enqueue ops to the queue will destroy the ops.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_disable0(rd_kafka_q_t *rkq,
+ int do_lock) {
+ if (do_lock)
+ mtx_lock(&rkq->rkq_lock);
+ rkq->rkq_flags &= ~RD_KAFKA_Q_F_READY;
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+}
+#define rd_kafka_q_disable(rkq) rd_kafka_q_disable0(rkq, 1 /*lock*/)
+
+int rd_kafka_q_purge0(rd_kafka_q_t *rkq, int do_lock);
+#define rd_kafka_q_purge(rkq) rd_kafka_q_purge0(rkq, 1 /*lock*/)
+void rd_kafka_q_purge_toppar_version(rd_kafka_q_t *rkq,
+ rd_kafka_toppar_t *rktp,
+ int version);
+
+/**
+ * @brief Loose reference to queue, when refcount reaches 0 the queue
+ * will be destroyed.
+ *
+ * @param disable Also disable the queue, to be used by owner of the queue.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_destroy0(rd_kafka_q_t *rkq,
+ int disable) {
+ int do_delete = 0;
+
+ if (disable) {
+ /* To avoid recursive locking (from ops being purged
+ * that reference this queue somehow),
+ * we disable the queue and purge it with individual
+ * locking. */
+ rd_kafka_q_disable0(rkq, 1 /*lock*/);
+ rd_kafka_q_purge0(rkq, 1 /*lock*/);
+ }
+
+ mtx_lock(&rkq->rkq_lock);
+ rd_kafka_assert(NULL, rkq->rkq_refcnt > 0);
+ do_delete = !--rkq->rkq_refcnt;
+ mtx_unlock(&rkq->rkq_lock);
+
+ if (unlikely(do_delete))
+ rd_kafka_q_destroy_final(rkq);
+}
+
+#define rd_kafka_q_destroy(rkq) rd_kafka_q_destroy0(rkq, 0 /*dont-disable*/)
+
+/**
+ * @brief Queue destroy method to be used by the owner (poller) of
+ * the queue. The only difference to q_destroy() is that this
+ * method also disables the queue so that any q_enq() operations
+ * will fail.
+ * Failure to disable a queue on the poller when it destroys its
+ * queue reference results in ops being enqueued on the queue
+ * but there is noone left to poll it, possibly resulting in a
+ * hang on termination due to refcounts held by the op.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_destroy_owner(rd_kafka_q_t *rkq) {
+ rd_kafka_q_destroy0(rkq, 1 /*disable*/);
+}
+
+
+/**
+ * Reset a queue.
+ * WARNING: All messages will be lost and leaked.
+ * NOTE: No locking is performed.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_reset(rd_kafka_q_t *rkq) {
+ TAILQ_INIT(&rkq->rkq_q);
+ rd_dassert(TAILQ_EMPTY(&rkq->rkq_q));
+ rkq->rkq_qlen = 0;
+ rkq->rkq_qsize = 0;
+}
+
+
+
+/**
+ * Forward 'srcq' to 'destq'
+ */
+void rd_kafka_q_fwd_set0(rd_kafka_q_t *srcq,
+ rd_kafka_q_t *destq,
+ int do_lock,
+ int fwd_app);
+#define rd_kafka_q_fwd_set(S, D) \
+ rd_kafka_q_fwd_set0(S, D, 1 /*lock*/, 0 /*no fwd_app*/)
+
+/**
+ * @returns the forward queue (if any) with its refcount increased.
+ * @locks rd_kafka_q_lock(rkq) == !do_lock
+ */
+static RD_INLINE RD_UNUSED rd_kafka_q_t *rd_kafka_q_fwd_get(rd_kafka_q_t *rkq,
+ int do_lock) {
+ rd_kafka_q_t *fwdq;
+ if (do_lock)
+ mtx_lock(&rkq->rkq_lock);
+
+ if ((fwdq = rkq->rkq_fwdq))
+ rd_kafka_q_keep(fwdq);
+
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+
+ return fwdq;
+}
+
+
+/**
+ * @returns true if queue is forwarded, else false.
+ *
+ * @remark Thread-safe.
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_q_is_fwded(rd_kafka_q_t *rkq) {
+ int r;
+ mtx_lock(&rkq->rkq_lock);
+ r = rkq->rkq_fwdq ? 1 : 0;
+ mtx_unlock(&rkq->rkq_lock);
+ return r;
+}
+
+
+
+/**
+ * @brief Trigger an IO event for this queue.
+ *
+ * @remark Queue MUST be locked
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_io_event(rd_kafka_q_t *rkq) {
+
+ if (likely(!rkq->rkq_qio))
+ return;
+
+ if (rkq->rkq_qio->event_cb) {
+ rkq->rkq_qio->event_cb(rkq->rkq_rk,
+ rkq->rkq_qio->event_cb_opaque);
+ return;
+ }
+
+
+ /* Only one wake-up event should be sent per non-polling period.
+ * As the queue reader calls poll/reads the channel it calls to
+ * rd_kafka_q_mark_served() to reset the wakeup sent flag, allowing
+ * further wakeups in the next non-polling period. */
+ if (rkq->rkq_qio->sent)
+ return; /* Wake-up event already written */
+
+ rkq->rkq_qio->sent = rd_true;
+
+ /* Write wake-up event to socket.
+ * Ignore errors, not much to do anyway. */
+ if (rd_socket_write(rkq->rkq_qio->fd, rkq->rkq_qio->payload,
+ (int)rkq->rkq_qio->size) == -1)
+ ;
+}
+
+
+/**
+ * @brief rko->rko_prio comparator
+ * @remark: descending order: higher priority takes preceedence.
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_op_cmp_prio(const void *_a,
+ const void *_b) {
+ const rd_kafka_op_t *a = _a, *b = _b;
+
+ return RD_CMP(b->rko_prio, a->rko_prio);
+}
+
+
+/**
+ * @brief Wake up waiters without enqueuing an op.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_yield(rd_kafka_q_t *rkq) {
+ rd_kafka_q_t *fwdq;
+
+ mtx_lock(&rkq->rkq_lock);
+
+ rd_dassert(rkq->rkq_refcnt > 0);
+
+ if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
+ /* Queue has been disabled */
+ mtx_unlock(&rkq->rkq_lock);
+ return;
+ }
+
+ if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ rkq->rkq_flags |= RD_KAFKA_Q_F_YIELD;
+ cnd_broadcast(&rkq->rkq_cond);
+ if (rkq->rkq_qlen == 0)
+ rd_kafka_q_io_event(rkq);
+
+ mtx_unlock(&rkq->rkq_lock);
+ } else {
+ mtx_unlock(&rkq->rkq_lock);
+ rd_kafka_q_yield(fwdq);
+ rd_kafka_q_destroy(fwdq);
+ }
+}
+
+/**
+ * @brief Low-level unprotected enqueue that only performs
+ * the actual queue enqueue and counter updates.
+ * @remark Will not perform locking, signaling, fwdq, READY checking, etc.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_q_enq0(rd_kafka_q_t *rkq, rd_kafka_op_t *rko, int at_head) {
+ if (likely(!rko->rko_prio))
+ TAILQ_INSERT_TAIL(&rkq->rkq_q, rko, rko_link);
+ else if (at_head)
+ TAILQ_INSERT_HEAD(&rkq->rkq_q, rko, rko_link);
+ else
+ TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *, rko_link,
+ rd_kafka_op_cmp_prio);
+ rkq->rkq_qlen++;
+ rkq->rkq_qsize += rko->rko_len;
+}
+
+
+/**
+ * @brief Enqueue \p rko either at head or tail of \p rkq.
+ *
+ * The provided \p rko is either enqueued or destroyed.
+ *
+ * \p orig_destq is the original (outermost) dest queue for which
+ * this op was enqueued, before any queue forwarding has kicked in.
+ * The rko_serve callback from the orig_destq will be set on the rko
+ * if there is no rko_serve callback already set, and the \p rko isn't
+ * failed because the final queue is disabled.
+ *
+ * @returns 1 if op was enqueued or 0 if queue is disabled and
+ * there was no replyq to enqueue on in which case the rko is destroyed.
+ *
+ * @locality any thread.
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_q_enq1(rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ rd_kafka_q_t *orig_destq,
+ int at_head,
+ int do_lock) {
+ rd_kafka_q_t *fwdq;
+
+ if (do_lock)
+ mtx_lock(&rkq->rkq_lock);
+
+ rd_dassert(rkq->rkq_refcnt > 0);
+
+ if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
+ /* Queue has been disabled, reply to and fail the rko. */
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+
+ return rd_kafka_op_reply(rko, RD_KAFKA_RESP_ERR__DESTROY);
+ }
+
+ if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ if (!rko->rko_serve && orig_destq->rkq_serve) {
+ /* Store original queue's serve callback and opaque
+ * prior to forwarding. */
+ rko->rko_serve = orig_destq->rkq_serve;
+ rko->rko_serve_opaque = orig_destq->rkq_opaque;
+ }
+
+ rd_kafka_q_enq0(rkq, rko, at_head);
+ cnd_signal(&rkq->rkq_cond);
+ if (rkq->rkq_qlen == 1)
+ rd_kafka_q_io_event(rkq);
+
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+ } else {
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+ rd_kafka_q_enq1(fwdq, rko, orig_destq, at_head, 1 /*do lock*/);
+ rd_kafka_q_destroy(fwdq);
+ }
+
+ return 1;
+}
+
+/**
+ * @brief Enqueue the 'rko' op at the tail of the queue 'rkq'.
+ *
+ * The provided 'rko' is either enqueued or destroyed.
+ *
+ * @returns 1 if op was enqueued or 0 if queue is disabled and
+ * there was no replyq to enqueue on in which case the rko is destroyed.
+ *
+ * @locality any thread.
+ * @locks rkq MUST NOT be locked
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_q_enq(rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ return rd_kafka_q_enq1(rkq, rko, rkq, 0 /*at tail*/, 1 /*do lock*/);
+}
+
+
+/**
+ * @brief Re-enqueue rko at head of rkq.
+ *
+ * The provided 'rko' is either enqueued or destroyed.
+ *
+ * @returns 1 if op was enqueued or 0 if queue is disabled and
+ * there was no replyq to enqueue on in which case the rko is destroyed.
+ *
+ * @locality any thread
+ * @locks rkq MUST BE locked
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_q_reenq(rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ return rd_kafka_q_enq1(rkq, rko, rkq, 1 /*at head*/, 0 /*don't lock*/);
+}
+
+
+/**
+ * Dequeue 'rko' from queue 'rkq'.
+ *
+ * NOTE: rkq_lock MUST be held
+ * Locality: any thread
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_deq0(rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_dassert(rkq->rkq_qlen > 0 &&
+ rkq->rkq_qsize >= (int64_t)rko->rko_len);
+
+ TAILQ_REMOVE(&rkq->rkq_q, rko, rko_link);
+ rkq->rkq_qlen--;
+ rkq->rkq_qsize -= rko->rko_len;
+}
+
+
+/**
+ * @brief Mark queue as served / read.
+ *
+ * This is currently used by the queue reader side to reset the io-event
+ * wakeup flag.
+ *
+ * Should be called by all queue readers.
+ *
+ * @locks_required rkq must be locked.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_q_mark_served(rd_kafka_q_t *rkq) {
+ if (rkq->rkq_qio)
+ rkq->rkq_qio->sent = rd_false;
+}
+
+
+/**
+ * Concat all elements of 'srcq' onto tail of 'rkq'.
+ * 'rkq' will be be locked (if 'do_lock'==1), but 'srcq' will not.
+ * NOTE: 'srcq' will be reset.
+ *
+ * Locality: any thread.
+ *
+ * @returns 0 if operation was performed or -1 if rkq is disabled.
+ */
+static RD_INLINE RD_UNUSED int
+rd_kafka_q_concat0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) {
+ int r = 0;
+
+ while (srcq->rkq_fwdq) /* Resolve source queue */
+ srcq = srcq->rkq_fwdq;
+ if (unlikely(srcq->rkq_qlen == 0))
+ return 0; /* Don't do anything if source queue is empty */
+
+ if (do_lock)
+ mtx_lock(&rkq->rkq_lock);
+ if (!rkq->rkq_fwdq) {
+ rd_kafka_op_t *rko;
+
+ rd_dassert(TAILQ_EMPTY(&srcq->rkq_q) || srcq->rkq_qlen > 0);
+ if (unlikely(!(rkq->rkq_flags & RD_KAFKA_Q_F_READY))) {
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+ return -1;
+ }
+ /* First insert any prioritized ops from srcq
+ * in the right position in rkq. */
+ while ((rko = TAILQ_FIRST(&srcq->rkq_q)) && rko->rko_prio > 0) {
+ TAILQ_REMOVE(&srcq->rkq_q, rko, rko_link);
+ TAILQ_INSERT_SORTED(&rkq->rkq_q, rko, rd_kafka_op_t *,
+ rko_link, rd_kafka_op_cmp_prio);
+ }
+
+ TAILQ_CONCAT(&rkq->rkq_q, &srcq->rkq_q, rko_link);
+ if (rkq->rkq_qlen == 0)
+ rd_kafka_q_io_event(rkq);
+ rkq->rkq_qlen += srcq->rkq_qlen;
+ rkq->rkq_qsize += srcq->rkq_qsize;
+ cnd_signal(&rkq->rkq_cond);
+
+ rd_kafka_q_mark_served(srcq);
+ rd_kafka_q_reset(srcq);
+ } else
+ r = rd_kafka_q_concat0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq,
+ srcq, rkq->rkq_fwdq ? do_lock : 0);
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+
+ return r;
+}
+
+#define rd_kafka_q_concat(dstq, srcq) rd_kafka_q_concat0(dstq, srcq, 1 /*lock*/)
+
+
+/**
+ * @brief Prepend all elements of 'srcq' onto head of 'rkq'.
+ * 'rkq' will be be locked (if 'do_lock'==1), but 'srcq' will not.
+ * 'srcq' will be reset.
+ *
+ * @remark Will not respect priority of ops, srcq will be prepended in its
+ * original form to rkq.
+ *
+ * @locality any thread.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_q_prepend0(rd_kafka_q_t *rkq, rd_kafka_q_t *srcq, int do_lock) {
+ if (do_lock)
+ mtx_lock(&rkq->rkq_lock);
+ if (!rkq->rkq_fwdq && !srcq->rkq_fwdq) {
+ /* FIXME: prio-aware */
+ /* Concat rkq on srcq */
+ TAILQ_CONCAT(&srcq->rkq_q, &rkq->rkq_q, rko_link);
+ /* Move srcq to rkq */
+ TAILQ_MOVE(&rkq->rkq_q, &srcq->rkq_q, rko_link);
+ if (rkq->rkq_qlen == 0 && srcq->rkq_qlen > 0)
+ rd_kafka_q_io_event(rkq);
+ rkq->rkq_qlen += srcq->rkq_qlen;
+ rkq->rkq_qsize += srcq->rkq_qsize;
+
+ rd_kafka_q_mark_served(srcq);
+ rd_kafka_q_reset(srcq);
+ } else
+ rd_kafka_q_prepend0(rkq->rkq_fwdq ? rkq->rkq_fwdq : rkq,
+ srcq->rkq_fwdq ? srcq->rkq_fwdq : srcq,
+ rkq->rkq_fwdq ? do_lock : 0);
+ if (do_lock)
+ mtx_unlock(&rkq->rkq_lock);
+}
+
+#define rd_kafka_q_prepend(dstq, srcq) \
+ rd_kafka_q_prepend0(dstq, srcq, 1 /*lock*/)
+
+
+/* Returns the number of elements in the queue */
+static RD_INLINE RD_UNUSED int rd_kafka_q_len(rd_kafka_q_t *rkq) {
+ int qlen;
+ rd_kafka_q_t *fwdq;
+ mtx_lock(&rkq->rkq_lock);
+ if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ qlen = rkq->rkq_qlen;
+ mtx_unlock(&rkq->rkq_lock);
+ } else {
+ mtx_unlock(&rkq->rkq_lock);
+ qlen = rd_kafka_q_len(fwdq);
+ rd_kafka_q_destroy(fwdq);
+ }
+ return qlen;
+}
+
+/* Returns the total size of elements in the queue */
+static RD_INLINE RD_UNUSED uint64_t rd_kafka_q_size(rd_kafka_q_t *rkq) {
+ uint64_t sz;
+ rd_kafka_q_t *fwdq;
+ mtx_lock(&rkq->rkq_lock);
+ if (!(fwdq = rd_kafka_q_fwd_get(rkq, 0))) {
+ sz = rkq->rkq_qsize;
+ mtx_unlock(&rkq->rkq_lock);
+ } else {
+ mtx_unlock(&rkq->rkq_lock);
+ sz = rd_kafka_q_size(fwdq);
+ rd_kafka_q_destroy(fwdq);
+ }
+ return sz;
+}
+
+/**
+ * @brief Construct a temporary on-stack replyq with increased
+ * \p rkq refcount (unless NULL), version, and debug id.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_replyq_t
+rd_kafka_replyq_make(rd_kafka_q_t *rkq, int version, const char *id) {
+ rd_kafka_replyq_t replyq = RD_ZERO_INIT;
+
+ if (rkq) {
+ replyq.q = rd_kafka_q_keep(rkq);
+ replyq.version = version;
+#if ENABLE_DEVEL
+ replyq._id = rd_strdup(id);
+#endif
+ }
+
+ return replyq;
+}
+
+/* Construct temporary on-stack replyq with increased Q refcount and
+ * optional VERSION. */
+#define RD_KAFKA_REPLYQ(Q, VERSION) \
+ rd_kafka_replyq_make(Q, VERSION, __FUNCTION__)
+
+/* Construct temporary on-stack replyq for indicating no replyq. */
+#if ENABLE_DEVEL
+#define RD_KAFKA_NO_REPLYQ \
+ (rd_kafka_replyq_t) { \
+ NULL, 0, NULL \
+ }
+#else
+#define RD_KAFKA_NO_REPLYQ \
+ (rd_kafka_replyq_t) { \
+ NULL, 0 \
+ }
+#endif
+
+
+/**
+ * @returns true if the replyq is valid, else false.
+ */
+static RD_INLINE RD_UNUSED rd_bool_t
+rd_kafka_replyq_is_valid(rd_kafka_replyq_t *replyq) {
+ rd_bool_t valid = rd_true;
+
+ if (!replyq->q)
+ return rd_false;
+
+ rd_kafka_q_lock(replyq->q);
+ valid = rd_kafka_q_ready(replyq->q);
+ rd_kafka_q_unlock(replyq->q);
+
+ return valid;
+}
+
+
+
+/**
+ * Set up replyq.
+ * Q refcnt is increased.
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_set_replyq(rd_kafka_replyq_t *replyq,
+ rd_kafka_q_t *rkq,
+ int32_t version) {
+ replyq->q = rkq ? rd_kafka_q_keep(rkq) : NULL;
+ replyq->version = version;
+#if ENABLE_DEVEL
+ replyq->_id = rd_strdup(__FUNCTION__);
+#endif
+}
+
+/**
+ * Set rko's replyq with an optional version (versionptr != NULL).
+ * Q refcnt is increased.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_op_set_replyq(rd_kafka_op_t *rko,
+ rd_kafka_q_t *rkq,
+ rd_atomic32_t *versionptr) {
+ rd_kafka_set_replyq(&rko->rko_replyq, rkq,
+ versionptr ? rd_atomic32_get(versionptr) : 0);
+}
+
+/* Set reply rko's version from replyq's version */
+#define rd_kafka_op_get_reply_version(REPLY_RKO, ORIG_RKO) \
+ do { \
+ (REPLY_RKO)->rko_version = (ORIG_RKO)->rko_replyq.version; \
+ } while (0)
+
+
+/* Clear replyq holder without decreasing any .q references. */
+static RD_INLINE RD_UNUSED void
+rd_kafka_replyq_clear(rd_kafka_replyq_t *replyq) {
+ memset(replyq, 0, sizeof(*replyq));
+}
+
+/**
+ * @brief Make a copy of \p src in \p dst, with its own queue reference
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_replyq_copy(rd_kafka_replyq_t *dst,
+ rd_kafka_replyq_t *src) {
+ dst->version = src->version;
+ dst->q = src->q;
+ if (dst->q)
+ rd_kafka_q_keep(dst->q);
+#if ENABLE_DEVEL
+ if (src->_id)
+ dst->_id = rd_strdup(src->_id);
+ else
+ dst->_id = NULL;
+#endif
+}
+
+
+/**
+ * Clear replyq holder and destroy any .q references.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_replyq_destroy(rd_kafka_replyq_t *replyq) {
+ if (replyq->q)
+ rd_kafka_q_destroy(replyq->q);
+#if ENABLE_DEVEL
+ if (replyq->_id) {
+ rd_free(replyq->_id);
+ replyq->_id = NULL;
+ }
+#endif
+ rd_kafka_replyq_clear(replyq);
+}
+
+
+/**
+ * @brief Wrapper for rd_kafka_q_enq() that takes a replyq,
+ * steals its queue reference, enqueues the op with the replyq version,
+ * and then destroys the queue reference.
+ *
+ * If \p version is non-zero it will be updated, else replyq->version.
+ *
+ * @returns Same as rd_kafka_q_enq()
+ */
+static RD_INLINE RD_UNUSED int rd_kafka_replyq_enq(rd_kafka_replyq_t *replyq,
+ rd_kafka_op_t *rko,
+ int version) {
+ rd_kafka_q_t *rkq = replyq->q;
+ int r;
+
+ if (version)
+ rko->rko_version = version;
+ else
+ rko->rko_version = replyq->version;
+
+ /* The replyq queue reference is done after we've enqueued the rko
+ * so clear it here. */
+ replyq->q = NULL; /* destroyed separately below */
+
+#if ENABLE_DEVEL
+ if (replyq->_id) {
+ rd_free(replyq->_id);
+ replyq->_id = NULL;
+ }
+#endif
+
+ /* Retain replyq->version since it is used by buf_callback
+ * when dispatching the callback. */
+
+ r = rd_kafka_q_enq(rkq, rko);
+
+ rd_kafka_q_destroy(rkq);
+
+ return r;
+}
+
+
+
+rd_kafka_op_t *rd_kafka_q_pop_serve(rd_kafka_q_t *rkq,
+ rd_ts_t timeout_us,
+ int32_t version,
+ rd_kafka_q_cb_type_t cb_type,
+ rd_kafka_q_serve_cb_t *callback,
+ void *opaque);
+rd_kafka_op_t *
+rd_kafka_q_pop(rd_kafka_q_t *rkq, rd_ts_t timeout_us, int32_t version);
+int rd_kafka_q_serve(rd_kafka_q_t *rkq,
+ int timeout_ms,
+ int max_cnt,
+ rd_kafka_q_cb_type_t cb_type,
+ rd_kafka_q_serve_cb_t *callback,
+ void *opaque);
+
+
+int rd_kafka_q_move_cnt(rd_kafka_q_t *dstq,
+ rd_kafka_q_t *srcq,
+ int cnt,
+ int do_locks);
+
+int rd_kafka_q_serve_rkmessages(rd_kafka_q_t *rkq,
+ int timeout_ms,
+ rd_kafka_message_t **rkmessages,
+ size_t rkmessages_size);
+rd_kafka_resp_err_t rd_kafka_q_wait_result(rd_kafka_q_t *rkq, int timeout_ms);
+
+int rd_kafka_q_apply(rd_kafka_q_t *rkq,
+ int (*callback)(rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko,
+ void *opaque),
+ void *opaque);
+
+void rd_kafka_q_fix_offsets(rd_kafka_q_t *rkq,
+ int64_t min_offset,
+ int64_t base_offset);
+
+/**
+ * @returns the last op in the queue matching \p op_type and \p allow_err (bool)
+ * @remark The \p rkq must be properly locked before this call, the returned rko
+ * is not removed from the queue and may thus not be held for longer
+ * than the lock is held.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_op_t *
+rd_kafka_q_last(rd_kafka_q_t *rkq, rd_kafka_op_type_t op_type, int allow_err) {
+ rd_kafka_op_t *rko;
+ TAILQ_FOREACH_REVERSE(rko, &rkq->rkq_q, rd_kafka_op_tailq, rko_link) {
+ if (rko->rko_type == op_type && (allow_err || !rko->rko_err))
+ return rko;
+ }
+
+ return NULL;
+}
+
+void rd_kafka_q_io_event_enable(rd_kafka_q_t *rkq,
+ rd_socket_t fd,
+ const void *payload,
+ size_t size);
+
+/* Public interface */
+struct rd_kafka_queue_s {
+ rd_kafka_q_t *rkqu_q;
+ rd_kafka_t *rkqu_rk;
+ int rkqu_is_owner; /**< Is owner/creator of rkqu_q */
+};
+
+
+rd_kafka_queue_t *rd_kafka_queue_new0(rd_kafka_t *rk, rd_kafka_q_t *rkq);
+
+void rd_kafka_q_dump(FILE *fp, rd_kafka_q_t *rkq);
+
+extern int RD_TLS rd_kafka_yield_thread;
+
+
+
+/**
+ * @name Enqueue op once
+ * @{
+ */
+
+/**
+ * @brief Minimal rd_kafka_op_t wrapper that ensures that
+ * the op is only enqueued on the provided queue once.
+ *
+ * Typical use-case is for an op to be triggered from multiple sources,
+ * but at most once, such as from a timer and some other source.
+ */
+typedef struct rd_kafka_enq_once_s {
+ mtx_t lock;
+ int refcnt;
+ rd_kafka_op_t *rko;
+ rd_kafka_replyq_t replyq;
+} rd_kafka_enq_once_t;
+
+
+/**
+ * @brief Allocate and set up a new eonce and set the initial refcount to 1.
+ * @remark This is to be called by the owner of the rko.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_enq_once_t *
+rd_kafka_enq_once_new(rd_kafka_op_t *rko, rd_kafka_replyq_t replyq) {
+ rd_kafka_enq_once_t *eonce = rd_calloc(1, sizeof(*eonce));
+ mtx_init(&eonce->lock, mtx_plain);
+ eonce->rko = rko;
+ eonce->replyq = replyq; /* struct copy */
+ eonce->refcnt = 1;
+ return eonce;
+}
+
+/**
+ * @brief Re-enable triggering of a eonce even after it has been triggered
+ * once.
+ *
+ * @remark This is to be called by the owner.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_enq_once_reenable(rd_kafka_enq_once_t *eonce,
+ rd_kafka_op_t *rko,
+ rd_kafka_replyq_t replyq) {
+ mtx_lock(&eonce->lock);
+ eonce->rko = rko;
+ rd_kafka_replyq_destroy(&eonce->replyq);
+ eonce->replyq = replyq; /* struct copy */
+ mtx_unlock(&eonce->lock);
+}
+
+
+/**
+ * @brief Free eonce and its resources. Must only be called with refcnt==0
+ * and eonce->lock NOT held.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_enq_once_destroy0(rd_kafka_enq_once_t *eonce) {
+ /* This must not be called with the rko or replyq still set, which would
+ * indicate that no enqueueing was performed and that the owner
+ * did not clean up, which is a bug. */
+ rd_assert(!eonce->rko);
+ rd_assert(!eonce->replyq.q);
+#if ENABLE_DEVEL
+ rd_assert(!eonce->replyq._id);
+#endif
+ rd_assert(eonce->refcnt == 0);
+
+ mtx_destroy(&eonce->lock);
+ rd_free(eonce);
+}
+
+
+/**
+ * @brief Increment refcount for source (non-owner), such as a timer.
+ *
+ * @param srcdesc a human-readable descriptive string of the source.
+ * May be used for future debugging.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_enq_once_add_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) {
+ mtx_lock(&eonce->lock);
+ eonce->refcnt++;
+ mtx_unlock(&eonce->lock);
+}
+
+
+/**
+ * @brief Decrement refcount for source (non-owner), such as a timer.
+ *
+ * @param srcdesc a human-readable descriptive string of the source.
+ * May be used for future debugging.
+ *
+ * @remark Must only be called from the owner with the owner
+ * still holding its own refcount.
+ * This API is used to undo an add_source() from the
+ * same code.
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_enq_once_del_source(rd_kafka_enq_once_t *eonce, const char *srcdesc) {
+ int do_destroy;
+
+ mtx_lock(&eonce->lock);
+ rd_assert(eonce->refcnt > 0);
+ eonce->refcnt--;
+ do_destroy = eonce->refcnt == 0;
+ mtx_unlock(&eonce->lock);
+
+ if (do_destroy) {
+ /* We're the last refcount holder, clean up eonce. */
+ rd_kafka_enq_once_destroy0(eonce);
+ }
+}
+
+/**
+ * @brief Trigger a source's reference where the eonce resides on
+ * an rd_list_t. This is typically used as a free_cb for
+ * rd_list_destroy() and the trigger error code is
+ * always RD_KAFKA_RESP_ERR__DESTROY.
+ */
+void rd_kafka_enq_once_trigger_destroy(void *ptr);
+
+
+/**
+ * @brief Decrement refcount for source (non-owner) and return the rko
+ * if still set.
+ *
+ * @remark Must only be called by sources (non-owner) but only on the
+ * the owner's thread to make sure the rko is not freed.
+ *
+ * @remark The rko remains set on the eonce.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_op_t *
+rd_kafka_enq_once_del_source_return(rd_kafka_enq_once_t *eonce,
+ const char *srcdesc) {
+ rd_bool_t do_destroy;
+ rd_kafka_op_t *rko;
+
+ mtx_lock(&eonce->lock);
+
+ rd_assert(eonce->refcnt > 0);
+ /* Owner must still hold a eonce reference, or the eonce must
+ * have been disabled by the owner (no rko) */
+ rd_assert(eonce->refcnt > 1 || !eonce->rko);
+ eonce->refcnt--;
+ do_destroy = eonce->refcnt == 0;
+
+ rko = eonce->rko;
+ mtx_unlock(&eonce->lock);
+
+ if (do_destroy) {
+ /* We're the last refcount holder, clean up eonce. */
+ rd_kafka_enq_once_destroy0(eonce);
+ }
+
+ return rko;
+}
+
+/**
+ * @brief Trigger enqueuing of the rko (unless already enqueued)
+ * and drops the source's refcount.
+ *
+ * @remark Must only be called by sources (non-owner).
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_enq_once_trigger(rd_kafka_enq_once_t *eonce,
+ rd_kafka_resp_err_t err,
+ const char *srcdesc) {
+ int do_destroy;
+ rd_kafka_op_t *rko = NULL;
+ rd_kafka_replyq_t replyq = RD_ZERO_INIT;
+
+ mtx_lock(&eonce->lock);
+
+ rd_assert(eonce->refcnt > 0);
+ eonce->refcnt--;
+ do_destroy = eonce->refcnt == 0;
+
+ if (eonce->rko) {
+ /* Not already enqueued, do it.
+ * Detach the rko and replyq from the eonce and unlock the eonce
+ * before enqueuing rko on reply to avoid recursive locks
+ * if the replyq has been disabled and the ops
+ * destructor is called (which might then access the eonce
+ * to clean up). */
+ rko = eonce->rko;
+ replyq = eonce->replyq;
+
+ eonce->rko = NULL;
+ rd_kafka_replyq_clear(&eonce->replyq);
+
+ /* Reply is enqueued at the end of this function */
+ }
+ mtx_unlock(&eonce->lock);
+
+ if (do_destroy) {
+ /* We're the last refcount holder, clean up eonce. */
+ rd_kafka_enq_once_destroy0(eonce);
+ }
+
+ if (rko) {
+ rko->rko_err = err;
+ rd_kafka_replyq_enq(&replyq, rko, replyq.version);
+ rd_kafka_replyq_destroy(&replyq);
+ }
+}
+
+/**
+ * @brief Destroy eonce, must only be called by the owner.
+ * There may be outstanding refcounts by non-owners after this call
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_enq_once_destroy(rd_kafka_enq_once_t *eonce) {
+ int do_destroy;
+
+ mtx_lock(&eonce->lock);
+ rd_assert(eonce->refcnt > 0);
+ eonce->refcnt--;
+ do_destroy = eonce->refcnt == 0;
+
+ eonce->rko = NULL;
+ rd_kafka_replyq_destroy(&eonce->replyq);
+
+ mtx_unlock(&eonce->lock);
+
+ if (do_destroy) {
+ /* We're the last refcount holder, clean up eonce. */
+ rd_kafka_enq_once_destroy0(eonce);
+ }
+}
+
+
+/**
+ * @brief Disable the owner's eonce, extracting, resetting and returning
+ * the \c rko object.
+ *
+ * This is the same as rd_kafka_enq_once_destroy() but returning
+ * the rko.
+ *
+ * Use this for owner-thread triggering where the enqueuing of the
+ * rko on the replyq is not necessary.
+ *
+ * @returns the eonce's rko object, if still available, else NULL.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_op_t *
+rd_kafka_enq_once_disable(rd_kafka_enq_once_t *eonce) {
+ int do_destroy;
+ rd_kafka_op_t *rko;
+
+ mtx_lock(&eonce->lock);
+ rd_assert(eonce->refcnt > 0);
+ eonce->refcnt--;
+ do_destroy = eonce->refcnt == 0;
+
+ /* May be NULL */
+ rko = eonce->rko;
+ eonce->rko = NULL;
+ rd_kafka_replyq_destroy(&eonce->replyq);
+
+ mtx_unlock(&eonce->lock);
+
+ if (do_destroy) {
+ /* We're the last refcount holder, clean up eonce. */
+ rd_kafka_enq_once_destroy0(eonce);
+ }
+
+ return rko;
+}
+
+
+/**@}*/
+
+
+#endif /* _RDKAFKA_QUEUE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c
new file mode 100644
index 000000000..c83f1f1a4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_range_assignor.c
@@ -0,0 +1,138 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_assignor.h"
+
+
+
+/**
+ * Source:
+ * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RangeAssignor.java
+ *
+ * The range assignor works on a per-topic basis. For each topic, we lay out the
+ * available partitions in numeric order and the consumers in lexicographic
+ * order. We then divide the number of partitions by the total number of
+ * consumers to determine the number of partitions to assign to each consumer.
+ * If it does not evenly divide, then the first few consumers will have one
+ * extra partition.
+ *
+ * For example, suppose there are two consumers C0 and C1, two topics t0 and t1,
+ * and each topic has 3 partitions, resulting in partitions t0p0, t0p1, t0p2,
+ * t1p0, t1p1, and t1p2.
+ *
+ * The assignment will be:
+ * C0: [t0p0, t0p1, t1p0, t1p1]
+ * C1: [t0p2, t1p2]
+ */
+
+rd_kafka_resp_err_t
+rd_kafka_range_assignor_assign_cb(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas,
+ const char *member_id,
+ const rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ rd_kafka_assignor_topic_t **eligible_topics,
+ size_t eligible_topic_cnt,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque) {
+ unsigned int ti;
+ int i;
+
+ /* The range assignor works on a per-topic basis. */
+ for (ti = 0; ti < eligible_topic_cnt; ti++) {
+ rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti];
+ int numPartitionsPerConsumer;
+ int consumersWithExtraPartition;
+
+ /* For each topic, we lay out the available partitions in
+ * numeric order and the consumers in lexicographic order. */
+ rd_list_sort(&eligible_topic->members,
+ rd_kafka_group_member_cmp);
+
+ /* We then divide the number of partitions by the total number
+ * of consumers to determine the number of partitions to assign
+ * to each consumer. */
+ numPartitionsPerConsumer =
+ eligible_topic->metadata->partition_cnt /
+ rd_list_cnt(&eligible_topic->members);
+
+ /* If it does not evenly divide, then the first few consumers
+ * will have one extra partition. */
+ consumersWithExtraPartition =
+ eligible_topic->metadata->partition_cnt %
+ rd_list_cnt(&eligible_topic->members);
+
+ rd_kafka_dbg(rk, CGRP, "ASSIGN",
+ "range: Topic %s with %d partition(s) and "
+ "%d subscribing member(s)",
+ eligible_topic->metadata->topic,
+ eligible_topic->metadata->partition_cnt,
+ rd_list_cnt(&eligible_topic->members));
+
+ for (i = 0; i < rd_list_cnt(&eligible_topic->members); i++) {
+ rd_kafka_group_member_t *rkgm =
+ rd_list_elem(&eligible_topic->members, i);
+ int start = numPartitionsPerConsumer * i +
+ RD_MIN(i, consumersWithExtraPartition);
+ int length =
+ numPartitionsPerConsumer +
+ (i + 1 > consumersWithExtraPartition ? 0 : 1);
+
+ if (length == 0)
+ continue;
+
+ rd_kafka_dbg(rk, CGRP, "ASSIGN",
+ "range: Member \"%s\": "
+ "assigned topic %s partitions %d..%d",
+ rkgm->rkgm_member_id->str,
+ eligible_topic->metadata->topic, start,
+ start + length - 1);
+ rd_kafka_topic_partition_list_add_range(
+ rkgm->rkgm_assignment,
+ eligible_topic->metadata->topic, start,
+ start + length - 1);
+ }
+ }
+
+ return 0;
+}
+
+
+
+/**
+ * @brief Initialzie and add range assignor.
+ */
+rd_kafka_resp_err_t rd_kafka_range_assignor_init(rd_kafka_t *rk) {
+ return rd_kafka_assignor_add(
+ rk, "consumer", "range", RD_KAFKA_REBALANCE_PROTOCOL_EAGER,
+ rd_kafka_range_assignor_assign_cb,
+ rd_kafka_assignor_get_metadata_with_empty_userdata, NULL, NULL,
+ NULL, NULL);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c
new file mode 100644
index 000000000..12d9eb30e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.c
@@ -0,0 +1,5378 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <stdarg.h>
+
+#include "rdkafka_int.h"
+#include "rdkafka_request.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_offset.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_metadata.h"
+#include "rdkafka_msgset.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_txnmgr.h"
+#include "rdkafka_sasl.h"
+
+#include "rdrand.h"
+#include "rdstring.h"
+#include "rdunittest.h"
+
+
+/**
+ * Kafka protocol request and response handling.
+ * All of this code runs in the broker thread and uses op queues for
+ * propagating results back to the various sub-systems operating in
+ * other threads.
+ */
+
+
+/* RD_KAFKA_ERR_ACTION_.. to string map */
+static const char *rd_kafka_actions_descs[] = {
+ "Permanent", "Ignore", "Refresh", "Retry",
+ "Inform", "Special", "MsgNotPersisted", "MsgPossiblyPersisted",
+ "MsgPersisted", NULL,
+};
+
+const char *rd_kafka_actions2str(int actions) {
+ static RD_TLS char actstr[128];
+ return rd_flags2str(actstr, sizeof(actstr), rd_kafka_actions_descs,
+ actions);
+}
+
+
+/**
+ * @brief Decide action(s) to take based on the returned error code.
+ *
+ * The optional var-args is a .._ACTION_END terminated list
+ * of action,error tuples which overrides the general behaviour.
+ * It is to be read as: for \p error, return \p action(s).
+ *
+ * @warning \p request, \p rkbuf and \p rkb may be NULL.
+ */
+int rd_kafka_err_action(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ const rd_kafka_buf_t *request,
+ ...) {
+ va_list ap;
+ int actions = 0;
+ int exp_act;
+
+ if (!err)
+ return 0;
+
+ /* Match explicitly defined error mappings first. */
+ va_start(ap, request);
+ while ((exp_act = va_arg(ap, int))) {
+ int exp_err = va_arg(ap, int);
+
+ if (err == exp_err)
+ actions |= exp_act;
+ }
+ va_end(ap);
+
+ /* Explicit error match. */
+ if (actions) {
+ if (err && rkb && request)
+ rd_rkb_dbg(
+ rkb, BROKER, "REQERR",
+ "%sRequest failed: %s: explicit actions %s",
+ rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey),
+ rd_kafka_err2str(err),
+ rd_kafka_actions2str(actions));
+
+ return actions;
+ }
+
+ /* Default error matching */
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_NO_ERROR:
+ break;
+ case RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION:
+ case RD_KAFKA_RESP_ERR_BROKER_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+ case RD_KAFKA_RESP_ERR__WAIT_COORD:
+ /* Request metadata information update */
+ actions |= RD_KAFKA_ERR_ACTION_REFRESH |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
+ break;
+
+ case RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR:
+ /* Request metadata update and retry */
+ actions |= RD_KAFKA_ERR_ACTION_REFRESH |
+ RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
+ break;
+
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ case RD_KAFKA_RESP_ERR__TIMED_OUT:
+ case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
+ case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND:
+ actions |= RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED;
+ break;
+
+ case RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS:
+ /* Client-side wait-response/in-queue timeout */
+ case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+ actions |= RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
+ break;
+
+ case RD_KAFKA_RESP_ERR__PURGE_INFLIGHT:
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED;
+ break;
+
+ case RD_KAFKA_RESP_ERR__BAD_MSG:
+ /* Buffer parse failures are typically a client-side bug,
+ * treat them as permanent failures. */
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED;
+ break;
+
+ case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
+ actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ break;
+
+ case RD_KAFKA_RESP_ERR__DESTROY:
+ case RD_KAFKA_RESP_ERR_INVALID_SESSION_TIMEOUT:
+ case RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE:
+ case RD_KAFKA_RESP_ERR__PURGE_QUEUE:
+ default:
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED;
+ break;
+ }
+
+ /* Fatal or permanent errors are not retriable */
+ if (actions &
+ (RD_KAFKA_ERR_ACTION_FATAL | RD_KAFKA_ERR_ACTION_PERMANENT))
+ actions &= ~RD_KAFKA_ERR_ACTION_RETRY;
+
+ /* If no request buffer was specified, which might be the case
+ * in certain error call chains, mask out the retry action. */
+ if (!request)
+ actions &= ~RD_KAFKA_ERR_ACTION_RETRY;
+ else if (request->rkbuf_reqhdr.ApiKey != RD_KAFKAP_Produce)
+ /* Mask out message-related bits for non-Produce requests */
+ actions &= ~RD_KAFKA_ERR_ACTION_MSG_FLAGS;
+
+ if (err && actions && rkb && request)
+ rd_rkb_dbg(
+ rkb, BROKER, "REQERR", "%sRequest failed: %s: actions %s",
+ rd_kafka_ApiKey2str(request->rkbuf_reqhdr.ApiKey),
+ rd_kafka_err2str(err), rd_kafka_actions2str(actions));
+
+ return actions;
+}
+
+
+/**
+ * @brief Read a list of topic+partitions+extra from \p rkbuf.
+ *
+ * @param rkbuf buffer to read from
+ * @param fields An array of fields to read from the buffer and set on
+ * the rktpar object, in the specified order, must end
+ * with RD_KAFKA_TOPIC_PARTITION_FIELD_END.
+ *
+ * @returns a newly allocated list on success, or NULL on parse error.
+ */
+rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions(
+ rd_kafka_buf_t *rkbuf,
+ size_t estimated_part_cnt,
+ const rd_kafka_topic_partition_field_t *fields) {
+ const int log_decode_errors = LOG_ERR;
+ int32_t TopicArrayCnt;
+ rd_kafka_topic_partition_list_t *parts = NULL;
+
+ rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX);
+
+ parts = rd_kafka_topic_partition_list_new(
+ RD_MAX(TopicArrayCnt * 4, (int)estimated_part_cnt));
+
+ while (TopicArrayCnt-- > 0) {
+ rd_kafkap_str_t kTopic;
+ int32_t PartArrayCnt;
+ char *topic;
+
+ rd_kafka_buf_read_str(rkbuf, &kTopic);
+ rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt,
+ RD_KAFKAP_PARTITIONS_MAX);
+
+ RD_KAFKAP_STR_DUPA(&topic, &kTopic);
+
+ while (PartArrayCnt-- > 0) {
+ int32_t Partition = -1, Epoch = -1234,
+ CurrentLeaderEpoch = -1234;
+ int64_t Offset = -1234;
+ int16_t ErrorCode = 0;
+ rd_kafka_topic_partition_t *rktpar;
+ int fi;
+
+ /*
+ * Read requested fields
+ */
+ for (fi = 0;
+ fields[fi] != RD_KAFKA_TOPIC_PARTITION_FIELD_END;
+ fi++) {
+ switch (fields[fi]) {
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION:
+ rd_kafka_buf_read_i32(rkbuf,
+ &Partition);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET:
+ rd_kafka_buf_read_i64(rkbuf, &Offset);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH:
+ rd_kafka_buf_read_i32(
+ rkbuf, &CurrentLeaderEpoch);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH:
+ rd_kafka_buf_read_i32(rkbuf, &Epoch);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_ERR:
+ rd_kafka_buf_read_i16(rkbuf,
+ &ErrorCode);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA:
+ rd_assert(!*"metadata not implemented");
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP:
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_END:
+ break;
+ }
+ }
+
+ rktpar = rd_kafka_topic_partition_list_add(parts, topic,
+ Partition);
+ /* Use dummy sentinel values that are unlikely to be
+ * seen from the broker to know if we are to set these
+ * fields or not. */
+ if (Offset != -1234)
+ rktpar->offset = Offset;
+ if (Epoch != -1234)
+ rd_kafka_topic_partition_set_leader_epoch(
+ rktpar, Epoch);
+ if (CurrentLeaderEpoch != -1234)
+ rd_kafka_topic_partition_set_current_leader_epoch(
+ rktpar, CurrentLeaderEpoch);
+ rktpar->err = ErrorCode;
+
+
+ rd_kafka_buf_skip_tags(rkbuf);
+ }
+
+ rd_kafka_buf_skip_tags(rkbuf);
+ }
+
+ return parts;
+
+err_parse:
+ if (parts)
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ return NULL;
+}
+
+
+/**
+ * @brief Write a list of topic+partitions+offsets+extra to \p rkbuf
+ *
+ * @returns the number of partitions written to buffer.
+ *
+ * @remark The \p parts list MUST be sorted.
+ */
+int rd_kafka_buf_write_topic_partitions(
+ rd_kafka_buf_t *rkbuf,
+ const rd_kafka_topic_partition_list_t *parts,
+ rd_bool_t skip_invalid_offsets,
+ rd_bool_t only_invalid_offsets,
+ const rd_kafka_topic_partition_field_t *fields) {
+ size_t of_TopicArrayCnt;
+ size_t of_PartArrayCnt = 0;
+ int TopicArrayCnt = 0, PartArrayCnt = 0;
+ int i;
+ const char *prev_topic = NULL;
+ int cnt = 0;
+
+ rd_assert(!only_invalid_offsets ||
+ (only_invalid_offsets != skip_invalid_offsets));
+
+ /* TopicArrayCnt */
+ of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf);
+
+ for (i = 0; i < parts->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar = &parts->elems[i];
+ int fi;
+
+ if (rktpar->offset < 0) {
+ if (skip_invalid_offsets)
+ continue;
+ } else if (only_invalid_offsets)
+ continue;
+
+ if (!prev_topic || strcmp(rktpar->topic, prev_topic)) {
+ /* Finish previous topic, if any. */
+ if (of_PartArrayCnt > 0) {
+ rd_kafka_buf_finalize_arraycnt(
+ rkbuf, of_PartArrayCnt, PartArrayCnt);
+ /* Tags for previous topic struct */
+ rd_kafka_buf_write_tags(rkbuf);
+ }
+
+
+ /* Topic */
+ rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
+ TopicArrayCnt++;
+ prev_topic = rktpar->topic;
+ /* New topic so reset partition count */
+ PartArrayCnt = 0;
+
+ /* PartitionArrayCnt: updated later */
+ of_PartArrayCnt =
+ rd_kafka_buf_write_arraycnt_pos(rkbuf);
+ }
+
+
+ /*
+ * Write requested fields
+ */
+ for (fi = 0; fields[fi] != RD_KAFKA_TOPIC_PARTITION_FIELD_END;
+ fi++) {
+ switch (fields[fi]) {
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION:
+ rd_kafka_buf_write_i32(rkbuf,
+ rktpar->partition);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET:
+ rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH:
+ rd_kafka_buf_write_i32(
+ rkbuf,
+ rd_kafka_topic_partition_get_current_leader_epoch(
+ rktpar));
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH:
+ rd_kafka_buf_write_i32(
+ rkbuf,
+ rd_kafka_topic_partition_get_leader_epoch(
+ rktpar));
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_ERR:
+ rd_kafka_buf_write_i16(rkbuf, rktpar->err);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA:
+ /* Java client 0.9.0 and broker <0.10.0 can't
+ * parse Null metadata fields, so as a
+ * workaround we send an empty string if
+ * it's Null. */
+ if (!rktpar->metadata)
+ rd_kafka_buf_write_str(rkbuf, "", 0);
+ else
+ rd_kafka_buf_write_str(
+ rkbuf, rktpar->metadata,
+ rktpar->metadata_size);
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP:
+ break;
+ case RD_KAFKA_TOPIC_PARTITION_FIELD_END:
+ break;
+ }
+ }
+
+
+ if (fi > 1)
+ /* If there was more than one field written
+ * then this was a struct and thus needs the
+ * struct suffix tags written. */
+ rd_kafka_buf_write_tags(rkbuf);
+
+ PartArrayCnt++;
+ cnt++;
+ }
+
+ if (of_PartArrayCnt > 0) {
+ rd_kafka_buf_finalize_arraycnt(rkbuf, of_PartArrayCnt,
+ PartArrayCnt);
+ /* Tags for topic struct */
+ rd_kafka_buf_write_tags(rkbuf);
+ }
+
+ rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt, TopicArrayCnt);
+
+ return cnt;
+}
+
+
+/**
+ * @brief Send FindCoordinatorRequest.
+ *
+ * @param coordkey is the group.id for RD_KAFKA_COORD_GROUP,
+ * and the transactional.id for RD_KAFKA_COORD_TXN
+ */
+rd_kafka_resp_err_t
+rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_coordtype_t coordtype,
+ const char *coordkey,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_FindCoordinator, 0, 2, NULL);
+
+ if (coordtype != RD_KAFKA_COORD_GROUP && ApiVersion < 1)
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_FindCoordinator, 1,
+ 1 + 2 + strlen(coordkey));
+
+ rd_kafka_buf_write_str(rkbuf, coordkey, -1);
+
+ if (ApiVersion >= 1)
+ rd_kafka_buf_write_i8(rkbuf, (int8_t)coordtype);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Parses a ListOffsets reply.
+ *
+ * Returns the parsed offsets (and errors) in \p offsets which must have been
+ * initialized by caller.
+ *
+ * @returns 0 on success, else an error (\p offsets may be completely or
+ * partially updated, depending on the nature of the error, and per
+ * partition error codes should be checked by the caller).
+ */
+static rd_kafka_resp_err_t
+rd_kafka_parse_ListOffsets(rd_kafka_buf_t *rkbuf,
+ rd_kafka_topic_partition_list_t *offsets) {
+ const int log_decode_errors = LOG_ERR;
+ int32_t TopicArrayCnt;
+ int16_t api_version;
+ rd_kafka_resp_err_t all_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ api_version = rkbuf->rkbuf_reqhdr.ApiVersion;
+
+ if (api_version >= 2)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ /* NOTE:
+ * Broker may return offsets in a different constellation than
+ * in the original request .*/
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
+ while (TopicArrayCnt-- > 0) {
+ rd_kafkap_str_t ktopic;
+ int32_t PartArrayCnt;
+ char *topic_name;
+
+ rd_kafka_buf_read_str(rkbuf, &ktopic);
+ rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt);
+
+ RD_KAFKAP_STR_DUPA(&topic_name, &ktopic);
+
+ while (PartArrayCnt-- > 0) {
+ int32_t kpartition;
+ int16_t ErrorCode;
+ int32_t OffsetArrayCnt;
+ int64_t Offset = -1;
+ int32_t LeaderEpoch = -1;
+ rd_kafka_topic_partition_t *rktpar;
+
+ rd_kafka_buf_read_i32(rkbuf, &kpartition);
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+ if (api_version >= 1) {
+ int64_t Timestamp;
+ rd_kafka_buf_read_i64(rkbuf, &Timestamp);
+ rd_kafka_buf_read_i64(rkbuf, &Offset);
+ if (api_version >= 4)
+ rd_kafka_buf_read_i32(rkbuf,
+ &LeaderEpoch);
+ } else if (api_version == 0) {
+ rd_kafka_buf_read_i32(rkbuf, &OffsetArrayCnt);
+ /* We only request one offset so just grab
+ * the first one. */
+ while (OffsetArrayCnt-- > 0)
+ rd_kafka_buf_read_i64(rkbuf, &Offset);
+ } else {
+ RD_NOTREACHED();
+ }
+
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, topic_name, kpartition);
+ rktpar->err = ErrorCode;
+ rktpar->offset = Offset;
+ rd_kafka_topic_partition_set_leader_epoch(rktpar,
+ LeaderEpoch);
+
+ if (ErrorCode && !all_err)
+ all_err = ErrorCode;
+ }
+ }
+
+ return all_err;
+
+err_parse:
+ return rkbuf->rkbuf_err;
+}
+
+
+
+/**
+ * @brief Parses and handles ListOffsets replies.
+ *
+ * Returns the parsed offsets (and errors) in \p offsets.
+ * \p offsets must be initialized by the caller.
+ *
+ * @returns 0 on success, else an error. \p offsets may be populated on error,
+ * depending on the nature of the error.
+ * On error \p actionsp (unless NULL) is updated with the recommended
+ * error actions.
+ */
+rd_kafka_resp_err_t
+rd_kafka_handle_ListOffsets(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t *offsets,
+ int *actionsp) {
+
+ int actions;
+
+ if (!err)
+ err = rd_kafka_parse_ListOffsets(rkbuf, offsets);
+ if (!err)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ actions = rd_kafka_err_action(
+ rkb, err, request, RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+
+ RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
+
+ RD_KAFKA_ERR_ACTION_REFRESH,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE,
+
+ RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+
+ RD_KAFKA_ERR_ACTION_REFRESH, RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE,
+
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR_LEADER_NOT_AVAILABLE,
+
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR_FENCED_LEADER_EPOCH,
+
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR_UNKNOWN_LEADER_EPOCH,
+
+ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__TRANSPORT,
+
+ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+
+
+ RD_KAFKA_ERR_ACTION_END);
+
+ if (actionsp)
+ *actionsp = actions;
+
+ if (rkb)
+ rd_rkb_dbg(
+ rkb, TOPIC, "OFFSET", "OffsetRequest failed: %s (%s)",
+ rd_kafka_err2str(err), rd_kafka_actions2str(actions));
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ char tmp[256];
+ /* Re-query for leader */
+ rd_snprintf(tmp, sizeof(tmp), "ListOffsetsRequest failed: %s",
+ rd_kafka_err2str(err));
+ rd_kafka_metadata_refresh_known_topics(rk, NULL,
+ rd_true /*force*/, tmp);
+ }
+
+ if ((actions & RD_KAFKA_ERR_ACTION_RETRY) &&
+ rd_kafka_buf_retry(rkb, request))
+ return RD_KAFKA_RESP_ERR__IN_PROGRESS;
+
+ return err;
+}
+
+
+
+/**
+ * @brief Async maker for ListOffsetsRequest.
+ */
+static rd_kafka_resp_err_t
+rd_kafka_make_ListOffsetsRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf,
+ void *make_opaque) {
+ const rd_kafka_topic_partition_list_t *partitions =
+ (const rd_kafka_topic_partition_list_t *)make_opaque;
+ int i;
+ size_t of_TopicArrayCnt = 0, of_PartArrayCnt = 0;
+ const char *last_topic = "";
+ int32_t topic_cnt = 0, part_cnt = 0;
+ int16_t ApiVersion;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_ListOffsets, 0, 5, NULL);
+ if (ApiVersion == -1)
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+
+ /* ReplicaId */
+ rd_kafka_buf_write_i32(rkbuf, -1);
+
+ /* IsolationLevel */
+ if (ApiVersion >= 2)
+ rd_kafka_buf_write_i8(rkbuf,
+ rkb->rkb_rk->rk_conf.isolation_level);
+
+ /* TopicArrayCnt */
+ of_TopicArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0); /* updated later */
+
+ for (i = 0; i < partitions->cnt; i++) {
+ const rd_kafka_topic_partition_t *rktpar =
+ &partitions->elems[i];
+
+ if (strcmp(rktpar->topic, last_topic)) {
+ /* Finish last topic, if any. */
+ if (of_PartArrayCnt > 0)
+ rd_kafka_buf_update_i32(rkbuf, of_PartArrayCnt,
+ part_cnt);
+
+ /* Topic */
+ rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
+ topic_cnt++;
+ last_topic = rktpar->topic;
+ /* New topic so reset partition count */
+ part_cnt = 0;
+
+ /* PartitionArrayCnt: updated later */
+ of_PartArrayCnt = rd_kafka_buf_write_i32(rkbuf, 0);
+ }
+
+ /* Partition */
+ rd_kafka_buf_write_i32(rkbuf, rktpar->partition);
+ part_cnt++;
+
+ if (ApiVersion >= 4)
+ /* CurrentLeaderEpoch */
+ rd_kafka_buf_write_i32(
+ rkbuf,
+ rd_kafka_topic_partition_get_current_leader_epoch(
+ rktpar));
+
+ /* Time/Offset */
+ rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
+
+ if (ApiVersion == 0) {
+ /* MaxNumberOfOffsets */
+ rd_kafka_buf_write_i32(rkbuf, 1);
+ }
+ }
+
+ if (of_PartArrayCnt > 0) {
+ rd_kafka_buf_update_i32(rkbuf, of_PartArrayCnt, part_cnt);
+ rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, topic_cnt);
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_rkb_dbg(rkb, TOPIC, "OFFSET",
+ "ListOffsetsRequest (v%hd, opv %d) "
+ "for %" PRId32 " topic(s) and %" PRId32 " partition(s)",
+ ApiVersion, rkbuf->rkbuf_replyq.version, topic_cnt,
+ partitions->cnt);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Send ListOffsetsRequest for partitions in \p partitions.
+ */
+void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_topic_partition_list_t *partitions,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ rd_kafka_topic_partition_list_t *make_parts;
+
+ make_parts = rd_kafka_topic_partition_list_copy(partitions);
+ rd_kafka_topic_partition_list_sort_by_topic(make_parts);
+
+ rkbuf = rd_kafka_buf_new_request(
+ rkb, RD_KAFKAP_ListOffsets, 1,
+ /* ReplicaId+IsolationLevel+TopicArrayCnt+Topic */
+ 4 + 1 + 4 + 100 +
+ /* PartArrayCnt */
+ 4 +
+ /* partition_cnt * Partition+Time+MaxNumOffs */
+ (make_parts->cnt * (4 + 8 + 4)));
+
+ /* Postpone creating the request contents until time to send,
+ * at which time the ApiVersion is known. */
+ rd_kafka_buf_set_maker(rkbuf, rd_kafka_make_ListOffsetsRequest,
+ make_parts,
+ rd_kafka_topic_partition_list_destroy_free);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+}
+
+
+/**
+ * @brief OffsetForLeaderEpochResponse handler.
+ */
+rd_kafka_resp_err_t rd_kafka_handle_OffsetForLeaderEpoch(
+ rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t **offsets) {
+ const int log_decode_errors = LOG_ERR;
+ int16_t ApiVersion;
+
+ if (err)
+ goto err;
+
+ ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion;
+
+ if (ApiVersion >= 2)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ ApiVersion >= 1 ? RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH
+ : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ *offsets = rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields);
+ if (!*offsets)
+ goto err_parse;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+err:
+ return err;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+ goto err;
+}
+
+
+/**
+ * @brief Send OffsetForLeaderEpochRequest for partition(s).
+ *
+ */
+void rd_kafka_OffsetForLeaderEpochRequest(
+ rd_kafka_broker_t *rkb,
+ rd_kafka_topic_partition_list_t *parts,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_OffsetForLeaderEpoch, 2, 2, NULL);
+ /* If the supported ApiVersions are not yet known,
+ * or this broker doesn't support it, we let this request
+ * succeed or fail later from the broker thread where the
+ * version is checked again. */
+ if (ApiVersion == -1)
+ ApiVersion = 2;
+
+ rkbuf = rd_kafka_buf_new_flexver_request(
+ rkb, RD_KAFKAP_OffsetForLeaderEpoch, 1, 4 + (parts->cnt * 64),
+ ApiVersion >= 4 /*flexver*/);
+
+ /* Sort partitions by topic */
+ rd_kafka_topic_partition_list_sort_by_topic(parts);
+
+ /* Write partition list */
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ /* CurrentLeaderEpoch */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH,
+ /* LeaderEpoch */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ rd_kafka_buf_write_topic_partitions(
+ rkbuf, parts, rd_false /*include invalid offsets*/,
+ rd_false /*skip valid offsets */, fields);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ /* Let caller perform retries */
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+}
+
+
+
+/**
+ * Generic handler for OffsetFetch responses.
+ * Offsets for included partitions will be propagated through the passed
+ * 'offsets' list.
+ *
+ * @param rkbuf response buffer, may be NULL if \p err is set.
+ * @param update_toppar update toppar's committed_offset
+ * @param add_part if true add partitions from the response to \p *offsets,
+ * else just update the partitions that are already
+ * in \p *offsets.
+ */
+rd_kafka_resp_err_t
+rd_kafka_handle_OffsetFetch(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t **offsets,
+ rd_bool_t update_toppar,
+ rd_bool_t add_part,
+ rd_bool_t allow_retry) {
+ const int log_decode_errors = LOG_ERR;
+ int32_t TopicArrayCnt;
+ int64_t offset = RD_KAFKA_OFFSET_INVALID;
+ int16_t ApiVersion;
+ rd_kafkap_str_t metadata;
+ int retry_unstable = 0;
+ int i;
+ int actions;
+ int seen_cnt = 0;
+
+ if (err)
+ goto err;
+
+ ApiVersion = rkbuf->rkbuf_reqhdr.ApiVersion;
+
+ if (ApiVersion >= 3)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ if (!*offsets)
+ *offsets = rd_kafka_topic_partition_list_new(16);
+
+ /* Set default offset for all partitions. */
+ rd_kafka_topic_partition_list_set_offsets(rkb->rkb_rk, *offsets, 0,
+ RD_KAFKA_OFFSET_INVALID,
+ 0 /* !is commit */);
+
+ rd_kafka_buf_read_arraycnt(rkbuf, &TopicArrayCnt, RD_KAFKAP_TOPICS_MAX);
+ for (i = 0; i < TopicArrayCnt; i++) {
+ rd_kafkap_str_t topic;
+ int32_t PartArrayCnt;
+ char *topic_name;
+ int j;
+
+ rd_kafka_buf_read_str(rkbuf, &topic);
+
+ rd_kafka_buf_read_arraycnt(rkbuf, &PartArrayCnt,
+ RD_KAFKAP_PARTITIONS_MAX);
+
+ RD_KAFKAP_STR_DUPA(&topic_name, &topic);
+
+ for (j = 0; j < PartArrayCnt; j++) {
+ int32_t partition;
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_topic_partition_t *rktpar;
+ int32_t LeaderEpoch = -1;
+ int16_t err2;
+
+ rd_kafka_buf_read_i32(rkbuf, &partition);
+ rd_kafka_buf_read_i64(rkbuf, &offset);
+ if (ApiVersion >= 5)
+ rd_kafka_buf_read_i32(rkbuf, &LeaderEpoch);
+ rd_kafka_buf_read_str(rkbuf, &metadata);
+ rd_kafka_buf_read_i16(rkbuf, &err2);
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ rktpar = rd_kafka_topic_partition_list_find(
+ *offsets, topic_name, partition);
+ if (!rktpar && add_part)
+ rktpar = rd_kafka_topic_partition_list_add(
+ *offsets, topic_name, partition);
+ else if (!rktpar) {
+ rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH",
+ "OffsetFetchResponse: %s [%" PRId32
+ "] "
+ "not found in local list: ignoring",
+ topic_name, partition);
+ continue;
+ }
+
+ seen_cnt++;
+
+ rktp = rd_kafka_topic_partition_get_toppar(
+ rk, rktpar, rd_false /*no create on miss*/);
+
+ /* broker reports invalid offset as -1 */
+ if (offset == -1)
+ rktpar->offset = RD_KAFKA_OFFSET_INVALID;
+ else
+ rktpar->offset = offset;
+
+ rd_kafka_topic_partition_set_leader_epoch(rktpar,
+ LeaderEpoch);
+ rktpar->err = err2;
+
+ rd_rkb_dbg(rkb, TOPIC, "OFFSETFETCH",
+ "OffsetFetchResponse: %s [%" PRId32
+ "] "
+ "offset %" PRId64 ", leader epoch %" PRId32
+ ", metadata %d byte(s): %s",
+ topic_name, partition, offset, LeaderEpoch,
+ RD_KAFKAP_STR_LEN(&metadata),
+ rd_kafka_err2name(rktpar->err));
+
+ if (update_toppar && !err2 && rktp) {
+ /* Update toppar's committed offset */
+ rd_kafka_toppar_lock(rktp);
+ rktp->rktp_committed_pos =
+ rd_kafka_topic_partition_get_fetch_pos(
+ rktpar);
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ if (rktpar->err ==
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT)
+ retry_unstable++;
+
+
+ if (rktpar->metadata)
+ rd_free(rktpar->metadata);
+
+ if (RD_KAFKAP_STR_IS_NULL(&metadata)) {
+ rktpar->metadata = NULL;
+ rktpar->metadata_size = 0;
+ } else {
+ rktpar->metadata = RD_KAFKAP_STR_DUP(&metadata);
+ rktpar->metadata_size =
+ RD_KAFKAP_STR_LEN(&metadata);
+ }
+
+ /* Loose ref from get_toppar() */
+ if (rktp)
+ rd_kafka_toppar_destroy(rktp);
+ }
+
+ rd_kafka_buf_skip_tags(rkbuf);
+ }
+
+ if (ApiVersion >= 2) {
+ int16_t ErrorCode;
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ if (ErrorCode) {
+ err = ErrorCode;
+ goto err;
+ }
+ }
+
+
+err:
+ if (!*offsets)
+ rd_rkb_dbg(rkb, TOPIC, "OFFFETCH", "OffsetFetch returned %s",
+ rd_kafka_err2str(err));
+ else
+ rd_rkb_dbg(rkb, TOPIC, "OFFFETCH",
+ "OffsetFetch for %d/%d partition(s) "
+ "(%d unstable partition(s)) returned %s",
+ seen_cnt, (*offsets)->cnt, retry_unstable,
+ rd_kafka_err2str(err));
+
+ actions =
+ rd_kafka_err_action(rkb, err, request, RD_KAFKA_ERR_ACTION_END);
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ /* Re-query for coordinator */
+ rd_kafka_cgrp_op(rkb->rkb_rk->rk_cgrp, NULL, RD_KAFKA_NO_REPLYQ,
+ RD_KAFKA_OP_COORD_QUERY, err);
+ }
+
+ if (actions & RD_KAFKA_ERR_ACTION_RETRY || retry_unstable) {
+ if (allow_retry && rd_kafka_buf_retry(rkb, request))
+ return RD_KAFKA_RESP_ERR__IN_PROGRESS;
+ /* FALLTHRU */
+ }
+
+ return err;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+ goto err;
+}
+
+
+
+/**
+ * @brief Handle OffsetFetch response based on an RD_KAFKA_OP_OFFSET_FETCH
+ * rko in \p opaque.
+ *
+ * @param opaque rko wrapper for handle_OffsetFetch.
+ *
+ * The \c rko->rko_u.offset_fetch.partitions list will be filled in with
+ * the fetched offsets.
+ *
+ * A reply will be sent on 'rko->rko_replyq' with type RD_KAFKA_OP_OFFSET_FETCH.
+ *
+ * @remark \p rkb, \p rkbuf and \p request are optional.
+ *
+ * @remark The \p request buffer may be retried on error.
+ *
+ * @locality cgrp's broker thread
+ */
+void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_op_t *rko = opaque;
+ rd_kafka_op_t *rko_reply;
+ rd_kafka_topic_partition_list_t *offsets;
+
+ RD_KAFKA_OP_TYPE_ASSERT(rko, RD_KAFKA_OP_OFFSET_FETCH);
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ /* Termination, quick cleanup. */
+ rd_kafka_op_destroy(rko);
+ return;
+ }
+
+ offsets = rd_kafka_topic_partition_list_copy(
+ rko->rko_u.offset_fetch.partitions);
+
+ /* If all partitions already had usable offsets then there
+ * was no request sent and thus no reply, the offsets list is
+ * good to go.. */
+ if (rkbuf) {
+ /* ..else parse the response (or perror) */
+ err = rd_kafka_handle_OffsetFetch(
+ rkb->rkb_rk, rkb, err, rkbuf, request, &offsets,
+ rd_false /*dont update rktp*/, rd_false /*dont add part*/,
+ /* Allow retries if replyq is valid */
+ rd_kafka_op_replyq_is_valid(rko));
+ if (err == RD_KAFKA_RESP_ERR__IN_PROGRESS) {
+ if (offsets)
+ rd_kafka_topic_partition_list_destroy(offsets);
+ return; /* Retrying */
+ }
+ }
+
+ rko_reply =
+ rd_kafka_op_new(RD_KAFKA_OP_OFFSET_FETCH | RD_KAFKA_OP_REPLY);
+ rko_reply->rko_err = err;
+ rko_reply->rko_u.offset_fetch.partitions = offsets;
+ rko_reply->rko_u.offset_fetch.do_free = 1;
+ if (rko->rko_rktp)
+ rko_reply->rko_rktp = rd_kafka_toppar_keep(rko->rko_rktp);
+
+ rd_kafka_replyq_enq(&rko->rko_replyq, rko_reply, 0);
+
+ rd_kafka_op_destroy(rko);
+}
+
+/**
+ * Send OffsetFetchRequest for a consumer group id.
+ *
+ * Any partition with a usable offset will be ignored, if all partitions
+ * have usable offsets then no request is sent at all but an empty
+ * reply is enqueued on the replyq.
+ *
+ * @param group_id Request offset for this group id.
+ * @param parts (optional) List of topic partitions to request,
+ * or NULL to return all topic partitions associated with the
+ * group.
+ * @param require_stable_offsets Whether broker should return stable offsets
+ * (transaction-committed).
+ * @param timeout Optional timeout to set to the buffer.
+ */
+void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb,
+ const char *group_id,
+ rd_kafka_topic_partition_list_t *parts,
+ rd_bool_t require_stable_offsets,
+ int timeout,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion;
+ size_t parts_size = 0;
+ int PartCnt = -1;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_OffsetFetch, 0, 7, NULL);
+
+ if (parts) {
+ parts_size = parts->cnt * 32;
+ }
+
+ rkbuf = rd_kafka_buf_new_flexver_request(
+ rkb, RD_KAFKAP_OffsetFetch, 1,
+ /* GroupId + rd_kafka_buf_write_arraycnt_pos +
+ * Topics + RequireStable */
+ 32 + 4 + parts_size + 1, ApiVersion >= 6 /*flexver*/);
+
+ /* ConsumerGroup */
+ rd_kafka_buf_write_str(rkbuf, group_id, -1);
+
+ if (parts) {
+ /* Sort partitions by topic */
+ rd_kafka_topic_partition_list_sort_by_topic(parts);
+
+ /* Write partition list, filtering out partitions with valid
+ * offsets */
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ PartCnt = rd_kafka_buf_write_topic_partitions(
+ rkbuf, parts, rd_false /*include invalid offsets*/,
+ rd_false /*skip valid offsets */, fields);
+ } else {
+ rd_kafka_buf_write_arraycnt_pos(rkbuf);
+ }
+
+ if (ApiVersion >= 7) {
+ /* RequireStable */
+ rd_kafka_buf_write_i8(rkbuf, require_stable_offsets);
+ }
+
+ if (PartCnt == 0) {
+ /* No partitions needs OffsetFetch, enqueue empty
+ * response right away. */
+ rkbuf->rkbuf_replyq = replyq;
+ rkbuf->rkbuf_cb = resp_cb;
+ rkbuf->rkbuf_opaque = opaque;
+ rd_kafka_buf_callback(rkb->rkb_rk, rkb, 0, NULL, rkbuf);
+ return;
+ }
+
+ if (timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ if (parts) {
+ rd_rkb_dbg(
+ rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER,
+ "OFFSET",
+ "Group %s OffsetFetchRequest(v%d) for %d/%d partition(s)",
+ group_id, ApiVersion, PartCnt, parts->cnt);
+ } else {
+ rd_rkb_dbg(
+ rkb, TOPIC | RD_KAFKA_DBG_CGRP | RD_KAFKA_DBG_CONSUMER,
+ "OFFSET",
+ "Group %s OffsetFetchRequest(v%d) for all partitions",
+ group_id, ApiVersion);
+ }
+
+ /* Let handler decide if retries should be performed */
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
+
+ if (parts) {
+ rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET",
+ "Fetch committed offsets for %d/%d partition(s)",
+ PartCnt, parts->cnt);
+ } else {
+ rd_rkb_dbg(rkb, CGRP | RD_KAFKA_DBG_CONSUMER, "OFFSET",
+ "Fetch committed offsets all the partitions");
+ }
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+}
+
+
+
+/**
+ * @brief Handle per-partition OffsetCommit errors and returns actions flags.
+ */
+static int
+rd_kafka_handle_OffsetCommit_error(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *request,
+ const rd_kafka_topic_partition_t *rktpar) {
+
+ /* These actions are mimicking AK's ConsumerCoordinator.java */
+
+ return rd_kafka_err_action(
+ rkb, rktpar->err, request,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
+
+
+ RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
+
+
+ RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+
+ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+
+
+ /* .._SPECIAL: mark coordinator dead, refresh and retry */
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_SPECIAL,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_SPECIAL,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+
+ /* Replicas possibly unavailable:
+ * Refresh coordinator (but don't mark as dead (!.._SPECIAL)),
+ * and retry */
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+
+
+ /* FIXME: There are some cases in the Java code where
+ * this is not treated as a fatal error. */
+ RD_KAFKA_ERR_ACTION_PERMANENT | RD_KAFKA_ERR_ACTION_FATAL,
+ RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
+
+
+ RD_KAFKA_ERR_ACTION_PERMANENT,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+
+
+ RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+
+ RD_KAFKA_ERR_ACTION_END);
+}
+
+
+/**
+ * @brief Handle OffsetCommit response.
+ *
+ * @remark \p offsets may be NULL if \p err is set
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if all partitions were successfully
+ * committed,
+ * RD_KAFKA_RESP_ERR__IN_PROGRESS if a retry was scheduled,
+ * or any other error code if the request was not retried.
+ */
+rd_kafka_resp_err_t
+rd_kafka_handle_OffsetCommit(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t *offsets,
+ rd_bool_t ignore_cgrp) {
+ const int log_decode_errors = LOG_ERR;
+ int32_t TopicArrayCnt;
+ int errcnt = 0;
+ int partcnt = 0;
+ int i;
+ int actions = 0;
+
+ if (err)
+ goto err;
+
+ if (rd_kafka_buf_ApiVersion(rkbuf) >= 3)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
+ for (i = 0; i < TopicArrayCnt; i++) {
+ rd_kafkap_str_t topic;
+ char *topic_str;
+ int32_t PartArrayCnt;
+ int j;
+
+ rd_kafka_buf_read_str(rkbuf, &topic);
+ rd_kafka_buf_read_i32(rkbuf, &PartArrayCnt);
+
+ RD_KAFKAP_STR_DUPA(&topic_str, &topic);
+
+ for (j = 0; j < PartArrayCnt; j++) {
+ int32_t partition;
+ int16_t ErrorCode;
+ rd_kafka_topic_partition_t *rktpar;
+
+ rd_kafka_buf_read_i32(rkbuf, &partition);
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+ rktpar = rd_kafka_topic_partition_list_find(
+ offsets, topic_str, partition);
+
+ if (!rktpar) {
+ /* Received offset for topic/partition we didn't
+ * ask for, this shouldn't really happen. */
+ continue;
+ }
+
+ rktpar->err = ErrorCode;
+ if (ErrorCode) {
+ err = ErrorCode;
+ errcnt++;
+
+ /* Accumulate actions for per-partition
+ * errors. */
+ actions |= rd_kafka_handle_OffsetCommit_error(
+ rkb, request, rktpar);
+ }
+
+ partcnt++;
+ }
+ }
+
+ /* If all partitions failed use error code
+ * from last partition as the global error. */
+ if (offsets && err && errcnt == partcnt)
+ goto err;
+
+ goto done;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+
+err:
+ if (!actions) /* Transport/Request-level error */
+ actions = rd_kafka_err_action(rkb, err, request,
+
+ RD_KAFKA_ERR_ACTION_REFRESH |
+ RD_KAFKA_ERR_ACTION_SPECIAL |
+ RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR__TRANSPORT,
+
+ RD_KAFKA_ERR_ACTION_END);
+
+ if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_FATAL)) {
+ rd_kafka_set_fatal_error(rk, err, "OffsetCommit failed: %s",
+ rd_kafka_err2str(err));
+ return err;
+ }
+
+ if (!ignore_cgrp && (actions & RD_KAFKA_ERR_ACTION_REFRESH) &&
+ rk->rk_cgrp) {
+ /* Mark coordinator dead or re-query for coordinator.
+ * ..dead() will trigger a re-query. */
+ if (actions & RD_KAFKA_ERR_ACTION_SPECIAL)
+ rd_kafka_cgrp_coord_dead(rk->rk_cgrp, err,
+ "OffsetCommitRequest failed");
+ else
+ rd_kafka_cgrp_coord_query(rk->rk_cgrp,
+ "OffsetCommitRequest failed");
+ }
+
+ if (!ignore_cgrp && actions & RD_KAFKA_ERR_ACTION_RETRY &&
+ !(actions & RD_KAFKA_ERR_ACTION_PERMANENT) &&
+ rd_kafka_buf_retry(rkb, request))
+ return RD_KAFKA_RESP_ERR__IN_PROGRESS;
+
+done:
+ return err;
+}
+
+/**
+ * @brief Send OffsetCommitRequest for a list of partitions.
+ *
+ * @param cgmetadata consumer group metadata.
+ *
+ * @param offsets - offsets to commit for each topic-partition.
+ *
+ * @returns 0 if none of the partitions in \p offsets had valid offsets,
+ * else 1.
+ */
+int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_consumer_group_metadata_t *cgmetadata,
+ rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque,
+ const char *reason) {
+ rd_kafka_buf_t *rkbuf;
+ ssize_t of_TopicCnt = -1;
+ int TopicCnt = 0;
+ const char *last_topic = NULL;
+ ssize_t of_PartCnt = -1;
+ int PartCnt = 0;
+ int tot_PartCnt = 0;
+ int i;
+ int16_t ApiVersion;
+ int features;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_OffsetCommit, 0, 7, &features);
+
+ rd_kafka_assert(NULL, offsets != NULL);
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_OffsetCommit, 1,
+ 100 + (offsets->cnt * 128));
+
+ /* ConsumerGroup */
+ rd_kafka_buf_write_str(rkbuf, cgmetadata->group_id, -1);
+
+ /* v1,v2 */
+ if (ApiVersion >= 1) {
+ /* ConsumerGroupGenerationId */
+ rd_kafka_buf_write_i32(rkbuf, cgmetadata->generation_id);
+ /* ConsumerId */
+ rd_kafka_buf_write_str(rkbuf, cgmetadata->member_id, -1);
+ }
+
+ /* v7: GroupInstanceId */
+ if (ApiVersion >= 7)
+ rd_kafka_buf_write_str(rkbuf, cgmetadata->group_instance_id,
+ -1);
+
+ /* v2-4: RetentionTime */
+ if (ApiVersion >= 2 && ApiVersion <= 4)
+ rd_kafka_buf_write_i64(rkbuf, -1);
+
+ /* Sort offsets by topic */
+ rd_kafka_topic_partition_list_sort_by_topic(offsets);
+
+ /* TopicArrayCnt: Will be updated when we know the number of topics. */
+ of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0);
+
+ for (i = 0; i < offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar = &offsets->elems[i];
+
+ /* Skip partitions with invalid offset. */
+ if (rktpar->offset < 0)
+ continue;
+
+ if (last_topic == NULL || strcmp(last_topic, rktpar->topic)) {
+ /* New topic */
+
+ /* Finalize previous PartitionCnt */
+ if (PartCnt > 0)
+ rd_kafka_buf_update_u32(rkbuf, of_PartCnt,
+ PartCnt);
+
+ /* TopicName */
+ rd_kafka_buf_write_str(rkbuf, rktpar->topic, -1);
+ /* PartitionCnt, finalized later */
+ of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0);
+ PartCnt = 0;
+ last_topic = rktpar->topic;
+ TopicCnt++;
+ }
+
+ /* Partition */
+ rd_kafka_buf_write_i32(rkbuf, rktpar->partition);
+ PartCnt++;
+ tot_PartCnt++;
+
+ /* Offset */
+ rd_kafka_buf_write_i64(rkbuf, rktpar->offset);
+
+ /* v6: KIP-101 CommittedLeaderEpoch */
+ if (ApiVersion >= 6)
+ rd_kafka_buf_write_i32(
+ rkbuf,
+ rd_kafka_topic_partition_get_leader_epoch(rktpar));
+
+ /* v1: TimeStamp */
+ if (ApiVersion == 1)
+ rd_kafka_buf_write_i64(rkbuf, -1);
+
+ /* Metadata */
+ /* Java client 0.9.0 and broker <0.10.0 can't parse
+ * Null metadata fields, so as a workaround we send an
+ * empty string if it's Null. */
+ if (!rktpar->metadata)
+ rd_kafka_buf_write_str(rkbuf, "", 0);
+ else
+ rd_kafka_buf_write_str(rkbuf, rktpar->metadata,
+ rktpar->metadata_size);
+ }
+
+ if (tot_PartCnt == 0) {
+ /* No topic+partitions had valid offsets to commit. */
+ rd_kafka_replyq_destroy(&replyq);
+ rd_kafka_buf_destroy(rkbuf);
+ return 0;
+ }
+
+ /* Finalize previous PartitionCnt */
+ if (PartCnt > 0)
+ rd_kafka_buf_update_u32(rkbuf, of_PartCnt, PartCnt);
+
+ /* Finalize TopicCnt */
+ rd_kafka_buf_update_u32(rkbuf, of_TopicCnt, TopicCnt);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_rkb_dbg(rkb, TOPIC, "OFFSET",
+ "Enqueue OffsetCommitRequest(v%d, %d/%d partition(s))): %s",
+ ApiVersion, tot_PartCnt, offsets->cnt, reason);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return 1;
+}
+
+/**
+ * @brief Construct and send OffsetDeleteRequest to \p rkb
+ * with the partitions in del_grpoffsets (DeleteConsumerGroupOffsets_t*)
+ * using \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @remark Only one del_grpoffsets element is supported.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb,
+ /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */
+ const rd_list_t *del_grpoffsets,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int features;
+ const rd_kafka_DeleteConsumerGroupOffsets_t *grpoffsets =
+ rd_list_elem(del_grpoffsets, 0);
+
+ rd_assert(rd_list_cnt(del_grpoffsets) == 1);
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_OffsetDelete, 0, 0, &features);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "OffsetDelete API (KIP-496) not supported "
+ "by broker, requires broker version >= 2.4.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_request(
+ rkb, RD_KAFKAP_OffsetDelete, 1,
+ 2 + strlen(grpoffsets->group) + (64 * grpoffsets->partitions->cnt));
+
+ /* GroupId */
+ rd_kafka_buf_write_str(rkbuf, grpoffsets->group, -1);
+
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ rd_kafka_buf_write_topic_partitions(
+ rkbuf, grpoffsets->partitions,
+ rd_false /*dont skip invalid offsets*/, rd_false /*any offset*/,
+ fields);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Write "consumer" protocol type MemberState for SyncGroupRequest to
+ * enveloping buffer \p rkbuf.
+ */
+static void
+rd_kafka_group_MemberState_consumer_write(rd_kafka_buf_t *env_rkbuf,
+ const rd_kafka_group_member_t *rkgm) {
+ rd_kafka_buf_t *rkbuf;
+ rd_slice_t slice;
+
+ rkbuf = rd_kafka_buf_new(1, 100);
+ rd_kafka_buf_write_i16(rkbuf, 0); /* Version */
+ rd_assert(rkgm->rkgm_assignment);
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ rd_kafka_buf_write_topic_partitions(
+ rkbuf, rkgm->rkgm_assignment,
+ rd_false /*don't skip invalid offsets*/, rd_false /* any offset */,
+ fields);
+ rd_kafka_buf_write_kbytes(rkbuf, rkgm->rkgm_userdata);
+
+ /* Get pointer to binary buffer */
+ rd_slice_init_full(&slice, &rkbuf->rkbuf_buf);
+
+ /* Write binary buffer as Kafka Bytes to enveloping buffer. */
+ rd_kafka_buf_write_i32(env_rkbuf, (int32_t)rd_slice_remains(&slice));
+ rd_buf_write_slice(&env_rkbuf->rkbuf_buf, &slice);
+
+ rd_kafka_buf_destroy(rkbuf);
+}
+
+/**
+ * Send SyncGroupRequest
+ */
+void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb,
+ const rd_kafkap_str_t *group_id,
+ int32_t generation_id,
+ const rd_kafkap_str_t *member_id,
+ const rd_kafkap_str_t *group_instance_id,
+ const rd_kafka_group_member_t *assignments,
+ int assignment_cnt,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int i;
+ int16_t ApiVersion;
+ int features;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_SyncGroup, 0, 3, &features);
+
+ rkbuf = rd_kafka_buf_new_request(
+ rkb, RD_KAFKAP_SyncGroup, 1,
+ RD_KAFKAP_STR_SIZE(group_id) + 4 /* GenerationId */ +
+ RD_KAFKAP_STR_SIZE(member_id) +
+ RD_KAFKAP_STR_SIZE(group_instance_id) +
+ 4 /* array size group_assignment */ +
+ (assignment_cnt * 100 /*guess*/));
+ rd_kafka_buf_write_kstr(rkbuf, group_id);
+ rd_kafka_buf_write_i32(rkbuf, generation_id);
+ rd_kafka_buf_write_kstr(rkbuf, member_id);
+ if (ApiVersion >= 3)
+ rd_kafka_buf_write_kstr(rkbuf, group_instance_id);
+ rd_kafka_buf_write_i32(rkbuf, assignment_cnt);
+
+ for (i = 0; i < assignment_cnt; i++) {
+ const rd_kafka_group_member_t *rkgm = &assignments[i];
+
+ rd_kafka_buf_write_kstr(rkbuf, rkgm->rkgm_member_id);
+ rd_kafka_group_MemberState_consumer_write(rkbuf, rkgm);
+ }
+
+ /* This is a blocking request */
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
+ rd_kafka_buf_set_abs_timeout(
+ rkbuf,
+ rkb->rkb_rk->rk_conf.group_session_timeout_ms +
+ 3000 /* 3s grace period*/,
+ 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+}
+
+
+
+/**
+ * Send JoinGroupRequest
+ */
+void rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb,
+ const rd_kafkap_str_t *group_id,
+ const rd_kafkap_str_t *member_id,
+ const rd_kafkap_str_t *group_instance_id,
+ const rd_kafkap_str_t *protocol_type,
+ const rd_list_t *topics,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_assignor_t *rkas;
+ int i;
+ int16_t ApiVersion = 0;
+ int features;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_JoinGroup, 0, 5, &features);
+
+
+ rkbuf = rd_kafka_buf_new_request(
+ rkb, RD_KAFKAP_JoinGroup, 1,
+ RD_KAFKAP_STR_SIZE(group_id) + 4 /* sessionTimeoutMs */ +
+ 4 /* rebalanceTimeoutMs */ + RD_KAFKAP_STR_SIZE(member_id) +
+ RD_KAFKAP_STR_SIZE(group_instance_id) +
+ RD_KAFKAP_STR_SIZE(protocol_type) +
+ 4 /* array count GroupProtocols */ +
+ (rd_list_cnt(topics) * 100));
+ rd_kafka_buf_write_kstr(rkbuf, group_id);
+ rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.group_session_timeout_ms);
+ if (ApiVersion >= 1)
+ rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.max_poll_interval_ms);
+ rd_kafka_buf_write_kstr(rkbuf, member_id);
+ if (ApiVersion >= 5)
+ rd_kafka_buf_write_kstr(rkbuf, group_instance_id);
+ rd_kafka_buf_write_kstr(rkbuf, protocol_type);
+ rd_kafka_buf_write_i32(rkbuf, rk->rk_conf.enabled_assignor_cnt);
+
+ RD_LIST_FOREACH(rkas, &rk->rk_conf.partition_assignors, i) {
+ rd_kafkap_bytes_t *member_metadata;
+ if (!rkas->rkas_enabled)
+ continue;
+ rd_kafka_buf_write_kstr(rkbuf, rkas->rkas_protocol_name);
+ member_metadata = rkas->rkas_get_metadata_cb(
+ rkas, rk->rk_cgrp->rkcg_assignor_state, topics,
+ rk->rk_cgrp->rkcg_group_assignment);
+ rd_kafka_buf_write_kbytes(rkbuf, member_metadata);
+ rd_kafkap_bytes_destroy(member_metadata);
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ if (ApiVersion < 1 &&
+ rk->rk_conf.max_poll_interval_ms >
+ rk->rk_conf.group_session_timeout_ms &&
+ rd_interval(&rkb->rkb_suppress.unsupported_kip62,
+ /* at most once per day */
+ (rd_ts_t)86400 * 1000 * 1000, 0) > 0)
+ rd_rkb_log(rkb, LOG_NOTICE, "MAXPOLL",
+ "Broker does not support KIP-62 "
+ "(requires Apache Kafka >= v0.10.1.0): "
+ "consumer configuration "
+ "`max.poll.interval.ms` (%d) "
+ "is effectively limited "
+ "by `session.timeout.ms` (%d) "
+ "with this broker version",
+ rk->rk_conf.max_poll_interval_ms,
+ rk->rk_conf.group_session_timeout_ms);
+
+
+ if (ApiVersion < 5 && rk->rk_conf.group_instance_id &&
+ rd_interval(&rkb->rkb_suppress.unsupported_kip345,
+ /* at most once per day */
+ (rd_ts_t)86400 * 1000 * 1000, 0) > 0)
+ rd_rkb_log(rkb, LOG_NOTICE, "STATICMEMBER",
+ "Broker does not support KIP-345 "
+ "(requires Apache Kafka >= v2.3.0): "
+ "consumer configuration "
+ "`group.instance.id` (%s) "
+ "will not take effect",
+ rk->rk_conf.group_instance_id);
+
+ /* Absolute timeout */
+ rd_kafka_buf_set_abs_timeout_force(
+ rkbuf,
+ /* Request timeout is max.poll.interval.ms + grace
+ * if the broker supports it, else
+ * session.timeout.ms + grace. */
+ (ApiVersion >= 1 ? rk->rk_conf.max_poll_interval_ms
+ : rk->rk_conf.group_session_timeout_ms) +
+ 3000 /* 3s grace period*/,
+ 0);
+
+ /* This is a blocking request */
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_BLOCKING;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+}
+
+
+
+/**
+ * Send LeaveGroupRequest
+ */
+void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb,
+ const char *group_id,
+ const char *member_id,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int features;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_LeaveGroup, 0, 1, &features);
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_LeaveGroup, 1, 300);
+
+ rd_kafka_buf_write_str(rkbuf, group_id, -1);
+ rd_kafka_buf_write_str(rkbuf, member_id, -1);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ /* LeaveGroupRequests are best-effort, the local consumer
+ * does not care if it succeeds or not, so the request timeout
+ * is shortened.
+ * Retries are not needed. */
+ rd_kafka_buf_set_abs_timeout(rkbuf, 5000, 0);
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+}
+
+
+/**
+ * Handler for LeaveGroup responses
+ * opaque must be the cgrp handle.
+ */
+void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_cgrp_t *rkcg = opaque;
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode = 0;
+ int actions;
+
+ if (err) {
+ ErrorCode = err;
+ goto err;
+ }
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+err:
+ actions = rd_kafka_err_action(rkb, ErrorCode, request,
+ RD_KAFKA_ERR_ACTION_END);
+
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ /* Re-query for coordinator */
+ rd_kafka_cgrp_op(rkcg, NULL, RD_KAFKA_NO_REPLYQ,
+ RD_KAFKA_OP_COORD_QUERY, ErrorCode);
+ }
+
+ if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+ if (rd_kafka_buf_retry(rkb, request))
+ return;
+ /* FALLTHRU */
+ }
+
+ if (ErrorCode)
+ rd_kafka_dbg(rkb->rkb_rk, CGRP, "LEAVEGROUP",
+ "LeaveGroup response: %s",
+ rd_kafka_err2str(ErrorCode));
+
+ return;
+
+err_parse:
+ ErrorCode = rkbuf->rkbuf_err;
+ goto err;
+}
+
+
+
+/**
+ * Send HeartbeatRequest
+ */
+void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb,
+ const rd_kafkap_str_t *group_id,
+ int32_t generation_id,
+ const rd_kafkap_str_t *member_id,
+ const rd_kafkap_str_t *group_instance_id,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int features;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_Heartbeat, 0, 3, &features);
+
+ rd_rkb_dbg(rkb, CGRP, "HEARTBEAT",
+ "Heartbeat for group \"%s\" generation id %" PRId32,
+ group_id->str, generation_id);
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_Heartbeat, 1,
+ RD_KAFKAP_STR_SIZE(group_id) +
+ 4 /* GenerationId */ +
+ RD_KAFKAP_STR_SIZE(member_id));
+
+ rd_kafka_buf_write_kstr(rkbuf, group_id);
+ rd_kafka_buf_write_i32(rkbuf, generation_id);
+ rd_kafka_buf_write_kstr(rkbuf, member_id);
+ if (ApiVersion >= 3)
+ rd_kafka_buf_write_kstr(rkbuf, group_instance_id);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_buf_set_abs_timeout(
+ rkbuf, rkb->rkb_rk->rk_conf.group_session_timeout_ms, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+}
+
+
+
+/**
+ * @brief Construct and send ListGroupsRequest to \p rkb
+ * with the states (const char *) in \p states.
+ * Uses \p max_ApiVersion as maximum API version,
+ * pass -1 to use the maximum available version.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @return NULL on success, a new error instance that must be
+ * released with rd_kafka_error_destroy() in case of error.
+ */
+rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb,
+ int16_t max_ApiVersion,
+ const char **states,
+ size_t states_cnt,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ size_t i;
+
+ if (max_ApiVersion < 0)
+ max_ApiVersion = 4;
+
+ if (max_ApiVersion > ApiVersion) {
+ /* Remark: don't check if max_ApiVersion is zero.
+ * As rd_kafka_broker_ApiVersion_supported cannot be checked
+ * in the application thread reliably . */
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_ListGroups, 0, max_ApiVersion, NULL);
+ }
+
+ if (ApiVersion == -1) {
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "ListGroupsRequest not supported by broker");
+ }
+
+ rkbuf = rd_kafka_buf_new_flexver_request(
+ rkb, RD_KAFKAP_ListGroups, 1,
+ /* rd_kafka_buf_write_arraycnt_pos + tags + StatesFilter */
+ 4 + 1 + 32 * states_cnt, ApiVersion >= 3 /* is_flexver */);
+
+ if (ApiVersion >= 4) {
+ size_t of_GroupsArrayCnt =
+ rd_kafka_buf_write_arraycnt_pos(rkbuf);
+ for (i = 0; i < states_cnt; i++) {
+ rd_kafka_buf_write_str(rkbuf, states[i], -1);
+ }
+ rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, i);
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+ return NULL;
+}
+
+/**
+ * @brief Construct and send DescribeGroupsRequest to \p rkb
+ * with the groups (const char *) in \p groups.
+ * Uses \p max_ApiVersion as maximum API version,
+ * pass -1 to use the maximum available version.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @return NULL on success, a new error instance that must be
+ * released with rd_kafka_error_destroy() in case of error.
+ */
+rd_kafka_error_t *rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb,
+ int16_t max_ApiVersion,
+ char **groups,
+ size_t group_cnt,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ size_t of_GroupsArrayCnt;
+
+ if (max_ApiVersion < 0)
+ max_ApiVersion = 4;
+
+ if (max_ApiVersion > ApiVersion) {
+ /* Remark: don't check if max_ApiVersion is zero.
+ * As rd_kafka_broker_ApiVersion_supported cannot be checked
+ * in the application thread reliably . */
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DescribeGroups, 0, max_ApiVersion, NULL);
+ }
+
+ if (ApiVersion == -1) {
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "DescribeGroupsRequest not supported by broker");
+ }
+
+ rkbuf = rd_kafka_buf_new_flexver_request(
+ rkb, RD_KAFKAP_DescribeGroups, 1,
+ 4 /* rd_kafka_buf_write_arraycnt_pos */ +
+ 1 /* IncludeAuthorizedOperations */ + 1 /* tags */ +
+ 32 * group_cnt /* Groups */,
+ rd_false);
+
+ /* write Groups */
+ of_GroupsArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf);
+ rd_kafka_buf_finalize_arraycnt(rkbuf, of_GroupsArrayCnt, group_cnt);
+ while (group_cnt-- > 0)
+ rd_kafka_buf_write_str(rkbuf, groups[group_cnt], -1);
+
+ /* write IncludeAuthorizedOperations */
+ if (ApiVersion >= 3) {
+ /* TODO: implement KIP-430 */
+ rd_kafka_buf_write_bool(rkbuf, rd_false);
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+ return NULL;
+}
+
+/**
+ * @brief Generic handler for Metadata responses
+ *
+ * @locality rdkafka main thread
+ */
+static void rd_kafka_handle_Metadata(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_op_t *rko = opaque; /* Possibly NULL */
+ struct rd_kafka_metadata *md = NULL;
+ const rd_list_t *topics = request->rkbuf_u.Metadata.topics;
+ int actions;
+
+ rd_kafka_assert(NULL, err == RD_KAFKA_RESP_ERR__DESTROY ||
+ thrd_is_current(rk->rk_thread));
+
+ /* Avoid metadata updates when we're terminating. */
+ if (rd_kafka_terminating(rkb->rkb_rk) ||
+ err == RD_KAFKA_RESP_ERR__DESTROY) {
+ /* Terminating */
+ goto done;
+ }
+
+ if (err)
+ goto err;
+
+ if (!topics)
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "===== Received metadata: %s =====",
+ request->rkbuf_u.Metadata.reason);
+ else
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "===== Received metadata "
+ "(for %d requested topics): %s =====",
+ rd_list_cnt(topics),
+ request->rkbuf_u.Metadata.reason);
+
+ err = rd_kafka_parse_Metadata(rkb, request, rkbuf, &md);
+ if (err)
+ goto err;
+
+ if (rko && rko->rko_replyq.q) {
+ /* Reply to metadata requester, passing on the metadata.
+ * Reuse requesting rko for the reply. */
+ rko->rko_err = err;
+ rko->rko_u.metadata.md = md;
+
+ rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
+ rko = NULL;
+ } else {
+ if (md)
+ rd_free(md);
+ }
+
+ goto done;
+
+err:
+ actions = rd_kafka_err_action(rkb, err, request,
+
+ RD_KAFKA_ERR_ACTION_RETRY,
+ RD_KAFKA_RESP_ERR__PARTIAL,
+
+ RD_KAFKA_ERR_ACTION_END);
+
+ if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+ if (rd_kafka_buf_retry(rkb, request))
+ return;
+ /* FALLTHRU */
+ } else {
+ rd_rkb_log(rkb, LOG_WARNING, "METADATA",
+ "Metadata request failed: %s: %s (%dms): %s",
+ request->rkbuf_u.Metadata.reason,
+ rd_kafka_err2str(err),
+ (int)(request->rkbuf_ts_sent / 1000),
+ rd_kafka_actions2str(actions));
+ /* Respond back to caller on non-retriable errors */
+ if (rko && rko->rko_replyq.q) {
+ rko->rko_err = err;
+ rko->rko_u.metadata.md = NULL;
+ rd_kafka_replyq_enq(&rko->rko_replyq, rko, 0);
+ rko = NULL;
+ }
+ }
+
+
+
+ /* FALLTHRU */
+
+done:
+ if (rko)
+ rd_kafka_op_destroy(rko);
+}
+
+
+/**
+ * @brief Construct MetadataRequest (does not send)
+ *
+ * \p topics is a list of topic names (char *) to request.
+ *
+ * !topics - only request brokers (if supported by broker, else
+ * all topics)
+ * topics.cnt==0 - all topics in cluster are requested
+ * topics.cnt >0 - only specified topics are requested
+ *
+ * @param reason - metadata request reason
+ * @param allow_auto_create_topics - allow broker-side auto topic creation.
+ * This is best-effort, depending on broker
+ * config and version.
+ * @param cgrp_update - Update cgrp in parse_Metadata (see comment there).
+ * @param rko - (optional) rko with replyq for handling response.
+ * Specifying an rko forces a metadata request even if
+ * there is already a matching one in-transit.
+ *
+ * If full metadata for all topics is requested (or all brokers, which
+ * results in all-topics on older brokers) and there is already a full request
+ * in transit then this function will return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS
+ * otherwise RD_KAFKA_RESP_ERR_NO_ERROR. If \p rko is non-NULL the request
+ * is sent regardless.
+ */
+rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *topics,
+ const char *reason,
+ rd_bool_t allow_auto_create_topics,
+ rd_bool_t cgrp_update,
+ rd_kafka_op_t *rko) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ size_t of_TopicArrayCnt;
+ int features;
+ int topic_cnt = topics ? rd_list_cnt(topics) : 0;
+ int *full_incr = NULL;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_Metadata, 0, 9, &features);
+
+ rkbuf = rd_kafka_buf_new_flexver_request(rkb, RD_KAFKAP_Metadata, 1,
+ 4 + (50 * topic_cnt) + 1,
+ ApiVersion >= 9);
+
+ if (!reason)
+ reason = "";
+
+ rkbuf->rkbuf_u.Metadata.reason = rd_strdup(reason);
+ rkbuf->rkbuf_u.Metadata.cgrp_update = cgrp_update;
+
+ /* TopicArrayCnt */
+ of_TopicArrayCnt = rd_kafka_buf_write_arraycnt_pos(rkbuf);
+
+ if (!topics) {
+ /* v0: keep 0, brokers only not available,
+ * request all topics */
+ /* v1-8: 0 means empty array, brokers only */
+ if (ApiVersion >= 9) {
+ /* v9+: varint encoded empty array (1), brokers only */
+ rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt,
+ topic_cnt);
+ }
+
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "Request metadata for brokers only: %s", reason);
+ full_incr =
+ &rkb->rkb_rk->rk_metadata_cache.rkmc_full_brokers_sent;
+
+ } else if (topic_cnt == 0) {
+ /* v0: keep 0, request all topics */
+ if (ApiVersion >= 1 && ApiVersion < 9) {
+ /* v1-8: update to -1, all topics */
+ rd_kafka_buf_update_i32(rkbuf, of_TopicArrayCnt, -1);
+ }
+ /* v9+: keep 0, varint encoded null, all topics */
+
+ rkbuf->rkbuf_u.Metadata.all_topics = 1;
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "Request metadata for all topics: "
+ "%s",
+ reason);
+
+ if (!rko)
+ full_incr = &rkb->rkb_rk->rk_metadata_cache
+ .rkmc_full_topics_sent;
+
+ } else {
+ /* request cnt topics */
+ rd_kafka_buf_finalize_arraycnt(rkbuf, of_TopicArrayCnt,
+ topic_cnt);
+
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "Request metadata for %d topic(s): "
+ "%s",
+ topic_cnt, reason);
+ }
+
+ if (full_incr) {
+ /* Avoid multiple outstanding full requests
+ * (since they are redundant and side-effect-less).
+ * Forced requests (app using metadata() API) are passed
+ * through regardless. */
+
+ mtx_lock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
+ if (*full_incr > 0 && (!rko || !rko->rko_u.metadata.force)) {
+ mtx_unlock(
+ &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
+ rd_rkb_dbg(rkb, METADATA, "METADATA",
+ "Skipping metadata request: %s: "
+ "full request already in-transit",
+ reason);
+ rd_kafka_buf_destroy(rkbuf);
+ return RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS;
+ }
+
+ (*full_incr)++;
+ mtx_unlock(&rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock);
+ rkbuf->rkbuf_u.Metadata.decr = full_incr;
+ rkbuf->rkbuf_u.Metadata.decr_lock =
+ &rkb->rkb_rk->rk_metadata_cache.rkmc_full_lock;
+ }
+
+
+ if (topic_cnt > 0) {
+ char *topic;
+ int i;
+
+ /* Maintain a copy of the topics list so we can purge
+ * hints from the metadata cache on error. */
+ rkbuf->rkbuf_u.Metadata.topics =
+ rd_list_copy(topics, rd_list_string_copy, NULL);
+
+ RD_LIST_FOREACH(topic, topics, i) {
+ rd_kafka_buf_write_str(rkbuf, topic, -1);
+ /* Tags for previous topic */
+ rd_kafka_buf_write_tags(rkbuf);
+ }
+ }
+
+ if (ApiVersion >= 4) {
+ /* AllowAutoTopicCreation */
+ rd_kafka_buf_write_bool(rkbuf, allow_auto_create_topics);
+
+ } else if (rkb->rkb_rk->rk_type == RD_KAFKA_CONSUMER &&
+ !rkb->rkb_rk->rk_conf.allow_auto_create_topics &&
+ rd_kafka_conf_is_modified(&rkb->rkb_rk->rk_conf,
+ "allow.auto.create.topics") &&
+ rd_interval(
+ &rkb->rkb_rk->rk_suppress.allow_auto_create_topics,
+ 30 * 60 * 1000 /* every 30 minutes */, 0) >= 0) {
+ /* Let user know we can't obey allow.auto.create.topics */
+ rd_rkb_log(rkb, LOG_WARNING, "AUTOCREATE",
+ "allow.auto.create.topics=false not supported "
+ "by broker: requires broker version >= 0.11.0.0: "
+ "requested topic(s) may be auto created depending "
+ "on broker auto.create.topics.enable configuration");
+ }
+
+ if (ApiVersion >= 8 && ApiVersion < 10) {
+ /* TODO: implement KIP-430 */
+ /* IncludeClusterAuthorizedOperations */
+ rd_kafka_buf_write_bool(rkbuf, rd_false);
+ }
+
+ if (ApiVersion >= 8) {
+ /* TODO: implement KIP-430 */
+ /* IncludeTopicAuthorizedOperations */
+ rd_kafka_buf_write_bool(rkbuf, rd_false);
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ /* Metadata requests are part of the important control plane
+ * and should go before most other requests (Produce, Fetch, etc). */
+ rkbuf->rkbuf_prio = RD_KAFKA_PRIO_HIGH;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf,
+ /* Handle response thru rk_ops,
+ * but forward parsed result to
+ * rko's replyq when done. */
+ RD_KAFKA_REPLYQ(rkb->rkb_rk->rk_ops, 0),
+ rd_kafka_handle_Metadata, rko);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Parses and handles ApiVersion reply.
+ *
+ * @param apis will be allocated, populated and sorted
+ * with broker's supported APIs, or set to NULL.
+ * @param api_cnt will be set to the number of elements in \p *apis
+ *
+ * @returns 0 on success, else an error.
+ *
+ * @remark A valid \p apis might be returned even if an error is returned.
+ */
+rd_kafka_resp_err_t
+rd_kafka_handle_ApiVersion(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ struct rd_kafka_ApiVersion **apis,
+ size_t *api_cnt) {
+ const int log_decode_errors = LOG_DEBUG;
+ int32_t ApiArrayCnt;
+ int16_t ErrorCode;
+ int i = 0;
+
+ *apis = NULL;
+ *api_cnt = 0;
+
+ if (err)
+ goto err;
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ err = ErrorCode;
+
+ rd_kafka_buf_read_arraycnt(rkbuf, &ApiArrayCnt, 1000);
+ if (err && ApiArrayCnt < 1) {
+ /* Version >=3 returns the ApiVersions array if the error
+ * code is ERR_UNSUPPORTED_VERSION, previous versions don't */
+ goto err;
+ }
+
+ rd_rkb_dbg(rkb, FEATURE, "APIVERSION", "Broker API support:");
+
+ *apis = rd_malloc(sizeof(**apis) * ApiArrayCnt);
+
+ for (i = 0; i < ApiArrayCnt; i++) {
+ struct rd_kafka_ApiVersion *api = &(*apis)[i];
+
+ rd_kafka_buf_read_i16(rkbuf, &api->ApiKey);
+ rd_kafka_buf_read_i16(rkbuf, &api->MinVer);
+ rd_kafka_buf_read_i16(rkbuf, &api->MaxVer);
+
+ rd_rkb_dbg(rkb, FEATURE, "APIVERSION",
+ " ApiKey %s (%hd) Versions %hd..%hd",
+ rd_kafka_ApiKey2str(api->ApiKey), api->ApiKey,
+ api->MinVer, api->MaxVer);
+
+ /* Discard struct tags */
+ rd_kafka_buf_skip_tags(rkbuf);
+ }
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ /* Discard end tags */
+ rd_kafka_buf_skip_tags(rkbuf);
+
+ *api_cnt = ApiArrayCnt;
+ qsort(*apis, *api_cnt, sizeof(**apis), rd_kafka_ApiVersion_key_cmp);
+
+ goto done;
+
+err_parse:
+ /* If the broker does not support our ApiVersionRequest version it
+ * will respond with a version 0 response, which will most likely
+ * fail parsing. Instead of propagating the parse error we
+ * propagate the original error, unless there isn't one in which case
+ * we use the parse error. */
+ if (!err)
+ err = rkbuf->rkbuf_err;
+err:
+ /* There are no retryable errors. */
+
+ if (*apis)
+ rd_free(*apis);
+
+ *apis = NULL;
+ *api_cnt = 0;
+
+done:
+ return err;
+}
+
+
+
+/**
+ * @brief Send ApiVersionRequest (KIP-35)
+ *
+ * @param ApiVersion If -1 use the highest supported version, else use the
+ * specified value.
+ */
+void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb,
+ int16_t ApiVersion,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+
+ if (ApiVersion == -1)
+ ApiVersion = 3;
+
+ rkbuf = rd_kafka_buf_new_flexver_request(
+ rkb, RD_KAFKAP_ApiVersion, 1, 3, ApiVersion >= 3 /*flexver*/);
+
+ if (ApiVersion >= 3) {
+ /* KIP-511 adds software name and version through the optional
+ * protocol fields defined in KIP-482. */
+
+ /* ClientSoftwareName */
+ rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_name, -1);
+
+ /* ClientSoftwareVersion */
+ rd_kafka_buf_write_str(rkbuf, rkb->rkb_rk->rk_conf.sw_version,
+ -1);
+ }
+
+ /* Should be sent before any other requests since it is part of
+ * the initial connection handshake. */
+ rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;
+
+ /* Non-supporting brokers will tear down the connection when they
+ * receive an unknown API request, so dont retry request on failure. */
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
+
+ /* 0.9.0.x brokers will not close the connection on unsupported
+ * API requests, so we minimize the timeout for the request.
+ * This is a regression on the broker part. */
+ rd_kafka_buf_set_abs_timeout(
+ rkbuf, rkb->rkb_rk->rk_conf.api_version_request_timeout_ms, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ if (replyq.q)
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
+ opaque);
+ else /* in broker thread */
+ rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
+}
+
+
+/**
+ * Send SaslHandshakeRequest (KIP-43)
+ */
+void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb,
+ const char *mechanism,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int mechlen = (int)strlen(mechanism);
+ int16_t ApiVersion;
+ int features;
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslHandshake, 1,
+ RD_KAFKAP_STR_SIZE0(mechlen));
+
+ /* Should be sent before any other requests since it is part of
+ * the initial connection handshake. */
+ rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;
+
+ rd_kafka_buf_write_str(rkbuf, mechanism, mechlen);
+
+ /* Non-supporting brokers will tear down the conneciton when they
+ * receive an unknown API request or where the SASL GSSAPI
+ * token type is not recognized, so dont retry request on failure. */
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
+
+ /* 0.9.0.x brokers will not close the connection on unsupported
+ * API requests, so we minimize the timeout of the request.
+ * This is a regression on the broker part. */
+ if (!rkb->rkb_rk->rk_conf.api_version_request &&
+ rkb->rkb_rk->rk_conf.socket_timeout_ms > 10 * 1000)
+ rd_kafka_buf_set_abs_timeout(rkbuf, 10 * 1000 /*10s*/, 0);
+
+ /* ApiVersion 1 / RD_KAFKA_FEATURE_SASL_REQ enables
+ * the SaslAuthenticateRequest */
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_SaslHandshake, 0, 1, &features);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ if (replyq.q)
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
+ opaque);
+ else /* in broker thread */
+ rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
+}
+
+
+/**
+ * @brief Parses and handles an SaslAuthenticate reply.
+ *
+ * @returns 0 on success, else an error.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ int16_t error_code;
+ rd_kafkap_str_t error_str;
+ rd_kafkap_bytes_t auth_data;
+ char errstr[512];
+
+ if (err) {
+ rd_snprintf(errstr, sizeof(errstr),
+ "SaslAuthenticateRequest failed: %s",
+ rd_kafka_err2str(err));
+ goto err;
+ }
+
+ rd_kafka_buf_read_i16(rkbuf, &error_code);
+ rd_kafka_buf_read_str(rkbuf, &error_str);
+
+ if (error_code) {
+ /* Authentication failed */
+
+ /* For backwards compatibility translate the
+ * new broker-side auth error code to our local error code. */
+ if (error_code == RD_KAFKA_RESP_ERR_SASL_AUTHENTICATION_FAILED)
+ err = RD_KAFKA_RESP_ERR__AUTHENTICATION;
+ else
+ err = error_code;
+
+ rd_snprintf(errstr, sizeof(errstr), "%.*s",
+ RD_KAFKAP_STR_PR(&error_str));
+ goto err;
+ }
+
+ rd_kafka_buf_read_bytes(rkbuf, &auth_data);
+
+ /* Pass SASL auth frame to SASL handler */
+ if (rd_kafka_sasl_recv(rkb->rkb_transport, auth_data.data,
+ (size_t)RD_KAFKAP_BYTES_LEN(&auth_data), errstr,
+ sizeof(errstr)) == -1) {
+ err = RD_KAFKA_RESP_ERR__AUTHENTICATION;
+ goto err;
+ }
+
+ return;
+
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+ rd_snprintf(errstr, sizeof(errstr),
+ "SaslAuthenticateResponse parsing failed: %s",
+ rd_kafka_err2str(err));
+
+err:
+ rd_kafka_broker_fail(rkb, LOG_ERR, err, "SASL authentication error: %s",
+ errstr);
+}
+
+
+/**
+ * @brief Send SaslAuthenticateRequest (KIP-152)
+ */
+void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb,
+ const void *buf,
+ size_t size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_SaslAuthenticate, 0, 0);
+
+ /* Should be sent before any other requests since it is part of
+ * the initial connection handshake. */
+ rkbuf->rkbuf_prio = RD_KAFKA_PRIO_FLASH;
+
+ /* Broker does not support -1 (Null) for this field */
+ rd_kafka_buf_write_bytes(rkbuf, buf ? buf : "", size);
+
+ /* There are no errors that can be retried, instead
+ * close down the connection and reconnect on failure. */
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
+
+ if (replyq.q)
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
+ opaque);
+ else /* in broker thread */
+ rd_kafka_broker_buf_enq1(rkb, rkbuf, resp_cb, opaque);
+}
+
+
+
+/**
+ * @struct Hold temporary result and return values from ProduceResponse
+ */
+struct rd_kafka_Produce_result {
+ int64_t offset; /**< Assigned offset of first message */
+ int64_t timestamp; /**< (Possibly assigned) offset of first message */
+};
+
+/**
+ * @brief Parses a Produce reply.
+ * @returns 0 on success or an error code on failure.
+ * @locality broker thread
+ */
+static rd_kafka_resp_err_t
+rd_kafka_handle_Produce_parse(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ struct rd_kafka_Produce_result *result) {
+ int32_t TopicArrayCnt;
+ int32_t PartitionArrayCnt;
+ struct {
+ int32_t Partition;
+ int16_t ErrorCode;
+ int64_t Offset;
+ } hdr;
+ const int log_decode_errors = LOG_ERR;
+ int64_t log_start_offset = -1;
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicArrayCnt);
+ if (TopicArrayCnt != 1)
+ goto err;
+
+ /* Since we only produce to one single topic+partition in each
+ * request we assume that the reply only contains one topic+partition
+ * and that it is the same that we requested.
+ * If not the broker is buggy. */
+ rd_kafka_buf_skip_str(rkbuf);
+ rd_kafka_buf_read_i32(rkbuf, &PartitionArrayCnt);
+
+ if (PartitionArrayCnt != 1)
+ goto err;
+
+ rd_kafka_buf_read_i32(rkbuf, &hdr.Partition);
+ rd_kafka_buf_read_i16(rkbuf, &hdr.ErrorCode);
+ rd_kafka_buf_read_i64(rkbuf, &hdr.Offset);
+
+ result->offset = hdr.Offset;
+
+ result->timestamp = -1;
+ if (request->rkbuf_reqhdr.ApiVersion >= 2)
+ rd_kafka_buf_read_i64(rkbuf, &result->timestamp);
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 5)
+ rd_kafka_buf_read_i64(rkbuf, &log_start_offset);
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1) {
+ int32_t Throttle_Time;
+ rd_kafka_buf_read_i32(rkbuf, &Throttle_Time);
+
+ rd_kafka_op_throttle_time(rkb, rkb->rkb_rk->rk_rep,
+ Throttle_Time);
+ }
+
+
+ return hdr.ErrorCode;
+
+err_parse:
+ return rkbuf->rkbuf_err;
+err:
+ return RD_KAFKA_RESP_ERR__BAD_MSG;
+}
+
+
+/**
+ * @struct Hold temporary Produce error state
+ */
+struct rd_kafka_Produce_err {
+ rd_kafka_resp_err_t err; /**< Error code */
+ int actions; /**< Actions to take */
+ int incr_retry; /**< Increase per-message retry cnt */
+ rd_kafka_msg_status_t status; /**< Messages persistence status */
+
+ /* Idempotent Producer */
+ int32_t next_ack_seq; /**< Next expected sequence to ack */
+ int32_t next_err_seq; /**< Next expected error sequence */
+ rd_bool_t update_next_ack; /**< Update next_ack_seq */
+ rd_bool_t update_next_err; /**< Update next_err_seq */
+ rd_kafka_pid_t rktp_pid; /**< Partition's current PID */
+ int32_t last_seq; /**< Last sequence in current batch */
+};
+
+
+/**
+ * @brief Error-handling for Idempotent Producer-specific Produce errors.
+ *
+ * May update \p errp, \p actionsp and \p incr_retryp.
+ *
+ * The resulting \p actionsp are handled by the caller.
+ *
+ * @warning May be called on the old leader thread. Lock rktp appropriately!
+ *
+ * @locality broker thread (but not necessarily the leader broker)
+ * @locks none
+ */
+static void
+rd_kafka_handle_idempotent_Produce_error(rd_kafka_broker_t *rkb,
+ rd_kafka_msgbatch_t *batch,
+ struct rd_kafka_Produce_err *perr) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_toppar_t *rktp = batch->rktp;
+ rd_kafka_msg_t *firstmsg, *lastmsg;
+ int r;
+ rd_ts_t now = rd_clock(), state_age;
+ struct rd_kafka_toppar_err last_err;
+
+ rd_kafka_rdlock(rkb->rkb_rk);
+ state_age = now - rkb->rkb_rk->rk_eos.ts_idemp_state;
+ rd_kafka_rdunlock(rkb->rkb_rk);
+
+ firstmsg = rd_kafka_msgq_first(&batch->msgq);
+ lastmsg = rd_kafka_msgq_last(&batch->msgq);
+ rd_assert(firstmsg && lastmsg);
+
+ /* Store the last msgid of the batch
+ * on the first message in case we need to retry
+ * and thus reconstruct the entire batch. */
+ if (firstmsg->rkm_u.producer.last_msgid) {
+ /* last_msgid already set, make sure it
+ * actually points to the last message. */
+ rd_assert(firstmsg->rkm_u.producer.last_msgid ==
+ lastmsg->rkm_u.producer.msgid);
+ } else {
+ firstmsg->rkm_u.producer.last_msgid =
+ lastmsg->rkm_u.producer.msgid;
+ }
+
+ if (!rd_kafka_pid_eq(batch->pid, perr->rktp_pid)) {
+ /* Don't retry if PID changed since we can't
+ * guarantee correctness across PID sessions. */
+ perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
+ perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+
+ rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "ERRPID",
+ "%.*s [%" PRId32
+ "] PID mismatch: "
+ "request %s != partition %s: "
+ "failing messages with error %s",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_pid2str(batch->pid),
+ rd_kafka_pid2str(perr->rktp_pid),
+ rd_kafka_err2str(perr->err));
+ return;
+ }
+
+ /*
+ * Special error handling
+ */
+ switch (perr->err) {
+ case RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER:
+ /* Compare request's sequence to expected next
+ * acked sequence.
+ *
+ * Example requests in flight:
+ * R1(base_seq:5) R2(10) R3(15) R4(20)
+ */
+
+ /* Acquire the last partition error to help
+ * troubleshoot this problem. */
+ rd_kafka_toppar_lock(rktp);
+ last_err = rktp->rktp_last_err;
+ rd_kafka_toppar_unlock(rktp);
+
+ r = batch->first_seq - perr->next_ack_seq;
+
+ if (r == 0) {
+ /* R1 failed:
+ * If this was the head-of-line request in-flight it
+ * means there is a state desynchronization between the
+ * producer and broker (a bug), in which case
+ * we'll raise a fatal error since we can no longer
+ * reason about the state of messages and thus
+ * not guarantee ordering or once-ness for R1,
+ * nor give the user a chance to opt out of sending
+ * R2 to R4 which would be retried automatically. */
+
+ rd_kafka_idemp_set_fatal_error(
+ rk, perr->err,
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "due to sequence desynchronization with "
+ "broker %" PRId32 " (%s, base seq %" PRId32
+ ", "
+ "idemp state change %" PRId64
+ "ms ago, "
+ "last partition error %s (actions %s, "
+ "base seq %" PRId32 "..%" PRId32
+ ", base msgid %" PRIu64 ", %" PRId64 "ms ago)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid,
+ rd_kafka_pid2str(batch->pid), batch->first_seq,
+ state_age / 1000, rd_kafka_err2name(last_err.err),
+ rd_kafka_actions2str(last_err.actions),
+ last_err.base_seq, last_err.last_seq,
+ last_err.base_msgid,
+ last_err.ts ? (now - last_err.ts) / 1000 : -1);
+
+ perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
+ perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+ perr->update_next_ack = rd_false;
+ perr->update_next_err = rd_true;
+
+ } else if (r > 0) {
+ /* R2 failed:
+ * With max.in.flight > 1 we can have a situation
+ * where the first request in-flight (R1) to the broker
+ * fails, which causes the sub-sequent requests
+ * that are in-flight to have a non-sequential
+ * sequence number and thus fail.
+ * But these sub-sequent requests (R2 to R4) are not at
+ * the risk of being duplicated so we bump the epoch and
+ * re-enqueue the messages for later retry
+ * (without incrementing retries).
+ */
+ rd_rkb_dbg(
+ rkb, MSG | RD_KAFKA_DBG_EOS, "ERRSEQ",
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "due to skipped sequence numbers "
+ "(%s, base seq %" PRId32
+ " > "
+ "next seq %" PRId32
+ ") "
+ "caused by previous failed request "
+ "(%s, actions %s, "
+ "base seq %" PRId32 "..%" PRId32
+ ", base msgid %" PRIu64 ", %" PRId64
+ "ms ago): "
+ "recovering and retrying",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq),
+ rd_kafka_pid2str(batch->pid), batch->first_seq,
+ perr->next_ack_seq, rd_kafka_err2name(last_err.err),
+ rd_kafka_actions2str(last_err.actions),
+ last_err.base_seq, last_err.last_seq,
+ last_err.base_msgid,
+ last_err.ts ? (now - last_err.ts) / 1000 : -1);
+
+ perr->incr_retry = 0;
+ perr->actions = RD_KAFKA_ERR_ACTION_RETRY;
+ perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+ perr->update_next_ack = rd_false;
+ perr->update_next_err = rd_true;
+
+ rd_kafka_idemp_drain_epoch_bump(
+ rk, perr->err, "skipped sequence numbers");
+
+ } else {
+ /* Request's sequence is less than next ack,
+ * this should never happen unless we have
+ * local bug or the broker did not respond
+ * to the requests in order. */
+ rd_kafka_idemp_set_fatal_error(
+ rk, perr->err,
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "with rewound sequence number on "
+ "broker %" PRId32
+ " (%s, "
+ "base seq %" PRId32 " < next seq %" PRId32
+ "): "
+ "last error %s (actions %s, "
+ "base seq %" PRId32 "..%" PRId32
+ ", base msgid %" PRIu64 ", %" PRId64 "ms ago)",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid,
+ rd_kafka_pid2str(batch->pid), batch->first_seq,
+ perr->next_ack_seq, rd_kafka_err2name(last_err.err),
+ rd_kafka_actions2str(last_err.actions),
+ last_err.base_seq, last_err.last_seq,
+ last_err.base_msgid,
+ last_err.ts ? (now - last_err.ts) / 1000 : -1);
+
+ perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
+ perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+ perr->update_next_ack = rd_false;
+ perr->update_next_err = rd_false;
+ }
+ break;
+
+ case RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER:
+ /* This error indicates that we successfully produced
+ * this set of messages before but this (supposed) retry failed.
+ *
+ * Treat as success, however offset and timestamp
+ * will be invalid. */
+
+ /* Future improvement/FIXME:
+ * But first make sure the first message has actually
+ * been retried, getting this error for a non-retried message
+ * indicates a synchronization issue or bug. */
+ rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "DUPSEQ",
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "due to duplicate sequence number: "
+ "previous send succeeded but was not acknowledged "
+ "(%s, base seq %" PRId32
+ "): "
+ "marking the messages successfully delivered",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq),
+ rd_kafka_pid2str(batch->pid), batch->first_seq);
+
+ /* Void error, delivery succeeded */
+ perr->err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ perr->actions = 0;
+ perr->status = RD_KAFKA_MSG_STATUS_PERSISTED;
+ perr->update_next_ack = rd_true;
+ perr->update_next_err = rd_true;
+ break;
+
+ case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID:
+ /* The broker/cluster lost track of our PID because
+ * the last message we produced has now been deleted
+ * (by DeleteRecords, compaction, or topic retention policy).
+ *
+ * If all previous messages are accounted for and this is not
+ * a retry we can simply bump the epoch and reset the sequence
+ * number and then retry the message(s) again.
+ *
+ * If there are outstanding messages not yet acknowledged
+ * then there is no safe way to carry on without risking
+ * duplication or reordering, in which case we fail
+ * the producer.
+ *
+ * In case of the transactional producer and a transaction
+ * coordinator that supports KIP-360 (>= AK 2.5, checked from
+ * the txnmgr, not here) we'll raise an abortable error and
+ * flag that the epoch needs to be bumped on the coordinator. */
+ if (rd_kafka_is_transactional(rk)) {
+ rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID",
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "due to unknown producer id "
+ "(%s, base seq %" PRId32
+ ", %d retries): "
+ "failing the current transaction",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq),
+ rd_kafka_pid2str(batch->pid),
+ batch->first_seq,
+ firstmsg->rkm_u.producer.retries);
+
+ /* Drain outstanding requests and bump epoch. */
+ rd_kafka_idemp_drain_epoch_bump(rk, perr->err,
+ "unknown producer id");
+
+ rd_kafka_txn_set_abortable_error_with_bump(
+ rk, RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "due to unknown producer id",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq));
+
+ perr->incr_retry = 0;
+ perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
+ perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+ perr->update_next_ack = rd_false;
+ perr->update_next_err = rd_true;
+ break;
+
+ } else if (!firstmsg->rkm_u.producer.retries &&
+ perr->next_err_seq == batch->first_seq) {
+ rd_rkb_dbg(rkb, MSG | RD_KAFKA_DBG_EOS, "UNKPID",
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "due to unknown producer id "
+ "(%s, base seq %" PRId32
+ ", %d retries): "
+ "no risk of duplication/reordering: "
+ "resetting PID and retrying",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq),
+ rd_kafka_pid2str(batch->pid),
+ batch->first_seq,
+ firstmsg->rkm_u.producer.retries);
+
+ /* Drain outstanding requests and bump epoch. */
+ rd_kafka_idemp_drain_epoch_bump(rk, perr->err,
+ "unknown producer id");
+
+ perr->incr_retry = 0;
+ perr->actions = RD_KAFKA_ERR_ACTION_RETRY;
+ perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+ perr->update_next_ack = rd_false;
+ perr->update_next_err = rd_true;
+ break;
+ }
+
+ rd_kafka_idemp_set_fatal_error(
+ rk, perr->err,
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed "
+ "due to unknown producer id ("
+ "broker %" PRId32 " %s, base seq %" PRId32
+ ", %d retries): "
+ "unable to retry without risking "
+ "duplication/reordering",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, rd_kafka_msgq_len(&batch->msgq),
+ rkb->rkb_nodeid, rd_kafka_pid2str(batch->pid),
+ batch->first_seq, firstmsg->rkm_u.producer.retries);
+
+ perr->actions = RD_KAFKA_ERR_ACTION_PERMANENT;
+ perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+ perr->update_next_ack = rd_false;
+ perr->update_next_err = rd_true;
+ break;
+
+ default:
+ /* All other errors are handled in the standard
+ * error Produce handler, which will set
+ * update_next_ack|err accordingly. */
+ break;
+ }
+}
+
+
+
+/**
+ * @brief Error-handling for failed ProduceRequests
+ *
+ * @param errp Is the input and output error, it may be changed
+ * by this function.
+ *
+ * @returns 0 if no further processing of the request should be performed,
+ * such as triggering delivery reports, else 1.
+ *
+ * @warning May be called on the old leader thread. Lock rktp appropriately!
+ *
+ * @warning \p request may be NULL.
+ *
+ * @locality broker thread (but not necessarily the leader broker)
+ * @locks none
+ */
+static int rd_kafka_handle_Produce_error(rd_kafka_broker_t *rkb,
+ const rd_kafka_buf_t *request,
+ rd_kafka_msgbatch_t *batch,
+ struct rd_kafka_Produce_err *perr) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_toppar_t *rktp = batch->rktp;
+ int is_leader;
+
+ if (unlikely(perr->err == RD_KAFKA_RESP_ERR__DESTROY))
+ return 0; /* Terminating */
+
+ /* When there is a partition leader change any outstanding
+ * requests to the old broker will be handled by the old
+ * broker thread when the responses are received/timeout:
+ * in this case we need to be careful with locking:
+ * check once if we're the leader (which allows relaxed
+ * locking), and cache the current rktp's eos state vars. */
+ rd_kafka_toppar_lock(rktp);
+ is_leader = rktp->rktp_broker == rkb;
+ perr->rktp_pid = rktp->rktp_eos.pid;
+ perr->next_ack_seq = rktp->rktp_eos.next_ack_seq;
+ perr->next_err_seq = rktp->rktp_eos.next_err_seq;
+ rd_kafka_toppar_unlock(rktp);
+
+ /* All failures are initially treated as if the message
+ * was not persisted, but the status may be changed later
+ * for specific errors and actions. */
+ perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+
+ /* Set actions for known errors (may be overriden later),
+ * all other errors are considered permanent failures.
+ * (also see rd_kafka_err_action() for the default actions). */
+ perr->actions = rd_kafka_err_action(
+ rkb, perr->err, request,
+
+ RD_KAFKA_ERR_ACTION_REFRESH |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
+ RD_KAFKA_RESP_ERR__TRANSPORT,
+
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
+
+ RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
+ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+
+ RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
+ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS,
+
+ RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
+ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS_AFTER_APPEND,
+
+ RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
+ RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE,
+
+ RD_KAFKA_ERR_ACTION_RETRY |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
+ RD_KAFKA_RESP_ERR__TIMED_OUT,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
+ RD_KAFKA_RESP_ERR__MSG_TIMED_OUT,
+
+ /* All Idempotent Producer-specific errors are
+ * initially set as permanent errors,
+ * special handling may change the actions. */
+ RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
+ RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED,
+ RD_KAFKA_RESP_ERR_DUPLICATE_SEQUENCE_NUMBER,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
+
+ RD_KAFKA_ERR_ACTION_PERMANENT |
+ RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED,
+ RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH,
+
+ /* Message was purged from out-queue due to
+ * Idempotent Producer Id change */
+ RD_KAFKA_ERR_ACTION_RETRY, RD_KAFKA_RESP_ERR__RETRY,
+
+ RD_KAFKA_ERR_ACTION_END);
+
+ rd_rkb_dbg(rkb, MSG, "MSGSET",
+ "%s [%" PRId32
+ "]: MessageSet with %i message(s) "
+ "(MsgId %" PRIu64 ", BaseSeq %" PRId32
+ ") "
+ "encountered error: %s (actions %s)%s",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq), batch->first_msgid,
+ batch->first_seq, rd_kafka_err2str(perr->err),
+ rd_kafka_actions2str(perr->actions),
+ is_leader ? "" : " [NOT LEADER]");
+
+
+ /*
+ * Special handling for Idempotent Producer
+ *
+ * Note: Idempotent Producer-specific errors received
+ * on a non-idempotent producer will be passed through
+ * directly to the application.
+ */
+ if (rd_kafka_is_idempotent(rk))
+ rd_kafka_handle_idempotent_Produce_error(rkb, batch, perr);
+
+ /* Update message persistence status based on action flags.
+ * None of these are typically set after an idempotent error,
+ * which sets the status explicitly. */
+ if (perr->actions & RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED)
+ perr->status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+ else if (perr->actions & RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED)
+ perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+ else if (perr->actions & RD_KAFKA_ERR_ACTION_MSG_PERSISTED)
+ perr->status = RD_KAFKA_MSG_STATUS_PERSISTED;
+
+ /* Save the last error for debugging sub-sequent errors,
+ * useful for Idempotent Producer throubleshooting. */
+ rd_kafka_toppar_lock(rktp);
+ rktp->rktp_last_err.err = perr->err;
+ rktp->rktp_last_err.actions = perr->actions;
+ rktp->rktp_last_err.ts = rd_clock();
+ rktp->rktp_last_err.base_seq = batch->first_seq;
+ rktp->rktp_last_err.last_seq = perr->last_seq;
+ rktp->rktp_last_err.base_msgid = batch->first_msgid;
+ rd_kafka_toppar_unlock(rktp);
+
+ /*
+ * Handle actions
+ */
+ if (perr->actions &
+ (RD_KAFKA_ERR_ACTION_REFRESH | RD_KAFKA_ERR_ACTION_RETRY)) {
+ /* Retry (refresh also implies retry) */
+
+ if (perr->actions & RD_KAFKA_ERR_ACTION_REFRESH) {
+ /* Request metadata information update.
+ * These errors imply that we have stale
+ * information and the request was
+ * either rejected or not sent -
+ * we don't need to increment the retry count
+ * when we perform a retry since:
+ * - it is a temporary error (hopefully)
+ * - there is no chance of duplicate delivery
+ */
+ rd_kafka_toppar_leader_unavailable(rktp, "produce",
+ perr->err);
+
+ /* We can't be certain the request wasn't
+ * sent in case of transport failure,
+ * so the ERR__TRANSPORT case will need
+ * the retry count to be increased,
+ * In case of certain other errors we want to
+ * avoid retrying for the duration of the
+ * message.timeout.ms to speed up error propagation. */
+ if (perr->err != RD_KAFKA_RESP_ERR__TRANSPORT &&
+ perr->err != RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR)
+ perr->incr_retry = 0;
+ }
+
+ /* If message timed out in queue, not in transit,
+ * we will retry at a later time but not increment
+ * the retry count since there is no risk
+ * of duplicates. */
+ if (!rd_kafka_buf_was_sent(request))
+ perr->incr_retry = 0;
+
+ if (!perr->incr_retry) {
+ /* If retries are not to be incremented then
+ * there is no chance of duplicates on retry, which
+ * means these messages were not persisted. */
+ perr->status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+ }
+
+ if (rd_kafka_is_idempotent(rk)) {
+ /* Any currently in-flight requests will
+ * fail with ERR_OUT_OF_ORDER_SEQUENCE_NUMBER,
+ * which should not be treated as a fatal error
+ * since this request and sub-sequent requests
+ * will be retried and thus return to order.
+ * Unless the error was a timeout, or similar,
+ * in which case the request might have made it
+ * and the messages are considered possibly persisted:
+ * in this case we allow the next in-flight response
+ * to be successful, in which case we mark
+ * this request's messages as succesfully delivered. */
+ if (perr->status &
+ RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED)
+ perr->update_next_ack = rd_true;
+ else
+ perr->update_next_ack = rd_false;
+ perr->update_next_err = rd_true;
+
+ /* Drain outstanding requests so that retries
+ * are attempted with proper state knowledge and
+ * without any in-flight requests. */
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_idemp_drain_toppar(rktp,
+ "drain before retrying");
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ /* Since requests are specific to a broker
+ * we move the retryable messages from the request
+ * back to the partition queue (prepend) and then
+ * let the new broker construct a new request.
+ * While doing this we also make sure the retry count
+ * for each message is honoured, any messages that
+ * would exceeded the retry count will not be
+ * moved but instead fail below. */
+ rd_kafka_toppar_retry_msgq(rktp, &batch->msgq, perr->incr_retry,
+ perr->status);
+
+ if (rd_kafka_msgq_len(&batch->msgq) == 0) {
+ /* No need do anything more with the request
+ * here since the request no longer has any
+ * messages associated with it. */
+ return 0;
+ }
+ }
+
+ if (perr->actions & RD_KAFKA_ERR_ACTION_PERMANENT &&
+ rd_kafka_is_idempotent(rk)) {
+ if (rd_kafka_is_transactional(rk) &&
+ perr->err == RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH) {
+ /* Producer was fenced by new transactional producer
+ * with the same transactional.id */
+ rd_kafka_txn_set_fatal_error(
+ rk, RD_DO_LOCK, RD_KAFKA_RESP_ERR__FENCED,
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed: %s "
+ "(broker %" PRId32 " %s, base seq %" PRId32
+ "): "
+ "transactional producer fenced by newer "
+ "producer instance",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq),
+ rd_kafka_err2str(perr->err), rkb->rkb_nodeid,
+ rd_kafka_pid2str(batch->pid), batch->first_seq);
+
+ /* Drain outstanding requests and reset PID. */
+ rd_kafka_idemp_drain_reset(
+ rk, "fenced by new transactional producer");
+
+ } else if (rd_kafka_is_transactional(rk)) {
+ /* When transactional any permanent produce failure
+ * would lead to an incomplete transaction, so raise
+ * an abortable transaction error. */
+ rd_kafka_txn_set_abortable_error(
+ rk, perr->err,
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed: %s "
+ "(broker %" PRId32 " %s, base seq %" PRId32
+ "): "
+ "current transaction must be aborted",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq),
+ rd_kafka_err2str(perr->err), rkb->rkb_nodeid,
+ rd_kafka_pid2str(batch->pid), batch->first_seq);
+
+ } else if (rk->rk_conf.eos.gapless) {
+ /* A permanent non-idempotent error will lead to
+ * gaps in the message series, the next request
+ * will fail with ...ERR_OUT_OF_ORDER_SEQUENCE_NUMBER.
+ * To satisfy the gapless guarantee we need to raise
+ * a fatal error here. */
+ rd_kafka_idemp_set_fatal_error(
+ rk, RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE,
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) failed: "
+ "%s (broker %" PRId32 " %s, base seq %" PRId32
+ "): "
+ "unable to satisfy gap-less guarantee",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq),
+ rd_kafka_err2str(perr->err), rkb->rkb_nodeid,
+ rd_kafka_pid2str(batch->pid), batch->first_seq);
+
+ /* Drain outstanding requests and reset PID. */
+ rd_kafka_idemp_drain_reset(
+ rk, "unable to satisfy gap-less guarantee");
+
+ } else {
+ /* If gapless is not set we bump the Epoch and
+ * renumber the messages to send. */
+
+ /* Drain outstanding requests and bump the epoch .*/
+ rd_kafka_idemp_drain_epoch_bump(rk, perr->err,
+ "message sequence gap");
+ }
+
+ perr->update_next_ack = rd_false;
+ /* Make sure the next error will not raise a fatal error. */
+ perr->update_next_err = rd_true;
+ }
+
+ if (perr->err == RD_KAFKA_RESP_ERR__TIMED_OUT ||
+ perr->err == RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE) {
+ /* Translate request-level timeout error code
+ * to message-level timeout error code. */
+ perr->err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+ } else if (perr->err == RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED) {
+ /* If we're no longer authorized to access the topic mark
+ * it as errored to deny further produce requests. */
+ rd_kafka_topic_wrlock(rktp->rktp_rkt);
+ rd_kafka_topic_set_error(rktp->rktp_rkt, perr->err);
+ rd_kafka_topic_wrunlock(rktp->rktp_rkt);
+ }
+
+ return 1;
+}
+
+/**
+ * @brief Handle ProduceResponse success for idempotent producer
+ *
+ * @warning May be called on the old leader thread. Lock rktp appropriately!
+ *
+ * @locks none
+ * @locality broker thread (but not necessarily the leader broker thread)
+ */
+static void
+rd_kafka_handle_idempotent_Produce_success(rd_kafka_broker_t *rkb,
+ rd_kafka_msgbatch_t *batch,
+ int32_t next_seq) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_toppar_t *rktp = batch->rktp;
+ char fatal_err[512];
+ uint64_t first_msgid, last_msgid;
+
+ *fatal_err = '\0';
+
+ first_msgid = rd_kafka_msgq_first(&batch->msgq)->rkm_u.producer.msgid;
+ last_msgid = rd_kafka_msgq_last(&batch->msgq)->rkm_u.producer.msgid;
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* If the last acked msgid is higher than
+ * the next message to (re)transmit in the message queue
+ * it means a previous series of R1,R2 ProduceRequests
+ * had R1 fail with uncertain persistence status,
+ * such as timeout or transport error, but R2 succeeded,
+ * which means the messages in R1 were in fact persisted.
+ * In this case trigger delivery reports for all messages
+ * in queue until we hit a non-acked message msgid. */
+ if (unlikely(rktp->rktp_eos.acked_msgid < first_msgid - 1)) {
+ rd_kafka_dr_implicit_ack(rkb, rktp, last_msgid);
+
+ } else if (unlikely(batch->first_seq != rktp->rktp_eos.next_ack_seq &&
+ batch->first_seq == rktp->rktp_eos.next_err_seq)) {
+ /* Response ordering is typically not a concern
+ * (but will not happen with current broker versions),
+ * unless we're expecting an error to be returned at
+ * this sequence rather than a success ack, in which
+ * case raise a fatal error. */
+
+ /* Can't call set_fatal_error() while
+ * holding the toppar lock, so construct
+ * the error string here and call
+ * set_fatal_error() below after
+ * toppar lock has been released. */
+ rd_snprintf(fatal_err, sizeof(fatal_err),
+ "ProduceRequest for %.*s [%" PRId32
+ "] "
+ "with %d message(s) "
+ "succeeded when expecting failure "
+ "(broker %" PRId32
+ " %s, "
+ "base seq %" PRId32
+ ", "
+ "next ack seq %" PRId32
+ ", "
+ "next err seq %" PRId32
+ ": "
+ "unable to retry without risking "
+ "duplication/reordering",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq), rkb->rkb_nodeid,
+ rd_kafka_pid2str(batch->pid), batch->first_seq,
+ rktp->rktp_eos.next_ack_seq,
+ rktp->rktp_eos.next_err_seq);
+
+ rktp->rktp_eos.next_err_seq = next_seq;
+ }
+
+ if (likely(!*fatal_err)) {
+ /* Advance next expected err and/or ack sequence */
+
+ /* Only step err seq if it hasn't diverged. */
+ if (rktp->rktp_eos.next_err_seq == rktp->rktp_eos.next_ack_seq)
+ rktp->rktp_eos.next_err_seq = next_seq;
+
+ rktp->rktp_eos.next_ack_seq = next_seq;
+ }
+
+ /* Store the last acked message sequence,
+ * since retries within the broker cache window (5 requests)
+ * will succeed for older messages we must only update the
+ * acked msgid if it is higher than the last acked. */
+ if (last_msgid > rktp->rktp_eos.acked_msgid)
+ rktp->rktp_eos.acked_msgid = last_msgid;
+
+ rd_kafka_toppar_unlock(rktp);
+
+ /* Must call set_fatal_error() after releasing
+ * the toppar lock. */
+ if (unlikely(*fatal_err))
+ rd_kafka_idemp_set_fatal_error(
+ rk, RD_KAFKA_RESP_ERR__INCONSISTENT, "%s", fatal_err);
+}
+
+
+/**
+ * @brief Handle ProduceRequest result for a message batch.
+ *
+ * @warning \p request may be NULL.
+ *
+ * @localiy broker thread (but not necessarily the toppar's handler thread)
+ * @locks none
+ */
+static void rd_kafka_msgbatch_handle_Produce_result(
+ rd_kafka_broker_t *rkb,
+ rd_kafka_msgbatch_t *batch,
+ rd_kafka_resp_err_t err,
+ const struct rd_kafka_Produce_result *presult,
+ const rd_kafka_buf_t *request) {
+
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_toppar_t *rktp = batch->rktp;
+ rd_kafka_msg_status_t status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+ rd_bool_t last_inflight;
+ int32_t next_seq;
+
+ /* Decrease partition's messages in-flight counter */
+ rd_assert(rd_atomic32_get(&rktp->rktp_msgs_inflight) >=
+ rd_kafka_msgq_len(&batch->msgq));
+ last_inflight = !rd_atomic32_sub(&rktp->rktp_msgs_inflight,
+ rd_kafka_msgq_len(&batch->msgq));
+
+ /* Next expected sequence (and handle wrap) */
+ next_seq = rd_kafka_seq_wrap(batch->first_seq +
+ rd_kafka_msgq_len(&batch->msgq));
+
+ if (likely(!err)) {
+ rd_rkb_dbg(rkb, MSG, "MSGSET",
+ "%s [%" PRId32
+ "]: MessageSet with %i message(s) "
+ "(MsgId %" PRIu64 ", BaseSeq %" PRId32 ") delivered",
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_msgq_len(&batch->msgq), batch->first_msgid,
+ batch->first_seq);
+
+ if (rktp->rktp_rkt->rkt_conf.required_acks != 0)
+ status = RD_KAFKA_MSG_STATUS_PERSISTED;
+
+ if (rd_kafka_is_idempotent(rk))
+ rd_kafka_handle_idempotent_Produce_success(rkb, batch,
+ next_seq);
+ } else {
+ /* Error handling */
+ struct rd_kafka_Produce_err perr = {
+ .err = err,
+ .incr_retry = 1,
+ .status = status,
+ .update_next_ack = rd_true,
+ .update_next_err = rd_true,
+ .last_seq = (batch->first_seq +
+ rd_kafka_msgq_len(&batch->msgq) - 1)};
+
+ rd_kafka_handle_Produce_error(rkb, request, batch, &perr);
+
+ /* Update next expected acked and/or err sequence. */
+ if (perr.update_next_ack || perr.update_next_err) {
+ rd_kafka_toppar_lock(rktp);
+ if (perr.update_next_ack)
+ rktp->rktp_eos.next_ack_seq = next_seq;
+ if (perr.update_next_err)
+ rktp->rktp_eos.next_err_seq = next_seq;
+ rd_kafka_toppar_unlock(rktp);
+ }
+
+ err = perr.err;
+ status = perr.status;
+ }
+
+
+ /* Messages to retry will have been removed from the request's queue */
+ if (likely(rd_kafka_msgq_len(&batch->msgq) > 0)) {
+ /* Set offset, timestamp and status for each message. */
+ rd_kafka_msgq_set_metadata(&batch->msgq, rkb->rkb_nodeid,
+ presult->offset, presult->timestamp,
+ status);
+
+ /* Enqueue messages for delivery report. */
+ rd_kafka_dr_msgq(rktp->rktp_rkt, &batch->msgq, err);
+ }
+
+ if (rd_kafka_is_idempotent(rk) && last_inflight)
+ rd_kafka_idemp_inflight_toppar_sub(rk, rktp);
+}
+
+
+/**
+ * @brief Handle ProduceResponse
+ *
+ * @param reply is NULL when `acks=0` and on various local errors.
+ *
+ * @remark ProduceRequests are never retried, retriable errors are
+ * instead handled by re-enqueuing the request's messages back
+ * on the partition queue to have a new ProduceRequest constructed
+ * eventually.
+ *
+ * @warning May be called on the old leader thread. Lock rktp appropriately!
+ *
+ * @locality broker thread (but not necessarily the leader broker thread)
+ */
+static void rd_kafka_handle_Produce(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *reply,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ rd_kafka_msgbatch_t *batch = &request->rkbuf_batch;
+ rd_kafka_toppar_t *rktp = batch->rktp;
+ struct rd_kafka_Produce_result result = {
+ .offset = RD_KAFKA_OFFSET_INVALID, .timestamp = -1};
+
+ /* Unit test interface: inject errors */
+ if (unlikely(rk->rk_conf.ut.handle_ProduceResponse != NULL)) {
+ err = rk->rk_conf.ut.handle_ProduceResponse(
+ rkb->rkb_rk, rkb->rkb_nodeid, batch->first_msgid, err);
+ }
+
+ /* Parse Produce reply (unless the request errored) */
+ if (!err && reply)
+ err = rd_kafka_handle_Produce_parse(rkb, rktp, reply, request,
+ &result);
+
+ rd_kafka_msgbatch_handle_Produce_result(rkb, batch, err, &result,
+ request);
+}
+
+
+/**
+ * @brief Send ProduceRequest for messages in toppar queue.
+ *
+ * @returns the number of messages included, or 0 on error / no messages.
+ *
+ * @locality broker thread
+ */
+int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const rd_kafka_pid_t pid,
+ uint64_t epoch_base_msgid) {
+ rd_kafka_buf_t *rkbuf;
+ rd_kafka_topic_t *rkt = rktp->rktp_rkt;
+ size_t MessageSetSize = 0;
+ int cnt;
+ rd_ts_t now;
+ int64_t first_msg_timeout;
+ int tmout;
+
+ /**
+ * Create ProduceRequest with as many messages from the toppar
+ * transmit queue as possible.
+ */
+ rkbuf = rd_kafka_msgset_create_ProduceRequest(
+ rkb, rktp, &rktp->rktp_xmit_msgq, pid, epoch_base_msgid,
+ &MessageSetSize);
+ if (unlikely(!rkbuf))
+ return 0;
+
+ cnt = rd_kafka_msgq_len(&rkbuf->rkbuf_batch.msgq);
+ rd_dassert(cnt > 0);
+
+ rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchcnt, (int64_t)cnt);
+ rd_avg_add(&rktp->rktp_rkt->rkt_avg_batchsize, (int64_t)MessageSetSize);
+
+ if (!rkt->rkt_conf.required_acks)
+ rkbuf->rkbuf_flags |= RD_KAFKA_OP_F_NO_RESPONSE;
+
+ /* Use timeout from first message in batch */
+ now = rd_clock();
+ first_msg_timeout =
+ (rd_kafka_msgq_first(&rkbuf->rkbuf_batch.msgq)->rkm_ts_timeout -
+ now) /
+ 1000;
+
+ if (unlikely(first_msg_timeout <= 0)) {
+ /* Message has already timed out, allow 100 ms
+ * to produce anyway */
+ tmout = 100;
+ } else {
+ tmout = (int)RD_MIN(INT_MAX, first_msg_timeout);
+ }
+
+ /* Set absolute timeout (including retries), the
+ * effective timeout for this specific request will be
+ * capped by socket.timeout.ms */
+ rd_kafka_buf_set_abs_timeout(rkbuf, tmout, now);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, RD_KAFKA_NO_REPLYQ,
+ rd_kafka_handle_Produce, NULL);
+
+ return cnt;
+}
+
+
+/**
+ * @brief Construct and send CreateTopicsRequest to \p rkb
+ * with the topics (NewTopic_t*) in \p new_topics, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *new_topics /*(NewTopic_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int features;
+ int i = 0;
+ rd_kafka_NewTopic_t *newt;
+ int op_timeout;
+
+ if (rd_list_cnt(new_topics) == 0) {
+ rd_snprintf(errstr, errstr_size, "No topics to create");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_CreateTopics, 0, 4, &features);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Topic Admin API (KIP-4) not supported "
+ "by broker, requires broker version >= 0.10.2.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ if (rd_kafka_confval_get_int(&options->validate_only) &&
+ ApiVersion < 1) {
+ rd_snprintf(errstr, errstr_size,
+ "CreateTopics.validate_only=true not "
+ "supported by broker");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateTopics, 1,
+ 4 + (rd_list_cnt(new_topics) * 200) +
+ 4 + 1);
+
+ /* #topics */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_topics));
+
+ while ((newt = rd_list_elem(new_topics, i++))) {
+ int partition;
+ int ei = 0;
+ const rd_kafka_ConfigEntry_t *entry;
+
+ if (ApiVersion < 4) {
+ if (newt->num_partitions == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Default partition count (KIP-464) "
+ "not supported by broker, "
+ "requires broker version <= 2.4.0");
+ rd_kafka_replyq_destroy(&replyq);
+ rd_kafka_buf_destroy(rkbuf);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ if (newt->replication_factor == -1 &&
+ rd_list_empty(&newt->replicas)) {
+ rd_snprintf(errstr, errstr_size,
+ "Default replication factor "
+ "(KIP-464) "
+ "not supported by broker, "
+ "requires broker version <= 2.4.0");
+ rd_kafka_replyq_destroy(&replyq);
+ rd_kafka_buf_destroy(rkbuf);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+
+ /* topic */
+ rd_kafka_buf_write_str(rkbuf, newt->topic, -1);
+
+ if (rd_list_cnt(&newt->replicas)) {
+ /* num_partitions and replication_factor must be
+ * set to -1 if a replica assignment is sent. */
+ /* num_partitions */
+ rd_kafka_buf_write_i32(rkbuf, -1);
+ /* replication_factor */
+ rd_kafka_buf_write_i16(rkbuf, -1);
+ } else {
+ /* num_partitions */
+ rd_kafka_buf_write_i32(rkbuf, newt->num_partitions);
+ /* replication_factor */
+ rd_kafka_buf_write_i16(
+ rkbuf, (int16_t)newt->replication_factor);
+ }
+
+ /* #replica_assignment */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&newt->replicas));
+
+ /* Replicas per partition, see rdkafka_admin.[ch]
+ * for how these are constructed. */
+ for (partition = 0; partition < rd_list_cnt(&newt->replicas);
+ partition++) {
+ const rd_list_t *replicas;
+ int ri = 0;
+
+ replicas = rd_list_elem(&newt->replicas, partition);
+ if (!replicas)
+ continue;
+
+ /* partition */
+ rd_kafka_buf_write_i32(rkbuf, partition);
+ /* #replicas */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(replicas));
+
+ for (ri = 0; ri < rd_list_cnt(replicas); ri++) {
+ /* replica */
+ rd_kafka_buf_write_i32(
+ rkbuf, rd_list_get_int32(replicas, ri));
+ }
+ }
+
+ /* #config_entries */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&newt->config));
+
+ RD_LIST_FOREACH(entry, &newt->config, ei) {
+ /* config_name */
+ rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1);
+ /* config_value (nullable) */
+ rd_kafka_buf_write_str(rkbuf, entry->kv->value, -1);
+ }
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ rd_kafka_buf_write_i32(rkbuf, op_timeout);
+
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ if (ApiVersion >= 1) {
+ /* validate_only */
+ rd_kafka_buf_write_i8(
+ rkbuf, rd_kafka_confval_get_int(&options->validate_only));
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send DeleteTopicsRequest to \p rkb
+ * with the topics (DeleteTopic_t *) in \p del_topics, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_topics /*(DeleteTopic_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int features;
+ int i = 0;
+ rd_kafka_DeleteTopic_t *delt;
+ int op_timeout;
+
+ if (rd_list_cnt(del_topics) == 0) {
+ rd_snprintf(errstr, errstr_size, "No topics to delete");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DeleteTopics, 0, 1, &features);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Topic Admin API (KIP-4) not supported "
+ "by broker, requires broker version >= 0.10.2.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf =
+ rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteTopics, 1,
+ /* FIXME */
+ 4 + (rd_list_cnt(del_topics) * 100) + 4);
+
+ /* #topics */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_topics));
+
+ while ((delt = rd_list_elem(del_topics, i++)))
+ rd_kafka_buf_write_str(rkbuf, delt->topic, -1);
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ rd_kafka_buf_write_i32(rkbuf, op_timeout);
+
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send DeleteRecordsRequest to \p rkb
+ * with the offsets to delete (rd_kafka_topic_partition_list_t *) in
+ * \p offsets_list, using \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @remark The rd_kafka_topic_partition_list_t in \p offsets_list must already
+ * be sorted.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb,
+ /*(rd_kafka_topic_partition_list_t*)*/
+ const rd_list_t *offsets_list,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int features;
+ const rd_kafka_topic_partition_list_t *partitions;
+ int op_timeout;
+
+ partitions = rd_list_elem(offsets_list, 0);
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DeleteRecords, 0, 1, &features);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "DeleteRecords Admin API (KIP-107) not supported "
+ "by broker, requires broker version >= 0.11.0");
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteRecords, 1,
+ 4 + (partitions->cnt * 100) + 4);
+
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ rd_kafka_buf_write_topic_partitions(
+ rkbuf, partitions, rd_false /*don't skip invalid offsets*/,
+ rd_false /*any offset*/, fields);
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ rd_kafka_buf_write_i32(rkbuf, op_timeout);
+
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send CreatePartitionsRequest to \p rkb
+ * with the topics (NewPartitions_t*) in \p new_parts, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_CreatePartitionsRequest(rd_kafka_broker_t *rkb,
+ /*(NewPartitions_t*)*/
+ const rd_list_t *new_parts,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int i = 0;
+ rd_kafka_NewPartitions_t *newp;
+ int op_timeout;
+
+ if (rd_list_cnt(new_parts) == 0) {
+ rd_snprintf(errstr, errstr_size, "No partitions to create");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_CreatePartitions, 0, 0, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "CreatePartitions (KIP-195) not supported "
+ "by broker, requires broker version >= 1.0.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreatePartitions, 1,
+ 4 + (rd_list_cnt(new_parts) * 200) +
+ 4 + 1);
+
+ /* #topics */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_parts));
+
+ while ((newp = rd_list_elem(new_parts, i++))) {
+ /* topic */
+ rd_kafka_buf_write_str(rkbuf, newp->topic, -1);
+
+ /* New partition count */
+ rd_kafka_buf_write_i32(rkbuf, (int32_t)newp->total_cnt);
+
+ /* #replica_assignment */
+ if (rd_list_empty(&newp->replicas)) {
+ rd_kafka_buf_write_i32(rkbuf, -1);
+ } else {
+ const rd_list_t *replicas;
+ int pi = -1;
+
+ rd_kafka_buf_write_i32(rkbuf,
+ rd_list_cnt(&newp->replicas));
+
+ while (
+ (replicas = rd_list_elem(&newp->replicas, ++pi))) {
+ int ri = 0;
+
+ /* replica count */
+ rd_kafka_buf_write_i32(rkbuf,
+ rd_list_cnt(replicas));
+
+ /* replica */
+ for (ri = 0; ri < rd_list_cnt(replicas); ri++) {
+ rd_kafka_buf_write_i32(
+ rkbuf,
+ rd_list_get_int32(replicas, ri));
+ }
+ }
+ }
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ rd_kafka_buf_write_i32(rkbuf, op_timeout);
+
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ /* validate_only */
+ rd_kafka_buf_write_i8(
+ rkbuf, rd_kafka_confval_get_int(&options->validate_only));
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send AlterConfigsRequest to \p rkb
+ * with the configs (ConfigResource_t*) in \p configs, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *configs /*(ConfigResource_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int i;
+ const rd_kafka_ConfigResource_t *config;
+ int op_timeout;
+
+ if (rd_list_cnt(configs) == 0) {
+ rd_snprintf(errstr, errstr_size,
+ "No config resources specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_AlterConfigs, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "AlterConfigs (KIP-133) not supported "
+ "by broker, requires broker version >= 0.11.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ /* Incremental requires IncrementalAlterConfigs */
+ if (rd_kafka_confval_get_int(&options->incremental)) {
+ rd_snprintf(errstr, errstr_size,
+ "AlterConfigs.incremental=true (KIP-248) "
+ "not supported by broker, "
+ "replaced by IncrementalAlterConfigs");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_AlterConfigs, 1,
+ rd_list_cnt(configs) * 200);
+
+ /* #resources */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(configs));
+
+ RD_LIST_FOREACH(config, configs, i) {
+ const rd_kafka_ConfigEntry_t *entry;
+ int ei;
+
+ /* resource_type */
+ rd_kafka_buf_write_i8(rkbuf, config->restype);
+
+ /* resource_name */
+ rd_kafka_buf_write_str(rkbuf, config->name, -1);
+
+ /* #config */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(&config->config));
+
+ RD_LIST_FOREACH(entry, &config->config, ei) {
+ /* config_name */
+ rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1);
+ /* config_value (nullable) */
+ rd_kafka_buf_write_str(rkbuf, entry->kv->value, -1);
+
+ if (entry->a.operation != RD_KAFKA_ALTER_OP_SET) {
+ rd_snprintf(errstr, errstr_size,
+ "IncrementalAlterConfigs required "
+ "for add/delete config "
+ "entries: only set supported "
+ "by this operation");
+ rd_kafka_buf_destroy(rkbuf);
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ /* validate_only */
+ rd_kafka_buf_write_i8(
+ rkbuf, rd_kafka_confval_get_int(&options->validate_only));
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send DescribeConfigsRequest to \p rkb
+ * with the configs (ConfigResource_t*) in \p configs, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest(
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *configs /*(ConfigResource_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int i;
+ const rd_kafka_ConfigResource_t *config;
+ int op_timeout;
+
+ if (rd_list_cnt(configs) == 0) {
+ rd_snprintf(errstr, errstr_size,
+ "No config resources specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DescribeConfigs, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "DescribeConfigs (KIP-133) not supported "
+ "by broker, requires broker version >= 0.11.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DescribeConfigs, 1,
+ rd_list_cnt(configs) * 200);
+
+ /* #resources */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(configs));
+
+ RD_LIST_FOREACH(config, configs, i) {
+ const rd_kafka_ConfigEntry_t *entry;
+ int ei;
+
+ /* resource_type */
+ rd_kafka_buf_write_i8(rkbuf, config->restype);
+
+ /* resource_name */
+ rd_kafka_buf_write_str(rkbuf, config->name, -1);
+
+ /* #config */
+ if (rd_list_empty(&config->config)) {
+ /* Get all configs */
+ rd_kafka_buf_write_i32(rkbuf, -1);
+ } else {
+ /* Get requested configs only */
+ rd_kafka_buf_write_i32(rkbuf,
+ rd_list_cnt(&config->config));
+ }
+
+ RD_LIST_FOREACH(entry, &config->config, ei) {
+ /* config_name */
+ rd_kafka_buf_write_str(rkbuf, entry->kv->name, -1);
+ }
+ }
+
+
+ if (ApiVersion == 1) {
+ /* include_synonyms */
+ rd_kafka_buf_write_i8(rkbuf, 1);
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send DeleteGroupsRequest to \p rkb
+ * with the groups (DeleteGroup_t *) in \p del_groups, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_groups /*(DeleteGroup_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ int features;
+ int i = 0;
+ rd_kafka_DeleteGroup_t *delt;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DeleteGroups, 0, 1, &features);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "DeleteGroups Admin API (KIP-229) not supported "
+ "by broker, requires broker version >= 1.1.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf =
+ rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteGroups, 1,
+ 4 + (rd_list_cnt(del_groups) * 100) + 4);
+
+ /* #groups */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_groups));
+
+ while ((delt = rd_list_elem(del_groups, i++)))
+ rd_kafka_buf_write_str(rkbuf, delt->group, -1);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Returns the request size needed to send a specific AclBinding
+ * specified in \p acl, using the ApiVersion provided in
+ * \p ApiVersion.
+ *
+ * @returns and int16_t with the request size in bytes.
+ */
+static RD_INLINE size_t
+rd_kafka_AclBinding_request_size(const rd_kafka_AclBinding_t *acl,
+ int ApiVersion) {
+ return 1 + 2 + (acl->name ? strlen(acl->name) : 0) + 2 +
+ (acl->principal ? strlen(acl->principal) : 0) + 2 +
+ (acl->host ? strlen(acl->host) : 0) + 1 + 1 +
+ (ApiVersion > 0 ? 1 : 0);
+}
+
+/**
+ * @brief Construct and send CreateAclsRequest to \p rkb
+ * with the acls (AclBinding_t*) in \p new_acls, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *new_acls /*(AclBinding_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion;
+ int i;
+ size_t len;
+ int op_timeout;
+ rd_kafka_AclBinding_t *new_acl;
+
+ if (rd_list_cnt(new_acls) == 0) {
+ rd_snprintf(errstr, errstr_size, "No acls to create");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_CreateAcls, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker version >= 0.11.0.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ if (ApiVersion == 0) {
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ if (new_acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL) {
+ rd_snprintf(errstr, errstr_size,
+ "Broker only supports LITERAL "
+ "resource pattern types");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+ } else {
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ if (new_acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
+ new_acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED) {
+ rd_snprintf(errstr, errstr_size,
+ "Only LITERAL and PREFIXED "
+ "resource patterns are supported "
+ "when creating ACLs");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+ }
+
+ len = 4;
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ len += rd_kafka_AclBinding_request_size(new_acl, ApiVersion);
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_CreateAcls, 1, len);
+
+ /* #acls */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(new_acls));
+
+ RD_LIST_FOREACH(new_acl, new_acls, i) {
+ rd_kafka_buf_write_i8(rkbuf, new_acl->restype);
+
+ rd_kafka_buf_write_str(rkbuf, new_acl->name, -1);
+
+ if (ApiVersion >= 1) {
+ rd_kafka_buf_write_i8(rkbuf,
+ new_acl->resource_pattern_type);
+ }
+
+ rd_kafka_buf_write_str(rkbuf, new_acl->principal, -1);
+
+ rd_kafka_buf_write_str(rkbuf, new_acl->host, -1);
+
+ rd_kafka_buf_write_i8(rkbuf, new_acl->operation);
+
+ rd_kafka_buf_write_i8(rkbuf, new_acl->permission_type);
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Construct and send DescribeAclsRequest to \p rkb
+ * with the acls (AclBinding_t*) in \p acls, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t rd_kafka_DescribeAclsRequest(
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *acls /*(rd_kafka_AclBindingFilter_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ const rd_kafka_AclBindingFilter_t *acl;
+ int op_timeout;
+
+ if (rd_list_cnt(acls) == 0) {
+ rd_snprintf(errstr, errstr_size,
+ "No acl binding filters specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+ if (rd_list_cnt(acls) > 1) {
+ rd_snprintf(errstr, errstr_size,
+ "Too many acl binding filters specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ acl = rd_list_elem(acls, 0);
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DescribeAcls, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker version >= 0.11.0.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ if (ApiVersion == 0) {
+ if (acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
+ acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_ANY) {
+ rd_snprintf(errstr, errstr_size,
+ "Broker only supports LITERAL and ANY "
+ "resource pattern types");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ } else {
+ if (acl->resource_pattern_type ==
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) {
+ rd_snprintf(errstr, errstr_size,
+ "Filter contains UNKNOWN elements");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+
+ rkbuf = rd_kafka_buf_new_request(
+ rkb, RD_KAFKAP_DescribeAcls, 1,
+ rd_kafka_AclBinding_request_size(acl, ApiVersion));
+
+ /* resource_type */
+ rd_kafka_buf_write_i8(rkbuf, acl->restype);
+
+ /* resource_name filter */
+ rd_kafka_buf_write_str(rkbuf, acl->name, -1);
+
+ if (ApiVersion > 0) {
+ /* resource_pattern_type (rd_kafka_ResourcePatternType_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->resource_pattern_type);
+ }
+
+ /* principal filter */
+ rd_kafka_buf_write_str(rkbuf, acl->principal, -1);
+
+ /* host filter */
+ rd_kafka_buf_write_str(rkbuf, acl->host, -1);
+
+ /* operation (rd_kafka_AclOperation_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->operation);
+
+ /* permission type (rd_kafka_AclPermissionType_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->permission_type);
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Construct and send DeleteAclsRequest to \p rkb
+ * with the acl filters (AclBindingFilter_t*) in \p del_acls, using
+ * \p options.
+ *
+ * The response (unparsed) will be enqueued on \p replyq
+ * for handling by \p resp_cb (with \p opaque passed).
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_acls /*(AclBindingFilter_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ const rd_kafka_AclBindingFilter_t *acl;
+ int op_timeout;
+ int i;
+ size_t len;
+
+ if (rd_list_cnt(del_acls) == 0) {
+ rd_snprintf(errstr, errstr_size,
+ "No acl binding filters specified");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_DeleteAcls, 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker version >= 0.11.0.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ len = 4;
+
+ RD_LIST_FOREACH(acl, del_acls, i) {
+ if (ApiVersion == 0) {
+ if (acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL &&
+ acl->resource_pattern_type !=
+ RD_KAFKA_RESOURCE_PATTERN_ANY) {
+ rd_snprintf(errstr, errstr_size,
+ "Broker only supports LITERAL "
+ "and ANY resource pattern types");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ } else {
+ if (acl->resource_pattern_type ==
+ RD_KAFKA_RESOURCE_PATTERN_UNKNOWN) {
+ rd_snprintf(errstr, errstr_size,
+ "Filter contains UNKNOWN elements");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+
+ len += rd_kafka_AclBinding_request_size(acl, ApiVersion);
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_DeleteAcls, 1, len);
+
+ /* #acls */
+ rd_kafka_buf_write_i32(rkbuf, rd_list_cnt(del_acls));
+
+ RD_LIST_FOREACH(acl, del_acls, i) {
+ /* resource_type */
+ rd_kafka_buf_write_i8(rkbuf, acl->restype);
+
+ /* resource_name filter */
+ rd_kafka_buf_write_str(rkbuf, acl->name, -1);
+
+ if (ApiVersion > 0) {
+ /* resource_pattern_type
+ * (rd_kafka_ResourcePatternType_t) */
+ rd_kafka_buf_write_i8(rkbuf,
+ acl->resource_pattern_type);
+ }
+
+ /* principal filter */
+ rd_kafka_buf_write_str(rkbuf, acl->principal, -1);
+
+ /* host filter */
+ rd_kafka_buf_write_str(rkbuf, acl->host, -1);
+
+ /* operation (rd_kafka_AclOperation_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->operation);
+
+ /* permission type (rd_kafka_AclPermissionType_t) */
+ rd_kafka_buf_write_i8(rkbuf, acl->permission_type);
+ }
+
+ /* timeout */
+ op_timeout = rd_kafka_confval_get_int(&options->operation_timeout);
+ if (op_timeout > rkb->rkb_rk->rk_conf.socket_timeout_ms)
+ rd_kafka_buf_set_abs_timeout(rkbuf, op_timeout + 1000, 0);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parses and handles an InitProducerId reply.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_handle_InitProducerId(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ int16_t error_code;
+ rd_kafka_pid_t pid;
+
+ if (err)
+ goto err;
+
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &error_code);
+ if ((err = error_code))
+ goto err;
+
+ rd_kafka_buf_read_i64(rkbuf, &pid.id);
+ rd_kafka_buf_read_i16(rkbuf, &pid.epoch);
+
+ rd_kafka_idemp_pid_update(rkb, pid);
+
+ return;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ return;
+
+ /* Retries are performed by idempotence state handler */
+ rd_kafka_idemp_request_pid_failed(rkb, err);
+}
+
+/**
+ * @brief Construct and send InitProducerIdRequest to \p rkb.
+ *
+ * @param transactional_id may be NULL.
+ * @param transaction_timeout_ms may be set to -1.
+ * @param current_pid the current PID to reset, requires KIP-360. If not NULL
+ * and KIP-360 is not supported by the broker this function
+ * will return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE.
+ *
+ * The response (unparsed) will be handled by \p resp_cb served
+ * by queue \p replyq.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code and errstr will be
+ * updated with a human readable error string.
+ */
+rd_kafka_resp_err_t
+rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ int transaction_timeout_ms,
+ const rd_kafka_pid_t *current_pid,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion;
+
+ if (current_pid) {
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_InitProducerId, 3, 4, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "InitProducerId (KIP-360) not supported by "
+ "broker, requires broker version >= 2.5.0: "
+ "unable to recover from previous "
+ "transactional error");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ } else {
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_InitProducerId, 0, 4, NULL);
+
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "InitProducerId (KIP-98) not supported by "
+ "broker, requires broker "
+ "version >= 0.11.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+ }
+
+ rkbuf = rd_kafka_buf_new_flexver_request(
+ rkb, RD_KAFKAP_InitProducerId, 1,
+ 2 + (transactional_id ? strlen(transactional_id) : 0) + 4 + 8 + 4,
+ ApiVersion >= 2 /*flexver*/);
+
+ /* transactional_id */
+ rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
+
+ /* transaction_timeout_ms */
+ rd_kafka_buf_write_i32(rkbuf, transaction_timeout_ms);
+
+ if (ApiVersion >= 3) {
+ /* Current PID */
+ rd_kafka_buf_write_i64(rkbuf,
+ current_pid ? current_pid->id : -1);
+ /* Current Epoch */
+ rd_kafka_buf_write_i16(rkbuf,
+ current_pid ? current_pid->epoch : -1);
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ /* Let the idempotence state handler perform retries */
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send AddPartitionsToTxnRequest to \p rkb.
+ *
+ * The response (unparsed) will be handled by \p resp_cb served
+ * by queue \p replyq.
+ *
+ * @param rktps MUST be sorted by topic name.
+ *
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code.
+ */
+rd_kafka_resp_err_t
+rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ rd_kafka_pid_t pid,
+ const rd_kafka_toppar_tqhead_t *rktps,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_topic_t *last_rkt = NULL;
+ size_t of_TopicCnt;
+ ssize_t of_PartCnt = -1;
+ int TopicCnt = 0, PartCnt = 0;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_AddPartitionsToTxn, 0, 0, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "AddPartitionsToTxnRequest (KIP-98) not supported "
+ "by broker, requires broker version >= 0.11.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf =
+ rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddPartitionsToTxn, 1, 500);
+
+ /* transactional_id */
+ rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
+
+ /* PID */
+ rd_kafka_buf_write_i64(rkbuf, pid.id);
+ rd_kafka_buf_write_i16(rkbuf, pid.epoch);
+
+ /* Topics/partitions array (count updated later) */
+ of_TopicCnt = rd_kafka_buf_write_i32(rkbuf, 0);
+
+ TAILQ_FOREACH(rktp, rktps, rktp_txnlink) {
+ if (last_rkt != rktp->rktp_rkt) {
+
+ if (last_rkt) {
+ /* Update last topic's partition count field */
+ rd_kafka_buf_update_i32(rkbuf, of_PartCnt,
+ PartCnt);
+ of_PartCnt = -1;
+ }
+
+ /* Topic name */
+ rd_kafka_buf_write_kstr(rkbuf,
+ rktp->rktp_rkt->rkt_topic);
+ /* Partition count, updated later */
+ of_PartCnt = rd_kafka_buf_write_i32(rkbuf, 0);
+
+ PartCnt = 0;
+ TopicCnt++;
+ last_rkt = rktp->rktp_rkt;
+ }
+
+ /* Partition id */
+ rd_kafka_buf_write_i32(rkbuf, rktp->rktp_partition);
+ PartCnt++;
+ }
+
+ /* Update last partition and topic count fields */
+ if (of_PartCnt != -1)
+ rd_kafka_buf_update_i32(rkbuf, (size_t)of_PartCnt, PartCnt);
+ rd_kafka_buf_update_i32(rkbuf, of_TopicCnt, TopicCnt);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ /* Let the handler perform retries so that it can pick
+ * up more added partitions. */
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_NO_RETRIES;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Construct and send AddOffsetsToTxnRequest to \p rkb.
+ *
+ * The response (unparsed) will be handled by \p resp_cb served
+ * by queue \p replyq.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code.
+ */
+rd_kafka_resp_err_t
+rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ rd_kafka_pid_t pid,
+ const char *group_id,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_AddOffsetsToTxn, 0, 0, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "AddOffsetsToTxnRequest (KIP-98) not supported "
+ "by broker, requires broker version >= 0.11.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf =
+ rd_kafka_buf_new_request(rkb, RD_KAFKAP_AddOffsetsToTxn, 1, 100);
+
+ /* transactional_id */
+ rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
+
+ /* PID */
+ rd_kafka_buf_write_i64(rkbuf, pid.id);
+ rd_kafka_buf_write_i16(rkbuf, pid.epoch);
+
+ /* Group Id */
+ rd_kafka_buf_write_str(rkbuf, group_id, -1);
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Construct and send EndTxnRequest to \p rkb.
+ *
+ * The response (unparsed) will be handled by \p resp_cb served
+ * by queue \p replyq.
+ *
+ * @returns RD_KAFKA_RESP_ERR_NO_ERROR if the request was enqueued for
+ * transmission, otherwise an error code.
+ */
+rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ rd_kafka_pid_t pid,
+ rd_bool_t committed,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque) {
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion = 0;
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(rkb, RD_KAFKAP_EndTxn,
+ 0, 1, NULL);
+ if (ApiVersion == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "EndTxnRequest (KIP-98) not supported "
+ "by broker, requires broker version >= 0.11.0");
+ rd_kafka_replyq_destroy(&replyq);
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_request(rkb, RD_KAFKAP_EndTxn, 1, 500);
+
+ /* transactional_id */
+ rd_kafka_buf_write_str(rkbuf, transactional_id, -1);
+
+ /* PID */
+ rd_kafka_buf_write_i64(rkbuf, pid.id);
+ rd_kafka_buf_write_i16(rkbuf, pid.epoch);
+
+ /* Committed */
+ rd_kafka_buf_write_bool(rkbuf, committed);
+ rkbuf->rkbuf_u.EndTxn.commit = committed;
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb, opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @name Unit tests
+ * @{
+ *
+ *
+ *
+ *
+ */
+
+/**
+ * @brief Create \p cnt messages, starting at \p msgid, and add them
+ * to \p rkmq.
+ *
+ * @returns the number of messages added.
+ */
+static int ut_create_msgs(rd_kafka_msgq_t *rkmq, uint64_t msgid, int cnt) {
+ int i;
+
+ for (i = 0; i < cnt; i++) {
+ rd_kafka_msg_t *rkm;
+
+ rkm = ut_rd_kafka_msg_new(0);
+ rkm->rkm_u.producer.msgid = msgid++;
+ rkm->rkm_ts_enq = rd_clock();
+ rkm->rkm_ts_timeout = rkm->rkm_ts_enq + (900 * 1000 * 1000);
+
+ rd_kafka_msgq_enq(rkmq, rkm);
+ }
+
+ return cnt;
+}
+
+/**
+ * @brief Idempotent Producer request/response unit tests
+ *
+ * The current test verifies proper handling of the following case:
+ * Batch 0 succeeds
+ * Batch 1 fails with temporary error
+ * Batch 2,3 fails with out of order sequence
+ * Retry Batch 1-3 should succeed.
+ */
+static int unittest_idempotent_producer(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_broker_t *rkb;
+#define _BATCH_CNT 4
+#define _MSGS_PER_BATCH 3
+ const int msgcnt = _BATCH_CNT * _MSGS_PER_BATCH;
+ int remaining_batches;
+ uint64_t msgid = 1;
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_pid_t pid = {.id = 1000, .epoch = 0};
+ struct rd_kafka_Produce_result result = {.offset = 1,
+ .timestamp = 1000};
+ rd_kafka_queue_t *rkqu;
+ rd_kafka_event_t *rkev;
+ rd_kafka_buf_t *request[_BATCH_CNT];
+ int rcnt = 0;
+ int retry_msg_cnt = 0;
+ int drcnt = 0;
+ rd_kafka_msgq_t rkmq = RD_KAFKA_MSGQ_INITIALIZER(rkmq);
+ const char *tmp;
+ int i, r;
+
+ RD_UT_SAY("Verifying idempotent producer error handling");
+
+ conf = rd_kafka_conf_new();
+ rd_kafka_conf_set(conf, "batch.num.messages", "3", NULL, 0);
+ rd_kafka_conf_set(conf, "retry.backoff.ms", "1", NULL, 0);
+ if ((tmp = rd_getenv("TEST_DEBUG", NULL)))
+ rd_kafka_conf_set(conf, "debug", tmp, NULL, 0);
+ if (rd_kafka_conf_set(conf, "enable.idempotence", "true", NULL, 0) !=
+ RD_KAFKA_CONF_OK)
+ RD_UT_FAIL("Failed to enable idempotence");
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, NULL, 0);
+ RD_UT_ASSERT(rk, "failed to create producer");
+
+ rkqu = rd_kafka_queue_get_main(rk);
+
+ /* We need a broker handle, use a logical broker to avoid
+ * any connection attempts. */
+ rkb = rd_kafka_broker_add_logical(rk, "unittest");
+
+ /* Have the broker support everything so msgset_writer selects
+ * the most up-to-date output features. */
+ rd_kafka_broker_lock(rkb);
+ rkb->rkb_features = RD_KAFKA_FEATURE_UNITTEST | RD_KAFKA_FEATURE_ALL;
+ rd_kafka_broker_unlock(rkb);
+
+ /* Get toppar */
+ rktp = rd_kafka_toppar_get2(rk, "uttopic", 0, rd_false, rd_true);
+ RD_UT_ASSERT(rktp, "failed to get toppar");
+
+ /* Set the topic as exists so messages are enqueued on
+ * the desired rktp away (otherwise UA partition) */
+ rd_ut_kafka_topic_set_topic_exists(rktp->rktp_rkt, 1, -1);
+
+ /* Produce messages */
+ ut_create_msgs(&rkmq, 1, msgcnt);
+
+ /* Set the pid */
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_WAIT_PID);
+ rd_kafka_idemp_pid_update(rkb, pid);
+ pid = rd_kafka_idemp_get_pid(rk);
+ RD_UT_ASSERT(rd_kafka_pid_valid(pid), "PID is invalid");
+ rd_kafka_toppar_pid_change(rktp, pid, msgid);
+
+ remaining_batches = _BATCH_CNT;
+
+ /* Create a ProduceRequest for each batch */
+ for (rcnt = 0; rcnt < remaining_batches; rcnt++) {
+ size_t msize;
+ request[rcnt] = rd_kafka_msgset_create_ProduceRequest(
+ rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize);
+ RD_UT_ASSERT(request[rcnt], "request #%d failed", rcnt);
+ }
+
+ RD_UT_ASSERT(rd_kafka_msgq_len(&rkmq) == 0,
+ "expected input message queue to be empty, "
+ "but still has %d message(s)",
+ rd_kafka_msgq_len(&rkmq));
+
+ /*
+ * Mock handling of each request
+ */
+
+ /* Batch 0: accepted */
+ i = 0;
+ r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
+ RD_UT_ASSERT(r == _MSGS_PER_BATCH, ".");
+ rd_kafka_msgbatch_handle_Produce_result(rkb, &request[i]->rkbuf_batch,
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ &result, request[i]);
+ result.offset += r;
+ RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == 0,
+ "batch %d: expected no messages in rktp_msgq, not %d", i,
+ rd_kafka_msgq_len(&rktp->rktp_msgq));
+ rd_kafka_buf_destroy(request[i]);
+ remaining_batches--;
+
+ /* Batch 1: fail, triggering retry (re-enq on rktp_msgq) */
+ i = 1;
+ r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
+ RD_UT_ASSERT(r == _MSGS_PER_BATCH, ".");
+ rd_kafka_msgbatch_handle_Produce_result(
+ rkb, &request[i]->rkbuf_batch,
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION, &result, request[i]);
+ retry_msg_cnt += r;
+ RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
+ "batch %d: expected %d messages in rktp_msgq, not %d", i,
+ retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
+ rd_kafka_buf_destroy(request[i]);
+
+ /* Batch 2: OUT_OF_ORDER, triggering retry .. */
+ i = 2;
+ r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
+ RD_UT_ASSERT(r == _MSGS_PER_BATCH, ".");
+ rd_kafka_msgbatch_handle_Produce_result(
+ rkb, &request[i]->rkbuf_batch,
+ RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result,
+ request[i]);
+ retry_msg_cnt += r;
+ RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
+ "batch %d: expected %d messages in rktp_xmit_msgq, not %d",
+ i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
+ rd_kafka_buf_destroy(request[i]);
+
+ /* Batch 3: OUT_OF_ORDER, triggering retry .. */
+ i = 3;
+ r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
+ rd_kafka_msgbatch_handle_Produce_result(
+ rkb, &request[i]->rkbuf_batch,
+ RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER, &result,
+ request[i]);
+ retry_msg_cnt += r;
+ RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
+ "batch %d: expected %d messages in rktp_xmit_msgq, not %d",
+ i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
+ rd_kafka_buf_destroy(request[i]);
+
+
+ /* Retried messages will have been moved to rktp_msgq,
+ * move them back to our local queue. */
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_msgq_move(&rkmq, &rktp->rktp_msgq);
+ rd_kafka_toppar_unlock(rktp);
+
+ RD_UT_ASSERT(rd_kafka_msgq_len(&rkmq) == retry_msg_cnt,
+ "Expected %d messages in retry queue, not %d",
+ retry_msg_cnt, rd_kafka_msgq_len(&rkmq));
+
+ /* Sleep a short while to make sure the retry backoff expires. */
+ rd_usleep(5 * 1000, NULL); /* 5ms */
+
+ /*
+ * Create requests for remaining batches.
+ */
+ for (rcnt = 0; rcnt < remaining_batches; rcnt++) {
+ size_t msize;
+ request[rcnt] = rd_kafka_msgset_create_ProduceRequest(
+ rkb, rktp, &rkmq, rd_kafka_idemp_get_pid(rk), 0, &msize);
+ RD_UT_ASSERT(request[rcnt],
+ "Failed to create retry #%d (%d msgs in queue)",
+ rcnt, rd_kafka_msgq_len(&rkmq));
+ }
+
+ /*
+ * Mock handling of each request, they will now succeed.
+ */
+ for (i = 0; i < rcnt; i++) {
+ r = rd_kafka_msgq_len(&request[i]->rkbuf_batch.msgq);
+ rd_kafka_msgbatch_handle_Produce_result(
+ rkb, &request[i]->rkbuf_batch, RD_KAFKA_RESP_ERR_NO_ERROR,
+ &result, request[i]);
+ result.offset += r;
+ rd_kafka_buf_destroy(request[i]);
+ }
+
+ retry_msg_cnt = 0;
+ RD_UT_ASSERT(rd_kafka_msgq_len(&rktp->rktp_msgq) == retry_msg_cnt,
+ "batch %d: expected %d messages in rktp_xmit_msgq, not %d",
+ i, retry_msg_cnt, rd_kafka_msgq_len(&rktp->rktp_msgq));
+
+ /*
+ * Wait for delivery reports, they should all be successful.
+ */
+ while ((rkev = rd_kafka_queue_poll(rkqu, 1000))) {
+ const rd_kafka_message_t *rkmessage;
+
+ RD_UT_SAY("Got %s event with %d message(s)",
+ rd_kafka_event_name(rkev),
+ (int)rd_kafka_event_message_count(rkev));
+
+ while ((rkmessage = rd_kafka_event_message_next(rkev))) {
+ RD_UT_SAY(" DR for message: %s: (persistence=%d)",
+ rd_kafka_err2str(rkmessage->err),
+ rd_kafka_message_status(rkmessage));
+ if (rkmessage->err)
+ RD_UT_WARN(" ^ Should not have failed");
+ else
+ drcnt++;
+ }
+ rd_kafka_event_destroy(rkev);
+ }
+
+ /* Should be no more messages in queues */
+ r = rd_kafka_outq_len(rk);
+ RD_UT_ASSERT(r == 0, "expected outq to return 0, not %d", r);
+
+ /* Verify the expected number of good delivery reports were seen */
+ RD_UT_ASSERT(drcnt == msgcnt, "expected %d DRs, not %d", msgcnt, drcnt);
+
+ rd_kafka_queue_destroy(rkqu);
+ rd_kafka_toppar_destroy(rktp);
+ rd_kafka_broker_destroy(rkb);
+ rd_kafka_destroy(rk);
+
+ RD_UT_PASS();
+ return 0;
+}
+
+/**
+ * @brief Request/response unit tests
+ */
+int unittest_request(void) {
+ int fails = 0;
+
+ fails += unittest_idempotent_producer();
+
+ return fails;
+}
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h
new file mode 100644
index 000000000..3eda6be61
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_request.h
@@ -0,0 +1,463 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_REQUEST_H_
+#define _RDKAFKA_REQUEST_H_
+
+#include "rdkafka_cgrp.h"
+#include "rdkafka_feature.h"
+
+
+#define RD_KAFKA_ERR_ACTION_PERMANENT 0x1 /* Permanent error */
+#define RD_KAFKA_ERR_ACTION_IGNORE 0x2 /* Error can be ignored */
+#define RD_KAFKA_ERR_ACTION_REFRESH 0x4 /* Refresh state (e.g., metadata) */
+#define RD_KAFKA_ERR_ACTION_RETRY 0x8 /* Retry request after backoff */
+#define RD_KAFKA_ERR_ACTION_INFORM 0x10 /* Inform application about err */
+#define RD_KAFKA_ERR_ACTION_SPECIAL \
+ 0x20 /* Special-purpose, depends on context */
+#define RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED 0x40 /* ProduceReq msg status */
+#define RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED \
+ 0x80 /* ProduceReq msg status */
+#define RD_KAFKA_ERR_ACTION_MSG_PERSISTED 0x100 /* ProduceReq msg status */
+#define RD_KAFKA_ERR_ACTION_FATAL 0x200 /**< Fatal error */
+#define RD_KAFKA_ERR_ACTION_END 0 /* var-arg sentinel */
+
+/** @macro bitmask of the message persistence flags */
+#define RD_KAFKA_ERR_ACTION_MSG_FLAGS \
+ (RD_KAFKA_ERR_ACTION_MSG_NOT_PERSISTED | \
+ RD_KAFKA_ERR_ACTION_MSG_POSSIBLY_PERSISTED | \
+ RD_KAFKA_ERR_ACTION_MSG_PERSISTED)
+
+int rd_kafka_err_action(rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ const rd_kafka_buf_t *request,
+ ...);
+
+
+const char *rd_kafka_actions2str(int actions);
+
+
+typedef enum {
+ /** Array end sentinel */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END = 0,
+ /** Read/write int32_t for partition */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ /** Read/write int64_t for offset */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
+ /** Read/write int32_t for offset leader_epoch */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH,
+ /** Read/write int32_t for current leader_epoch */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_CURRENT_EPOCH,
+ /** Read/write int16_t for error code */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
+ /** Read/write str for metadata */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA,
+ /** Noop, useful for ternary ifs */
+ RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP,
+} rd_kafka_topic_partition_field_t;
+
+rd_kafka_topic_partition_list_t *rd_kafka_buf_read_topic_partitions(
+ rd_kafka_buf_t *rkbuf,
+ size_t estimated_part_cnt,
+ const rd_kafka_topic_partition_field_t *fields);
+
+int rd_kafka_buf_write_topic_partitions(
+ rd_kafka_buf_t *rkbuf,
+ const rd_kafka_topic_partition_list_t *parts,
+ rd_bool_t skip_invalid_offsets,
+ rd_bool_t only_invalid_offsets,
+ const rd_kafka_topic_partition_field_t *fields);
+
+rd_kafka_resp_err_t
+rd_kafka_FindCoordinatorRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_coordtype_t coordtype,
+ const char *coordkey,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_handle_ListOffsets(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t *offsets,
+ int *actionsp);
+
+void rd_kafka_ListOffsetsRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_handle_OffsetForLeaderEpoch(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t **offsets);
+void rd_kafka_OffsetForLeaderEpochRequest(
+ rd_kafka_broker_t *rkb,
+ rd_kafka_topic_partition_list_t *parts,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+
+rd_kafka_resp_err_t
+rd_kafka_handle_OffsetFetch(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t **offsets,
+ rd_bool_t update_toppar,
+ rd_bool_t add_part,
+ rd_bool_t allow_retry);
+
+void rd_kafka_op_handle_OffsetFetch(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
+void rd_kafka_OffsetFetchRequest(rd_kafka_broker_t *rkb,
+ const char *group_id,
+ rd_kafka_topic_partition_list_t *parts,
+ rd_bool_t require_stable_offsets,
+ int timeout,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_handle_OffsetCommit(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ rd_kafka_topic_partition_list_t *offsets,
+ rd_bool_t ignore_cgrp);
+
+int rd_kafka_OffsetCommitRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_consumer_group_metadata_t *cgmetadata,
+ rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque,
+ const char *reason);
+
+rd_kafka_resp_err_t
+rd_kafka_OffsetDeleteRequest(rd_kafka_broker_t *rkb,
+ /** (rd_kafka_DeleteConsumerGroupOffsets_t*) */
+ const rd_list_t *del_grpoffsets,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+
+void rd_kafka_JoinGroupRequest(rd_kafka_broker_t *rkb,
+ const rd_kafkap_str_t *group_id,
+ const rd_kafkap_str_t *member_id,
+ const rd_kafkap_str_t *group_instance_id,
+ const rd_kafkap_str_t *protocol_type,
+ const rd_list_t *topics,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+
+void rd_kafka_LeaveGroupRequest(rd_kafka_broker_t *rkb,
+ const char *group_id,
+ const char *member_id,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+void rd_kafka_handle_LeaveGroup(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
+void rd_kafka_SyncGroupRequest(rd_kafka_broker_t *rkb,
+ const rd_kafkap_str_t *group_id,
+ int32_t generation_id,
+ const rd_kafkap_str_t *member_id,
+ const rd_kafkap_str_t *group_instance_id,
+ const rd_kafka_group_member_t *assignments,
+ int assignment_cnt,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+void rd_kafka_handle_SyncGroup(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
+rd_kafka_error_t *rd_kafka_ListGroupsRequest(rd_kafka_broker_t *rkb,
+ int16_t max_ApiVersion,
+ const char **states,
+ size_t states_cnt,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_error_t *rd_kafka_DescribeGroupsRequest(rd_kafka_broker_t *rkb,
+ int16_t max_ApiVersion,
+ char **groups,
+ size_t group_cnt,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+
+void rd_kafka_HeartbeatRequest(rd_kafka_broker_t *rkb,
+ const rd_kafkap_str_t *group_id,
+ int32_t generation_id,
+ const rd_kafkap_str_t *member_id,
+ const rd_kafkap_str_t *group_instance_id,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t rd_kafka_MetadataRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *topics,
+ const char *reason,
+ rd_bool_t allow_auto_create_topics,
+ rd_bool_t cgrp_update,
+ rd_kafka_op_t *rko);
+
+rd_kafka_resp_err_t
+rd_kafka_handle_ApiVersion(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ struct rd_kafka_ApiVersion **apis,
+ size_t *api_cnt);
+void rd_kafka_ApiVersionRequest(rd_kafka_broker_t *rkb,
+ int16_t ApiVersion,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+void rd_kafka_SaslHandshakeRequest(rd_kafka_broker_t *rkb,
+ const char *mechanism,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+void rd_kafka_handle_SaslAuthenticate(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque);
+void rd_kafka_SaslAuthenticateRequest(rd_kafka_broker_t *rkb,
+ const void *buf,
+ size_t size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+int rd_kafka_ProduceRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_toppar_t *rktp,
+ const rd_kafka_pid_t pid,
+ uint64_t epoch_base_msgid);
+
+rd_kafka_resp_err_t
+rd_kafka_CreateTopicsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *new_topics /*(NewTopic_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_DeleteTopicsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_topics /*(DeleteTopic_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t rd_kafka_CreatePartitionsRequest(
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *new_parts /*(NewPartitions_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_AlterConfigsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *configs /*(ConfigResource_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t rd_kafka_DescribeConfigsRequest(
+ rd_kafka_broker_t *rkb,
+ const rd_list_t *configs /*(ConfigResource_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_DeleteGroupsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_groups /*(DeleteGroup_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+void rd_kafka_handle_InitProducerId(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_InitProducerIdRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ int transaction_timeout_ms,
+ const rd_kafka_pid_t *current_pid,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_AddPartitionsToTxnRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ rd_kafka_pid_t pid,
+ const rd_kafka_toppar_tqhead_t *rktps,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+void rd_kafka_handle_InitProducerId(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_AddOffsetsToTxnRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ rd_kafka_pid_t pid,
+ const char *group_id,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t rd_kafka_EndTxnRequest(rd_kafka_broker_t *rkb,
+ const char *transactional_id,
+ rd_kafka_pid_t pid,
+ rd_bool_t committed,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+int unittest_request(void);
+
+
+rd_kafka_resp_err_t
+rd_kafka_DeleteRecordsRequest(rd_kafka_broker_t *rkb,
+ /*(rd_topic_partition_list_t*)*/
+ const rd_list_t *offsets_list,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_CreateAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *new_acls /*(AclBinding_t*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_DescribeAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *acls /*(AclBinding*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_DeleteAclsRequest(rd_kafka_broker_t *rkb,
+ const rd_list_t *del_acls /*(AclBindingFilter*)*/,
+ rd_kafka_AdminOptions_t *options,
+ char *errstr,
+ size_t errstr_size,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *opaque);
+
+
+#endif /* _RDKAFKA_REQUEST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c
new file mode 100644
index 000000000..6cb919364
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_roundrobin_assignor.c
@@ -0,0 +1,123 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_assignor.h"
+
+
+/**
+ * Source:
+ * https://github.com/apache/kafka/blob/trunk/clients/src/main/java/org/apache/kafka/clients/consumer/RoundRobinAssignor.java
+ *
+ * The roundrobin assignor lays out all the available partitions and all the
+ * available consumers. It then proceeds to do a roundrobin assignment from
+ * partition to consumer. If the subscriptions of all consumer instances are
+ * identical, then the partitions will be uniformly distributed. (i.e., the
+ * partition ownership counts will be within a delta of exactly one across all
+ * consumers.)
+ *
+ * For example, suppose there are two consumers C0 and C1, two topics t0 and
+ * t1, and each topic has 3 partitions, resulting in partitions t0p0, t0p1,
+ * t0p2, t1p0, t1p1, and t1p2.
+ *
+ * The assignment will be:
+ * C0: [t0p0, t0p2, t1p1]
+ * C1: [t0p1, t1p0, t1p2]
+ */
+
+rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_assign_cb(
+ rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas,
+ const char *member_id,
+ const rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ rd_kafka_assignor_topic_t **eligible_topics,
+ size_t eligible_topic_cnt,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque) {
+ unsigned int ti;
+ int next = -1; /* Next member id */
+
+ /* Sort topics by name */
+ qsort(eligible_topics, eligible_topic_cnt, sizeof(*eligible_topics),
+ rd_kafka_assignor_topic_cmp);
+
+ /* Sort members by name */
+ qsort(members, member_cnt, sizeof(*members), rd_kafka_group_member_cmp);
+
+ for (ti = 0; ti < eligible_topic_cnt; ti++) {
+ rd_kafka_assignor_topic_t *eligible_topic = eligible_topics[ti];
+ int partition;
+
+ /* For each topic+partition, assign one member (in a cyclic
+ * iteration) per partition until the partitions are exhausted*/
+ for (partition = 0;
+ partition < eligible_topic->metadata->partition_cnt;
+ partition++) {
+ rd_kafka_group_member_t *rkgm;
+
+ /* Scan through members until we find one with a
+ * subscription to this topic. */
+ do {
+ next = (next + 1) % member_cnt;
+ } while (!rd_kafka_group_member_find_subscription(
+ rk, &members[next],
+ eligible_topic->metadata->topic));
+
+ rkgm = &members[next];
+
+ rd_kafka_dbg(rk, CGRP, "ASSIGN",
+ "roundrobin: Member \"%s\": "
+ "assigned topic %s partition %d",
+ rkgm->rkgm_member_id->str,
+ eligible_topic->metadata->topic,
+ partition);
+
+ rd_kafka_topic_partition_list_add(
+ rkgm->rkgm_assignment,
+ eligible_topic->metadata->topic, partition);
+ }
+ }
+
+
+ return 0;
+}
+
+
+
+/**
+ * @brief Initialzie and add roundrobin assignor.
+ */
+rd_kafka_resp_err_t rd_kafka_roundrobin_assignor_init(rd_kafka_t *rk) {
+ return rd_kafka_assignor_add(
+ rk, "consumer", "roundrobin", RD_KAFKA_REBALANCE_PROTOCOL_EAGER,
+ rd_kafka_roundrobin_assignor_assign_cb,
+ rd_kafka_assignor_get_metadata_with_empty_userdata, NULL, NULL,
+ NULL, NULL);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c
new file mode 100644
index 000000000..cab67f241
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.c
@@ -0,0 +1,522 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_request.h"
+#include "rdkafka_sasl.h"
+#include "rdkafka_sasl_int.h"
+#include "rdkafka_request.h"
+#include "rdkafka_queue.h"
+
+/**
+ * @brief Send SASL auth data using legacy directly on socket framing.
+ *
+ * @warning This is a blocking call.
+ */
+static int rd_kafka_sasl_send_legacy(rd_kafka_transport_t *rktrans,
+ const void *payload,
+ int len,
+ char *errstr,
+ size_t errstr_size) {
+ rd_buf_t buf;
+ rd_slice_t slice;
+ int32_t hdr;
+
+ rd_buf_init(&buf, 1 + 1, sizeof(hdr));
+
+ hdr = htobe32(len);
+ rd_buf_write(&buf, &hdr, sizeof(hdr));
+ if (payload)
+ rd_buf_push(&buf, payload, len, NULL);
+
+ rd_slice_init_full(&slice, &buf);
+
+ /* Simulate blocking behaviour on non-blocking socket..
+ * FIXME: This isn't optimal but is highly unlikely to stall since
+ * the socket buffer will most likely not be exceeded. */
+ do {
+ int r;
+
+ r = (int)rd_kafka_transport_send(rktrans, &slice, errstr,
+ errstr_size);
+ if (r == -1) {
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
+ "SASL send failed: %s", errstr);
+ rd_buf_destroy(&buf);
+ return -1;
+ }
+
+ if (rd_slice_remains(&slice) == 0)
+ break;
+
+ /* Avoid busy-looping */
+ rd_usleep(10 * 1000, NULL);
+
+ } while (1);
+
+ rd_buf_destroy(&buf);
+
+ return 0;
+}
+
+/**
+ * @brief Send auth message with framing (either legacy or Kafka framing).
+ *
+ * @warning This is a blocking call when used with the legacy framing.
+ */
+int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans,
+ const void *payload,
+ int len,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+
+ rd_rkb_dbg(
+ rkb, SECURITY, "SASL", "Send SASL %s frame to broker (%d bytes)",
+ (rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ) ? "Kafka"
+ : "legacy",
+ len);
+
+ /* Blocking legacy framed send directly on the socket */
+ if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_AUTH_REQ))
+ return rd_kafka_sasl_send_legacy(rktrans, payload, len, errstr,
+ errstr_size);
+
+ /* Kafka-framed asynchronous send */
+ rd_kafka_SaslAuthenticateRequest(
+ rkb, payload, (size_t)len, RD_KAFKA_NO_REPLYQ,
+ rd_kafka_handle_SaslAuthenticate, NULL);
+
+ return 0;
+}
+
+
+/**
+ * @brief Authentication succesful
+ *
+ * Transition to next connect state.
+ */
+void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans) {
+ /* Authenticated */
+ rd_kafka_broker_connect_up(rktrans->rktrans_rkb);
+}
+
+
+/**
+ * @brief Handle SASL auth data from broker.
+ *
+ * @locality broker thread
+ *
+ * @returns -1 on error, else 0.
+ */
+int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans,
+ const void *buf,
+ size_t len,
+ char *errstr,
+ size_t errstr_size) {
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
+ "Received SASL frame from broker (%" PRIusz " bytes)", len);
+
+ return rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider->recv(
+ rktrans, buf, len, errstr, errstr_size);
+}
+
+/**
+ * @brief Non-kafka-protocol framed SASL auth data receive event.
+ *
+ * @locality broker thread
+ *
+ * @returns -1 on error, else 0.
+ */
+int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans,
+ int events,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_buf_t *rkbuf;
+ int r;
+ const void *buf;
+ size_t len;
+
+ if (!(events & POLLIN))
+ return 0;
+
+ r = rd_kafka_transport_framed_recv(rktrans, &rkbuf, errstr,
+ errstr_size);
+ if (r == -1) {
+ if (!strcmp(errstr, "Disconnected"))
+ rd_snprintf(errstr, errstr_size,
+ "Disconnected: check client %s credentials "
+ "and broker logs",
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl
+ .mechanisms);
+ return -1;
+ } else if (r == 0) /* not fully received yet */
+ return 0;
+
+ if (rkbuf) {
+ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
+ /* Seek past framing header */
+ rd_slice_seek(&rkbuf->rkbuf_reader, 4);
+ len = rd_slice_remains(&rkbuf->rkbuf_reader);
+ buf = rd_slice_ensure_contig(&rkbuf->rkbuf_reader, len);
+ } else {
+ buf = NULL;
+ len = 0;
+ }
+
+ r = rd_kafka_sasl_recv(rktrans, buf, len, errstr, errstr_size);
+
+ if (rkbuf)
+ rd_kafka_buf_destroy(rkbuf);
+
+ return r;
+}
+
+
+/**
+ * @brief Close SASL session (from transport code)
+ * @remark May be called on non-SASL transports (no-op)
+ */
+void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans) {
+ const struct rd_kafka_sasl_provider *provider =
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.provider;
+
+ if (provider && provider->close)
+ provider->close(rktrans);
+}
+
+
+
+/**
+ * Initialize and start SASL authentication.
+ *
+ * Returns 0 on successful init and -1 on error.
+ *
+ * Locality: broker thread
+ */
+int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans,
+ char *errstr,
+ size_t errstr_size) {
+ int r;
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ char *hostname, *t;
+ const struct rd_kafka_sasl_provider *provider =
+ rk->rk_conf.sasl.provider;
+
+ /* Verify broker support:
+ * - RD_KAFKA_FEATURE_SASL_GSSAPI - GSSAPI supported
+ * - RD_KAFKA_FEATURE_SASL_HANDSHAKE - GSSAPI, PLAIN and possibly
+ * other mechanisms supported. */
+ if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
+ if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_GSSAPI)) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL GSSAPI authentication not supported "
+ "by broker");
+ return -1;
+ }
+ } else if (!(rkb->rkb_features & RD_KAFKA_FEATURE_SASL_HANDSHAKE)) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL Handshake not supported by broker "
+ "(required by mechanism %s)%s",
+ rk->rk_conf.sasl.mechanisms,
+ rk->rk_conf.api_version_request
+ ? ""
+ : ": try api.version.request=true");
+ return -1;
+ }
+
+ rd_kafka_broker_lock(rktrans->rktrans_rkb);
+ rd_strdupa(&hostname, rktrans->rktrans_rkb->rkb_nodename);
+ rd_kafka_broker_unlock(rktrans->rktrans_rkb);
+
+ if ((t = strchr(hostname, ':')))
+ *t = '\0'; /* remove ":port" */
+
+ rd_rkb_dbg(rkb, SECURITY, "SASL",
+ "Initializing SASL client: service name %s, "
+ "hostname %s, mechanisms %s, provider %s",
+ rk->rk_conf.sasl.service_name, hostname,
+ rk->rk_conf.sasl.mechanisms, provider->name);
+
+ r = provider->client_new(rktrans, hostname, errstr, errstr_size);
+ if (r != -1)
+ rd_kafka_transport_poll_set(rktrans, POLLIN);
+
+ return r;
+}
+
+
+
+rd_kafka_queue_t *rd_kafka_queue_get_sasl(rd_kafka_t *rk) {
+ if (!rk->rk_sasl.callback_q)
+ return NULL;
+
+ return rd_kafka_queue_new0(rk, rk->rk_sasl.callback_q);
+}
+
+
+/**
+ * Per handle SASL term.
+ *
+ * Locality: broker thread
+ */
+void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb) {
+ const struct rd_kafka_sasl_provider *provider =
+ rkb->rkb_rk->rk_conf.sasl.provider;
+ if (provider->broker_term)
+ provider->broker_term(rkb);
+}
+
+/**
+ * Broker SASL init.
+ *
+ * Locality: broker thread
+ */
+void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb) {
+ const struct rd_kafka_sasl_provider *provider =
+ rkb->rkb_rk->rk_conf.sasl.provider;
+ if (provider->broker_init)
+ provider->broker_init(rkb);
+}
+
+
+/**
+ * @brief Per-instance initializer using the selected provider
+ *
+ * @returns 0 on success or -1 on error.
+ *
+ * @locality app thread (from rd_kafka_new())
+ */
+int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
+ const struct rd_kafka_sasl_provider *provider =
+ rk->rk_conf.sasl.provider;
+
+ if (provider && provider->init)
+ return provider->init(rk, errstr, errstr_size);
+
+ return 0;
+}
+
+
+/**
+ * @brief Per-instance destructor for the selected provider
+ *
+ * @locality app thread (from rd_kafka_new()) or rdkafka main thread
+ */
+void rd_kafka_sasl_term(rd_kafka_t *rk) {
+ const struct rd_kafka_sasl_provider *provider =
+ rk->rk_conf.sasl.provider;
+
+ if (provider && provider->term)
+ provider->term(rk);
+
+ RD_IF_FREE(rk->rk_sasl.callback_q, rd_kafka_q_destroy_owner);
+}
+
+
+/**
+ * @returns rd_true if provider is ready to be used or SASL not configured,
+ * else rd_false.
+ *
+ * @locks none
+ * @locality any thread
+ */
+rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk) {
+ const struct rd_kafka_sasl_provider *provider =
+ rk->rk_conf.sasl.provider;
+
+ if (provider && provider->ready)
+ return provider->ready(rk);
+
+ return rd_true;
+}
+
+
+/**
+ * @brief Select SASL provider for configured mechanism (singularis)
+ * @returns 0 on success or -1 on failure.
+ */
+int rd_kafka_sasl_select_provider(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+ const struct rd_kafka_sasl_provider *provider = NULL;
+
+ if (!strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
+ /* GSSAPI / Kerberos */
+#ifdef _WIN32
+ provider = &rd_kafka_sasl_win32_provider;
+#elif WITH_SASL_CYRUS
+ provider = &rd_kafka_sasl_cyrus_provider;
+#endif
+
+ } else if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
+ /* SASL PLAIN */
+ provider = &rd_kafka_sasl_plain_provider;
+
+ } else if (!strncmp(rk->rk_conf.sasl.mechanisms, "SCRAM-SHA-",
+ strlen("SCRAM-SHA-"))) {
+ /* SASL SCRAM */
+#if WITH_SASL_SCRAM
+ provider = &rd_kafka_sasl_scram_provider;
+#endif
+
+ } else if (!strcmp(rk->rk_conf.sasl.mechanisms, "OAUTHBEARER")) {
+ /* SASL OAUTHBEARER */
+#if WITH_SASL_OAUTHBEARER
+ provider = &rd_kafka_sasl_oauthbearer_provider;
+#endif
+ } else {
+ /* Unsupported mechanism */
+ rd_snprintf(errstr, errstr_size,
+ "Unsupported SASL mechanism: %s",
+ rk->rk_conf.sasl.mechanisms);
+ return -1;
+ }
+
+ if (!provider) {
+ rd_snprintf(errstr, errstr_size,
+ "No provider for SASL mechanism %s"
+ ": recompile librdkafka with "
+#ifndef _WIN32
+ "libsasl2 or "
+#endif
+ "openssl support. "
+ "Current build options:"
+ " PLAIN"
+#ifdef _WIN32
+ " WindowsSSPI(GSSAPI)"
+#endif
+#if WITH_SASL_CYRUS
+ " SASL_CYRUS"
+#endif
+#if WITH_SASL_SCRAM
+ " SASL_SCRAM"
+#endif
+#if WITH_SASL_OAUTHBEARER
+ " OAUTHBEARER"
+#endif
+ ,
+ rk->rk_conf.sasl.mechanisms);
+ return -1;
+ }
+
+ rd_kafka_dbg(rk, SECURITY, "SASL",
+ "Selected provider %s for SASL mechanism %s",
+ provider->name, rk->rk_conf.sasl.mechanisms);
+
+ /* Validate SASL config */
+ if (provider->conf_validate &&
+ provider->conf_validate(rk, errstr, errstr_size) == -1)
+ return -1;
+
+ rk->rk_conf.sasl.provider = provider;
+
+ return 0;
+}
+
+
+rd_kafka_error_t *rd_kafka_sasl_background_callbacks_enable(rd_kafka_t *rk) {
+ rd_kafka_queue_t *saslq, *bgq;
+
+ if (!(saslq = rd_kafka_queue_get_sasl(rk)))
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
+ "No SASL mechanism using callbacks is configured");
+
+ if (!(bgq = rd_kafka_queue_get_background(rk))) {
+ rd_kafka_queue_destroy(saslq);
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE,
+ "The background thread is not available");
+ }
+
+ rd_kafka_queue_forward(saslq, bgq);
+
+ rd_kafka_queue_destroy(saslq);
+ rd_kafka_queue_destroy(bgq);
+
+ return NULL;
+}
+
+
+/**
+ * Global SASL termination.
+ */
+void rd_kafka_sasl_global_term(void) {
+#if WITH_SASL_CYRUS
+ rd_kafka_sasl_cyrus_global_term();
+#endif
+}
+
+
+/**
+ * Global SASL init, called once per runtime.
+ */
+int rd_kafka_sasl_global_init(void) {
+#if WITH_SASL_CYRUS
+ return rd_kafka_sasl_cyrus_global_init();
+#else
+ return 0;
+#endif
+}
+
+/**
+ * Sets or resets the SASL (PLAIN or SCRAM) credentials used by this
+ * client when making new connections to brokers.
+ *
+ * @returns NULL on success or an error object on error.
+ */
+rd_kafka_error_t *rd_kafka_sasl_set_credentials(rd_kafka_t *rk,
+ const char *username,
+ const char *password) {
+
+ if (!username || !password)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Username and password are required");
+
+ mtx_lock(&rk->rk_conf.sasl.lock);
+
+ if (rk->rk_conf.sasl.username)
+ rd_free(rk->rk_conf.sasl.username);
+ rk->rk_conf.sasl.username = rd_strdup(username);
+
+ if (rk->rk_conf.sasl.password)
+ rd_free(rk->rk_conf.sasl.password);
+ rk->rk_conf.sasl.password = rd_strdup(password);
+
+ mtx_unlock(&rk->rk_conf.sasl.lock);
+
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "SASL credentials updated");
+
+ return NULL;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h
new file mode 100644
index 000000000..d0dd01b8b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl.h
@@ -0,0 +1,63 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_SASL_H_
+#define _RDKAFKA_SASL_H_
+
+
+
+int rd_kafka_sasl_recv(rd_kafka_transport_t *rktrans,
+ const void *buf,
+ size_t len,
+ char *errstr,
+ size_t errstr_size);
+int rd_kafka_sasl_io_event(rd_kafka_transport_t *rktrans,
+ int events,
+ char *errstr,
+ size_t errstr_size);
+void rd_kafka_sasl_close(rd_kafka_transport_t *rktrans);
+int rd_kafka_sasl_client_new(rd_kafka_transport_t *rktrans,
+ char *errstr,
+ size_t errstr_size);
+
+void rd_kafka_sasl_broker_term(rd_kafka_broker_t *rkb);
+void rd_kafka_sasl_broker_init(rd_kafka_broker_t *rkb);
+
+int rd_kafka_sasl_init(rd_kafka_t *rk, char *errstr, size_t errstr_size);
+void rd_kafka_sasl_term(rd_kafka_t *rk);
+
+rd_bool_t rd_kafka_sasl_ready(rd_kafka_t *rk);
+
+void rd_kafka_sasl_global_term(void);
+int rd_kafka_sasl_global_init(void);
+
+int rd_kafka_sasl_select_provider(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size);
+
+#endif /* _RDKAFKA_SASL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c
new file mode 100644
index 000000000..41452a336
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_cyrus.c
@@ -0,0 +1,720 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_sasl.h"
+#include "rdkafka_sasl_int.h"
+#include "rdstring.h"
+
+#if defined(__FreeBSD__) || defined(__OpenBSD__)
+#include <sys/wait.h> /* For WIF.. */
+#endif
+
+#ifdef __APPLE__
+/* Apple has deprecated most of the SASL API for unknown reason,
+ * silence those warnings. */
+#pragma clang diagnostic ignored "-Wdeprecated-declarations"
+#endif
+
+#include <sasl/sasl.h>
+
+/**
+ * @brief Process-global lock to avoid simultaneous invocation of
+ * kinit.cmd when refreshing the tickets, which could lead to
+ * kinit cache corruption.
+ */
+static mtx_t rd_kafka_sasl_cyrus_kinit_lock;
+
+/**
+ * @struct Per-client-instance handle
+ */
+typedef struct rd_kafka_sasl_cyrus_handle_s {
+ rd_kafka_timer_t kinit_refresh_tmr;
+ rd_atomic32_t ready; /**< First kinit command has finished, or there
+ * is no kinit command. */
+} rd_kafka_sasl_cyrus_handle_t;
+
+/**
+ * @struct Per-connection state
+ */
+typedef struct rd_kafka_sasl_cyrus_state_s {
+ sasl_conn_t *conn;
+ sasl_callback_t callbacks[16];
+} rd_kafka_sasl_cyrus_state_t;
+
+
+
+/**
+ * Handle received frame from broker.
+ */
+static int rd_kafka_sasl_cyrus_recv(struct rd_kafka_transport_s *rktrans,
+ const void *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state;
+ int r;
+ int sendcnt = 0;
+
+ if (rktrans->rktrans_sasl.complete && size == 0)
+ goto auth_successful;
+
+ do {
+ sasl_interact_t *interact = NULL;
+ const char *out;
+ unsigned int outlen;
+
+ mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+ r = sasl_client_step(state->conn, size > 0 ? buf : NULL, size,
+ &interact, &out, &outlen);
+ mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+
+ if (r >= 0) {
+ /* Note: outlen may be 0 here for an empty response */
+ if (rd_kafka_sasl_send(rktrans, out, outlen, errstr,
+ errstr_size) == -1)
+ return -1;
+ sendcnt++;
+ }
+
+ if (r == SASL_INTERACT)
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
+ "SASL_INTERACT: %lu %s, %s, %s, %p",
+ interact->id, interact->challenge,
+ interact->prompt, interact->defresult,
+ interact->result);
+
+ } while (r == SASL_INTERACT);
+
+ if (r == SASL_CONTINUE)
+ return 0; /* Wait for more data from broker */
+ else if (r != SASL_OK) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL handshake failed (step): %s",
+ sasl_errdetail(state->conn));
+ return -1;
+ }
+
+ if (!rktrans->rktrans_sasl.complete && sendcnt > 0) {
+ /* With SaslAuthenticateRequest Kafka protocol framing
+ * we'll get a Response back after authentication is done,
+ * which should not be processed by Cyrus, but we still
+ * need to wait for the response to propgate its error,
+ * if any, before authentication is considered done.
+ *
+ * The legacy framing does not have a final broker->client
+ * response. */
+ rktrans->rktrans_sasl.complete = 1;
+
+ if (rktrans->rktrans_rkb->rkb_features &
+ RD_KAFKA_FEATURE_SASL_AUTH_REQ) {
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
+ "%s authentication complete but awaiting "
+ "final response from broker",
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl
+ .mechanisms);
+ return 0;
+ }
+ }
+
+ /* Authentication successful */
+auth_successful:
+ if (rktrans->rktrans_rkb->rkb_rk->rk_conf.debug &
+ RD_KAFKA_DBG_SECURITY) {
+ const char *user, *mech, *authsrc;
+
+ mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+ if (sasl_getprop(state->conn, SASL_USERNAME,
+ (const void **)&user) != SASL_OK)
+ user = "(unknown)";
+ mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+
+ if (sasl_getprop(state->conn, SASL_MECHNAME,
+ (const void **)&mech) != SASL_OK)
+ mech = "(unknown)";
+
+ if (sasl_getprop(state->conn, SASL_AUTHSOURCE,
+ (const void **)&authsrc) != SASL_OK)
+ authsrc = "(unknown)";
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
+ "Authenticated as %s using %s (%s)", user, mech,
+ authsrc);
+ }
+
+ rd_kafka_sasl_auth_done(rktrans);
+
+ return 0;
+}
+
+
+
+static ssize_t
+render_callback(const char *key, char *buf, size_t size, void *opaque) {
+ rd_kafka_t *rk = opaque;
+ rd_kafka_conf_res_t res;
+ size_t destsize = size;
+
+ /* Try config lookup. */
+ res = rd_kafka_conf_get(&rk->rk_conf, key, buf, &destsize);
+ if (res != RD_KAFKA_CONF_OK)
+ return -1;
+
+ /* Dont include \0 in returned size */
+ return (destsize > 0 ? destsize - 1 : destsize);
+}
+
+
+/**
+ * @brief Execute kinit to refresh ticket.
+ *
+ * @returns 0 on success, -1 on error.
+ *
+ * @locality rdkafka main thread
+ */
+static int rd_kafka_sasl_cyrus_kinit_refresh(rd_kafka_t *rk) {
+ rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle;
+ int r;
+ char *cmd;
+ char errstr[128];
+ rd_ts_t ts_start;
+ int duration;
+
+ /* Build kinit refresh command line using string rendering and config */
+ cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, errstr,
+ sizeof(errstr), render_callback, rk);
+ if (!cmd) {
+ rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
+ "Failed to construct kinit command "
+ "from sasl.kerberos.kinit.cmd template: %s",
+ errstr);
+ return -1;
+ }
+
+ /* Execute kinit */
+ rd_kafka_dbg(rk, SECURITY, "SASLREFRESH",
+ "Refreshing Kerberos ticket with command: %s", cmd);
+
+ ts_start = rd_clock();
+
+ /* Prevent multiple simultaneous refreshes by the same process to
+ * avoid Kerberos credential cache corruption. */
+ mtx_lock(&rd_kafka_sasl_cyrus_kinit_lock);
+ r = system(cmd);
+ mtx_unlock(&rd_kafka_sasl_cyrus_kinit_lock);
+
+ duration = (int)((rd_clock() - ts_start) / 1000);
+ if (duration > 5000)
+ rd_kafka_log(rk, LOG_WARNING, "SASLREFRESH",
+ "Slow Kerberos ticket refresh: %dms: %s", duration,
+ cmd);
+
+ /* Regardless of outcome from the kinit command (it can fail
+ * even if the ticket is available), we now allow broker connections. */
+ if (rd_atomic32_add(&handle->ready, 1) == 1) {
+ rd_kafka_dbg(rk, SECURITY, "SASLREFRESH",
+ "First kinit command finished: waking up "
+ "broker threads");
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "Kerberos ticket refresh");
+ }
+
+ if (r == -1) {
+ if (errno == ECHILD) {
+ rd_kafka_log(rk, LOG_WARNING, "SASLREFRESH",
+ "Kerberos ticket refresh command "
+ "returned ECHILD: %s: exit status "
+ "unknown, assuming success",
+ cmd);
+ } else {
+ rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
+ "Kerberos ticket refresh failed: %s: %s",
+ cmd, rd_strerror(errno));
+ rd_free(cmd);
+ return -1;
+ }
+ } else if (WIFSIGNALED(r)) {
+ rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
+ "Kerberos ticket refresh failed: %s: "
+ "received signal %d",
+ cmd, WTERMSIG(r));
+ rd_free(cmd);
+ return -1;
+ } else if (WIFEXITED(r) && WEXITSTATUS(r) != 0) {
+ rd_kafka_log(rk, LOG_ERR, "SASLREFRESH",
+ "Kerberos ticket refresh failed: %s: "
+ "exited with code %d",
+ cmd, WEXITSTATUS(r));
+ rd_free(cmd);
+ return -1;
+ }
+
+ rd_free(cmd);
+
+ rd_kafka_dbg(rk, SECURITY, "SASLREFRESH",
+ "Kerberos ticket refreshed in %dms", duration);
+ return 0;
+}
+
+
+/**
+ * @brief Refresh timer callback
+ *
+ * @locality rdkafka main thread
+ */
+static void rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_t *rk = arg;
+
+ rd_kafka_sasl_cyrus_kinit_refresh(rk);
+}
+
+
+
+/**
+ *
+ * libsasl callbacks
+ *
+ */
+static RD_UNUSED int rd_kafka_sasl_cyrus_cb_getopt(void *context,
+ const char *plugin_name,
+ const char *option,
+ const char **result,
+ unsigned *len) {
+ rd_kafka_transport_t *rktrans = context;
+
+ if (!strcmp(option, "client_mech_list"))
+ *result = "GSSAPI";
+ if (!strcmp(option, "canon_user_plugin"))
+ *result = "INTERNAL";
+
+ if (*result && len)
+ *len = strlen(*result);
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
+ "CB_GETOPT: plugin %s, option %s: returning %s", plugin_name,
+ option, *result);
+
+ return SASL_OK;
+}
+
+static int
+rd_kafka_sasl_cyrus_cb_log(void *context, int level, const char *message) {
+ rd_kafka_transport_t *rktrans = context;
+
+ /* Provide a more helpful error message in case Kerberos
+ * plugins are missing. */
+ if (strstr(message, "No worthy mechs found") &&
+ strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms,
+ "GSSAPI"))
+ message =
+ "Cyrus/libsasl2 is missing a GSSAPI module: "
+ "make sure the libsasl2-modules-gssapi-mit or "
+ "cyrus-sasl-gssapi packages are installed";
+
+ /* Treat the "client step" log messages as debug. */
+ if (level >= LOG_DEBUG || !strncmp(message, "GSSAPI client step ", 19))
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL", "%s",
+ message);
+ else
+ rd_rkb_log(rktrans->rktrans_rkb, level, "LIBSASL", "%s",
+ message);
+
+ return SASL_OK;
+}
+
+
+static int rd_kafka_sasl_cyrus_cb_getsimple(void *context,
+ int id,
+ const char **result,
+ unsigned *len) {
+ rd_kafka_transport_t *rktrans = context;
+
+ switch (id) {
+ case SASL_CB_USER:
+ case SASL_CB_AUTHNAME:
+ /* Since cyrus expects the returned pointer to be stable
+ * and not have its content changed, but the username
+ * and password may be updated at anytime by the application
+ * calling sasl_set_credentials(), we need to lock
+ * rk_conf.sasl.lock before each call into cyrus-sasl.
+ * So when we get here the lock is already held. */
+ *result = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.username;
+ break;
+
+ default:
+ *result = NULL;
+ break;
+ }
+
+ if (len)
+ *len = *result ? strlen(*result) : 0;
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
+ "CB_GETSIMPLE: id 0x%x: returning %s", id, *result);
+
+ return *result ? SASL_OK : SASL_FAIL;
+}
+
+
+static int rd_kafka_sasl_cyrus_cb_getsecret(sasl_conn_t *conn,
+ void *context,
+ int id,
+ sasl_secret_t **psecret) {
+ rd_kafka_transport_t *rktrans = context;
+ const char *password;
+
+ /* rk_conf.sasl.lock is already locked */
+ password = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.password;
+
+ if (!password) {
+ *psecret = NULL;
+ } else {
+ size_t passlen = strlen(password);
+ *psecret = rd_realloc(*psecret, sizeof(**psecret) + passlen);
+ (*psecret)->len = passlen;
+ memcpy((*psecret)->data, password, passlen);
+ }
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
+ "CB_GETSECRET: id 0x%x: returning %s", id,
+ *psecret ? "(hidden)" : "NULL");
+
+ return SASL_OK;
+}
+
+static int rd_kafka_sasl_cyrus_cb_chalprompt(void *context,
+ int id,
+ const char *challenge,
+ const char *prompt,
+ const char *defres,
+ const char **result,
+ unsigned *len) {
+ rd_kafka_transport_t *rktrans = context;
+
+ *result = "min_chalprompt";
+ *len = strlen(*result);
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
+ "CB_CHALPROMPT: id 0x%x, challenge %s, prompt %s, "
+ "default %s: returning %s",
+ id, challenge, prompt, defres, *result);
+
+ return SASL_OK;
+}
+
+static int rd_kafka_sasl_cyrus_cb_getrealm(void *context,
+ int id,
+ const char **availrealms,
+ const char **result) {
+ rd_kafka_transport_t *rktrans = context;
+
+ *result = *availrealms;
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "LIBSASL",
+ "CB_GETREALM: id 0x%x: returning %s", id, *result);
+
+ return SASL_OK;
+}
+
+
+static RD_UNUSED int rd_kafka_sasl_cyrus_cb_canon(sasl_conn_t *conn,
+ void *context,
+ const char *in,
+ unsigned inlen,
+ unsigned flags,
+ const char *user_realm,
+ char *out,
+ unsigned out_max,
+ unsigned *out_len) {
+ rd_kafka_transport_t *rktrans = context;
+
+ if (strstr(rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms,
+ "GSSAPI")) {
+ *out_len = rd_snprintf(
+ out, out_max, "%s",
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.principal);
+ } else if (!strcmp(
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.mechanisms,
+ "PLAIN")) {
+ *out_len = rd_snprintf(out, out_max, "%.*s", inlen, in);
+ } else
+ out = NULL;
+
+ rd_rkb_dbg(
+ rktrans->rktrans_rkb, SECURITY, "LIBSASL",
+ "CB_CANON: flags 0x%x, \"%.*s\" @ \"%s\": returning \"%.*s\"",
+ flags, (int)inlen, in, user_realm, (int)(*out_len), out);
+
+ return out ? SASL_OK : SASL_FAIL;
+}
+
+
+static void rd_kafka_sasl_cyrus_close(struct rd_kafka_transport_s *rktrans) {
+ rd_kafka_sasl_cyrus_state_t *state = rktrans->rktrans_sasl.state;
+
+ if (!state)
+ return;
+
+ if (state->conn) {
+ mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+ sasl_dispose(&state->conn);
+ mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+ }
+ rd_free(state);
+}
+
+
+/**
+ * Initialize and start SASL authentication.
+ *
+ * Returns 0 on successful init and -1 on error.
+ *
+ * Locality: broker thread
+ */
+static int rd_kafka_sasl_cyrus_client_new(rd_kafka_transport_t *rktrans,
+ const char *hostname,
+ char *errstr,
+ size_t errstr_size) {
+ int r;
+ rd_kafka_sasl_cyrus_state_t *state;
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ sasl_callback_t callbacks[16] = {
+ // { SASL_CB_GETOPT, (void *)rd_kafka_sasl_cyrus_cb_getopt, rktrans
+ // },
+ {SASL_CB_LOG, (void *)rd_kafka_sasl_cyrus_cb_log, rktrans},
+ {SASL_CB_AUTHNAME, (void *)rd_kafka_sasl_cyrus_cb_getsimple,
+ rktrans},
+ {SASL_CB_PASS, (void *)rd_kafka_sasl_cyrus_cb_getsecret, rktrans},
+ {SASL_CB_ECHOPROMPT, (void *)rd_kafka_sasl_cyrus_cb_chalprompt,
+ rktrans},
+ {SASL_CB_GETREALM, (void *)rd_kafka_sasl_cyrus_cb_getrealm,
+ rktrans},
+ {SASL_CB_CANON_USER, (void *)rd_kafka_sasl_cyrus_cb_canon, rktrans},
+ {SASL_CB_LIST_END}};
+
+ state = rd_calloc(1, sizeof(*state));
+ rktrans->rktrans_sasl.state = state;
+
+ /* SASL_CB_USER is needed for PLAIN but breaks GSSAPI */
+ if (!strcmp(rk->rk_conf.sasl.mechanisms, "PLAIN")) {
+ int endidx;
+ /* Find end of callbacks array */
+ for (endidx = 0; callbacks[endidx].id != SASL_CB_LIST_END;
+ endidx++)
+ ;
+
+ callbacks[endidx].id = SASL_CB_USER;
+ callbacks[endidx].proc =
+ (void *)rd_kafka_sasl_cyrus_cb_getsimple;
+ callbacks[endidx].context = rktrans;
+ endidx++;
+ callbacks[endidx].id = SASL_CB_LIST_END;
+ }
+
+ memcpy(state->callbacks, callbacks, sizeof(callbacks));
+
+ mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+ r = sasl_client_new(rk->rk_conf.sasl.service_name, hostname, NULL,
+ NULL, /* no local & remote IP checks */
+ state->callbacks, 0, &state->conn);
+ mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+ if (r != SASL_OK) {
+ rd_snprintf(errstr, errstr_size, "%s",
+ sasl_errstring(r, NULL, NULL));
+ return -1;
+ }
+
+ if (rk->rk_conf.debug & RD_KAFKA_DBG_SECURITY) {
+ const char *avail_mechs;
+ sasl_listmech(state->conn, NULL, NULL, " ", NULL, &avail_mechs,
+ NULL, NULL);
+ rd_rkb_dbg(rkb, SECURITY, "SASL",
+ "My supported SASL mechanisms: %s", avail_mechs);
+ }
+
+ do {
+ const char *out;
+ unsigned int outlen;
+ const char *mech = NULL;
+
+ mtx_lock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+ r = sasl_client_start(state->conn, rk->rk_conf.sasl.mechanisms,
+ NULL, &out, &outlen, &mech);
+ mtx_unlock(&rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.lock);
+
+ if (r >= 0)
+ if (rd_kafka_sasl_send(rktrans, out, outlen, errstr,
+ errstr_size))
+ return -1;
+ } while (r == SASL_INTERACT);
+
+ if (r == SASL_OK) {
+ /* PLAIN is appearantly done here, but we still need to make
+ * sure the PLAIN frame is sent and we get a response back (but
+ * we must not pass the response to libsasl or it will fail). */
+ rktrans->rktrans_sasl.complete = 1;
+ return 0;
+
+ } else if (r != SASL_CONTINUE) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL handshake failed (start (%d)): %s", r,
+ sasl_errdetail(state->conn));
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief SASL/GSSAPI is ready when at least one kinit command has been
+ * executed (regardless of exit status).
+ */
+static rd_bool_t rd_kafka_sasl_cyrus_ready(rd_kafka_t *rk) {
+ rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle;
+ if (!rk->rk_conf.sasl.relogin_min_time)
+ return rd_true;
+ if (!handle)
+ return rd_false;
+
+ return rd_atomic32_get(&handle->ready) > 0;
+}
+
+/**
+ * @brief Per-client-instance initializer
+ */
+static int
+rd_kafka_sasl_cyrus_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
+ rd_kafka_sasl_cyrus_handle_t *handle;
+
+ if (!rk->rk_conf.sasl.relogin_min_time || !rk->rk_conf.sasl.kinit_cmd ||
+ strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
+ return 0; /* kinit not configured, no need to start timer */
+
+ handle = rd_calloc(1, sizeof(*handle));
+ rk->rk_sasl.handle = handle;
+
+ rd_kafka_timer_start(&rk->rk_timers, &handle->kinit_refresh_tmr,
+ rk->rk_conf.sasl.relogin_min_time * 1000ll,
+ rd_kafka_sasl_cyrus_kinit_refresh_tmr_cb, rk);
+
+ /* Kick off the timer immediately to refresh the ticket.
+ * (Timer is triggered from the main loop). */
+ rd_kafka_timer_override_once(&rk->rk_timers, &handle->kinit_refresh_tmr,
+ 0 /*immediately*/);
+
+ return 0;
+}
+
+
+/**
+ * @brief Per-client-instance destructor
+ */
+static void rd_kafka_sasl_cyrus_term(rd_kafka_t *rk) {
+ rd_kafka_sasl_cyrus_handle_t *handle = rk->rk_sasl.handle;
+
+ if (!handle)
+ return;
+
+ rd_kafka_timer_stop(&rk->rk_timers, &handle->kinit_refresh_tmr, 1);
+ rd_free(handle);
+ rk->rk_sasl.handle = NULL;
+}
+
+
+static int rd_kafka_sasl_cyrus_conf_validate(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+
+ if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI"))
+ return 0;
+
+ if (rk->rk_conf.sasl.relogin_min_time && rk->rk_conf.sasl.kinit_cmd) {
+ char *cmd;
+ char tmperr[128];
+
+ cmd = rd_string_render(rk->rk_conf.sasl.kinit_cmd, tmperr,
+ sizeof(tmperr), render_callback, rk);
+
+ if (!cmd) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.kerberos.kinit.cmd value: %s",
+ tmperr);
+ return -1;
+ }
+
+ rd_free(cmd);
+ }
+
+ return 0;
+}
+
+
+/**
+ * Global SASL termination.
+ */
+void rd_kafka_sasl_cyrus_global_term(void) {
+ /* NOTE: Should not be called since the application may be using SASL
+ * too*/
+ /* sasl_done(); */
+ mtx_destroy(&rd_kafka_sasl_cyrus_kinit_lock);
+}
+
+
+/**
+ * Global SASL init, called once per runtime.
+ */
+int rd_kafka_sasl_cyrus_global_init(void) {
+ int r;
+
+ mtx_init(&rd_kafka_sasl_cyrus_kinit_lock, mtx_plain);
+
+ r = sasl_client_init(NULL);
+ if (r != SASL_OK) {
+ fprintf(stderr, "librdkafka: sasl_client_init() failed: %s\n",
+ sasl_errstring(r, NULL, NULL));
+ return -1;
+ }
+
+ return 0;
+}
+
+
+const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider = {
+ .name = "Cyrus",
+ .init = rd_kafka_sasl_cyrus_init,
+ .term = rd_kafka_sasl_cyrus_term,
+ .client_new = rd_kafka_sasl_cyrus_client_new,
+ .recv = rd_kafka_sasl_cyrus_recv,
+ .close = rd_kafka_sasl_cyrus_close,
+ .ready = rd_kafka_sasl_cyrus_ready,
+ .conf_validate = rd_kafka_sasl_cyrus_conf_validate};
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h
new file mode 100644
index 000000000..33e3bdd05
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_int.h
@@ -0,0 +1,89 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_SASL_INT_H_
+#define _RDKAFKA_SASL_INT_H_
+
+struct rd_kafka_sasl_provider {
+ const char *name;
+
+ /** Per client-instance (rk) initializer */
+ int (*init)(rd_kafka_t *rk, char *errstr, size_t errstr_size);
+
+ /** Per client-instance (rk) destructor */
+ void (*term)(rd_kafka_t *rk);
+
+ /** Returns rd_true if provider is ready to be used, else rd_false */
+ rd_bool_t (*ready)(rd_kafka_t *rk);
+
+ int (*client_new)(rd_kafka_transport_t *rktrans,
+ const char *hostname,
+ char *errstr,
+ size_t errstr_size);
+
+ int (*recv)(struct rd_kafka_transport_s *s,
+ const void *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size);
+ void (*close)(struct rd_kafka_transport_s *);
+
+ void (*broker_init)(rd_kafka_broker_t *rkb);
+ void (*broker_term)(rd_kafka_broker_t *rkb);
+
+ int (*conf_validate)(rd_kafka_t *rk, char *errstr, size_t errstr_size);
+};
+
+#ifdef _WIN32
+extern const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider;
+#endif
+
+#if WITH_SASL_CYRUS
+extern const struct rd_kafka_sasl_provider rd_kafka_sasl_cyrus_provider;
+void rd_kafka_sasl_cyrus_global_term(void);
+int rd_kafka_sasl_cyrus_global_init(void);
+#endif
+
+extern const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider;
+
+#if WITH_SASL_SCRAM
+extern const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider;
+#endif
+
+#if WITH_SASL_OAUTHBEARER
+extern const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider;
+#endif
+
+void rd_kafka_sasl_auth_done(rd_kafka_transport_t *rktrans);
+int rd_kafka_sasl_send(rd_kafka_transport_t *rktrans,
+ const void *payload,
+ int len,
+ char *errstr,
+ size_t errstr_size);
+
+#endif /* _RDKAFKA_SASL_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c
new file mode 100644
index 000000000..39b165a7d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.c
@@ -0,0 +1,1825 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Builtin SASL OAUTHBEARER support
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_sasl_int.h"
+#include <openssl/evp.h>
+#include "rdunittest.h"
+
+#if WITH_OAUTHBEARER_OIDC
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+#endif
+
+
+/**
+ * @struct Per-client-instance SASL/OAUTHBEARER handle.
+ */
+typedef struct rd_kafka_sasl_oauthbearer_handle_s {
+ /**< Read-write lock for fields in the handle. */
+ rwlock_t lock;
+
+ /**< The b64token value as defined in RFC 6750 Section 2.1
+ * https://tools.ietf.org/html/rfc6750#section-2.1
+ */
+ char *token_value;
+
+ /**< When the token expires, in terms of the number of
+ * milliseconds since the epoch. Wall clock time.
+ */
+ rd_ts_t wts_md_lifetime;
+
+ /**< The point after which this token should be replaced with a
+ * new one, in terms of the number of milliseconds since the
+ * epoch. Wall clock time.
+ */
+ rd_ts_t wts_refresh_after;
+
+ /**< When the last token refresh was equeued (0 = never)
+ * in terms of the number of milliseconds since the epoch.
+ * Wall clock time.
+ */
+ rd_ts_t wts_enqueued_refresh;
+
+ /**< The name of the principal to which this token applies. */
+ char *md_principal_name;
+
+ /**< The SASL extensions, as per RFC 7628 Section 3.1
+ * https://tools.ietf.org/html/rfc7628#section-3.1
+ */
+ rd_list_t extensions; /* rd_strtup_t list */
+
+ /**< Error message for validation and/or token retrieval problems. */
+ char *errstr;
+
+ /**< Back-pointer to client instance. */
+ rd_kafka_t *rk;
+
+ /**< Token refresh timer */
+ rd_kafka_timer_t token_refresh_tmr;
+
+ /** Queue to enqueue token_refresh_cb ops on. */
+ rd_kafka_q_t *callback_q;
+
+ /** Using internal refresh callback (sasl.oauthbearer.method=oidc) */
+ rd_bool_t internal_refresh;
+
+} rd_kafka_sasl_oauthbearer_handle_t;
+
+
+/**
+ * @struct Unsecured JWS info populated when sasl.oauthbearer.config is parsed
+ */
+struct rd_kafka_sasl_oauthbearer_parsed_ujws {
+ char *principal_claim_name;
+ char *principal;
+ char *scope_claim_name;
+ char *scope_csv_text;
+ int life_seconds;
+ rd_list_t extensions; /* rd_strtup_t list */
+};
+
+/**
+ * @struct Unsecured JWS token to be set on the client handle
+ */
+struct rd_kafka_sasl_oauthbearer_token {
+ char *token_value;
+ int64_t md_lifetime_ms;
+ char *md_principal_name;
+ char **extensions;
+ size_t extension_size;
+};
+
+/**
+ * @brief Per-connection state
+ */
+struct rd_kafka_sasl_oauthbearer_state {
+ enum { RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE,
+ RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG,
+ RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL,
+ } state;
+ char *server_error_msg;
+
+ /*
+ * A place to store a consistent view of the token and extensions
+ * throughout the authentication process -- even if it is refreshed
+ * midway through this particular authentication.
+ */
+ char *token_value;
+ char *md_principal_name;
+ rd_list_t extensions; /* rd_strtup_t list */
+};
+
+
+
+/**
+ * @brief free memory inside the given token
+ */
+static void rd_kafka_sasl_oauthbearer_token_free(
+ struct rd_kafka_sasl_oauthbearer_token *token) {
+ size_t i;
+
+ RD_IF_FREE(token->token_value, rd_free);
+ RD_IF_FREE(token->md_principal_name, rd_free);
+
+ for (i = 0; i < token->extension_size; i++)
+ rd_free(token->extensions[i]);
+
+ RD_IF_FREE(token->extensions, rd_free);
+
+ memset(token, 0, sizeof(*token));
+}
+
+
+/**
+ * @brief Op callback for RD_KAFKA_OP_OAUTHBEARER_REFRESH
+ *
+ * @locality Application thread
+ */
+static rd_kafka_op_res_t rd_kafka_oauthbearer_refresh_op(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ /* The op callback is invoked when the op is destroyed via
+ * rd_kafka_op_destroy() or rd_kafka_event_destroy(), so
+ * make sure we don't refresh upon destruction since
+ * the op has already been handled by this point.
+ */
+ if (rko->rko_err != RD_KAFKA_RESP_ERR__DESTROY &&
+ rk->rk_conf.sasl.oauthbearer.token_refresh_cb)
+ rk->rk_conf.sasl.oauthbearer.token_refresh_cb(
+ rk, rk->rk_conf.sasl.oauthbearer_config,
+ rk->rk_conf.opaque);
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+/**
+ * @brief Enqueue a token refresh.
+ * @locks rwlock_wrlock(&handle->lock) MUST be held
+ */
+static void rd_kafka_oauthbearer_enqueue_token_refresh(
+ rd_kafka_sasl_oauthbearer_handle_t *handle) {
+ rd_kafka_op_t *rko;
+
+ rko = rd_kafka_op_new_cb(handle->rk, RD_KAFKA_OP_OAUTHBEARER_REFRESH,
+ rd_kafka_oauthbearer_refresh_op);
+ rd_kafka_op_set_prio(rko, RD_KAFKA_PRIO_FLASH);
+
+ /* For internal OIDC refresh callback:
+ * Force op to be handled by internal callback on the
+ * receiving queue, rather than being passed as an event to
+ * the application. */
+ if (handle->internal_refresh)
+ rko->rko_flags |= RD_KAFKA_OP_F_FORCE_CB;
+
+ handle->wts_enqueued_refresh = rd_uclock();
+ rd_kafka_q_enq(handle->callback_q, rko);
+}
+
+/**
+ * @brief Enqueue a token refresh if necessary.
+ *
+ * The method rd_kafka_oauthbearer_enqueue_token_refresh() is invoked
+ * if necessary; the required lock is acquired and released. This method
+ * returns immediately when SASL/OAUTHBEARER is not in use by the client.
+ */
+static void rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary(
+ rd_kafka_sasl_oauthbearer_handle_t *handle) {
+ rd_ts_t now_wallclock;
+
+ now_wallclock = rd_uclock();
+
+ rwlock_wrlock(&handle->lock);
+ if (handle->wts_refresh_after < now_wallclock &&
+ handle->wts_enqueued_refresh <= handle->wts_refresh_after)
+ /* Refresh required and not yet scheduled; refresh it */
+ rd_kafka_oauthbearer_enqueue_token_refresh(handle);
+ rwlock_wrunlock(&handle->lock);
+}
+
+/**
+ * @returns \c rd_true if SASL/OAUTHBEARER is the configured authentication
+ * mechanism and a token is available, otherwise \c rd_false.
+ *
+ * @locks none
+ * @locality any
+ */
+static rd_bool_t
+rd_kafka_oauthbearer_has_token(rd_kafka_sasl_oauthbearer_handle_t *handle) {
+ rd_bool_t retval_has_token;
+
+ rwlock_rdlock(&handle->lock);
+ retval_has_token = handle->token_value != NULL;
+ rwlock_rdunlock(&handle->lock);
+
+ return retval_has_token;
+}
+
+/**
+ * @brief Verify that the provided \p key is valid.
+ * @returns 0 on success or -1 if \p key is invalid.
+ */
+static int check_oauthbearer_extension_key(const char *key,
+ char *errstr,
+ size_t errstr_size) {
+ const char *c;
+
+ if (!strcmp(key, "auth")) {
+ rd_snprintf(errstr, errstr_size,
+ "Cannot explicitly set the reserved `auth` "
+ "SASL/OAUTHBEARER extension key");
+ return -1;
+ }
+
+ /*
+ * https://tools.ietf.org/html/rfc7628#section-3.1
+ * key = 1*(ALPHA)
+ *
+ * https://tools.ietf.org/html/rfc5234#appendix-B.1
+ * ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
+ */
+ if (!*key) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL/OAUTHBEARER extension keys "
+ "must not be empty");
+ return -1;
+ }
+
+ for (c = key; *c; c++) {
+ if (!(*c >= 'A' && *c <= 'Z') && !(*c >= 'a' && *c <= 'z')) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL/OAUTHBEARER extension keys must "
+ "only consist of A-Z or "
+ "a-z characters: %s (%c)",
+ key, *c);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Verify that the provided \p value is valid.
+ * @returns 0 on success or -1 if \p value is invalid.
+ */
+static int check_oauthbearer_extension_value(const char *value,
+ char *errstr,
+ size_t errstr_size) {
+ const char *c;
+
+ /*
+ * https://tools.ietf.org/html/rfc7628#section-3.1
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ *
+ * https://tools.ietf.org/html/rfc5234#appendix-B.1
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * SP = %x20 ; space
+ * HTAB = %x09 ; horizontal tab
+ * CR = %x0D ; carriage return
+ * LF = %x0A ; linefeed
+ */
+ for (c = value; *c; c++) {
+ if (!(*c >= '\x21' && *c <= '\x7E') && *c != '\x20' &&
+ *c != '\x09' && *c != '\x0D' && *c != '\x0A') {
+ rd_snprintf(errstr, errstr_size,
+ "SASL/OAUTHBEARER extension values must "
+ "only consist of space, horizontal tab, "
+ "CR, LF, and "
+ "visible characters (%%x21-7E): %s (%c)",
+ value, *c);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Set SASL/OAUTHBEARER token and metadata
+ *
+ * @param rk Client instance.
+ * @param token_value the mandatory token value to set, often (but not
+ * necessarily) a JWS compact serialization as per
+ * https://tools.ietf.org/html/rfc7515#section-3.1.
+ * Use rd_kafka_sasl_oauthbearer_token_free() to free members if
+ * return value is not -1.
+ * @param md_lifetime_ms when the token expires, in terms of the number of
+ * milliseconds since the epoch. See https://currentmillis.com/.
+ * @param md_principal_name the mandatory Kafka principal name associated
+ * with the token.
+ * @param extensions optional SASL extensions key-value array with
+ * \p extensions_size elements (number of keys * 2), where [i] is the key and
+ * [i+1] is the key's value, to be communicated to the broker
+ * as additional key-value pairs during the initial client response as per
+ * https://tools.ietf.org/html/rfc7628#section-3.1.
+ * @param extension_size the number of SASL extension keys plus values,
+ * which should be a non-negative multiple of 2.
+ *
+ * The SASL/OAUTHBEARER token refresh callback or event handler should cause
+ * this method to be invoked upon success, via
+ * rd_kafka_oauthbearer_set_token(). The extension keys must not include the
+ * reserved key "`auth`", and all extension keys and values must conform to the
+ * required format as per https://tools.ietf.org/html/rfc7628#section-3.1:
+ *
+ * key = 1*(ALPHA)
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise errstr set and:
+ * \c RD_KAFKA_RESP_ERR__INVALID_ARG if any of the arguments are
+ * invalid;
+ * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is not configured as
+ * the client's authentication mechanism.
+ *
+ * @sa rd_kafka_oauthbearer_set_token_failure0
+ */
+rd_kafka_resp_err_t
+rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk,
+ const char *token_value,
+ int64_t md_lifetime_ms,
+ const char *md_principal_name,
+ const char **extensions,
+ size_t extension_size,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
+ size_t i;
+ rd_ts_t now_wallclock;
+ rd_ts_t wts_md_lifetime = md_lifetime_ms * 1000;
+
+ /* Check if SASL/OAUTHBEARER is the configured auth mechanism */
+ if (rk->rk_conf.sasl.provider != &rd_kafka_sasl_oauthbearer_provider ||
+ !handle) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL/OAUTHBEARER is not the "
+ "configured authentication mechanism");
+ return RD_KAFKA_RESP_ERR__STATE;
+ }
+
+ /* Check if there is an odd number of extension keys + values */
+ if (extension_size & 1) {
+ rd_snprintf(errstr, errstr_size,
+ "Incorrect extension size "
+ "(must be a non-negative multiple of 2): %" PRIusz,
+ extension_size);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ /* Check args for correct format/value */
+ now_wallclock = rd_uclock();
+ if (wts_md_lifetime <= now_wallclock) {
+ rd_snprintf(errstr, errstr_size,
+ "Must supply an unexpired token: "
+ "now=%" PRId64 "ms, exp=%" PRId64 "ms",
+ now_wallclock / 1000, wts_md_lifetime / 1000);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ if (check_oauthbearer_extension_value(token_value, errstr,
+ errstr_size) == -1)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ for (i = 0; i + 1 < extension_size; i += 2) {
+ if (check_oauthbearer_extension_key(extensions[i], errstr,
+ errstr_size) == -1 ||
+ check_oauthbearer_extension_value(extensions[i + 1], errstr,
+ errstr_size) == -1)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ rwlock_wrlock(&handle->lock);
+
+ RD_IF_FREE(handle->md_principal_name, rd_free);
+ handle->md_principal_name = rd_strdup(md_principal_name);
+
+ RD_IF_FREE(handle->token_value, rd_free);
+ handle->token_value = rd_strdup(token_value);
+
+ handle->wts_md_lifetime = wts_md_lifetime;
+
+ /* Schedule a refresh 80% through its remaining lifetime */
+ handle->wts_refresh_after =
+ (rd_ts_t)(now_wallclock + 0.8 * (wts_md_lifetime - now_wallclock));
+
+ rd_list_clear(&handle->extensions);
+ for (i = 0; i + 1 < extension_size; i += 2)
+ rd_list_add(&handle->extensions,
+ rd_strtup_new(extensions[i], extensions[i + 1]));
+
+ RD_IF_FREE(handle->errstr, rd_free);
+ handle->errstr = NULL;
+
+ rwlock_wrunlock(&handle->lock);
+
+ rd_kafka_dbg(rk, SECURITY, "BRKMAIN",
+ "Waking up waiting broker threads after "
+ "setting OAUTHBEARER token");
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_TRY_CONNECT,
+ "OAUTHBEARER token update");
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief SASL/OAUTHBEARER token refresh failure indicator.
+ *
+ * @param rk Client instance.
+ * @param errstr mandatory human readable error reason for failing to acquire
+ * a token.
+ *
+ * The SASL/OAUTHBEARER token refresh callback or event handler should cause
+ * this method to be invoked upon failure, via
+ * rd_kafka_oauthbearer_set_token_failure().
+ *
+ * @returns \c RD_KAFKA_RESP_ERR_NO_ERROR on success, otherwise
+ * \c RD_KAFKA_RESP_ERR__STATE if SASL/OAUTHBEARER is enabled but is
+ * not configured to be the client's authentication mechanism,
+ * \c RD_KAFKA_RESP_ERR__INVALID_ARG if no error string is supplied.
+
+ * @sa rd_kafka_oauthbearer_set_token0
+ */
+rd_kafka_resp_err_t
+rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk, const char *errstr) {
+ rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
+ rd_bool_t error_changed;
+
+ /* Check if SASL/OAUTHBEARER is the configured auth mechanism */
+ if (rk->rk_conf.sasl.provider != &rd_kafka_sasl_oauthbearer_provider ||
+ !handle)
+ return RD_KAFKA_RESP_ERR__STATE;
+
+ if (!errstr || !*errstr)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ rwlock_wrlock(&handle->lock);
+ error_changed = !handle->errstr || strcmp(handle->errstr, errstr);
+ RD_IF_FREE(handle->errstr, rd_free);
+ handle->errstr = rd_strdup(errstr);
+ /* Leave any existing token because it may have some life left,
+ * schedule a refresh for 10 seconds later. */
+ handle->wts_refresh_after = rd_uclock() + (10 * 1000 * 1000);
+ rwlock_wrunlock(&handle->lock);
+
+ /* Trigger an ERR__AUTHENTICATION error if the error changed. */
+ if (error_changed)
+ rd_kafka_op_err(rk, RD_KAFKA_RESP_ERR__AUTHENTICATION,
+ "Failed to acquire SASL OAUTHBEARER token: %s",
+ errstr);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Parse a config value from the string pointed to by \p loc and starting
+ * with the given \p prefix and ending with the given \p value_end_char, storing
+ * the newly-allocated memory result in the string pointed to by \p value.
+ * @returns -1 if string pointed to by \p value is non-empty (\p errstr set, no
+ * memory allocated), else 0 (caller must free allocated memory).
+ */
+static int parse_ujws_config_value_for_prefix(char **loc,
+ const char *prefix,
+ const char value_end_char,
+ char **value,
+ char *errstr,
+ size_t errstr_size) {
+ if (*value) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "multiple '%s' entries",
+ prefix);
+ return -1;
+ }
+
+ *loc += strlen(prefix);
+ *value = *loc;
+ while (**loc != '\0' && **loc != value_end_char)
+ ++*loc;
+
+ if (**loc == value_end_char) {
+ /* End the string and skip the character */
+ **loc = '\0';
+ ++*loc;
+ }
+
+ /* return new allocated memory */
+ *value = rd_strdup(*value);
+
+ return 0;
+}
+
+/*
+ * @brief Parse Unsecured JWS config, allocates strings that must be freed
+ * @param cfg the config to parse (typically from `sasl.oauthbearer.config`)
+ * @param parsed holds the parsed output; it must be all zeros to start.
+ * @returns -1 on failure (\p errstr set), else 0.
+ */
+static int
+parse_ujws_config(const char *cfg,
+ struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed,
+ char *errstr,
+ size_t errstr_size) {
+ /*
+ * Extensions:
+ *
+ * https://tools.ietf.org/html/rfc7628#section-3.1
+ * key = 1*(ALPHA)
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ *
+ * https://tools.ietf.org/html/rfc5234#appendix-B.1
+ * ALPHA = %x41-5A / %x61-7A ; A-Z / a-z
+ * VCHAR = %x21-7E ; visible (printing) characters
+ * SP = %x20 ; space
+ * HTAB = %x09 ; horizontal tab
+ * CR = %x0D ; carriage return
+ * LF = %x0A ; linefeed
+ */
+
+ static const char *prefix_principal_claim_name = "principalClaimName=";
+ static const char *prefix_principal = "principal=";
+ static const char *prefix_scope_claim_name = "scopeClaimName=";
+ static const char *prefix_scope = "scope=";
+ static const char *prefix_life_seconds = "lifeSeconds=";
+ static const char *prefix_extension = "extension_";
+
+ char *cfg_copy = rd_strdup(cfg);
+ char *loc = cfg_copy;
+ int r = 0;
+
+ while (*loc != '\0' && !r) {
+ if (*loc == ' ')
+ ++loc;
+ else if (!strncmp(prefix_principal_claim_name, loc,
+ strlen(prefix_principal_claim_name))) {
+ r = parse_ujws_config_value_for_prefix(
+ &loc, prefix_principal_claim_name, ' ',
+ &parsed->principal_claim_name, errstr, errstr_size);
+
+ if (!r && !*parsed->principal_claim_name) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "empty '%s'",
+ prefix_principal_claim_name);
+ r = -1;
+ }
+
+ } else if (!strncmp(prefix_principal, loc,
+ strlen(prefix_principal))) {
+ r = parse_ujws_config_value_for_prefix(
+ &loc, prefix_principal, ' ', &parsed->principal,
+ errstr, errstr_size);
+
+ if (!r && !*parsed->principal) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "empty '%s'",
+ prefix_principal);
+ r = -1;
+ }
+
+ } else if (!strncmp(prefix_scope_claim_name, loc,
+ strlen(prefix_scope_claim_name))) {
+ r = parse_ujws_config_value_for_prefix(
+ &loc, prefix_scope_claim_name, ' ',
+ &parsed->scope_claim_name, errstr, errstr_size);
+
+ if (!r && !*parsed->scope_claim_name) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "empty '%s'",
+ prefix_scope_claim_name);
+ r = -1;
+ }
+
+ } else if (!strncmp(prefix_scope, loc, strlen(prefix_scope))) {
+ r = parse_ujws_config_value_for_prefix(
+ &loc, prefix_scope, ' ', &parsed->scope_csv_text,
+ errstr, errstr_size);
+
+ if (!r && !*parsed->scope_csv_text) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "empty '%s'",
+ prefix_scope);
+ r = -1;
+ }
+
+ } else if (!strncmp(prefix_life_seconds, loc,
+ strlen(prefix_life_seconds))) {
+ char *life_seconds_text = NULL;
+
+ r = parse_ujws_config_value_for_prefix(
+ &loc, prefix_life_seconds, ' ', &life_seconds_text,
+ errstr, errstr_size);
+
+ if (!r && !*life_seconds_text) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid "
+ "sasl.oauthbearer.config: "
+ "empty '%s'",
+ prefix_life_seconds);
+ r = -1;
+ } else if (!r) {
+ long long life_seconds_long;
+ char *end_ptr;
+ life_seconds_long =
+ strtoll(life_seconds_text, &end_ptr, 10);
+ if (*end_ptr != '\0') {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid "
+ "sasl.oauthbearer.config: "
+ "non-integral '%s': %s",
+ prefix_life_seconds,
+ life_seconds_text);
+ r = -1;
+ } else if (life_seconds_long <= 0 ||
+ life_seconds_long > INT_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid "
+ "sasl.oauthbearer.config: "
+ "value out of range of "
+ "positive int '%s': %s",
+ prefix_life_seconds,
+ life_seconds_text);
+ r = -1;
+ } else {
+ parsed->life_seconds =
+ (int)life_seconds_long;
+ }
+ }
+
+ RD_IF_FREE(life_seconds_text, rd_free);
+
+ } else if (!strncmp(prefix_extension, loc,
+ strlen(prefix_extension))) {
+ char *extension_key = NULL;
+
+ r = parse_ujws_config_value_for_prefix(
+ &loc, prefix_extension, '=', &extension_key, errstr,
+ errstr_size);
+
+ if (!r && !*extension_key) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid "
+ "sasl.oauthbearer.config: "
+ "empty '%s' key",
+ prefix_extension);
+ r = -1;
+ } else if (!r) {
+ char *extension_value = NULL;
+ r = parse_ujws_config_value_for_prefix(
+ &loc, "", ' ', &extension_value, errstr,
+ errstr_size);
+ if (!r) {
+ rd_list_add(
+ &parsed->extensions,
+ rd_strtup_new(extension_key,
+ extension_value));
+ rd_free(extension_value);
+ }
+ }
+
+ RD_IF_FREE(extension_key, rd_free);
+
+ } else {
+ rd_snprintf(errstr, errstr_size,
+ "Unrecognized sasl.oauthbearer.config "
+ "beginning at: %s",
+ loc);
+ r = -1;
+ }
+ }
+
+ rd_free(cfg_copy);
+
+ return r;
+}
+
+/**
+ * @brief Create unsecured JWS compact serialization
+ * from the given information.
+ * @returns allocated memory that the caller must free.
+ */
+static char *create_jws_compact_serialization(
+ const struct rd_kafka_sasl_oauthbearer_parsed_ujws *parsed,
+ rd_ts_t now_wallclock) {
+ static const char *jose_header_encoded =
+ "eyJhbGciOiJub25lIn0"; // {"alg":"none"}
+ int scope_json_length = 0;
+ int max_json_length;
+ double now_wallclock_seconds;
+ char *scope_json;
+ char *scope_curr;
+ int i;
+ char *claims_json;
+ char *jws_claims;
+ size_t encode_len;
+ char *jws_last_char;
+ char *jws_maybe_non_url_char;
+ char *retval_jws;
+ size_t retval_size;
+ rd_list_t scope;
+
+ rd_list_init(&scope, 0, rd_free);
+ if (parsed->scope_csv_text) {
+ /* Convert from csv to rd_list_t and
+ * calculate json length. */
+ char *start = parsed->scope_csv_text;
+ char *curr = start;
+
+ while (*curr != '\0') {
+ /* Ignore empty elements (e.g. ",,") */
+ while (*curr == ',') {
+ ++curr;
+ ++start;
+ }
+
+ while (*curr != '\0' && *curr != ',')
+ ++curr;
+
+ if (curr == start)
+ continue;
+
+ if (*curr == ',') {
+ *curr = '\0';
+ ++curr;
+ }
+
+ if (!rd_list_find(&scope, start, (void *)strcmp))
+ rd_list_add(&scope, rd_strdup(start));
+
+ if (scope_json_length == 0) {
+ scope_json_length =
+ 2 + // ,"
+ (int)strlen(parsed->scope_claim_name) +
+ 4 + // ":["
+ (int)strlen(start) + 1 + // "
+ 1; // ]
+ } else {
+ scope_json_length += 2; // ,"
+ scope_json_length += (int)strlen(start);
+ scope_json_length += 1; // "
+ }
+
+ start = curr;
+ }
+ }
+
+ now_wallclock_seconds = now_wallclock / 1000000.0;
+
+ /* Generate json */
+ max_json_length = 2 + // {"
+ (int)strlen(parsed->principal_claim_name) +
+ 3 + // ":"
+ (int)strlen(parsed->principal) + 8 + // ","iat":
+ 14 + // iat NumericDate (e.g. 1549251467.546)
+ 7 + // ,"exp":
+ 14 + // exp NumericDate (e.g. 1549252067.546)
+ scope_json_length + 1; // }
+
+ /* Generate scope portion of json */
+ scope_json = rd_malloc(scope_json_length + 1);
+ *scope_json = '\0';
+ scope_curr = scope_json;
+
+ for (i = 0; i < rd_list_cnt(&scope); i++) {
+ if (i == 0)
+ scope_curr += rd_snprintf(
+ scope_curr,
+ (size_t)(scope_json + scope_json_length + 1 -
+ scope_curr),
+ ",\"%s\":[\"", parsed->scope_claim_name);
+ else
+ scope_curr += sprintf(scope_curr, "%s", ",\"");
+ scope_curr += sprintf(scope_curr, "%s\"",
+ (const char *)rd_list_elem(&scope, i));
+ if (i == rd_list_cnt(&scope) - 1)
+ scope_curr += sprintf(scope_curr, "%s", "]");
+ }
+
+ claims_json = rd_malloc(max_json_length + 1);
+ rd_snprintf(claims_json, max_json_length + 1,
+ "{\"%s\":\"%s\",\"iat\":%.3f,\"exp\":%.3f%s}",
+ parsed->principal_claim_name, parsed->principal,
+ now_wallclock_seconds,
+ now_wallclock_seconds + parsed->life_seconds, scope_json);
+ rd_free(scope_json);
+
+ /* Convert to base64URL format, first to base64, then to base64URL */
+ retval_size = strlen(jose_header_encoded) + 1 +
+ (((max_json_length + 2) / 3) * 4) + 1 + 1;
+ retval_jws = rd_malloc(retval_size);
+ rd_snprintf(retval_jws, retval_size, "%s.", jose_header_encoded);
+ jws_claims = retval_jws + strlen(retval_jws);
+ encode_len =
+ EVP_EncodeBlock((uint8_t *)jws_claims, (uint8_t *)claims_json,
+ (int)strlen(claims_json));
+ rd_free(claims_json);
+ jws_last_char = jws_claims + encode_len - 1;
+
+ /* Convert from padded base64 to unpadded base64URL
+ * and eliminate any padding. */
+ while (jws_last_char >= jws_claims && *jws_last_char == '=')
+ --jws_last_char;
+ *(++jws_last_char) = '.';
+ *(jws_last_char + 1) = '\0';
+
+ /* Convert the 2 differing encode characters */
+ for (jws_maybe_non_url_char = retval_jws; *jws_maybe_non_url_char;
+ jws_maybe_non_url_char++)
+ if (*jws_maybe_non_url_char == '+')
+ *jws_maybe_non_url_char = '-';
+ else if (*jws_maybe_non_url_char == '/')
+ *jws_maybe_non_url_char = '_';
+
+ rd_list_destroy(&scope);
+
+ return retval_jws;
+}
+
+/**
+ * @brief Same as rd_kafka_oauthbearer_unsecured_token() except it takes
+ * additional explicit arguments and return a status code along with
+ * the token to set in order to facilitate unit testing.
+ * @param token output defining the token to set
+ * @param cfg the config to parse (typically from `sasl.oauthbearer.config`)
+ * @param now_wallclock_ms the valued to be used for the `iat` claim
+ * (and by implication, the `exp` claim)
+ * @returns -1 on failure (\p errstr set), else 0.
+ */
+static int rd_kafka_oauthbearer_unsecured_token0(
+ struct rd_kafka_sasl_oauthbearer_token *token,
+ const char *cfg,
+ int64_t now_wallclock_ms,
+ char *errstr,
+ size_t errstr_size) {
+ struct rd_kafka_sasl_oauthbearer_parsed_ujws parsed = RD_ZERO_INIT;
+ int r;
+ int i;
+
+ if (!cfg || !*cfg) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "must not be empty");
+ return -1;
+ }
+
+ memset(token, 0, sizeof(*token));
+
+ rd_list_init(&parsed.extensions, 0,
+ (void (*)(void *))rd_strtup_destroy);
+
+ if (!(r = parse_ujws_config(cfg, &parsed, errstr, errstr_size))) {
+ /* Make sure we have required and valid info */
+ if (!parsed.principal_claim_name)
+ parsed.principal_claim_name = rd_strdup("sub");
+ if (!parsed.scope_claim_name)
+ parsed.scope_claim_name = rd_strdup("scope");
+ if (!parsed.life_seconds)
+ parsed.life_seconds = 3600;
+ if (!parsed.principal) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "no principal=<value>");
+ r = -1;
+ } else if (strchr(parsed.principal, '"')) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "'\"' cannot appear in principal: %s",
+ parsed.principal);
+ r = -1;
+ } else if (strchr(parsed.principal_claim_name, '"')) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "'\"' cannot appear in "
+ "principalClaimName: %s",
+ parsed.principal_claim_name);
+ r = -1;
+ } else if (strchr(parsed.scope_claim_name, '"')) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "'\"' cannot appear in scopeClaimName: %s",
+ parsed.scope_claim_name);
+ r = -1;
+ } else if (parsed.scope_csv_text &&
+ strchr(parsed.scope_csv_text, '"')) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid sasl.oauthbearer.config: "
+ "'\"' cannot appear in scope: %s",
+ parsed.scope_csv_text);
+ r = -1;
+ } else {
+ char **extensionv;
+ int extension_pair_count;
+ char *jws = create_jws_compact_serialization(
+ &parsed, now_wallclock_ms * 1000);
+
+ extension_pair_count = rd_list_cnt(&parsed.extensions);
+ extensionv = rd_malloc(sizeof(*extensionv) * 2 *
+ extension_pair_count);
+ for (i = 0; i < extension_pair_count; ++i) {
+ rd_strtup_t *strtup =
+ (rd_strtup_t *)rd_list_elem(
+ &parsed.extensions, i);
+ extensionv[2 * i] = rd_strdup(strtup->name);
+ extensionv[2 * i + 1] =
+ rd_strdup(strtup->value);
+ }
+ token->token_value = jws;
+ token->md_lifetime_ms =
+ now_wallclock_ms + parsed.life_seconds * 1000;
+ token->md_principal_name = rd_strdup(parsed.principal);
+ token->extensions = extensionv;
+ token->extension_size = 2 * extension_pair_count;
+ }
+ }
+ RD_IF_FREE(parsed.principal_claim_name, rd_free);
+ RD_IF_FREE(parsed.principal, rd_free);
+ RD_IF_FREE(parsed.scope_claim_name, rd_free);
+ RD_IF_FREE(parsed.scope_csv_text, rd_free);
+ rd_list_destroy(&parsed.extensions);
+
+ if (r == -1)
+ rd_kafka_sasl_oauthbearer_token_free(token);
+
+ return r;
+}
+
+/**
+ * @brief Default SASL/OAUTHBEARER token refresh callback that generates an
+ * unsecured JWS as per https://tools.ietf.org/html/rfc7515#appendix-A.5.
+ *
+ * This method interprets `sasl.oauthbearer.config` as space-separated
+ * name=value pairs with valid names including principalClaimName,
+ * principal, scopeClaimName, scope, and lifeSeconds. The default
+ * value for principalClaimName is "sub". The principal must be specified.
+ * The default value for scopeClaimName is "scope", and the default value
+ * for lifeSeconds is 3600. The scope value is CSV format with the
+ * default value being no/empty scope. For example:
+ * "principalClaimName=azp principal=admin scopeClaimName=roles
+ * scope=role1,role2 lifeSeconds=600".
+ *
+ * SASL extensions can be communicated to the broker via
+ * extension_NAME=value. For example:
+ * "principal=admin extension_traceId=123". Extension names and values
+ * must conform to the required syntax as per
+ * https://tools.ietf.org/html/rfc7628#section-3.1
+ *
+ * All values -- whether extensions, claim names, or scope elements -- must not
+ * include a quote (") character. The parsing rules also imply that names
+ * and values cannot include a space character, and scope elements cannot
+ * include a comma (,) character.
+ *
+ * The existence of any kind of parsing problem -- an unrecognized name,
+ * a quote character in a value, an empty value, etc. -- raises the
+ * \c RD_KAFKA_RESP_ERR__AUTHENTICATION event.
+ *
+ * Unsecured tokens are not to be used in production -- they are only good for
+ * testing and development purposess -- so while the inflexibility of the
+ * parsing rules is acknowledged, it is assumed that this is not problematic.
+ */
+void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque) {
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT;
+
+ rd_kafka_dbg(rk, SECURITY, "OAUTHBEARER", "Creating unsecured token");
+
+ if (rd_kafka_oauthbearer_unsecured_token0(&token, oauthbearer_config,
+ rd_uclock() / 1000, errstr,
+ sizeof(errstr)) == -1 ||
+ rd_kafka_oauthbearer_set_token(
+ rk, token.token_value, token.md_lifetime_ms,
+ token.md_principal_name, (const char **)token.extensions,
+ token.extension_size, errstr, sizeof(errstr)) == -1) {
+ rd_kafka_oauthbearer_set_token_failure(rk, errstr);
+ }
+
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+}
+
+/**
+ * @brief Close and free authentication state
+ */
+static void rd_kafka_sasl_oauthbearer_close(rd_kafka_transport_t *rktrans) {
+ struct rd_kafka_sasl_oauthbearer_state *state =
+ rktrans->rktrans_sasl.state;
+
+ if (!state)
+ return;
+
+ RD_IF_FREE(state->server_error_msg, rd_free);
+ rd_free(state->token_value);
+ rd_free(state->md_principal_name);
+ rd_list_destroy(&state->extensions);
+ rd_free(state);
+}
+
+
+
+/**
+ * @brief Build client-first-message
+ */
+static void rd_kafka_sasl_oauthbearer_build_client_first_message(
+ rd_kafka_transport_t *rktrans,
+ rd_chariov_t *out) {
+ struct rd_kafka_sasl_oauthbearer_state *state =
+ rktrans->rktrans_sasl.state;
+
+ /*
+ * https://tools.ietf.org/html/rfc7628#section-3.1
+ * kvsep = %x01
+ * key = 1*(ALPHA)
+ * value = *(VCHAR / SP / HTAB / CR / LF )
+ * kvpair = key "=" value kvsep
+ * ;;gs2-header = See RFC 5801
+ * client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep
+ */
+
+ static const char *gs2_header = "n,,";
+ static const char *kvsep = "\x01";
+ const int kvsep_size = (int)strlen(kvsep);
+ int extension_size = 0;
+ int i;
+ char *buf;
+ int size_written;
+ unsigned long r;
+
+ for (i = 0; i < rd_list_cnt(&state->extensions); i++) {
+ rd_strtup_t *extension = rd_list_elem(&state->extensions, i);
+ // kvpair = key "=" value kvsep
+ extension_size += (int)strlen(extension->name) + 1 // "="
+ + (int)strlen(extension->value) + kvsep_size;
+ }
+
+ // client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep
+ out->size = strlen(gs2_header) + kvsep_size + strlen("auth=Bearer ") +
+ strlen(state->token_value) + kvsep_size + extension_size +
+ kvsep_size;
+ out->ptr = rd_malloc(out->size + 1);
+
+ buf = out->ptr;
+ size_written = 0;
+ r = rd_snprintf(buf, out->size + 1 - size_written,
+ "%s%sauth=Bearer %s%s", gs2_header, kvsep,
+ state->token_value, kvsep);
+ rd_assert(r < out->size + 1 - size_written);
+ size_written += r;
+ buf = out->ptr + size_written;
+
+ for (i = 0; i < rd_list_cnt(&state->extensions); i++) {
+ rd_strtup_t *extension = rd_list_elem(&state->extensions, i);
+ r = rd_snprintf(buf, out->size + 1 - size_written, "%s=%s%s",
+ extension->name, extension->value, kvsep);
+ rd_assert(r < out->size + 1 - size_written);
+ size_written += r;
+ buf = out->ptr + size_written;
+ }
+
+ r = rd_snprintf(buf, out->size + 1 - size_written, "%s", kvsep);
+ rd_assert(r < out->size + 1 - size_written);
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER",
+ "Built client first message");
+}
+
+
+
+/**
+ * @brief SASL OAUTHBEARER client state machine
+ * @returns -1 on failure (\p errstr set), else 0.
+ */
+static int rd_kafka_sasl_oauthbearer_fsm(rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *in,
+ char *errstr,
+ size_t errstr_size) {
+ static const char *state_names[] = {
+ "client-first-message",
+ "server-first-message",
+ "server-failure-message",
+ };
+ struct rd_kafka_sasl_oauthbearer_state *state =
+ rktrans->rktrans_sasl.state;
+ rd_chariov_t out = RD_ZERO_INIT;
+ int r = -1;
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "OAUTHBEARER",
+ "SASL OAUTHBEARER client in state %s",
+ state_names[state->state]);
+
+ switch (state->state) {
+ case RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE:
+ rd_dassert(!in); /* Not expecting any server-input */
+
+ rd_kafka_sasl_oauthbearer_build_client_first_message(rktrans,
+ &out);
+ state->state = RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG;
+ break;
+
+
+ case RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_FIRST_MSG:
+ if (!in->size || !*in->ptr) {
+ /* Success */
+ rd_rkb_dbg(rktrans->rktrans_rkb,
+ SECURITY | RD_KAFKA_DBG_BROKER,
+ "OAUTHBEARER",
+ "SASL OAUTHBEARER authentication "
+ "successful (principal=%s)",
+ state->md_principal_name);
+ rd_kafka_sasl_auth_done(rktrans);
+ r = 0;
+ break;
+ }
+
+ /* Failure; save error message for later */
+ state->server_error_msg = rd_strndup(in->ptr, in->size);
+
+ /*
+ * https://tools.ietf.org/html/rfc7628#section-3.1
+ * kvsep = %x01
+ * client-resp = (gs2-header kvsep *kvpair kvsep) / kvsep
+ *
+ * Send final kvsep (CTRL-A) character
+ */
+ out.size = 1;
+ out.ptr = rd_malloc(out.size + 1);
+ rd_snprintf(out.ptr, out.size + 1, "\x01");
+ state->state =
+ RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL;
+ r = 0; // Will fail later in next state after sending response
+ break;
+
+ case RD_KAFKA_SASL_OAUTHB_STATE_RECV_SERVER_MSG_AFTER_FAIL:
+ /* Failure as previosuly communicated by server first message */
+ rd_snprintf(errstr, errstr_size,
+ "SASL OAUTHBEARER authentication failed "
+ "(principal=%s): %s",
+ state->md_principal_name, state->server_error_msg);
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
+ "OAUTHBEARER", "%s", errstr);
+ r = -1;
+ break;
+ }
+
+ if (out.ptr) {
+ r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr,
+ errstr_size);
+ rd_free(out.ptr);
+ }
+
+ return r;
+}
+
+
+/**
+ * @brief Handle received frame from broker.
+ */
+static int rd_kafka_sasl_oauthbearer_recv(rd_kafka_transport_t *rktrans,
+ const void *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size) {
+ const rd_chariov_t in = {.ptr = (char *)buf, .size = size};
+ return rd_kafka_sasl_oauthbearer_fsm(rktrans, &in, errstr, errstr_size);
+}
+
+
+/**
+ * @brief Initialize and start SASL OAUTHBEARER (builtin) authentication.
+ *
+ * Returns 0 on successful init and -1 on error.
+ *
+ * @locality broker thread
+ */
+static int rd_kafka_sasl_oauthbearer_client_new(rd_kafka_transport_t *rktrans,
+ const char *hostname,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_sasl_oauthbearer_handle_t *handle =
+ rktrans->rktrans_rkb->rkb_rk->rk_sasl.handle;
+ struct rd_kafka_sasl_oauthbearer_state *state;
+
+ state = rd_calloc(1, sizeof(*state));
+ state->state = RD_KAFKA_SASL_OAUTHB_STATE_SEND_CLIENT_FIRST_MESSAGE;
+
+ /*
+ * Save off the state structure now, before any possibility of
+ * returning, so that we will always free up the allocated memory in
+ * rd_kafka_sasl_oauthbearer_close().
+ */
+ rktrans->rktrans_sasl.state = state;
+
+ /*
+ * Make sure we have a consistent view of the token and extensions
+ * throughout the authentication process -- even if it is refreshed
+ * midway through this particular authentication.
+ */
+ rwlock_rdlock(&handle->lock);
+ if (!handle->token_value) {
+ rd_snprintf(errstr, errstr_size,
+ "OAUTHBEARER cannot log in because there "
+ "is no token available; last error: %s",
+ handle->errstr ? handle->errstr
+ : "(not available)");
+ rwlock_rdunlock(&handle->lock);
+ return -1;
+ }
+
+ state->token_value = rd_strdup(handle->token_value);
+ state->md_principal_name = rd_strdup(handle->md_principal_name);
+ rd_list_copy_to(&state->extensions, &handle->extensions,
+ rd_strtup_list_copy, NULL);
+
+ rwlock_rdunlock(&handle->lock);
+
+ /* Kick off the FSM */
+ return rd_kafka_sasl_oauthbearer_fsm(rktrans, NULL, errstr,
+ errstr_size);
+}
+
+
+/**
+ * @brief Token refresh timer callback.
+ *
+ * @locality rdkafka main thread
+ */
+static void
+rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_t *rk = arg;
+ rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
+
+ /* Enqueue a token refresh if necessary */
+ rd_kafka_oauthbearer_enqueue_token_refresh_if_necessary(handle);
+}
+
+
+/**
+ * @brief Per-client-instance initializer
+ */
+static int rd_kafka_sasl_oauthbearer_init(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_sasl_oauthbearer_handle_t *handle;
+
+ handle = rd_calloc(1, sizeof(*handle));
+ rk->rk_sasl.handle = handle;
+
+ rwlock_init(&handle->lock);
+
+ handle->rk = rk;
+
+ rd_list_init(&handle->extensions, 0,
+ (void (*)(void *))rd_strtup_destroy);
+
+ rd_kafka_timer_start(
+ &rk->rk_timers, &handle->token_refresh_tmr, 1 * 1000 * 1000,
+ rd_kafka_sasl_oauthbearer_token_refresh_tmr_cb, rk);
+
+ /* Automatically refresh the token if using the builtin
+ * unsecure JWS token refresher, to avoid an initial connection
+ * stall as we wait for the application to call poll(). */
+ if (rk->rk_conf.sasl.oauthbearer.token_refresh_cb ==
+ rd_kafka_oauthbearer_unsecured_token) {
+ rk->rk_conf.sasl.oauthbearer.token_refresh_cb(
+ rk, rk->rk_conf.sasl.oauthbearer_config,
+ rk->rk_conf.opaque);
+
+ return 0;
+ }
+
+ if (rk->rk_conf.sasl.enable_callback_queue) {
+ /* SASL specific callback queue enabled */
+ rk->rk_sasl.callback_q = rd_kafka_q_new(rk);
+ handle->callback_q = rd_kafka_q_keep(rk->rk_sasl.callback_q);
+ } else {
+ /* Use main queue */
+ handle->callback_q = rd_kafka_q_keep(rk->rk_rep);
+ }
+
+#if WITH_OAUTHBEARER_OIDC
+ if (rk->rk_conf.sasl.oauthbearer.method ==
+ RD_KAFKA_SASL_OAUTHBEARER_METHOD_OIDC &&
+ rk->rk_conf.sasl.oauthbearer.token_refresh_cb ==
+ rd_kafka_oidc_token_refresh_cb) {
+ handle->internal_refresh = rd_true;
+ rd_kafka_sasl_background_callbacks_enable(rk);
+ }
+#endif
+
+ /* Otherwise enqueue a refresh callback for the application. */
+ rd_kafka_oauthbearer_enqueue_token_refresh(handle);
+
+ return 0;
+}
+
+
+/**
+ * @brief Per-client-instance destructor
+ */
+static void rd_kafka_sasl_oauthbearer_term(rd_kafka_t *rk) {
+ rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
+
+ if (!handle)
+ return;
+
+ rk->rk_sasl.handle = NULL;
+
+ rd_kafka_timer_stop(&rk->rk_timers, &handle->token_refresh_tmr, 1);
+
+ RD_IF_FREE(handle->md_principal_name, rd_free);
+ RD_IF_FREE(handle->token_value, rd_free);
+ rd_list_destroy(&handle->extensions);
+ RD_IF_FREE(handle->errstr, rd_free);
+ RD_IF_FREE(handle->callback_q, rd_kafka_q_destroy);
+
+ rwlock_destroy(&handle->lock);
+
+ rd_free(handle);
+}
+
+
+/**
+ * @brief SASL/OAUTHBEARER is unable to connect unless a valid
+ * token is available, and a valid token CANNOT be
+ * available unless/until an initial token retrieval
+ * succeeds, so wait for this precondition if necessary.
+ */
+static rd_bool_t rd_kafka_sasl_oauthbearer_ready(rd_kafka_t *rk) {
+ rd_kafka_sasl_oauthbearer_handle_t *handle = rk->rk_sasl.handle;
+
+ if (!handle)
+ return rd_false;
+
+ return rd_kafka_oauthbearer_has_token(handle);
+}
+
+
+/**
+ * @brief Validate OAUTHBEARER config, which is a no-op
+ * (we rely on initial token retrieval)
+ */
+static int rd_kafka_sasl_oauthbearer_conf_validate(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+ /*
+ * We must rely on the initial token retrieval as a proxy
+ * for configuration validation because the configuration is
+ * implementation-dependent, and it is not necessarily the case
+ * that the config reflects the default unsecured JWS config
+ * that we know how to parse.
+ */
+ return 0;
+}
+
+
+
+const struct rd_kafka_sasl_provider rd_kafka_sasl_oauthbearer_provider = {
+ .name = "OAUTHBEARER (builtin)",
+ .init = rd_kafka_sasl_oauthbearer_init,
+ .term = rd_kafka_sasl_oauthbearer_term,
+ .ready = rd_kafka_sasl_oauthbearer_ready,
+ .client_new = rd_kafka_sasl_oauthbearer_client_new,
+ .recv = rd_kafka_sasl_oauthbearer_recv,
+ .close = rd_kafka_sasl_oauthbearer_close,
+ .conf_validate = rd_kafka_sasl_oauthbearer_conf_validate,
+};
+
+
+
+/**
+ * @name Unit tests
+ *
+ *
+ */
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should generate correct default values.
+ */
+static int do_unittest_config_defaults(void) {
+ static const char *sasl_oauthbearer_config =
+ "principal=fubar "
+ "scopeClaimName=whatever";
+ // default scope is empty, default lifetime is 3600 seconds
+ // {"alg":"none"}
+ // .
+ // {"sub":"fubar","iat":1.000,"exp":3601.000}
+ //
+ static const char *expected_token_value =
+ "eyJhbGciOiJub25lIn0"
+ "."
+ "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6MzYwMS4wMDB9"
+ ".";
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token;
+ int r;
+
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
+ sizeof(errstr));
+ if (r == -1)
+ RD_UT_FAIL("Failed to create a token: %s: %s",
+ sasl_oauthbearer_config, errstr);
+
+ RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 3600 * 1000,
+ "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms);
+ RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"),
+ "Invalid md_principal_name %s", token.md_principal_name);
+ RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value),
+ "Invalid token_value %s, expected %s", token.token_value,
+ expected_token_value);
+
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should generate correct token for explicit scope and lifeSeconds values.
+ */
+static int do_unittest_config_explicit_scope_and_life(void) {
+ static const char *sasl_oauthbearer_config =
+ "principal=fubar "
+ "scope=role1,role2 lifeSeconds=60";
+ // {"alg":"none"}
+ // .
+ // {"sub":"fubar","iat":1.000,"exp":61.000,"scope":["role1","role2"]}
+ //
+ static const char *expected_token_value =
+ "eyJhbGciOiJub25lIn0"
+ "."
+ "eyJzdWIiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJzY29wZ"
+ "SI6WyJyb2xlMSIsInJvbGUyIl19"
+ ".";
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token;
+ int r;
+
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
+ sizeof(errstr));
+ if (r == -1)
+ RD_UT_FAIL("Failed to create a token: %s: %s",
+ sasl_oauthbearer_config, errstr);
+
+ RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000,
+ "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms);
+ RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"),
+ "Invalid md_principal_name %s", token.md_principal_name);
+ RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value),
+ "Invalid token_value %s, expected %s", token.token_value,
+ expected_token_value);
+
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should generate correct token when all values are provided explicitly.
+ */
+static int do_unittest_config_all_explicit_values(void) {
+ static const char *sasl_oauthbearer_config =
+ "principal=fubar "
+ "principalClaimName=azp scope=role1,role2 "
+ "scopeClaimName=roles lifeSeconds=60";
+ // {"alg":"none"}
+ // .
+ // {"azp":"fubar","iat":1.000,"exp":61.000,"roles":["role1","role2"]}
+ //
+ static const char *expected_token_value =
+ "eyJhbGciOiJub25lIn0"
+ "."
+ "eyJhenAiOiJmdWJhciIsImlhdCI6MS4wMDAsImV4cCI6NjEuMDAwLCJyb2xlc"
+ "yI6WyJyb2xlMSIsInJvbGUyIl19"
+ ".";
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token;
+ int r;
+
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
+ sizeof(errstr));
+ if (r == -1)
+ RD_UT_FAIL("Failed to create a token: %s: %s",
+ sasl_oauthbearer_config, errstr);
+
+ RD_UT_ASSERT(token.md_lifetime_ms == now_wallclock_ms + 60 * 1000,
+ "Invalid md_lifetime_ms %" PRId64, token.md_lifetime_ms);
+ RD_UT_ASSERT(!strcmp(token.md_principal_name, "fubar"),
+ "Invalid md_principal_name %s", token.md_principal_name);
+ RD_UT_ASSERT(!strcmp(token.token_value, expected_token_value),
+ "Invalid token_value %s, expected %s", token.token_value,
+ expected_token_value);
+
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should fail when no principal specified.
+ */
+static int do_unittest_config_no_principal_should_fail(void) {
+ static const char *expected_msg =
+ "Invalid sasl.oauthbearer.config: "
+ "no principal=<value>";
+ static const char *sasl_oauthbearer_config =
+ "extension_notaprincipal=hi";
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT;
+ int r;
+
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
+ sizeof(errstr));
+ if (r != -1)
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_ASSERT(r == -1, "Did not fail despite missing principal");
+
+ RD_UT_ASSERT(!strcmp(errstr, expected_msg),
+ "Incorrect error message when no principal: "
+ "expected=%s received=%s",
+ expected_msg, errstr);
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should fail when no sasl.oauthbearer.config is specified.
+ */
+static int do_unittest_config_empty_should_fail(void) {
+ static const char *expected_msg =
+ "Invalid sasl.oauthbearer.config: "
+ "must not be empty";
+ static const char *sasl_oauthbearer_config = "";
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token = RD_ZERO_INIT;
+ int r;
+
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
+ sizeof(errstr));
+ if (r != -1)
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_ASSERT(r == -1, "Did not fail despite empty config");
+
+ RD_UT_ASSERT(!strcmp(errstr, expected_msg),
+ "Incorrect error message with empty config: "
+ "expected=%s received=%s",
+ expected_msg, errstr);
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should fail when something unrecognized is specified.
+ */
+static int do_unittest_config_unrecognized_should_fail(void) {
+ static const char *expected_msg =
+ "Unrecognized "
+ "sasl.oauthbearer.config beginning at: unrecognized";
+ static const char *sasl_oauthbearer_config =
+ "principal=fubar unrecognized";
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token;
+ int r;
+
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
+ sizeof(errstr));
+ if (r != -1)
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_ASSERT(r == -1, "Did not fail with something unrecognized");
+
+ RD_UT_ASSERT(!strcmp(errstr, expected_msg),
+ "Incorrect error message with something unrecognized: "
+ "expected=%s received=%s",
+ expected_msg, errstr);
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should fail when empty values are specified.
+ */
+static int do_unittest_config_empty_value_should_fail(void) {
+ static const char *sasl_oauthbearer_configs[] = {
+ "principal=", "principal=fubar principalClaimName=",
+ "principal=fubar scope=", "principal=fubar scopeClaimName=",
+ "principal=fubar lifeSeconds="};
+ static const char *expected_prefix =
+ "Invalid sasl.oauthbearer.config: empty";
+ size_t i;
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ int r;
+
+ for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *);
+ i++) {
+ struct rd_kafka_sasl_oauthbearer_token token;
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_configs[i], now_wallclock_ms,
+ errstr, sizeof(errstr));
+ if (r != -1)
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_ASSERT(r == -1, "Did not fail with an empty value: %s",
+ sasl_oauthbearer_configs[i]);
+
+ RD_UT_ASSERT(
+ !strncmp(expected_prefix, errstr, strlen(expected_prefix)),
+ "Incorrect error message prefix when empty "
+ "(%s): expected=%s received=%s",
+ sasl_oauthbearer_configs[i], expected_prefix, errstr);
+ }
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should fail when value with embedded quote is specified.
+ */
+static int do_unittest_config_value_with_quote_should_fail(void) {
+ static const char *sasl_oauthbearer_configs[] = {
+ "principal=\"fu", "principal=fubar principalClaimName=\"bar",
+ "principal=fubar scope=\"a,b,c",
+ "principal=fubar scopeClaimName=\"baz"};
+ static const char *expected_prefix =
+ "Invalid "
+ "sasl.oauthbearer.config: '\"' cannot appear in ";
+ size_t i;
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ int r;
+
+ for (i = 0; i < sizeof(sasl_oauthbearer_configs) / sizeof(const char *);
+ i++) {
+ struct rd_kafka_sasl_oauthbearer_token token;
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_configs[i], now_wallclock_ms,
+ errstr, sizeof(errstr));
+ if (r != -1)
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_ASSERT(r == -1, "Did not fail with embedded quote: %s",
+ sasl_oauthbearer_configs[i]);
+
+ RD_UT_ASSERT(
+ !strncmp(expected_prefix, errstr, strlen(expected_prefix)),
+ "Incorrect error message prefix with "
+ "embedded quote (%s): expected=%s received=%s",
+ sasl_oauthbearer_configs[i], expected_prefix, errstr);
+ }
+ RD_UT_PASS();
+}
+
+/**
+ * @brief `sasl.oauthbearer.config` test:
+ * should generate correct extensions.
+ */
+static int do_unittest_config_extensions(void) {
+ static const char *sasl_oauthbearer_config =
+ "principal=fubar "
+ "extension_a=b extension_yz=yzval";
+ rd_ts_t now_wallclock_ms = 1000;
+ char errstr[512];
+ struct rd_kafka_sasl_oauthbearer_token token;
+ int r;
+
+ r = rd_kafka_oauthbearer_unsecured_token0(
+ &token, sasl_oauthbearer_config, now_wallclock_ms, errstr,
+ sizeof(errstr));
+
+ if (r == -1)
+ RD_UT_FAIL("Failed to create a token: %s: %s",
+ sasl_oauthbearer_config, errstr);
+
+ RD_UT_ASSERT(token.extension_size == 4,
+ "Incorrect extensions: expected 4, received %" PRIusz,
+ token.extension_size);
+
+ RD_UT_ASSERT(!strcmp(token.extensions[0], "a") &&
+ !strcmp(token.extensions[1], "b") &&
+ !strcmp(token.extensions[2], "yz") &&
+ !strcmp(token.extensions[3], "yzval"),
+ "Incorrect extensions: expected a=b and "
+ "yz=yzval but received %s=%s and %s=%s",
+ token.extensions[0], token.extensions[1],
+ token.extensions[2], token.extensions[3]);
+
+ rd_kafka_sasl_oauthbearer_token_free(&token);
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief make sure illegal extensions keys are rejected
+ */
+static int do_unittest_illegal_extension_keys_should_fail(void) {
+ static const char *illegal_keys[] = {"", "auth", "a1", " a"};
+ size_t i;
+ char errstr[512];
+ int r;
+
+ for (i = 0; i < sizeof(illegal_keys) / sizeof(const char *); i++) {
+ r = check_oauthbearer_extension_key(illegal_keys[i], errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(r == -1,
+ "Did not recognize illegal extension key: %s",
+ illegal_keys[i]);
+ }
+ RD_UT_PASS();
+}
+
+/**
+ * @brief make sure illegal extensions keys are rejected
+ */
+static int do_unittest_odd_extension_size_should_fail(void) {
+ static const char *expected_errstr =
+ "Incorrect extension size "
+ "(must be a non-negative multiple of 2): 1";
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ rd_kafka_t rk = RD_ZERO_INIT;
+ rd_kafka_sasl_oauthbearer_handle_t handle = RD_ZERO_INIT;
+
+ rk.rk_conf.sasl.provider = &rd_kafka_sasl_oauthbearer_provider;
+ rk.rk_sasl.handle = &handle;
+
+ rwlock_init(&handle.lock);
+
+ err = rd_kafka_oauthbearer_set_token0(&rk, "abcd", 1000, "fubar", NULL,
+ 1, errstr, sizeof(errstr));
+
+ rwlock_destroy(&handle.lock);
+
+ RD_UT_ASSERT(err, "Did not recognize illegal extension size");
+ RD_UT_ASSERT(!strcmp(errstr, expected_errstr),
+ "Incorrect error message for illegal "
+ "extension size: expected=%s; received=%s",
+ expected_errstr, errstr);
+ RD_UT_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "Expected ErrInvalidArg, not %s", rd_kafka_err2name(err));
+
+ RD_UT_PASS();
+}
+
+int unittest_sasl_oauthbearer(void) {
+ int fails = 0;
+
+ fails += do_unittest_config_no_principal_should_fail();
+ fails += do_unittest_config_empty_should_fail();
+ fails += do_unittest_config_empty_value_should_fail();
+ fails += do_unittest_config_value_with_quote_should_fail();
+ fails += do_unittest_config_unrecognized_should_fail();
+ fails += do_unittest_config_defaults();
+ fails += do_unittest_config_explicit_scope_and_life();
+ fails += do_unittest_config_all_explicit_values();
+ fails += do_unittest_config_extensions();
+ fails += do_unittest_illegal_extension_keys_should_fail();
+ fails += do_unittest_odd_extension_size_should_fail();
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h
new file mode 100644
index 000000000..75ab51d02
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer.h
@@ -0,0 +1,52 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_SASL_OAUTHBEARER_H_
+#define _RDKAFKA_SASL_OAUTHBEARER_H_
+
+void rd_kafka_oauthbearer_unsecured_token(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque);
+
+rd_kafka_resp_err_t
+rd_kafka_oauthbearer_set_token0(rd_kafka_t *rk,
+ const char *token_value,
+ int64_t md_lifetime_ms,
+ const char *md_principal_name,
+ const char **extensions,
+ size_t extension_size,
+ char *errstr,
+ size_t errstr_size);
+
+rd_kafka_resp_err_t rd_kafka_oauthbearer_set_token_failure0(rd_kafka_t *rk,
+ const char *errstr);
+
+int unittest_sasl_oauthbearer(void);
+
+
+#endif /* _RDKAFKA_SASL_OAUTHBEARER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c
new file mode 100644
index 000000000..6c2773b02
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.c
@@ -0,0 +1,604 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2021 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Builtin SASL OAUTHBEARER OIDC support
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_sasl_int.h"
+#include "rdunittest.h"
+#include "cJSON.h"
+#include <curl/curl.h>
+#include "rdhttp.h"
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+
+
+/**
+ * @brief Base64 encode binary input \p in, and write base64-encoded string
+ * and it's size to \p out
+ */
+static void rd_base64_encode(const rd_chariov_t *in, rd_chariov_t *out) {
+ size_t max_len;
+
+ max_len = (((in->size + 2) / 3) * 4) + 1;
+ out->ptr = rd_malloc(max_len);
+ rd_assert(out->ptr);
+
+ out->size = EVP_EncodeBlock((uint8_t *)out->ptr, (uint8_t *)in->ptr,
+ (int)in->size);
+
+ rd_assert(out->size <= max_len);
+ out->ptr[out->size] = 0;
+}
+
+
+/**
+ * @brief Generate Authorization field for HTTP header.
+ * The field contains base64-encoded string which
+ * is generated from \p client_id and \p client_secret.
+ *
+ * @returns Return the authorization field.
+ *
+ * @locality Any thread.
+ */
+static char *rd_kafka_oidc_build_auth_header(const char *client_id,
+ const char *client_secret) {
+
+ rd_chariov_t client_authorization_in;
+ rd_chariov_t client_authorization_out;
+
+ size_t authorization_base64_header_size;
+ char *authorization_base64_header;
+
+ client_authorization_in.size =
+ strlen(client_id) + strlen(client_secret) + 2;
+ client_authorization_in.ptr = rd_malloc(client_authorization_in.size);
+ rd_snprintf(client_authorization_in.ptr, client_authorization_in.size,
+ "%s:%s", client_id, client_secret);
+
+ client_authorization_in.size--;
+ rd_base64_encode(&client_authorization_in, &client_authorization_out);
+
+ authorization_base64_header_size =
+ strlen("Authorization: Basic ") + client_authorization_out.size + 1;
+ authorization_base64_header =
+ rd_malloc(authorization_base64_header_size);
+ rd_snprintf(authorization_base64_header,
+ authorization_base64_header_size, "Authorization: Basic %s",
+ client_authorization_out.ptr);
+
+ rd_free(client_authorization_in.ptr);
+ rd_free(client_authorization_out.ptr);
+ return authorization_base64_header;
+}
+
+
+/**
+ * @brief Build headers for HTTP(S) requests based on \p client_id
+ * and \p client_secret. The result will be returned in \p *headersp.
+ *
+ * @locality Any thread.
+ */
+static void rd_kafka_oidc_build_headers(const char *client_id,
+ const char *client_secret,
+ struct curl_slist **headersp) {
+ char *authorization_base64_header;
+
+ authorization_base64_header =
+ rd_kafka_oidc_build_auth_header(client_id, client_secret);
+
+ *headersp = curl_slist_append(*headersp, "Accept: application/json");
+ *headersp = curl_slist_append(*headersp, authorization_base64_header);
+
+ *headersp = curl_slist_append(
+ *headersp, "Content-Type: application/x-www-form-urlencoded");
+
+ rd_free(authorization_base64_header);
+}
+
+/**
+ * @brief The format of JWT is Header.Payload.Signature.
+ * Extract and decode payloads from JWT \p src.
+ * The decoded payloads will be returned in \p *bufplainp.
+ *
+ * @returns Return error message while decoding the payload.
+ */
+static const char *rd_kafka_jwt_b64_decode_payload(const char *src,
+ char **bufplainp) {
+ char *converted_src;
+ char *payload = NULL;
+
+ const char *errstr = NULL;
+
+ int i, padding, len;
+
+ int payload_len;
+ int nbytesdecoded;
+
+ int payloads_start = 0;
+ int payloads_end = 0;
+
+ len = (int)strlen(src);
+ converted_src = rd_malloc(len + 4);
+
+ for (i = 0; i < len; i++) {
+ switch (src[i]) {
+ case '-':
+ converted_src[i] = '+';
+ break;
+
+ case '_':
+ converted_src[i] = '/';
+ break;
+
+ case '.':
+ if (payloads_start == 0)
+ payloads_start = i + 1;
+ else {
+ if (payloads_end > 0) {
+ errstr =
+ "The token is invalid with more "
+ "than 2 delimiters";
+ goto done;
+ }
+ payloads_end = i;
+ }
+ /* FALLTHRU */
+
+ default:
+ converted_src[i] = src[i];
+ }
+ }
+
+ if (payloads_start == 0 || payloads_end == 0) {
+ errstr = "The token is invalid with less than 2 delimiters";
+ goto done;
+ }
+
+ payload_len = payloads_end - payloads_start;
+ payload = rd_malloc(payload_len + 4);
+ strncpy(payload, (converted_src + payloads_start), payload_len);
+
+ padding = 4 - (payload_len % 4);
+ if (padding < 4) {
+ while (padding--)
+ payload[payload_len++] = '=';
+ }
+
+ nbytesdecoded = ((payload_len + 3) / 4) * 3;
+ *bufplainp = rd_malloc(nbytesdecoded + 1);
+
+ if (EVP_DecodeBlock((uint8_t *)(*bufplainp), (uint8_t *)payload,
+ (int)payload_len) == -1) {
+ errstr = "Failed to decode base64 payload";
+ }
+
+done:
+ RD_IF_FREE(payload, rd_free);
+ RD_IF_FREE(converted_src, rd_free);
+ return errstr;
+}
+
+/**
+ * @brief Build post_fields with \p scope.
+ * The format of the post_fields is
+ * `grant_type=client_credentials&scope=scope`
+ * The post_fields will be returned in \p *post_fields.
+ * The post_fields_size will be returned in \p post_fields_size.
+ *
+ */
+static void rd_kafka_oidc_build_post_fields(const char *scope,
+ char **post_fields,
+ size_t *post_fields_size) {
+ size_t scope_size = 0;
+
+ if (scope)
+ scope_size = strlen(scope);
+ if (scope_size == 0) {
+ *post_fields = rd_strdup("grant_type=client_credentials");
+ *post_fields_size = strlen("grant_type=client_credentials");
+ } else {
+ *post_fields_size =
+ strlen("grant_type=client_credentials&scope=") + scope_size;
+ *post_fields = rd_malloc(*post_fields_size + 1);
+ rd_snprintf(*post_fields, *post_fields_size + 1,
+ "grant_type=client_credentials&scope=%s", scope);
+ }
+}
+
+
+/**
+ * @brief Implementation of Oauth/OIDC token refresh callback function,
+ * will receive the JSON response after HTTP call to token provider,
+ * then extract the jwt from the JSON response, and forward it to
+ * the broker.
+ */
+void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque) {
+ const int timeout_s = 20;
+ const int retry = 4;
+ const int retry_ms = 5 * 1000;
+
+ double exp;
+
+ cJSON *json = NULL;
+ cJSON *payloads = NULL;
+ cJSON *parsed_token, *jwt_exp, *jwt_sub;
+
+ rd_http_error_t *herr;
+
+ char *jwt_token;
+ char *post_fields;
+ char *decoded_payloads = NULL;
+
+ struct curl_slist *headers = NULL;
+
+ const char *token_url;
+ const char *sub;
+ const char *errstr;
+
+ size_t post_fields_size;
+ size_t extension_cnt;
+ size_t extension_key_value_cnt = 0;
+
+ char set_token_errstr[512];
+ char decode_payload_errstr[512];
+
+ char **extensions = NULL;
+ char **extension_key_value = NULL;
+
+ if (rd_kafka_terminating(rk))
+ return;
+
+ rd_kafka_oidc_build_headers(rk->rk_conf.sasl.oauthbearer.client_id,
+ rk->rk_conf.sasl.oauthbearer.client_secret,
+ &headers);
+
+ /* Build post fields */
+ rd_kafka_oidc_build_post_fields(rk->rk_conf.sasl.oauthbearer.scope,
+ &post_fields, &post_fields_size);
+
+ token_url = rk->rk_conf.sasl.oauthbearer.token_endpoint_url;
+
+ herr = rd_http_post_expect_json(rk, token_url, headers, post_fields,
+ post_fields_size, timeout_s, retry,
+ retry_ms, &json);
+
+ if (unlikely(herr != NULL)) {
+ rd_kafka_log(rk, LOG_ERR, "OIDC",
+ "Failed to retrieve OIDC "
+ "token from \"%s\": %s (%d)",
+ token_url, herr->errstr, herr->code);
+ rd_kafka_oauthbearer_set_token_failure(rk, herr->errstr);
+ rd_http_error_destroy(herr);
+ goto done;
+ }
+
+ parsed_token = cJSON_GetObjectItem(json, "access_token");
+
+ if (parsed_token == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "\"access_token\" field");
+ goto done;
+ }
+
+ jwt_token = cJSON_GetStringValue(parsed_token);
+ if (jwt_token == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON "
+ "response as a value string");
+ goto done;
+ }
+
+ errstr = rd_kafka_jwt_b64_decode_payload(jwt_token, &decoded_payloads);
+ if (errstr != NULL) {
+ rd_snprintf(decode_payload_errstr,
+ sizeof(decode_payload_errstr),
+ "Failed to decode JWT payload: %s", errstr);
+ rd_kafka_oauthbearer_set_token_failure(rk,
+ decode_payload_errstr);
+ goto done;
+ }
+
+ payloads = cJSON_Parse(decoded_payloads);
+ if (payloads == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk, "Failed to parse JSON JWT payload");
+ goto done;
+ }
+
+ jwt_exp = cJSON_GetObjectItem(payloads, "exp");
+ if (jwt_exp == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "\"exp\" field");
+ goto done;
+ }
+
+ exp = cJSON_GetNumberValue(jwt_exp);
+ if (exp <= 0) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "valid \"exp\" field");
+ goto done;
+ }
+
+ jwt_sub = cJSON_GetObjectItem(payloads, "sub");
+ if (jwt_sub == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "\"sub\" field");
+ goto done;
+ }
+
+ sub = cJSON_GetStringValue(jwt_sub);
+ if (sub == NULL) {
+ rd_kafka_oauthbearer_set_token_failure(
+ rk,
+ "Expected JSON JWT response with "
+ "valid \"sub\" field");
+ goto done;
+ }
+
+ if (rk->rk_conf.sasl.oauthbearer.extensions_str) {
+ extensions =
+ rd_string_split(rk->rk_conf.sasl.oauthbearer.extensions_str,
+ ',', rd_true, &extension_cnt);
+
+ extension_key_value = rd_kafka_conf_kv_split(
+ (const char **)extensions, extension_cnt,
+ &extension_key_value_cnt);
+ }
+
+ if (rd_kafka_oauthbearer_set_token(
+ rk, jwt_token, (int64_t)exp * 1000, sub,
+ (const char **)extension_key_value, extension_key_value_cnt,
+ set_token_errstr,
+ sizeof(set_token_errstr)) != RD_KAFKA_RESP_ERR_NO_ERROR)
+ rd_kafka_oauthbearer_set_token_failure(rk, set_token_errstr);
+
+done:
+ RD_IF_FREE(decoded_payloads, rd_free);
+ RD_IF_FREE(post_fields, rd_free);
+ RD_IF_FREE(json, cJSON_Delete);
+ RD_IF_FREE(headers, curl_slist_free_all);
+ RD_IF_FREE(extensions, rd_free);
+ RD_IF_FREE(extension_key_value, rd_free);
+ RD_IF_FREE(payloads, cJSON_Delete);
+}
+
+
+/**
+ * @brief Make sure the jwt is able to be extracted from HTTP(S) response.
+ * The JSON response after HTTP(S) call to token provider will be in
+ * rd_http_req_t.hreq_buf and jwt is the value of field "access_token",
+ * the format is {"access_token":"*******"}.
+ * This function mocks up the rd_http_req_t.hreq_buf using an dummy
+ * jwt. The rd_http_parse_json will extract the jwt from rd_http_req_t
+ * and make sure the extracted jwt is same with the dummy one.
+ */
+static int ut_sasl_oauthbearer_oidc_should_succeed(void) {
+ /* Generate a token in the https://jwt.io/ website by using the
+ * following steps:
+ * 1. Select the algorithm RS256 from the Algorithm drop-down menu.
+ * 2. Enter the header and the payload.
+ * payload should contains "exp", "iat", "sub", for example:
+ * payloads = {"exp": 1636532769,
+ "iat": 1516239022,
+ "sub": "sub"}
+ header should contains "kid", for example:
+ headers={"kid": "abcedfg"} */
+ static const char *expected_jwt_token =
+ "eyJhbGciOiJIUzI1NiIsInR5"
+ "cCI6IkpXVCIsImtpZCI6ImFiY2VkZmcifQ"
+ "."
+ "eyJpYXQiOjE2MzIzNzUzMjAsInN1YiI6InN"
+ "1YiIsImV4cCI6MTYzMjM3NTYyMH0"
+ "."
+ "bT5oY8K-rS2gQ7Awc40844bK3zhzBhZb7sputErqQHY";
+ char *expected_token_value;
+ size_t token_len;
+ rd_http_req_t hreq;
+ rd_http_error_t *herr;
+ cJSON *json = NULL;
+ char *token;
+ cJSON *parsed_token;
+
+ RD_UT_BEGIN();
+
+ herr = rd_http_req_init(&hreq, "");
+
+ RD_UT_ASSERT(!herr,
+ "Expected initialize to succeed, "
+ "but failed with error code: %d, error string: %s",
+ herr->code, herr->errstr);
+
+ token_len = strlen("access_token") + strlen(expected_jwt_token) + 8;
+
+ expected_token_value = rd_malloc(token_len);
+ rd_snprintf(expected_token_value, token_len, "{\"%s\":\"%s\"}",
+ "access_token", expected_jwt_token);
+ rd_buf_write(hreq.hreq_buf, expected_token_value, token_len);
+
+ herr = rd_http_parse_json(&hreq, &json);
+ RD_UT_ASSERT(!herr,
+ "Failed to parse JSON token: error code: %d, "
+ "error string: %s",
+ herr->code, herr->errstr);
+
+ RD_UT_ASSERT(json, "Expected non-empty json.");
+
+ parsed_token = cJSON_GetObjectItem(json, "access_token");
+
+ RD_UT_ASSERT(parsed_token, "Expected access_token in JSON response.");
+ token = parsed_token->valuestring;
+
+ RD_UT_ASSERT(!strcmp(expected_jwt_token, token),
+ "Incorrect token received: "
+ "expected=%s; received=%s",
+ expected_jwt_token, token);
+
+ rd_free(expected_token_value);
+ rd_http_error_destroy(herr);
+ rd_http_req_destroy(&hreq);
+ cJSON_Delete(json);
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Make sure JSON doesn't include the "access_token" key,
+ * it will fail and return an empty token.
+ */
+static int ut_sasl_oauthbearer_oidc_with_empty_key(void) {
+ static const char *empty_token_format = "{}";
+ size_t token_len;
+ rd_http_req_t hreq;
+ rd_http_error_t *herr;
+ cJSON *json = NULL;
+ cJSON *parsed_token;
+
+ RD_UT_BEGIN();
+
+ herr = rd_http_req_init(&hreq, "");
+ RD_UT_ASSERT(!herr,
+ "Expected initialization to succeed, "
+ "but it failed with error code: %d, error string: %s",
+ herr->code, herr->errstr);
+
+ token_len = strlen(empty_token_format);
+
+ rd_buf_write(hreq.hreq_buf, empty_token_format, token_len);
+
+ herr = rd_http_parse_json(&hreq, &json);
+
+ RD_UT_ASSERT(!herr,
+ "Expected JSON token parsing to succeed, "
+ "but it failed with error code: %d, error string: %s",
+ herr->code, herr->errstr);
+
+ RD_UT_ASSERT(json, "Expected non-empty json.");
+
+ parsed_token = cJSON_GetObjectItem(json, "access_token");
+
+ RD_UT_ASSERT(!parsed_token,
+ "Did not expecte access_token in JSON response");
+
+ rd_http_req_destroy(&hreq);
+ rd_http_error_destroy(herr);
+ cJSON_Delete(json);
+ cJSON_Delete(parsed_token);
+ RD_UT_PASS();
+}
+
+/**
+ * @brief Make sure the post_fields return correct with the scope.
+ */
+static int ut_sasl_oauthbearer_oidc_post_fields(void) {
+ static const char *scope = "test-scope";
+ static const char *expected_post_fields =
+ "grant_type=client_credentials&scope=test-scope";
+
+ size_t expected_post_fields_size = strlen(expected_post_fields);
+
+ size_t post_fields_size;
+
+ char *post_fields;
+
+ RD_UT_BEGIN();
+
+ rd_kafka_oidc_build_post_fields(scope, &post_fields, &post_fields_size);
+
+ RD_UT_ASSERT(expected_post_fields_size == post_fields_size,
+ "Expected expected_post_fields_size is %" PRIusz
+ " received post_fields_size is %" PRIusz,
+ expected_post_fields_size, post_fields_size);
+ RD_UT_ASSERT(!strcmp(expected_post_fields, post_fields),
+ "Expected expected_post_fields is %s"
+ " received post_fields is %s",
+ expected_post_fields, post_fields);
+
+ rd_free(post_fields);
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief Make sure the post_fields return correct with the empty scope.
+ */
+static int ut_sasl_oauthbearer_oidc_post_fields_with_empty_scope(void) {
+ static const char *scope = NULL;
+ static const char *expected_post_fields =
+ "grant_type=client_credentials";
+
+ size_t expected_post_fields_size = strlen(expected_post_fields);
+
+ size_t post_fields_size;
+
+ char *post_fields;
+
+ RD_UT_BEGIN();
+
+ rd_kafka_oidc_build_post_fields(scope, &post_fields, &post_fields_size);
+
+ RD_UT_ASSERT(expected_post_fields_size == post_fields_size,
+ "Expected expected_post_fields_size is %" PRIusz
+ " received post_fields_size is %" PRIusz,
+ expected_post_fields_size, post_fields_size);
+ RD_UT_ASSERT(!strcmp(expected_post_fields, post_fields),
+ "Expected expected_post_fields is %s"
+ " received post_fields is %s",
+ expected_post_fields, post_fields);
+
+ rd_free(post_fields);
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief make sure the jwt is able to be extracted from HTTP(S) requests
+ * or fail as expected.
+ */
+int unittest_sasl_oauthbearer_oidc(void) {
+ int fails = 0;
+ fails += ut_sasl_oauthbearer_oidc_should_succeed();
+ fails += ut_sasl_oauthbearer_oidc_with_empty_key();
+ fails += ut_sasl_oauthbearer_oidc_post_fields();
+ fails += ut_sasl_oauthbearer_oidc_post_fields_with_empty_scope();
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h
new file mode 100644
index 000000000..a944f2efa
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_oauthbearer_oidc.h
@@ -0,0 +1,37 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2021 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_
+#define _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_
+void rd_kafka_oidc_token_refresh_cb(rd_kafka_t *rk,
+ const char *oauthbearer_config,
+ void *opaque);
+
+int unittest_sasl_oauthbearer_oidc(void);
+
+#endif /* _RDKAFKA_SASL_OAUTHBEARER_OIDC_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c
new file mode 100644
index 000000000..1e715cfba
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_plain.c
@@ -0,0 +1,142 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Builtin SASL PLAIN support when Cyrus SASL is not available
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_sasl.h"
+#include "rdkafka_sasl_int.h"
+
+
+/**
+ * @brief Handle received frame from broker.
+ */
+static int rd_kafka_sasl_plain_recv(struct rd_kafka_transport_s *rktrans,
+ const void *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size) {
+ if (size)
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLPLAIN",
+ "Received non-empty SASL PLAIN (builtin) "
+ "response from broker (%" PRIusz " bytes)",
+ size);
+
+ rd_kafka_sasl_auth_done(rktrans);
+
+ return 0;
+}
+
+
+/**
+ * @brief Initialize and start SASL PLAIN (builtin) authentication.
+ *
+ * Returns 0 on successful init and -1 on error.
+ *
+ * @locality broker thread
+ */
+int rd_kafka_sasl_plain_client_new(rd_kafka_transport_t *rktrans,
+ const char *hostname,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+ rd_kafka_t *rk = rkb->rkb_rk;
+ /* [authzid] UTF8NUL authcid UTF8NUL passwd */
+ char *buf;
+ int of = 0;
+ int zidlen = 0;
+ int cidlen, pwlen;
+
+ mtx_lock(&rk->rk_conf.sasl.lock);
+
+ cidlen = rk->rk_conf.sasl.username
+ ? (int)strlen(rk->rk_conf.sasl.username)
+ : 0;
+ pwlen = rk->rk_conf.sasl.password
+ ? (int)strlen(rk->rk_conf.sasl.password)
+ : 0;
+
+ buf = rd_alloca(zidlen + 1 + cidlen + 1 + pwlen + 1);
+
+ /* authzid: none (empty) */
+ /* UTF8NUL */
+ buf[of++] = 0;
+ /* authcid */
+ memcpy(&buf[of], rk->rk_conf.sasl.username, cidlen);
+ of += cidlen;
+ /* UTF8NUL */
+ buf[of++] = 0;
+ /* passwd */
+ memcpy(&buf[of], rk->rk_conf.sasl.password, pwlen);
+ of += pwlen;
+ mtx_unlock(&rk->rk_conf.sasl.lock);
+
+ rd_rkb_dbg(rkb, SECURITY, "SASLPLAIN",
+ "Sending SASL PLAIN (builtin) authentication token");
+
+ if (rd_kafka_sasl_send(rktrans, buf, of, errstr, errstr_size))
+ return -1;
+
+ /* PLAIN is appearantly done here, but we still need to make sure
+ * the PLAIN frame is sent and we get a response back (empty) */
+ rktrans->rktrans_sasl.complete = 1;
+ return 0;
+}
+
+
+/**
+ * @brief Validate PLAIN config
+ */
+static int rd_kafka_sasl_plain_conf_validate(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+ rd_bool_t both_set;
+
+ mtx_lock(&rk->rk_conf.sasl.lock);
+ both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password;
+ mtx_unlock(&rk->rk_conf.sasl.lock);
+
+ if (!both_set) {
+ rd_snprintf(errstr, errstr_size,
+ "sasl.username and sasl.password must be set");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+const struct rd_kafka_sasl_provider rd_kafka_sasl_plain_provider = {
+ .name = "PLAIN (builtin)",
+ .client_new = rd_kafka_sasl_plain_client_new,
+ .recv = rd_kafka_sasl_plain_recv,
+ .conf_validate = rd_kafka_sasl_plain_conf_validate};
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c
new file mode 100644
index 000000000..7d5db5649
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_scram.c
@@ -0,0 +1,973 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Builtin SASL SCRAM support when Cyrus SASL is not available
+ */
+#include "rdkafka_int.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_sasl.h"
+#include "rdkafka_sasl_int.h"
+#include "rdrand.h"
+#include "rdunittest.h"
+
+
+#if WITH_SSL
+#include <openssl/hmac.h>
+#include <openssl/evp.h>
+#include <openssl/sha.h>
+#else
+#error "WITH_SSL (OpenSSL) is required for SASL SCRAM"
+#endif
+
+
+/**
+ * @brief Per-connection state
+ */
+struct rd_kafka_sasl_scram_state {
+ enum { RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE,
+ RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE,
+ RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE,
+ } state;
+ rd_chariov_t cnonce; /* client c-nonce */
+ rd_chariov_t first_msg_bare; /* client-first-message-bare */
+ char *ServerSignatureB64; /* ServerSignature in Base64 */
+ const EVP_MD *evp; /* Hash function pointer */
+};
+
+
+/**
+ * @brief Close and free authentication state
+ */
+static void rd_kafka_sasl_scram_close(rd_kafka_transport_t *rktrans) {
+ struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
+
+ if (!state)
+ return;
+
+ RD_IF_FREE(state->cnonce.ptr, rd_free);
+ RD_IF_FREE(state->first_msg_bare.ptr, rd_free);
+ RD_IF_FREE(state->ServerSignatureB64, rd_free);
+ rd_free(state);
+}
+
+
+
+/**
+ * @brief Generates a nonce string (a random printable string)
+ * @remark dst->ptr will be allocated and must be freed.
+ */
+static void rd_kafka_sasl_scram_generate_nonce(rd_chariov_t *dst) {
+ int i;
+ dst->size = 32;
+ dst->ptr = rd_malloc(dst->size + 1);
+ for (i = 0; i < (int)dst->size; i++)
+ dst->ptr[i] = (char)rd_jitter(0x2d /*-*/, 0x7e /*~*/);
+ dst->ptr[i] = 0;
+}
+
+
+/**
+ * @brief Parses inbuf for SCRAM attribute \p attr (e.g., 's')
+ * @returns a newly allocated copy of the value, or NULL
+ * on failure in which case an error is written to \p errstr
+ * prefixed by \p description.
+ */
+static char *rd_kafka_sasl_scram_get_attr(const rd_chariov_t *inbuf,
+ char attr,
+ const char *description,
+ char *errstr,
+ size_t errstr_size) {
+ size_t of = 0;
+
+ for (of = 0; of < inbuf->size;) {
+ const char *td;
+ size_t len;
+
+ /* Find next delimiter , (if any) */
+ td = memchr(&inbuf->ptr[of], ',', inbuf->size - of);
+ if (td)
+ len = (size_t)(td - &inbuf->ptr[of]);
+ else
+ len = inbuf->size - of;
+
+ /* Check if attr "x=" matches */
+ if (inbuf->ptr[of] == attr && inbuf->size > of + 1 &&
+ inbuf->ptr[of + 1] == '=') {
+ char *ret;
+ of += 2; /* past = */
+ ret = rd_malloc(len - 2 + 1);
+ memcpy(ret, &inbuf->ptr[of], len - 2);
+ ret[len - 2] = '\0';
+ return ret;
+ }
+
+ /* Not the attr we are looking for, skip
+ * past the next delimiter and continue looking. */
+ of += len + 1;
+ }
+
+ rd_snprintf(errstr, errstr_size, "%s: could not find attribute (%c)",
+ description, attr);
+ return NULL;
+}
+
+
+/**
+ * @brief Base64 encode binary input \p in
+ * @returns a newly allocated, base64-encoded string or NULL on error.
+ */
+static char *rd_base64_encode(const rd_chariov_t *in) {
+ char *ret;
+ size_t ret_len, max_len;
+
+ /* OpenSSL takes an |int| argument so the input cannot exceed that. */
+ if (in->size > INT_MAX) {
+ return NULL;
+ }
+
+ /* This does not overflow given the |INT_MAX| bound, above. */
+ max_len = (((in->size + 2) / 3) * 4) + 1;
+ ret = rd_malloc(max_len);
+ if (ret == NULL) {
+ return NULL;
+ }
+
+ ret_len =
+ EVP_EncodeBlock((uint8_t *)ret, (uint8_t *)in->ptr, (int)in->size);
+ assert(ret_len < max_len);
+ ret[ret_len] = 0;
+
+ return ret;
+}
+
+
+/**
+ * @brief Base64 decode input string \p in. Ignores leading and trailing
+ * whitespace.
+ * @returns -1 on invalid Base64, or 0 on successes in which case a
+ * newly allocated binary string is set in out (and size).
+ */
+static int rd_base64_decode(const rd_chariov_t *in, rd_chariov_t *out) {
+ size_t ret_len;
+
+ /* OpenSSL takes an |int| argument, so |in->size| must not exceed
+ * that. */
+ if (in->size % 4 != 0 || in->size > INT_MAX) {
+ return -1;
+ }
+
+ ret_len = ((in->size / 4) * 3);
+ out->ptr = rd_malloc(ret_len + 1);
+
+ if (EVP_DecodeBlock((uint8_t *)out->ptr, (uint8_t *)in->ptr,
+ (int)in->size) == -1) {
+ rd_free(out->ptr);
+ out->ptr = NULL;
+ return -1;
+ }
+
+ /* EVP_DecodeBlock will pad the output with trailing NULs and count
+ * them in the return value. */
+ if (in->size > 1 && in->ptr[in->size - 1] == '=') {
+ if (in->size > 2 && in->ptr[in->size - 2] == '=') {
+ ret_len -= 2;
+ } else {
+ ret_len -= 1;
+ }
+ }
+
+ out->ptr[ret_len] = 0;
+ out->size = ret_len;
+
+ return 0;
+}
+
+
+/**
+ * @brief Perform H(str) hash function and stores the result in \p out
+ * which must be at least EVP_MAX_MD_SIZE.
+ * @returns 0 on success, else -1
+ */
+static int rd_kafka_sasl_scram_H(rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *str,
+ rd_chariov_t *out) {
+
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H(
+ (const unsigned char *)str->ptr, str->size,
+ (unsigned char *)out->ptr);
+
+ out->size = rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_H_size;
+ return 0;
+}
+
+/**
+ * @brief Perform HMAC(key,str) and stores the result in \p out
+ * which must be at least EVP_MAX_MD_SIZE.
+ * @returns 0 on success, else -1
+ */
+static int rd_kafka_sasl_scram_HMAC(rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *key,
+ const rd_chariov_t *str,
+ rd_chariov_t *out) {
+ const EVP_MD *evp =
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp;
+ unsigned int outsize;
+
+ if (!HMAC(evp, (const unsigned char *)key->ptr, (int)key->size,
+ (const unsigned char *)str->ptr, (int)str->size,
+ (unsigned char *)out->ptr, &outsize)) {
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
+ "HMAC failed");
+ return -1;
+ }
+
+ out->size = outsize;
+
+ return 0;
+}
+
+
+
+/**
+ * @brief Perform \p itcnt iterations of HMAC() on the given buffer \p in
+ * using \p salt, writing the output into \p out which must be
+ * at least EVP_MAX_MD_SIZE. Actual size is updated in \p *outsize.
+ * @returns 0 on success, else -1
+ */
+static int rd_kafka_sasl_scram_Hi(rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *in,
+ const rd_chariov_t *salt,
+ int itcnt,
+ rd_chariov_t *out) {
+ const EVP_MD *evp =
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.scram_evp;
+ unsigned int ressize = 0;
+ unsigned char tempres[EVP_MAX_MD_SIZE];
+ unsigned char *saltplus;
+ int i;
+
+ /* U1 := HMAC(str, salt + INT(1)) */
+ saltplus = rd_alloca(salt->size + 4);
+ memcpy(saltplus, salt->ptr, salt->size);
+ saltplus[salt->size] = 0;
+ saltplus[salt->size + 1] = 0;
+ saltplus[salt->size + 2] = 0;
+ saltplus[salt->size + 3] = 1;
+
+ /* U1 := HMAC(str, salt + INT(1)) */
+ if (!HMAC(evp, (const unsigned char *)in->ptr, (int)in->size, saltplus,
+ salt->size + 4, tempres, &ressize)) {
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
+ "HMAC priming failed");
+ return -1;
+ }
+
+ memcpy(out->ptr, tempres, ressize);
+
+ /* Ui-1 := HMAC(str, Ui-2) .. */
+ for (i = 1; i < itcnt; i++) {
+ unsigned char tempdest[EVP_MAX_MD_SIZE];
+ int j;
+
+ if (unlikely(!HMAC(evp, (const unsigned char *)in->ptr,
+ (int)in->size, tempres, ressize, tempdest,
+ NULL))) {
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
+ "Hi() HMAC #%d/%d failed", i, itcnt);
+ return -1;
+ }
+
+ /* U1 XOR U2 .. */
+ for (j = 0; j < (int)ressize; j++) {
+ out->ptr[j] ^= tempdest[j];
+ tempres[j] = tempdest[j];
+ }
+ }
+
+ out->size = ressize;
+
+ return 0;
+}
+
+
+/**
+ * @returns a SASL value-safe-char encoded string, replacing "," and "="
+ * with their escaped counterparts in a newly allocated string.
+ */
+static char *rd_kafka_sasl_safe_string(const char *str) {
+ char *safe = NULL, *d = NULL /*avoid warning*/;
+ int pass;
+ size_t len = 0;
+
+ /* Pass #1: scan for needed length and allocate.
+ * Pass #2: encode string */
+ for (pass = 0; pass < 2; pass++) {
+ const char *s;
+ for (s = str; *s; s++) {
+ if (pass == 0) {
+ /* If this byte needs to be escaped then
+ * 3 output bytes are needed instead of 1. */
+ len += (*s == ',' || *s == '=') ? 3 : 1;
+ continue;
+ }
+
+ if (*s == ',') {
+ *(d++) = '=';
+ *(d++) = '2';
+ *(d++) = 'C';
+ } else if (*s == '=') {
+ *(d++) = '=';
+ *(d++) = '3';
+ *(d++) = 'D';
+ } else
+ *(d++) = *s;
+ }
+
+ if (pass == 0)
+ d = safe = rd_malloc(len + 1);
+ }
+
+ rd_assert(d == safe + (int)len);
+ *d = '\0';
+
+ return safe;
+}
+
+
+/**
+ * @brief Build client-final-message-without-proof
+ * @remark out->ptr will be allocated and must be freed.
+ */
+static void rd_kafka_sasl_scram_build_client_final_message_wo_proof(
+ struct rd_kafka_sasl_scram_state *state,
+ const char *snonce,
+ rd_chariov_t *out) {
+ const char *attr_c = "biws"; /* base64 encode of "n,," */
+
+ /*
+ * client-final-message-without-proof =
+ * channel-binding "," nonce [","
+ * extensions]
+ */
+ out->size = strlen("c=,r=") + strlen(attr_c) + state->cnonce.size +
+ strlen(snonce);
+ out->ptr = rd_malloc(out->size + 1);
+ rd_snprintf(out->ptr, out->size + 1, "c=%s,r=%.*s%s", attr_c,
+ (int)state->cnonce.size, state->cnonce.ptr, snonce);
+}
+
+
+/**
+ * @brief Build client-final-message
+ * @returns -1 on error.
+ */
+static int rd_kafka_sasl_scram_build_client_final_message(
+ rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *salt,
+ const char *server_nonce,
+ const rd_chariov_t *server_first_msg,
+ int itcnt,
+ rd_chariov_t *out) {
+ struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
+ rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
+ rd_chariov_t SaslPassword = RD_ZERO_INIT;
+ rd_chariov_t SaltedPassword = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
+ rd_chariov_t ClientKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
+ rd_chariov_t ServerKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
+ rd_chariov_t StoredKey = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
+ rd_chariov_t AuthMessage = RD_ZERO_INIT;
+ rd_chariov_t ClientSignature = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
+ rd_chariov_t ServerSignature = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
+ const rd_chariov_t ClientKeyVerbatim = {.ptr = "Client Key",
+ .size = 10};
+ const rd_chariov_t ServerKeyVerbatim = {.ptr = "Server Key",
+ .size = 10};
+ rd_chariov_t ClientProof = {.ptr = rd_alloca(EVP_MAX_MD_SIZE)};
+ rd_chariov_t client_final_msg_wo_proof;
+ char *ClientProofB64;
+ int i;
+
+ mtx_lock(&conf->sasl.lock);
+ rd_strdupa(&SaslPassword.ptr, conf->sasl.password);
+ mtx_unlock(&conf->sasl.lock);
+ SaslPassword.size = strlen(SaslPassword.ptr);
+
+ /* Constructing the ClientProof attribute (p):
+ *
+ * p = Base64-encoded ClientProof
+ * SaltedPassword := Hi(Normalize(password), salt, i)
+ * ClientKey := HMAC(SaltedPassword, "Client Key")
+ * StoredKey := H(ClientKey)
+ * AuthMessage := client-first-message-bare + "," +
+ * server-first-message + "," +
+ * client-final-message-without-proof
+ * ClientSignature := HMAC(StoredKey, AuthMessage)
+ * ClientProof := ClientKey XOR ClientSignature
+ * ServerKey := HMAC(SaltedPassword, "Server Key")
+ * ServerSignature := HMAC(ServerKey, AuthMessage)
+ */
+
+ /* SaltedPassword := Hi(Normalize(password), salt, i) */
+ if (rd_kafka_sasl_scram_Hi(rktrans, &SaslPassword, salt, itcnt,
+ &SaltedPassword) == -1)
+ return -1;
+
+ /* ClientKey := HMAC(SaltedPassword, "Client Key") */
+ if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword,
+ &ClientKeyVerbatim, &ClientKey) == -1)
+ return -1;
+
+ /* StoredKey := H(ClientKey) */
+ if (rd_kafka_sasl_scram_H(rktrans, &ClientKey, &StoredKey) == -1)
+ return -1;
+
+ /* client-final-message-without-proof */
+ rd_kafka_sasl_scram_build_client_final_message_wo_proof(
+ state, server_nonce, &client_final_msg_wo_proof);
+
+ /* AuthMessage := client-first-message-bare + "," +
+ * server-first-message + "," +
+ * client-final-message-without-proof */
+ AuthMessage.size = state->first_msg_bare.size + 1 +
+ server_first_msg->size + 1 +
+ client_final_msg_wo_proof.size;
+ AuthMessage.ptr = rd_alloca(AuthMessage.size + 1);
+ rd_snprintf(AuthMessage.ptr, AuthMessage.size + 1, "%.*s,%.*s,%.*s",
+ (int)state->first_msg_bare.size, state->first_msg_bare.ptr,
+ (int)server_first_msg->size, server_first_msg->ptr,
+ (int)client_final_msg_wo_proof.size,
+ client_final_msg_wo_proof.ptr);
+
+ /*
+ * Calculate ServerSignature for later verification when
+ * server-final-message is received.
+ */
+
+ /* ServerKey := HMAC(SaltedPassword, "Server Key") */
+ if (rd_kafka_sasl_scram_HMAC(rktrans, &SaltedPassword,
+ &ServerKeyVerbatim, &ServerKey) == -1) {
+ rd_free(client_final_msg_wo_proof.ptr);
+ return -1;
+ }
+
+ /* ServerSignature := HMAC(ServerKey, AuthMessage) */
+ if (rd_kafka_sasl_scram_HMAC(rktrans, &ServerKey, &AuthMessage,
+ &ServerSignature) == -1) {
+ rd_free(client_final_msg_wo_proof.ptr);
+ return -1;
+ }
+
+ /* Store the Base64 encoded ServerSignature for quick comparison */
+ state->ServerSignatureB64 = rd_base64_encode(&ServerSignature);
+ if (state->ServerSignatureB64 == NULL) {
+ rd_free(client_final_msg_wo_proof.ptr);
+ return -1;
+ }
+
+ /*
+ * Continue with client-final-message
+ */
+
+ /* ClientSignature := HMAC(StoredKey, AuthMessage) */
+ if (rd_kafka_sasl_scram_HMAC(rktrans, &StoredKey, &AuthMessage,
+ &ClientSignature) == -1) {
+ rd_free(client_final_msg_wo_proof.ptr);
+ return -1;
+ }
+
+ /* ClientProof := ClientKey XOR ClientSignature */
+ assert(ClientKey.size == ClientSignature.size);
+ for (i = 0; i < (int)ClientKey.size; i++)
+ ClientProof.ptr[i] = ClientKey.ptr[i] ^ ClientSignature.ptr[i];
+ ClientProof.size = ClientKey.size;
+
+
+ /* Base64 encoded ClientProof */
+ ClientProofB64 = rd_base64_encode(&ClientProof);
+ if (ClientProofB64 == NULL) {
+ rd_free(client_final_msg_wo_proof.ptr);
+ return -1;
+ }
+
+ /* Construct client-final-message */
+ out->size = client_final_msg_wo_proof.size + strlen(",p=") +
+ strlen(ClientProofB64);
+ out->ptr = rd_malloc(out->size + 1);
+
+ rd_snprintf(out->ptr, out->size + 1, "%.*s,p=%s",
+ (int)client_final_msg_wo_proof.size,
+ client_final_msg_wo_proof.ptr, ClientProofB64);
+ rd_free(ClientProofB64);
+ rd_free(client_final_msg_wo_proof.ptr);
+
+ return 0;
+}
+
+
+/**
+ * @brief Handle first message from server
+ *
+ * Parse server response which looks something like:
+ * "r=fyko+d2lbbFgONR....,s=QSXCR+Q6sek8bf92,i=4096"
+ *
+ * @returns -1 on error.
+ */
+static int
+rd_kafka_sasl_scram_handle_server_first_message(rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *in,
+ rd_chariov_t *out,
+ char *errstr,
+ size_t errstr_size) {
+ struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
+ char *server_nonce;
+ rd_chariov_t salt_b64, salt;
+ char *itcntstr;
+ const char *endptr;
+ int itcnt;
+ char *attr_m;
+
+ /* Mandatory future extension check */
+ if ((attr_m = rd_kafka_sasl_scram_get_attr(in, 'm', NULL, NULL, 0))) {
+ rd_snprintf(errstr, errstr_size,
+ "Unsupported mandatory SCRAM extension");
+ rd_free(attr_m);
+ return -1;
+ }
+
+ /* Server nonce */
+ if (!(server_nonce = rd_kafka_sasl_scram_get_attr(
+ in, 'r', "Server nonce in server-first-message", errstr,
+ errstr_size)))
+ return -1;
+
+ if (strlen(server_nonce) <= state->cnonce.size ||
+ strncmp(state->cnonce.ptr, server_nonce, state->cnonce.size)) {
+ rd_snprintf(errstr, errstr_size,
+ "Server/client nonce mismatch in "
+ "server-first-message");
+ rd_free(server_nonce);
+ return -1;
+ }
+
+ /* Salt (Base64) */
+ if (!(salt_b64.ptr = rd_kafka_sasl_scram_get_attr(
+ in, 's', "Salt in server-first-message", errstr,
+ errstr_size))) {
+ rd_free(server_nonce);
+ return -1;
+ }
+ salt_b64.size = strlen(salt_b64.ptr);
+
+ /* Convert Salt to binary */
+ if (rd_base64_decode(&salt_b64, &salt) == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid Base64 Salt in server-first-message");
+ rd_free(server_nonce);
+ rd_free(salt_b64.ptr);
+ return -1;
+ }
+ rd_free(salt_b64.ptr);
+
+ /* Iteration count (as string) */
+ if (!(itcntstr = rd_kafka_sasl_scram_get_attr(
+ in, 'i', "Iteration count in server-first-message", errstr,
+ errstr_size))) {
+ rd_free(server_nonce);
+ rd_free(salt.ptr);
+ return -1;
+ }
+
+ /* Iteration count (as int) */
+ errno = 0;
+ itcnt = (int)strtoul(itcntstr, (char **)&endptr, 10);
+ if (itcntstr == endptr || *endptr != '\0' || errno != 0 ||
+ itcnt > 1000000) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid value (not integer or too large) "
+ "for Iteration count in server-first-message");
+ rd_free(server_nonce);
+ rd_free(salt.ptr);
+ rd_free(itcntstr);
+ return -1;
+ }
+ rd_free(itcntstr);
+
+ /* Build client-final-message */
+ if (rd_kafka_sasl_scram_build_client_final_message(
+ rktrans, &salt, server_nonce, in, itcnt, out) == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to build SCRAM client-final-message");
+ rd_free(salt.ptr);
+ rd_free(server_nonce);
+ return -1;
+ }
+
+ rd_free(server_nonce);
+ rd_free(salt.ptr);
+
+ return 0;
+}
+
+/**
+ * @brief Handle server-final-message
+ *
+ * This is the end of authentication and the SCRAM state
+ * will be freed at the end of this function regardless of
+ * authentication outcome.
+ *
+ * @returns -1 on failure
+ */
+static int
+rd_kafka_sasl_scram_handle_server_final_message(rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *in,
+ char *errstr,
+ size_t errstr_size) {
+ struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
+ char *attr_v, *attr_e;
+
+ if ((attr_e = rd_kafka_sasl_scram_get_attr(
+ in, 'e', "server-error in server-final-message", errstr,
+ errstr_size))) {
+ /* Authentication failed */
+
+ rd_snprintf(errstr, errstr_size,
+ "SASL SCRAM authentication failed: "
+ "broker responded with %s",
+ attr_e);
+ rd_free(attr_e);
+ return -1;
+
+ } else if ((attr_v = rd_kafka_sasl_scram_get_attr(
+ in, 'v', "verifier in server-final-message", errstr,
+ errstr_size))) {
+ rd_kafka_conf_t *conf;
+
+ /* Authentication succesful on server,
+ * but we need to verify the ServerSignature too. */
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
+ "SCRAMAUTH",
+ "SASL SCRAM authentication successful on server: "
+ "verifying ServerSignature");
+
+ if (strcmp(attr_v, state->ServerSignatureB64)) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL SCRAM authentication failed: "
+ "ServerSignature mismatch "
+ "(server's %s != ours %s)",
+ attr_v, state->ServerSignatureB64);
+ rd_free(attr_v);
+ return -1;
+ }
+ rd_free(attr_v);
+
+ conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
+
+ mtx_lock(&conf->sasl.lock);
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY | RD_KAFKA_DBG_BROKER,
+ "SCRAMAUTH", "Authenticated as %s using %s",
+ conf->sasl.username, conf->sasl.mechanisms);
+ mtx_unlock(&conf->sasl.lock);
+
+ rd_kafka_sasl_auth_done(rktrans);
+ return 0;
+
+ } else {
+ rd_snprintf(errstr, errstr_size,
+ "SASL SCRAM authentication failed: "
+ "no verifier or server-error returned from broker");
+ return -1;
+ }
+}
+
+
+
+/**
+ * @brief Build client-first-message
+ */
+static void
+rd_kafka_sasl_scram_build_client_first_message(rd_kafka_transport_t *rktrans,
+ rd_chariov_t *out) {
+ char *sasl_username;
+ struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
+ rd_kafka_conf_t *conf = &rktrans->rktrans_rkb->rkb_rk->rk_conf;
+
+ rd_kafka_sasl_scram_generate_nonce(&state->cnonce);
+
+ mtx_lock(&conf->sasl.lock);
+ sasl_username = rd_kafka_sasl_safe_string(conf->sasl.username);
+ mtx_unlock(&conf->sasl.lock);
+
+ out->size =
+ strlen("n,,n=,r=") + strlen(sasl_username) + state->cnonce.size;
+ out->ptr = rd_malloc(out->size + 1);
+
+ rd_snprintf(out->ptr, out->size + 1, "n,,n=%s,r=%.*s", sasl_username,
+ (int)state->cnonce.size, state->cnonce.ptr);
+ rd_free(sasl_username);
+
+ /* Save client-first-message-bare (skip gs2-header) */
+ state->first_msg_bare.size = out->size - 3;
+ state->first_msg_bare.ptr =
+ rd_memdup(out->ptr + 3, state->first_msg_bare.size);
+}
+
+
+
+/**
+ * @brief SASL SCRAM client state machine
+ * @returns -1 on failure (errstr set), else 0.
+ */
+static int rd_kafka_sasl_scram_fsm(rd_kafka_transport_t *rktrans,
+ const rd_chariov_t *in,
+ char *errstr,
+ size_t errstr_size) {
+ static const char *state_names[] = {
+ "client-first-message",
+ "server-first-message",
+ "client-final-message",
+ };
+ struct rd_kafka_sasl_scram_state *state = rktrans->rktrans_sasl.state;
+ rd_chariov_t out = RD_ZERO_INIT;
+ int r = -1;
+ rd_ts_t ts_start = rd_clock();
+ int prev_state = state->state;
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLSCRAM",
+ "SASL SCRAM client in state %s", state_names[state->state]);
+
+ switch (state->state) {
+ case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE:
+ rd_dassert(!in); /* Not expecting any server-input */
+
+ rd_kafka_sasl_scram_build_client_first_message(rktrans, &out);
+ state->state = RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE;
+ break;
+
+
+ case RD_KAFKA_SASL_SCRAM_STATE_SERVER_FIRST_MESSAGE:
+ rd_dassert(in); /* Requires server-input */
+
+ if (rd_kafka_sasl_scram_handle_server_first_message(
+ rktrans, in, &out, errstr, errstr_size) == -1)
+ return -1;
+
+ state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE;
+ break;
+
+ case RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FINAL_MESSAGE:
+ rd_dassert(in); /* Requires server-input */
+
+ r = rd_kafka_sasl_scram_handle_server_final_message(
+ rktrans, in, errstr, errstr_size);
+ break;
+ }
+
+ if (out.ptr) {
+ r = rd_kafka_sasl_send(rktrans, out.ptr, (int)out.size, errstr,
+ errstr_size);
+ rd_free(out.ptr);
+ }
+
+ ts_start = (rd_clock() - ts_start) / 1000;
+ if (ts_start >= 100)
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SCRAM",
+ "SASL SCRAM state %s handled in %" PRId64 "ms",
+ state_names[prev_state], ts_start);
+
+
+ return r;
+}
+
+
+/**
+ * @brief Handle received frame from broker.
+ */
+static int rd_kafka_sasl_scram_recv(rd_kafka_transport_t *rktrans,
+ const void *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size) {
+ const rd_chariov_t in = {.ptr = (char *)buf, .size = size};
+ return rd_kafka_sasl_scram_fsm(rktrans, &in, errstr, errstr_size);
+}
+
+
+/**
+ * @brief Initialize and start SASL SCRAM (builtin) authentication.
+ *
+ * Returns 0 on successful init and -1 on error.
+ *
+ * @locality broker thread
+ */
+static int rd_kafka_sasl_scram_client_new(rd_kafka_transport_t *rktrans,
+ const char *hostname,
+ char *errstr,
+ size_t errstr_size) {
+ struct rd_kafka_sasl_scram_state *state;
+
+ state = rd_calloc(1, sizeof(*state));
+ state->state = RD_KAFKA_SASL_SCRAM_STATE_CLIENT_FIRST_MESSAGE;
+ rktrans->rktrans_sasl.state = state;
+
+ /* Kick off the FSM */
+ return rd_kafka_sasl_scram_fsm(rktrans, NULL, errstr, errstr_size);
+}
+
+
+
+/**
+ * @brief Validate SCRAM config and look up the hash function
+ */
+static int rd_kafka_sasl_scram_conf_validate(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+ const char *mech = rk->rk_conf.sasl.mechanisms;
+ rd_bool_t both_set;
+
+ mtx_lock(&rk->rk_conf.sasl.lock);
+ both_set = rk->rk_conf.sasl.username && rk->rk_conf.sasl.password;
+ mtx_unlock(&rk->rk_conf.sasl.lock);
+
+ if (!both_set) {
+ rd_snprintf(errstr, errstr_size,
+ "sasl.username and sasl.password must be set");
+ return -1;
+ }
+
+ if (!strcmp(mech, "SCRAM-SHA-1")) {
+ rk->rk_conf.sasl.scram_evp = EVP_sha1();
+ rk->rk_conf.sasl.scram_H = SHA1;
+ rk->rk_conf.sasl.scram_H_size = SHA_DIGEST_LENGTH;
+ } else if (!strcmp(mech, "SCRAM-SHA-256")) {
+ rk->rk_conf.sasl.scram_evp = EVP_sha256();
+ rk->rk_conf.sasl.scram_H = SHA256;
+ rk->rk_conf.sasl.scram_H_size = SHA256_DIGEST_LENGTH;
+ } else if (!strcmp(mech, "SCRAM-SHA-512")) {
+ rk->rk_conf.sasl.scram_evp = EVP_sha512();
+ rk->rk_conf.sasl.scram_H = SHA512;
+ rk->rk_conf.sasl.scram_H_size = SHA512_DIGEST_LENGTH;
+ } else {
+ rd_snprintf(errstr, errstr_size,
+ "Unsupported hash function: %s "
+ "(try SCRAM-SHA-512)",
+ mech);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+const struct rd_kafka_sasl_provider rd_kafka_sasl_scram_provider = {
+ .name = "SCRAM (builtin)",
+ .client_new = rd_kafka_sasl_scram_client_new,
+ .recv = rd_kafka_sasl_scram_recv,
+ .close = rd_kafka_sasl_scram_close,
+ .conf_validate = rd_kafka_sasl_scram_conf_validate,
+};
+
+
+
+/**
+ * @name Unit tests
+ */
+
+/**
+ * @brief Verify that a random nonce is generated.
+ */
+static int unittest_scram_nonce(void) {
+ rd_chariov_t out1 = RD_ZERO_INIT;
+ rd_chariov_t out2 = RD_ZERO_INIT;
+
+ rd_kafka_sasl_scram_generate_nonce(&out1);
+ RD_UT_ASSERT(out1.size == 32, "Wrong size %d", (int)out1.size);
+
+ rd_kafka_sasl_scram_generate_nonce(&out2);
+ RD_UT_ASSERT(out1.size == 32, "Wrong size %d", (int)out2.size);
+
+ RD_UT_ASSERT(memcmp(out1.ptr, out2.ptr, out1.size) != 0,
+ "Expected generate_nonce() to return a random nonce");
+
+ rd_free(out1.ptr);
+ rd_free(out2.ptr);
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Verify that the safe string function does not overwrite memory.
+ * Needs to be run with ASAN (which is done in release-tests) for
+ * proper verification.
+ */
+static int unittest_scram_safe(void) {
+ const char *inout[] = {
+ "just a string",
+ "just a string",
+
+ "another,one,that,needs=escaping!",
+ "another=2Cone=2Cthat=2Cneeds=3Descaping!",
+
+ "overflow?============================",
+ "overflow?=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D"
+ "=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D=3D",
+
+ "=3D=3D=3D the mind boggles",
+ "=3D3D=3D3D=3D3D the mind boggles",
+
+ NULL,
+ NULL};
+ int i;
+
+ for (i = 0; inout[i]; i += 2) {
+ char *out = rd_kafka_sasl_safe_string(inout[i]);
+ const char *expected = inout[i + 1];
+
+ RD_UT_ASSERT(!strcmp(out, expected),
+ "Expected sasl_safe_string(%s) => %s, not %s\n",
+ inout[i], expected, out);
+
+ rd_free(out);
+ }
+
+ RD_UT_PASS();
+}
+
+
+int unittest_scram(void) {
+ int fails = 0;
+
+ fails += unittest_scram_nonce();
+ fails += unittest_scram_safe();
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c
new file mode 100644
index 000000000..b07e1808d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sasl_win32.c
@@ -0,0 +1,548 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Impelements SASL Kerberos GSSAPI authentication client
+ * using the native Win32 SSPI.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_sasl.h"
+#include "rdkafka_sasl_int.h"
+
+
+#include <stdio.h>
+#include <windows.h>
+#include <ntsecapi.h>
+
+#define SECURITY_WIN32
+#pragma comment(lib, "secur32.lib")
+#include <sspi.h>
+
+
+#define RD_KAFKA_SASL_SSPI_CTX_ATTRS \
+ (ISC_REQ_CONFIDENTIALITY | ISC_REQ_REPLAY_DETECT | \
+ ISC_REQ_SEQUENCE_DETECT | ISC_REQ_CONNECTION)
+
+
+/* Default maximum kerberos token size for newer versions of Windows */
+#define RD_KAFKA_SSPI_MAX_TOKEN_SIZE 48000
+
+
+/**
+ * @brief Per-connection SASL state
+ */
+typedef struct rd_kafka_sasl_win32_state_s {
+ CredHandle *cred;
+ CtxtHandle *ctx;
+ wchar_t principal[512]; /* Broker service principal and hostname */
+} rd_kafka_sasl_win32_state_t;
+
+
+/**
+ * @returns the string representation of a SECURITY_STATUS error code
+ */
+static const char *rd_kafka_sasl_sspi_err2str(SECURITY_STATUS sr) {
+ switch (sr) {
+ case SEC_E_INSUFFICIENT_MEMORY:
+ return "Insufficient memory";
+ case SEC_E_INTERNAL_ERROR:
+ return "Internal error";
+ case SEC_E_INVALID_HANDLE:
+ return "Invalid handle";
+ case SEC_E_INVALID_TOKEN:
+ return "Invalid token";
+ case SEC_E_LOGON_DENIED:
+ return "Logon denied";
+ case SEC_E_NO_AUTHENTICATING_AUTHORITY:
+ return "No authority could be contacted for authentication.";
+ case SEC_E_NO_CREDENTIALS:
+ return "No credentials";
+ case SEC_E_TARGET_UNKNOWN:
+ return "Target unknown";
+ case SEC_E_UNSUPPORTED_FUNCTION:
+ return "Unsupported functionality";
+ case SEC_E_WRONG_CREDENTIAL_HANDLE:
+ return "The principal that received the authentication "
+ "request is not the same as the one passed "
+ "into the pszTargetName parameter. "
+ "This indicates a failure in mutual "
+ "authentication.";
+ default:
+ return "(no string representation)";
+ }
+}
+
+
+/**
+ * @brief Create new CredHandle
+ */
+static CredHandle *rd_kafka_sasl_sspi_cred_new(rd_kafka_transport_t *rktrans,
+ char *errstr,
+ size_t errstr_size) {
+ TimeStamp expiry = {0, 0};
+ SECURITY_STATUS sr;
+ CredHandle *cred = rd_calloc(1, sizeof(*cred));
+
+ sr = AcquireCredentialsHandle(NULL, __TEXT("Kerberos"),
+ SECPKG_CRED_OUTBOUND, NULL, NULL, NULL,
+ NULL, cred, &expiry);
+
+ if (sr != SEC_E_OK) {
+ rd_free(cred);
+ rd_snprintf(errstr, errstr_size,
+ "Failed to acquire CredentialsHandle: "
+ "error code %d",
+ sr);
+ return NULL;
+ }
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASL",
+ "Acquired Kerberos credentials handle (expiry in %d.%ds)",
+ expiry.u.HighPart, expiry.u.LowPart);
+
+ return cred;
+}
+
+
+/**
+ * @brief Start or continue SSPI-based authentication processing.
+ */
+static int rd_kafka_sasl_sspi_continue(rd_kafka_transport_t *rktrans,
+ const void *inbuf,
+ size_t insize,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
+ SecBufferDesc outbufdesc, inbufdesc;
+ SecBuffer outsecbuf, insecbuf;
+ BYTE outbuf[RD_KAFKA_SSPI_MAX_TOKEN_SIZE];
+ TimeStamp lifespan = {0, 0};
+ ULONG ret_ctxattrs;
+ CtxtHandle *ctx;
+ SECURITY_STATUS sr;
+
+ if (inbuf) {
+ if (insize > ULONG_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "Input buffer length too large (%" PRIusz
+ ") "
+ "and would overflow",
+ insize);
+ return -1;
+ }
+
+ inbufdesc.ulVersion = SECBUFFER_VERSION;
+ inbufdesc.cBuffers = 1;
+ inbufdesc.pBuffers = &insecbuf;
+
+ insecbuf.cbBuffer = (unsigned long)insize;
+ insecbuf.BufferType = SECBUFFER_TOKEN;
+ insecbuf.pvBuffer = (void *)inbuf;
+ }
+
+ outbufdesc.ulVersion = SECBUFFER_VERSION;
+ outbufdesc.cBuffers = 1;
+ outbufdesc.pBuffers = &outsecbuf;
+
+ outsecbuf.cbBuffer = sizeof(outbuf);
+ outsecbuf.BufferType = SECBUFFER_TOKEN;
+ outsecbuf.pvBuffer = outbuf;
+
+ if (!(ctx = state->ctx)) {
+ /* First time: allocate context handle
+ * which will be filled in by Initialize..() */
+ ctx = rd_calloc(1, sizeof(*ctx));
+ }
+
+ sr = InitializeSecurityContext(
+ state->cred, state->ctx, state->principal,
+ RD_KAFKA_SASL_SSPI_CTX_ATTRS |
+ (state->ctx ? 0 : ISC_REQ_MUTUAL_AUTH | ISC_REQ_IDENTIFY),
+ 0, SECURITY_NATIVE_DREP, inbuf ? &inbufdesc : NULL, 0, ctx,
+ &outbufdesc, &ret_ctxattrs, &lifespan);
+
+ if (!state->ctx)
+ state->ctx = ctx;
+
+ switch (sr) {
+ case SEC_E_OK:
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
+ "Initialized security context");
+
+ rktrans->rktrans_sasl.complete = 1;
+ break;
+ case SEC_I_CONTINUE_NEEDED:
+ break;
+ case SEC_I_COMPLETE_NEEDED:
+ case SEC_I_COMPLETE_AND_CONTINUE:
+ rd_snprintf(errstr, errstr_size,
+ "CompleteAuthToken (Digest auth, %d) "
+ "not implemented",
+ sr);
+ return -1;
+ case SEC_I_INCOMPLETE_CREDENTIALS:
+ rd_snprintf(errstr, errstr_size,
+ "Incomplete credentials: "
+ "invalid or untrusted certificate");
+ return -1;
+ default:
+ rd_snprintf(errstr, errstr_size,
+ "InitializeSecurityContext "
+ "failed: %s (0x%x)",
+ rd_kafka_sasl_sspi_err2str(sr), sr);
+ return -1;
+ }
+
+ if (rd_kafka_sasl_send(rktrans, outsecbuf.pvBuffer, outsecbuf.cbBuffer,
+ errstr, errstr_size) == -1)
+ return -1;
+
+ return 0;
+}
+
+
+/**
+ * @brief Sends the token response to the broker
+ */
+static int rd_kafka_sasl_win32_send_response(rd_kafka_transport_t *rktrans,
+ char *errstr,
+ size_t errstr_size,
+ SecBuffer *server_token) {
+ rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
+ SECURITY_STATUS sr;
+ SecBuffer in_buffer;
+ SecBuffer out_buffer;
+ SecBuffer buffers[4];
+ SecBufferDesc buffer_desc;
+ SecPkgContext_Sizes sizes;
+ SecPkgCredentials_NamesA names;
+ int send_response;
+ size_t namelen;
+
+ sr = QueryContextAttributes(state->ctx, SECPKG_ATTR_SIZES, &sizes);
+ if (sr != SEC_E_OK) {
+ rd_snprintf(errstr, errstr_size,
+ "Send response failed: %s (0x%x)",
+ rd_kafka_sasl_sspi_err2str(sr), sr);
+ return -1;
+ }
+
+ RD_MEMZERO(names);
+ sr = QueryCredentialsAttributesA(state->cred, SECPKG_CRED_ATTR_NAMES,
+ &names);
+
+ if (sr != SEC_E_OK) {
+ rd_snprintf(errstr, errstr_size,
+ "Query credentials failed: %s (0x%x)",
+ rd_kafka_sasl_sspi_err2str(sr), sr);
+ return -1;
+ }
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
+ "Sending response message for user: %s", names.sUserName);
+
+ namelen = strlen(names.sUserName) + 1;
+ if (namelen > ULONG_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "User name length too large (%" PRIusz
+ ") "
+ "and would overflow");
+ return -1;
+ }
+
+ in_buffer.pvBuffer = (char *)names.sUserName;
+ in_buffer.cbBuffer = (unsigned long)namelen;
+
+ buffer_desc.cBuffers = 4;
+ buffer_desc.pBuffers = buffers;
+ buffer_desc.ulVersion = SECBUFFER_VERSION;
+
+ /* security trailer */
+ buffers[0].cbBuffer = sizes.cbSecurityTrailer;
+ buffers[0].BufferType = SECBUFFER_TOKEN;
+ buffers[0].pvBuffer = rd_calloc(1, sizes.cbSecurityTrailer);
+
+ /* protection level and buffer size received from the server */
+ buffers[1].cbBuffer = server_token->cbBuffer;
+ buffers[1].BufferType = SECBUFFER_DATA;
+ buffers[1].pvBuffer = rd_calloc(1, server_token->cbBuffer);
+ memcpy(buffers[1].pvBuffer, server_token->pvBuffer,
+ server_token->cbBuffer);
+
+ /* user principal */
+ buffers[2].cbBuffer = in_buffer.cbBuffer;
+ buffers[2].BufferType = SECBUFFER_DATA;
+ buffers[2].pvBuffer = rd_calloc(1, buffers[2].cbBuffer);
+ memcpy(buffers[2].pvBuffer, in_buffer.pvBuffer, in_buffer.cbBuffer);
+
+ /* padding */
+ buffers[3].cbBuffer = sizes.cbBlockSize;
+ buffers[3].BufferType = SECBUFFER_PADDING;
+ buffers[3].pvBuffer = rd_calloc(1, buffers[2].cbBuffer);
+
+ sr = EncryptMessage(state->ctx, KERB_WRAP_NO_ENCRYPT, &buffer_desc, 0);
+ if (sr != SEC_E_OK) {
+ rd_snprintf(errstr, errstr_size,
+ "Encrypt message failed: %s (0x%x)",
+ rd_kafka_sasl_sspi_err2str(sr), sr);
+
+ FreeContextBuffer(in_buffer.pvBuffer);
+ rd_free(buffers[0].pvBuffer);
+ rd_free(buffers[1].pvBuffer);
+ rd_free(buffers[2].pvBuffer);
+ rd_free(buffers[3].pvBuffer);
+ return -1;
+ }
+
+ out_buffer.cbBuffer = buffers[0].cbBuffer + buffers[1].cbBuffer +
+ buffers[2].cbBuffer + buffers[3].cbBuffer;
+
+ out_buffer.pvBuffer =
+ rd_calloc(1, buffers[0].cbBuffer + buffers[1].cbBuffer +
+ buffers[2].cbBuffer + buffers[3].cbBuffer);
+
+ memcpy(out_buffer.pvBuffer, buffers[0].pvBuffer, buffers[0].cbBuffer);
+
+ memcpy((unsigned char *)out_buffer.pvBuffer + (int)buffers[0].cbBuffer,
+ buffers[1].pvBuffer, buffers[1].cbBuffer);
+
+ memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer +
+ buffers[1].cbBuffer,
+ buffers[2].pvBuffer, buffers[2].cbBuffer);
+
+ memcpy((unsigned char *)out_buffer.pvBuffer + buffers[0].cbBuffer +
+ buffers[1].cbBuffer + buffers[2].cbBuffer,
+ buffers[3].pvBuffer, buffers[3].cbBuffer);
+
+ send_response =
+ rd_kafka_sasl_send(rktrans, out_buffer.pvBuffer,
+ out_buffer.cbBuffer, errstr, errstr_size);
+
+ FreeContextBuffer(in_buffer.pvBuffer);
+ rd_free(out_buffer.pvBuffer);
+ rd_free(buffers[0].pvBuffer);
+ rd_free(buffers[1].pvBuffer);
+ rd_free(buffers[2].pvBuffer);
+ rd_free(buffers[3].pvBuffer);
+
+ return send_response;
+}
+
+
+/**
+ * @brief Unwrap and validate token response from broker.
+ */
+static int rd_kafka_sasl_win32_validate_token(rd_kafka_transport_t *rktrans,
+ const void *inbuf,
+ size_t insize,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
+ SecBuffer buffers[2];
+ SecBufferDesc buffer_desc;
+ SECURITY_STATUS sr;
+ char supported;
+
+ if (insize > ULONG_MAX) {
+ rd_snprintf(errstr, errstr_size,
+ "Input buffer length too large (%" PRIusz
+ ") "
+ "and would overflow");
+ return -1;
+ }
+
+ buffer_desc.cBuffers = 2;
+ buffer_desc.pBuffers = buffers;
+ buffer_desc.ulVersion = SECBUFFER_VERSION;
+
+ buffers[0].cbBuffer = (unsigned long)insize;
+ buffers[0].BufferType = SECBUFFER_STREAM;
+ buffers[0].pvBuffer = (void *)inbuf;
+
+ buffers[1].cbBuffer = 0;
+ buffers[1].BufferType = SECBUFFER_DATA;
+ buffers[1].pvBuffer = NULL;
+
+ sr = DecryptMessage(state->ctx, &buffer_desc, 0, NULL);
+ if (sr != SEC_E_OK) {
+ rd_snprintf(errstr, errstr_size,
+ "Decrypt message failed: %s (0x%x)",
+ rd_kafka_sasl_sspi_err2str(sr), sr);
+ return -1;
+ }
+
+ if (buffers[1].cbBuffer < 4) {
+ rd_snprintf(errstr, errstr_size,
+ "Validate token: "
+ "invalid message");
+ return -1;
+ }
+
+ supported = ((char *)buffers[1].pvBuffer)[0];
+ if (!(supported & 1)) {
+ rd_snprintf(errstr, errstr_size,
+ "Validate token: "
+ "server does not support layer");
+ return -1;
+ }
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
+ "Validated server token");
+
+ return rd_kafka_sasl_win32_send_response(rktrans, errstr, errstr_size,
+ &buffers[1]);
+}
+
+
+/**
+ * @brief Handle SASL frame received from broker.
+ */
+static int rd_kafka_sasl_win32_recv(struct rd_kafka_transport_s *rktrans,
+ const void *buf,
+ size_t size,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
+
+ if (rktrans->rktrans_sasl.complete) {
+
+ if (size > 0) {
+ /* After authentication is done the broker will send
+ * back its token for us to verify.
+ * The client responds to the broker which will
+ * return an empty (size==0) frame that
+ * completes the authentication handshake.
+ * With legacy SASL framing the final empty token
+ * is not sent. */
+ int r;
+
+ r = rd_kafka_sasl_win32_validate_token(
+ rktrans, buf, size, errstr, errstr_size);
+
+ if (r == -1) {
+ rktrans->rktrans_sasl.complete = 0;
+ return r;
+ } else if (rktrans->rktrans_rkb->rkb_features &
+ RD_KAFKA_FEATURE_SASL_AUTH_REQ) {
+ /* Kafka-framed handshake requires
+ * one more back and forth. */
+ return r;
+ }
+
+ /* Legacy-framed handshake is done here */
+ }
+
+ /* Final ack from broker. */
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SASLAUTH",
+ "Authenticated");
+ rd_kafka_sasl_auth_done(rktrans);
+ return 0;
+ }
+
+ return rd_kafka_sasl_sspi_continue(rktrans, buf, size, errstr,
+ errstr_size);
+}
+
+
+/**
+ * @brief Decommission SSPI state
+ */
+static void rd_kafka_sasl_win32_close(rd_kafka_transport_t *rktrans) {
+ rd_kafka_sasl_win32_state_t *state = rktrans->rktrans_sasl.state;
+
+ if (!state)
+ return;
+
+ if (state->ctx) {
+ DeleteSecurityContext(state->ctx);
+ rd_free(state->ctx);
+ }
+ if (state->cred) {
+ FreeCredentialsHandle(state->cred);
+ rd_free(state->cred);
+ }
+ rd_free(state);
+}
+
+
+static int rd_kafka_sasl_win32_client_new(rd_kafka_transport_t *rktrans,
+ const char *hostname,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_t *rk = rktrans->rktrans_rkb->rkb_rk;
+ rd_kafka_sasl_win32_state_t *state;
+
+ if (strcmp(rk->rk_conf.sasl.mechanisms, "GSSAPI")) {
+ rd_snprintf(errstr, errstr_size,
+ "SASL mechanism \"%s\" not supported on platform",
+ rk->rk_conf.sasl.mechanisms);
+ return -1;
+ }
+
+ state = rd_calloc(1, sizeof(*state));
+ rktrans->rktrans_sasl.state = state;
+
+ _snwprintf(state->principal, RD_ARRAYSIZE(state->principal), L"%hs/%hs",
+ rktrans->rktrans_rkb->rkb_rk->rk_conf.sasl.service_name,
+ hostname);
+
+ state->cred = rd_kafka_sasl_sspi_cred_new(rktrans, errstr, errstr_size);
+ if (!state->cred)
+ return -1;
+
+ if (rd_kafka_sasl_sspi_continue(rktrans, NULL, 0, errstr,
+ errstr_size) == -1)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * @brief Validate config
+ */
+static int rd_kafka_sasl_win32_conf_validate(rd_kafka_t *rk,
+ char *errstr,
+ size_t errstr_size) {
+ if (!rk->rk_conf.sasl.service_name) {
+ rd_snprintf(errstr, errstr_size,
+ "sasl.kerberos.service.name must be set");
+ return -1;
+ }
+
+ return 0;
+}
+
+const struct rd_kafka_sasl_provider rd_kafka_sasl_win32_provider = {
+ .name = "Win32 SSPI",
+ .client_new = rd_kafka_sasl_win32_client_new,
+ .recv = rd_kafka_sasl_win32_recv,
+ .close = rd_kafka_sasl_win32_close,
+ .conf_validate = rd_kafka_sasl_win32_conf_validate};
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c
new file mode 100644
index 000000000..9961a240f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.c
@@ -0,0 +1,1841 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name OpenSSL integration
+ *
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_cert.h"
+
+#ifdef _WIN32
+#include <wincrypt.h>
+#pragma comment(lib, "crypt32.lib")
+#pragma comment(lib, "libcrypto.lib")
+#pragma comment(lib, "libssl.lib")
+#endif
+
+#include <openssl/x509.h>
+#include <openssl/x509_vfy.h>
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+#include <openssl/provider.h>
+#endif
+
+#include <ctype.h>
+
+#if !_WIN32
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#endif
+
+
+#if WITH_VALGRIND
+/* OpenSSL relies on uninitialized memory, which Valgrind will whine about.
+ * We use in-code Valgrind macros to suppress those warnings. */
+#include <valgrind/memcheck.h>
+#else
+#define VALGRIND_MAKE_MEM_DEFINED(A, B)
+#endif
+
+
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+static mtx_t *rd_kafka_ssl_locks;
+static int rd_kafka_ssl_locks_cnt;
+#endif
+
+
+/**
+ * @brief Close and destroy SSL session
+ */
+void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans) {
+ SSL_shutdown(rktrans->rktrans_ssl);
+ SSL_free(rktrans->rktrans_ssl);
+ rktrans->rktrans_ssl = NULL;
+}
+
+
+/**
+ * @brief Clear OpenSSL error queue to get a proper error reporting in case
+ * the next SSL_*() operation fails.
+ */
+static RD_INLINE void
+rd_kafka_transport_ssl_clear_error(rd_kafka_transport_t *rktrans) {
+ ERR_clear_error();
+#ifdef _WIN32
+ WSASetLastError(0);
+#else
+ rd_set_errno(0);
+#endif
+}
+
+/**
+ * @returns a thread-local single-invocation-use error string for
+ * the last thread-local error in OpenSSL, or an empty string
+ * if no error.
+ */
+const char *rd_kafka_ssl_last_error_str(void) {
+ static RD_TLS char errstr[256];
+ unsigned long l;
+ const char *file, *data, *func;
+ int line, flags;
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+ l = ERR_peek_last_error_all(&file, &line, &func, &data, &flags);
+#else
+ l = ERR_peek_last_error_line_data(&file, &line, &data, &flags);
+ func = ERR_func_error_string(l);
+#endif
+
+ if (!l)
+ return "";
+
+ rd_snprintf(errstr, sizeof(errstr), "%lu:%s:%s:%s:%d: %s", l,
+ ERR_lib_error_string(l), func, file, line,
+ ((flags & ERR_TXT_STRING) && data && *data)
+ ? data
+ : ERR_reason_error_string(l));
+
+ return errstr;
+}
+
+/**
+ * Serves the entire OpenSSL error queue and logs each error.
+ * The last error is not logged but returned in 'errstr'.
+ *
+ * If 'rkb' is non-NULL broker-specific logging will be used,
+ * else it will fall back on global 'rk' debugging.
+ */
+static char *rd_kafka_ssl_error(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ char *errstr,
+ size_t errstr_size) {
+ unsigned long l;
+ const char *file, *data, *func;
+ int line, flags;
+ int cnt = 0;
+
+ if (!rk) {
+ rd_assert(rkb);
+ rk = rkb->rkb_rk;
+ }
+
+ while (
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+ (l = ERR_get_error_all(&file, &line, &func, &data, &flags))
+#else
+ (l = ERR_get_error_line_data(&file, &line, &data, &flags))
+#endif
+ ) {
+ char buf[256];
+
+#if OPENSSL_VERSION_NUMBER < 0x30000000
+ func = ERR_func_error_string(l);
+#endif
+
+ if (cnt++ > 0) {
+ /* Log last message */
+ if (rkb)
+ rd_rkb_log(rkb, LOG_ERR, "SSL", "%s", errstr);
+ else
+ rd_kafka_log(rk, LOG_ERR, "SSL", "%s", errstr);
+ }
+
+ ERR_error_string_n(l, buf, sizeof(buf));
+
+ if (!(flags & ERR_TXT_STRING) || !data || !*data)
+ data = NULL;
+
+ /* Include openssl file:line:func if debugging is enabled */
+ if (rk->rk_conf.log_level >= LOG_DEBUG)
+ rd_snprintf(errstr, errstr_size, "%s:%d:%s %s%s%s",
+ file, line, func, buf, data ? ": " : "",
+ data ? data : "");
+ else
+ rd_snprintf(errstr, errstr_size, "%s%s%s", buf,
+ data ? ": " : "", data ? data : "");
+ }
+
+ if (cnt == 0)
+ rd_snprintf(errstr, errstr_size,
+ "No further error information available");
+
+ return errstr;
+}
+
+
+
+/**
+ * Set transport IO event polling based on SSL error.
+ *
+ * Returns -1 on permanent errors.
+ *
+ * Locality: broker thread
+ */
+static RD_INLINE int
+rd_kafka_transport_ssl_io_update(rd_kafka_transport_t *rktrans,
+ int ret,
+ char *errstr,
+ size_t errstr_size) {
+ int serr = SSL_get_error(rktrans->rktrans_ssl, ret);
+ int serr2;
+
+ switch (serr) {
+ case SSL_ERROR_WANT_READ:
+ rd_kafka_transport_poll_set(rktrans, POLLIN);
+ break;
+
+ case SSL_ERROR_WANT_WRITE:
+ rd_kafka_transport_set_blocked(rktrans, rd_true);
+ rd_kafka_transport_poll_set(rktrans, POLLOUT);
+ break;
+
+ case SSL_ERROR_SYSCALL:
+ serr2 = ERR_peek_error();
+ if (serr2)
+ rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr,
+ errstr_size);
+ else if (!rd_socket_errno || rd_socket_errno == ECONNRESET)
+ rd_snprintf(errstr, errstr_size, "Disconnected");
+ else
+ rd_snprintf(errstr, errstr_size,
+ "SSL transport error: %s",
+ rd_strerror(rd_socket_errno));
+ return -1;
+
+ case SSL_ERROR_ZERO_RETURN:
+ rd_snprintf(errstr, errstr_size, "Disconnected");
+ return -1;
+
+ default:
+ rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr,
+ errstr_size);
+ return -1;
+ }
+
+ return 0;
+}
+
+ssize_t rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans,
+ rd_slice_t *slice,
+ char *errstr,
+ size_t errstr_size) {
+ ssize_t sum = 0;
+ const void *p;
+ size_t rlen;
+
+ rd_kafka_transport_ssl_clear_error(rktrans);
+
+ while ((rlen = rd_slice_peeker(slice, &p))) {
+ int r;
+ size_t r2;
+
+ r = SSL_write(rktrans->rktrans_ssl, p, (int)rlen);
+
+ if (unlikely(r <= 0)) {
+ if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
+ errstr_size) == -1)
+ return -1;
+ else
+ return sum;
+ }
+
+ /* Update buffer read position */
+ r2 = rd_slice_read(slice, NULL, (size_t)r);
+ rd_assert((size_t)r == r2 &&
+ *"BUG: wrote more bytes than available in slice");
+
+
+ sum += r;
+ /* FIXME: remove this and try again immediately and let
+ * the next SSL_write() call fail instead? */
+ if ((size_t)r < rlen)
+ break;
+ }
+ return sum;
+}
+
+ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans,
+ rd_buf_t *rbuf,
+ char *errstr,
+ size_t errstr_size) {
+ ssize_t sum = 0;
+ void *p;
+ size_t len;
+
+ while ((len = rd_buf_get_writable(rbuf, &p))) {
+ int r;
+
+ rd_kafka_transport_ssl_clear_error(rktrans);
+
+ r = SSL_read(rktrans->rktrans_ssl, p, (int)len);
+
+ if (unlikely(r <= 0)) {
+ if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
+ errstr_size) == -1)
+ return -1;
+ else
+ return sum;
+ }
+
+ VALGRIND_MAKE_MEM_DEFINED(p, r);
+
+ /* Update buffer write position */
+ rd_buf_write(rbuf, NULL, (size_t)r);
+
+ sum += r;
+
+ /* FIXME: remove this and try again immediately and let
+ * the next SSL_read() call fail instead? */
+ if ((size_t)r < len)
+ break;
+ }
+ return sum;
+}
+
+
+/**
+ * OpenSSL password query callback
+ *
+ * Locality: application thread
+ */
+static int rd_kafka_transport_ssl_passwd_cb(char *buf,
+ int size,
+ int rwflag,
+ void *userdata) {
+ rd_kafka_t *rk = userdata;
+ int pwlen;
+
+ rd_kafka_dbg(rk, SECURITY, "SSLPASSWD",
+ "Private key requires password");
+
+ if (!rk->rk_conf.ssl.key_password) {
+ rd_kafka_log(rk, LOG_WARNING, "SSLPASSWD",
+ "Private key requires password but "
+ "no password configured (ssl.key.password)");
+ return -1;
+ }
+
+
+ pwlen = (int)strlen(rk->rk_conf.ssl.key_password);
+ memcpy(buf, rk->rk_conf.ssl.key_password, RD_MIN(pwlen, size));
+
+ return pwlen;
+}
+
+
+/**
+ * @brief OpenSSL callback to perform additional broker certificate
+ * verification and validation.
+ *
+ * @return 1 on success when the broker certificate
+ * is valid and 0 when the certificate is not valid.
+ *
+ * @sa SSL_CTX_set_verify()
+ */
+static int rd_kafka_transport_ssl_cert_verify_cb(int preverify_ok,
+ X509_STORE_CTX *x509_ctx) {
+ rd_kafka_transport_t *rktrans = rd_kafka_curr_transport;
+ rd_kafka_broker_t *rkb;
+ rd_kafka_t *rk;
+ X509 *cert;
+ char *buf = NULL;
+ int buf_size;
+ int depth;
+ int x509_orig_error, x509_error;
+ char errstr[512];
+ int ok;
+
+ rd_assert(rktrans != NULL);
+ rkb = rktrans->rktrans_rkb;
+ rk = rkb->rkb_rk;
+
+ cert = X509_STORE_CTX_get_current_cert(x509_ctx);
+ if (!cert) {
+ rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY",
+ "Failed to get current certificate to verify");
+ return 0;
+ }
+
+ depth = X509_STORE_CTX_get_error_depth(x509_ctx);
+
+ x509_orig_error = x509_error = X509_STORE_CTX_get_error(x509_ctx);
+
+ buf_size = i2d_X509(cert, (unsigned char **)&buf);
+ if (buf_size < 0 || !buf) {
+ rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY",
+ "Unable to convert certificate to X509 format");
+ return 0;
+ }
+
+ *errstr = '\0';
+
+ /* Call application's verification callback. */
+ ok = rk->rk_conf.ssl.cert_verify_cb(
+ rk, rkb->rkb_nodename, rkb->rkb_nodeid, &x509_error, depth, buf,
+ (size_t)buf_size, errstr, sizeof(errstr), rk->rk_conf.opaque);
+
+ OPENSSL_free(buf);
+
+ if (!ok) {
+ char subject[128];
+ char issuer[128];
+
+ X509_NAME_oneline(X509_get_subject_name(cert), subject,
+ sizeof(subject));
+ X509_NAME_oneline(X509_get_issuer_name(cert), issuer,
+ sizeof(issuer));
+ rd_rkb_log(rkb, LOG_ERR, "SSLCERTVRFY",
+ "Certificate (subject=%s, issuer=%s) verification "
+ "callback failed: %s",
+ subject, issuer, errstr);
+
+ X509_STORE_CTX_set_error(x509_ctx, x509_error);
+
+ return 0; /* verification failed */
+ }
+
+ /* Clear error */
+ if (x509_orig_error != 0 && x509_error == 0)
+ X509_STORE_CTX_set_error(x509_ctx, 0);
+
+ return 1; /* verification successful */
+}
+
+/**
+ * @brief Set TLSEXT hostname for SNI and optionally enable
+ * SSL endpoint identification verification.
+ *
+ * @returns 0 on success or -1 on error.
+ */
+static int rd_kafka_transport_ssl_set_endpoint_id(rd_kafka_transport_t *rktrans,
+ char *errstr,
+ size_t errstr_size) {
+ char name[RD_KAFKA_NODENAME_SIZE];
+ char *t;
+
+ rd_kafka_broker_lock(rktrans->rktrans_rkb);
+ rd_snprintf(name, sizeof(name), "%s",
+ rktrans->rktrans_rkb->rkb_nodename);
+ rd_kafka_broker_unlock(rktrans->rktrans_rkb);
+
+ /* Remove ":9092" port suffix from nodename */
+ if ((t = strrchr(name, ':')))
+ *t = '\0';
+
+#if (OPENSSL_VERSION_NUMBER >= 0x0090806fL) && !defined(OPENSSL_NO_TLSEXT)
+ /* If non-numerical hostname, send it for SNI */
+ if (!(/*ipv6*/ (strchr(name, ':') &&
+ strspn(name, "0123456789abcdefABCDEF:.[]%") ==
+ strlen(name)) ||
+ /*ipv4*/ strspn(name, "0123456789.") == strlen(name)) &&
+ !SSL_set_tlsext_host_name(rktrans->rktrans_ssl, name))
+ goto fail;
+#endif
+
+ if (rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.endpoint_identification ==
+ RD_KAFKA_SSL_ENDPOINT_ID_NONE)
+ return 0;
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000 && !defined(OPENSSL_IS_BORINGSSL)
+ if (!SSL_set1_host(rktrans->rktrans_ssl, name))
+ goto fail;
+#elif OPENSSL_VERSION_NUMBER >= 0x1000200fL /* 1.0.2 */
+ {
+ X509_VERIFY_PARAM *param;
+
+ param = SSL_get0_param(rktrans->rktrans_ssl);
+
+ if (!X509_VERIFY_PARAM_set1_host(param, name, 0))
+ goto fail;
+ }
+#else
+ rd_snprintf(errstr, errstr_size,
+ "Endpoint identification not supported on this "
+ "OpenSSL version (0x%lx)",
+ OPENSSL_VERSION_NUMBER);
+ return -1;
+#endif
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "ENDPOINT",
+ "Enabled endpoint identification using hostname %s", name);
+
+ return 0;
+
+fail:
+ rd_kafka_ssl_error(NULL, rktrans->rktrans_rkb, errstr, errstr_size);
+ return -1;
+}
+
+
+/**
+ * @brief Set up SSL for a newly connected connection
+ *
+ * @returns -1 on failure, else 0.
+ */
+int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb,
+ rd_kafka_transport_t *rktrans,
+ char *errstr,
+ size_t errstr_size) {
+ int r;
+
+ rktrans->rktrans_ssl = SSL_new(rkb->rkb_rk->rk_conf.ssl.ctx);
+ if (!rktrans->rktrans_ssl)
+ goto fail;
+
+ if (!SSL_set_fd(rktrans->rktrans_ssl, (int)rktrans->rktrans_s))
+ goto fail;
+
+ if (rd_kafka_transport_ssl_set_endpoint_id(rktrans, errstr,
+ errstr_size) == -1)
+ return -1;
+
+ rd_kafka_transport_ssl_clear_error(rktrans);
+
+ r = SSL_connect(rktrans->rktrans_ssl);
+ if (r == 1) {
+ /* Connected, highly unlikely since this is a
+ * non-blocking operation. */
+ rd_kafka_transport_connect_done(rktrans, NULL);
+ return 0;
+ }
+
+ if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr, errstr_size) ==
+ -1)
+ return -1;
+
+ return 0;
+
+fail:
+ rd_kafka_ssl_error(NULL, rkb, errstr, errstr_size);
+ return -1;
+}
+
+
+static RD_UNUSED int
+rd_kafka_transport_ssl_io_event(rd_kafka_transport_t *rktrans, int events) {
+ int r;
+ char errstr[512];
+
+ if (events & POLLOUT) {
+ rd_kafka_transport_ssl_clear_error(rktrans);
+
+ r = SSL_write(rktrans->rktrans_ssl, NULL, 0);
+ if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
+ sizeof(errstr)) == -1)
+ goto fail;
+ }
+
+ return 0;
+
+fail:
+ /* Permanent error */
+ rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
+ RD_KAFKA_RESP_ERR__TRANSPORT, "%s", errstr);
+ return -1;
+}
+
+
+/**
+ * @brief Verify SSL handshake was valid.
+ */
+static int rd_kafka_transport_ssl_verify(rd_kafka_transport_t *rktrans) {
+ long int rl;
+ X509 *cert;
+
+ if (!rktrans->rktrans_rkb->rkb_rk->rk_conf.ssl.enable_verify)
+ return 0;
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+ cert = SSL_get1_peer_certificate(rktrans->rktrans_ssl);
+#else
+ cert = SSL_get_peer_certificate(rktrans->rktrans_ssl);
+#endif
+ X509_free(cert);
+ if (!cert) {
+ rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
+ RD_KAFKA_RESP_ERR__SSL,
+ "Broker did not provide a certificate");
+ return -1;
+ }
+
+ if ((rl = SSL_get_verify_result(rktrans->rktrans_ssl)) != X509_V_OK) {
+ rd_kafka_broker_fail(rktrans->rktrans_rkb, LOG_ERR,
+ RD_KAFKA_RESP_ERR__SSL,
+ "Failed to verify broker certificate: %s",
+ X509_verify_cert_error_string(rl));
+ return -1;
+ }
+
+ rd_rkb_dbg(rktrans->rktrans_rkb, SECURITY, "SSLVERIFY",
+ "Broker SSL certificate verified");
+ return 0;
+}
+
+/**
+ * @brief SSL handshake handling.
+ * Call repeatedly (based on IO events) until handshake is done.
+ *
+ * @returns -1 on error, 0 if handshake is still in progress,
+ * or 1 on completion.
+ */
+int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans) {
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+ char errstr[512];
+ int r;
+
+ r = SSL_do_handshake(rktrans->rktrans_ssl);
+ if (r == 1) {
+ /* SSL handshake done. Verify. */
+ if (rd_kafka_transport_ssl_verify(rktrans) == -1)
+ return -1;
+
+ rd_kafka_transport_connect_done(rktrans, NULL);
+ return 1;
+
+ } else if (rd_kafka_transport_ssl_io_update(rktrans, r, errstr,
+ sizeof(errstr)) == -1) {
+ const char *extra = "";
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR__SSL;
+
+ if (strstr(errstr, "unexpected message"))
+ extra =
+ ": client SSL authentication might be "
+ "required (see ssl.key.location and "
+ "ssl.certificate.location and consult the "
+ "broker logs for more information)";
+ else if (strstr(errstr,
+ "tls_process_server_certificate:"
+ "certificate verify failed") ||
+ strstr(errstr, "error:0A000086") /*openssl3*/ ||
+ strstr(errstr,
+ "get_server_certificate:"
+ "certificate verify failed"))
+ extra =
+ ": broker certificate could not be verified, "
+ "verify that ssl.ca.location is correctly "
+ "configured or root CA certificates are "
+ "installed"
+#ifdef __APPLE__
+ " (brew install openssl)"
+#elif defined(_WIN32)
+ " (add broker's CA certificate to the Windows "
+ "Root certificate store)"
+#else
+ " (install ca-certificates package)"
+#endif
+ ;
+ else if (!strcmp(errstr, "Disconnected")) {
+ extra = ": connecting to a PLAINTEXT broker listener?";
+ /* Disconnects during handshake are most likely
+ * not due to SSL, but rather at the transport level */
+ err = RD_KAFKA_RESP_ERR__TRANSPORT;
+ }
+
+ rd_kafka_broker_fail(rkb, LOG_ERR, err,
+ "SSL handshake failed: %s%s", errstr,
+ extra);
+ return -1;
+ }
+
+ return 0;
+}
+
+
+
+/**
+ * @brief Parse a PEM-formatted string into an EVP_PKEY (PrivateKey) object.
+ *
+ * @param str Input PEM string, nul-terminated
+ *
+ * @remark This method does not provide automatic addition of PEM
+ * headers and footers.
+ *
+ * @returns a new EVP_PKEY on success or NULL on error.
+ */
+static EVP_PKEY *rd_kafka_ssl_PKEY_from_string(rd_kafka_t *rk,
+ const char *str) {
+ BIO *bio = BIO_new_mem_buf((void *)str, -1);
+ EVP_PKEY *pkey;
+
+ pkey = PEM_read_bio_PrivateKey(bio, NULL,
+ rd_kafka_transport_ssl_passwd_cb, rk);
+
+ BIO_free(bio);
+
+ return pkey;
+}
+
+/**
+ * @brief Parse a PEM-formatted string into an X509 object.
+ *
+ * @param str Input PEM string, nul-terminated
+ *
+ * @returns a new X509 on success or NULL on error.
+ */
+static X509 *rd_kafka_ssl_X509_from_string(rd_kafka_t *rk, const char *str) {
+ BIO *bio = BIO_new_mem_buf((void *)str, -1);
+ X509 *x509;
+
+ x509 =
+ PEM_read_bio_X509(bio, NULL, rd_kafka_transport_ssl_passwd_cb, rk);
+
+ BIO_free(bio);
+
+ return x509;
+}
+
+
+#ifdef _WIN32
+
+/**
+ * @brief Attempt load CA certificates from a Windows Certificate store.
+ */
+static int rd_kafka_ssl_win_load_cert_store(rd_kafka_t *rk,
+ SSL_CTX *ctx,
+ const char *store_name) {
+ HCERTSTORE w_store;
+ PCCERT_CONTEXT w_cctx = NULL;
+ X509_STORE *store;
+ int fail_cnt = 0, cnt = 0;
+ char errstr[256];
+ wchar_t *wstore_name;
+ size_t wsize = 0;
+ errno_t werr;
+
+ /* Convert store_name to wide-char */
+ werr = mbstowcs_s(&wsize, NULL, 0, store_name, strlen(store_name));
+ if (werr || wsize < 2 || wsize > 1000) {
+ rd_kafka_log(rk, LOG_ERR, "CERTSTORE",
+ "Invalid Windows certificate store name: %.*s%s",
+ 30, store_name,
+ wsize < 2 ? " (empty)" : " (truncated)");
+ return -1;
+ }
+ wstore_name = rd_alloca(sizeof(*wstore_name) * wsize);
+ werr = mbstowcs_s(NULL, wstore_name, wsize, store_name,
+ strlen(store_name));
+ rd_assert(!werr);
+
+ w_store = CertOpenStore(CERT_STORE_PROV_SYSTEM, 0, 0,
+ CERT_SYSTEM_STORE_CURRENT_USER |
+ CERT_STORE_READONLY_FLAG |
+ CERT_STORE_OPEN_EXISTING_FLAG,
+ wstore_name);
+ if (!w_store) {
+ rd_kafka_log(
+ rk, LOG_ERR, "CERTSTORE",
+ "Failed to open Windows certificate "
+ "%s store: %s",
+ store_name,
+ rd_strerror_w32(GetLastError(), errstr, sizeof(errstr)));
+ return -1;
+ }
+
+ /* Get the OpenSSL trust store */
+ store = SSL_CTX_get_cert_store(ctx);
+
+ /* Enumerate the Windows certs */
+ while ((w_cctx = CertEnumCertificatesInStore(w_store, w_cctx))) {
+ X509 *x509;
+
+ /* Parse Windows cert: DER -> X.509 */
+ x509 = d2i_X509(NULL,
+ (const unsigned char **)&w_cctx->pbCertEncoded,
+ (long)w_cctx->cbCertEncoded);
+ if (!x509) {
+ fail_cnt++;
+ continue;
+ }
+
+ /* Add cert to OpenSSL's trust store */
+ if (!X509_STORE_add_cert(store, x509))
+ fail_cnt++;
+ else
+ cnt++;
+
+ X509_free(x509);
+ }
+
+ if (w_cctx)
+ CertFreeCertificateContext(w_cctx);
+
+ CertCloseStore(w_store, 0);
+
+ rd_kafka_dbg(rk, SECURITY, "CERTSTORE",
+ "%d certificate(s) successfully added from "
+ "Windows Certificate %s store, %d failed",
+ cnt, store_name, fail_cnt);
+
+ if (cnt == 0 && fail_cnt > 0)
+ return -1;
+
+ return cnt;
+}
+
+/**
+ * @brief Load certs from the configured CSV list of Windows Cert stores.
+ *
+ * @returns the number of successfully loaded certificates, or -1 on error.
+ */
+static int rd_kafka_ssl_win_load_cert_stores(rd_kafka_t *rk,
+ SSL_CTX *ctx,
+ const char *store_names) {
+ char *s;
+ int cert_cnt = 0, fail_cnt = 0;
+
+ if (!store_names || !*store_names)
+ return 0;
+
+ rd_strdupa(&s, store_names);
+
+ /* Parse CSV list ("Root,CA, , ,Something") and load
+ * each store in order. */
+ while (*s) {
+ char *t;
+ const char *store_name;
+ int r;
+
+ while (isspace((int)*s) || *s == ',')
+ s++;
+
+ if (!*s)
+ break;
+
+ store_name = s;
+
+ t = strchr(s, (int)',');
+ if (t) {
+ *t = '\0';
+ s = t + 1;
+ for (; t >= store_name && isspace((int)*t); t--)
+ *t = '\0';
+ } else {
+ s = "";
+ }
+
+ r = rd_kafka_ssl_win_load_cert_store(rk, ctx, store_name);
+ if (r != -1)
+ cert_cnt += r;
+ else
+ fail_cnt++;
+ }
+
+ if (cert_cnt == 0 && fail_cnt > 0)
+ return -1;
+
+ return cert_cnt;
+}
+#endif /* MSC_VER */
+
+
+
+/**
+ * @brief Probe for the system's CA certificate location and if found set it
+ * on the \p CTX.
+ *
+ * @returns 0 if CA location was set, else -1.
+ */
+static int rd_kafka_ssl_probe_and_set_default_ca_location(rd_kafka_t *rk,
+ SSL_CTX *ctx) {
+#if _WIN32
+ /* No standard location on Windows, CA certs are in the ROOT store. */
+ return -1;
+#else
+ /* The probe paths are based on:
+ * https://www.happyassassin.net/posts/2015/01/12/a-note-about-ssltls-trusted-certificate-stores-and-platforms/
+ * Golang's crypto probing paths:
+ * https://golang.org/search?q=certFiles and certDirectories
+ */
+ static const char *paths[] = {
+ "/etc/pki/tls/certs/ca-bundle.crt",
+ "/etc/ssl/certs/ca-bundle.crt",
+ "/etc/pki/tls/certs/ca-bundle.trust.crt",
+ "/etc/pki/ca-trust/extracted/pem/tls-ca-bundle.pem",
+
+ "/etc/ssl/ca-bundle.pem",
+ "/etc/pki/tls/cacert.pem",
+ "/etc/ssl/cert.pem",
+ "/etc/ssl/cacert.pem",
+
+ "/etc/certs/ca-certificates.crt",
+ "/etc/ssl/certs/ca-certificates.crt",
+
+ "/etc/ssl/certs",
+
+ "/usr/local/etc/ssl/cert.pem",
+ "/usr/local/etc/ssl/cacert.pem",
+
+ "/usr/local/etc/ssl/certs/cert.pem",
+ "/usr/local/etc/ssl/certs/cacert.pem",
+
+ /* BSD */
+ "/usr/local/share/certs/ca-root-nss.crt",
+ "/etc/openssl/certs/ca-certificates.crt",
+#ifdef __APPLE__
+ "/private/etc/ssl/cert.pem",
+ "/private/etc/ssl/certs",
+ "/usr/local/etc/openssl@1.1/cert.pem",
+ "/usr/local/etc/openssl@1.0/cert.pem",
+ "/usr/local/etc/openssl/certs",
+ "/System/Library/OpenSSL",
+#endif
+#ifdef _AIX
+ "/var/ssl/certs/ca-bundle.crt",
+#endif
+ NULL,
+ };
+ const char *path = NULL;
+ int i;
+
+ for (i = 0; (path = paths[i]); i++) {
+ struct stat st;
+ rd_bool_t is_dir;
+ int r;
+
+ if (stat(path, &st) != 0)
+ continue;
+
+ is_dir = S_ISDIR(st.st_mode);
+
+ if (is_dir && rd_kafka_dir_is_empty(path))
+ continue;
+
+ rd_kafka_dbg(rk, SECURITY, "CACERTS",
+ "Setting default CA certificate location "
+ "to %s, override with ssl.ca.location",
+ path);
+
+ r = SSL_CTX_load_verify_locations(ctx, is_dir ? NULL : path,
+ is_dir ? path : NULL);
+ if (r != 1) {
+ char errstr[512];
+ /* Read error and clear the error stack */
+ rd_kafka_ssl_error(rk, NULL, errstr, sizeof(errstr));
+ rd_kafka_dbg(rk, SECURITY, "CACERTS",
+ "Failed to set default CA certificate "
+ "location to %s %s: %s: skipping",
+ is_dir ? "directory" : "file", path,
+ errstr);
+ continue;
+ }
+
+ return 0;
+ }
+
+ rd_kafka_dbg(rk, SECURITY, "CACERTS",
+ "Unable to find any standard CA certificate"
+ "paths: is the ca-certificates package installed?");
+ return -1;
+#endif
+}
+
+
+/**
+ * @brief Registers certificates, keys, etc, on the SSL_CTX
+ *
+ * @returns -1 on error, or 0 on success.
+ */
+static int rd_kafka_ssl_set_certs(rd_kafka_t *rk,
+ SSL_CTX *ctx,
+ char *errstr,
+ size_t errstr_size) {
+ rd_bool_t ca_probe = rd_true;
+ rd_bool_t check_pkey = rd_false;
+ int r;
+
+ /*
+ * ssl_ca, ssl.ca.location, or Windows cert root store,
+ * or default paths.
+ */
+ if (rk->rk_conf.ssl.ca) {
+ /* CA certificate chain set with conf_set_ssl_cert() */
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading CA certificate(s) from memory");
+
+ SSL_CTX_set_cert_store(ctx, rk->rk_conf.ssl.ca->store);
+
+ /* OpenSSL takes ownership of the store */
+ rk->rk_conf.ssl.ca->store = NULL;
+
+ ca_probe = rd_false;
+
+ } else {
+
+ if (rk->rk_conf.ssl.ca_location &&
+ strcmp(rk->rk_conf.ssl.ca_location, "probe")) {
+ /* CA certificate location, either file or directory. */
+ int is_dir =
+ rd_kafka_path_is_dir(rk->rk_conf.ssl.ca_location);
+
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading CA certificate(s) from %s %s",
+ is_dir ? "directory" : "file",
+ rk->rk_conf.ssl.ca_location);
+
+ r = SSL_CTX_load_verify_locations(
+ ctx, !is_dir ? rk->rk_conf.ssl.ca_location : NULL,
+ is_dir ? rk->rk_conf.ssl.ca_location : NULL);
+
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.ca.location failed: ");
+ return -1;
+ }
+
+ ca_probe = rd_false;
+ }
+
+ if (rk->rk_conf.ssl.ca_pem) {
+ /* CA as PEM string */
+ X509 *x509;
+ X509_STORE *store;
+ BIO *bio;
+ int cnt = 0;
+
+ /* Get the OpenSSL trust store */
+ store = SSL_CTX_get_cert_store(ctx);
+ rd_assert(store != NULL);
+
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading CA certificate(s) from string");
+
+ bio =
+ BIO_new_mem_buf((void *)rk->rk_conf.ssl.ca_pem, -1);
+ rd_assert(bio != NULL);
+
+ /* Add all certificates to cert store */
+ while ((x509 = PEM_read_bio_X509(
+ bio, NULL, rd_kafka_transport_ssl_passwd_cb,
+ rk))) {
+ if (!X509_STORE_add_cert(store, x509)) {
+ rd_snprintf(errstr, errstr_size,
+ "failed to add ssl.ca.pem "
+ "certificate "
+ "#%d to CA cert store: ",
+ cnt);
+ X509_free(x509);
+ BIO_free(bio);
+ return -1;
+ }
+
+ X509_free(x509);
+ cnt++;
+ }
+
+ if (!BIO_eof(bio) || !cnt) {
+ rd_snprintf(errstr, errstr_size,
+ "failed to read certificate #%d "
+ "from ssl.ca.pem: "
+ "not in PEM format?: ",
+ cnt);
+ BIO_free(bio);
+ return -1;
+ }
+
+ BIO_free(bio);
+
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loaded %d CA certificate(s) from string",
+ cnt);
+
+
+ ca_probe = rd_false;
+ }
+ }
+
+ if (ca_probe) {
+#ifdef _WIN32
+ /* Attempt to load CA root certificates from the
+ * configured Windows certificate stores. */
+ r = rd_kafka_ssl_win_load_cert_stores(
+ rk, ctx, rk->rk_conf.ssl.ca_cert_stores);
+ if (r == 0) {
+ rd_kafka_log(
+ rk, LOG_NOTICE, "CERTSTORE",
+ "No CA certificates loaded from "
+ "Windows certificate stores: "
+ "falling back to default OpenSSL CA paths");
+ r = -1;
+ } else if (r == -1)
+ rd_kafka_log(
+ rk, LOG_NOTICE, "CERTSTORE",
+ "Failed to load CA certificates from "
+ "Windows certificate stores: "
+ "falling back to default OpenSSL CA paths");
+#else
+ r = -1;
+#endif
+
+ if ((rk->rk_conf.ssl.ca_location &&
+ !strcmp(rk->rk_conf.ssl.ca_location, "probe"))
+#if WITH_STATIC_LIB_libcrypto
+ || r == -1
+#endif
+ ) {
+ /* If OpenSSL was linked statically there is a risk
+ * that the system installed CA certificate path
+ * doesn't match the cert path of OpenSSL.
+ * To circumvent this we check for the existence
+ * of standard CA certificate paths and use the
+ * first one that is found.
+ * Ignore failures. */
+ r = rd_kafka_ssl_probe_and_set_default_ca_location(rk,
+ ctx);
+ }
+
+ if (r == -1) {
+ /* Use default CA certificate paths from linked OpenSSL:
+ * ignore failures */
+
+ r = SSL_CTX_set_default_verify_paths(ctx);
+ if (r != 1) {
+ char errstr2[512];
+ /* Read error and clear the error stack. */
+ rd_kafka_ssl_error(rk, NULL, errstr2,
+ sizeof(errstr2));
+ rd_kafka_dbg(
+ rk, SECURITY, "SSL",
+ "SSL_CTX_set_default_verify_paths() "
+ "failed: %s: ignoring",
+ errstr2);
+ }
+ r = 0;
+ }
+ }
+
+ if (rk->rk_conf.ssl.crl_location) {
+ rd_kafka_dbg(rk, SECURITY, "SSL", "Loading CRL from file %s",
+ rk->rk_conf.ssl.crl_location);
+
+ r = SSL_CTX_load_verify_locations(
+ ctx, rk->rk_conf.ssl.crl_location, NULL);
+
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.crl.location failed: ");
+ return -1;
+ }
+
+
+ rd_kafka_dbg(rk, SECURITY, "SSL", "Enabling CRL checks");
+
+ X509_STORE_set_flags(SSL_CTX_get_cert_store(ctx),
+ X509_V_FLAG_CRL_CHECK);
+ }
+
+
+ /*
+ * ssl_cert, ssl.certificate.location and ssl.certificate.pem
+ */
+ if (rk->rk_conf.ssl.cert) {
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading public key from memory");
+
+ rd_assert(rk->rk_conf.ssl.cert->x509);
+ r = SSL_CTX_use_certificate(ctx, rk->rk_conf.ssl.cert->x509);
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size, "ssl_cert failed: ");
+ return -1;
+ }
+ }
+
+ if (rk->rk_conf.ssl.cert_location) {
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading public key from file %s",
+ rk->rk_conf.ssl.cert_location);
+
+ r = SSL_CTX_use_certificate_chain_file(
+ ctx, rk->rk_conf.ssl.cert_location);
+
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.certificate.location failed: ");
+ return -1;
+ }
+ }
+
+ if (rk->rk_conf.ssl.cert_pem) {
+ X509 *x509;
+
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading public key from string");
+
+ x509 =
+ rd_kafka_ssl_X509_from_string(rk, rk->rk_conf.ssl.cert_pem);
+ if (!x509) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.certificate.pem failed: "
+ "not in PEM format?: ");
+ return -1;
+ }
+
+ r = SSL_CTX_use_certificate(ctx, x509);
+
+ X509_free(x509);
+
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.certificate.pem failed: ");
+ return -1;
+ }
+ }
+
+
+ /*
+ * ssl_key, ssl.key.location and ssl.key.pem
+ */
+ if (rk->rk_conf.ssl.key) {
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading private key file from memory");
+
+ rd_assert(rk->rk_conf.ssl.key->pkey);
+ r = SSL_CTX_use_PrivateKey(ctx, rk->rk_conf.ssl.key->pkey);
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl_key (in-memory) failed: ");
+ return -1;
+ }
+
+ check_pkey = rd_true;
+ }
+
+ if (rk->rk_conf.ssl.key_location) {
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading private key file from %s",
+ rk->rk_conf.ssl.key_location);
+
+ r = SSL_CTX_use_PrivateKey_file(
+ ctx, rk->rk_conf.ssl.key_location, SSL_FILETYPE_PEM);
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.key.location failed: ");
+ return -1;
+ }
+
+ check_pkey = rd_true;
+ }
+
+ if (rk->rk_conf.ssl.key_pem) {
+ EVP_PKEY *pkey;
+
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading private key from string");
+
+ pkey =
+ rd_kafka_ssl_PKEY_from_string(rk, rk->rk_conf.ssl.key_pem);
+ if (!pkey) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.key.pem failed: "
+ "not in PEM format?: ");
+ return -1;
+ }
+
+ r = SSL_CTX_use_PrivateKey(ctx, pkey);
+
+ EVP_PKEY_free(pkey);
+
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.key.pem failed: ");
+ return -1;
+ }
+
+ /* We no longer need the PEM key (it is cached in the CTX),
+ * clear its memory. */
+ rd_kafka_desensitize_str(rk->rk_conf.ssl.key_pem);
+
+ check_pkey = rd_true;
+ }
+
+
+ /*
+ * ssl.keystore.location
+ */
+ if (rk->rk_conf.ssl.keystore_location) {
+ EVP_PKEY *pkey;
+ X509 *cert;
+ STACK_OF(X509) *ca = NULL;
+ BIO *bio;
+ PKCS12 *p12;
+
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading client's keystore file from %s",
+ rk->rk_conf.ssl.keystore_location);
+
+ bio = BIO_new_file(rk->rk_conf.ssl.keystore_location, "rb");
+ if (!bio) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to open ssl.keystore.location: "
+ "%s: ",
+ rk->rk_conf.ssl.keystore_location);
+ return -1;
+ }
+
+ p12 = d2i_PKCS12_bio(bio, NULL);
+ if (!p12) {
+ BIO_free(bio);
+ rd_snprintf(errstr, errstr_size,
+ "Error reading ssl.keystore.location "
+ "PKCS#12 file: %s: ",
+ rk->rk_conf.ssl.keystore_location);
+ return -1;
+ }
+
+ pkey = EVP_PKEY_new();
+ cert = X509_new();
+ if (!PKCS12_parse(p12, rk->rk_conf.ssl.keystore_password, &pkey,
+ &cert, &ca)) {
+ EVP_PKEY_free(pkey);
+ X509_free(cert);
+ PKCS12_free(p12);
+ BIO_free(bio);
+ if (ca != NULL)
+ sk_X509_pop_free(ca, X509_free);
+ rd_snprintf(errstr, errstr_size,
+ "Failed to parse ssl.keystore.location "
+ "PKCS#12 file: %s: ",
+ rk->rk_conf.ssl.keystore_location);
+ return -1;
+ }
+
+ if (ca != NULL)
+ sk_X509_pop_free(ca, X509_free);
+
+ PKCS12_free(p12);
+ BIO_free(bio);
+
+ r = SSL_CTX_use_certificate(ctx, cert);
+ X509_free(cert);
+ if (r != 1) {
+ EVP_PKEY_free(pkey);
+ rd_snprintf(errstr, errstr_size,
+ "Failed to use ssl.keystore.location "
+ "certificate: ");
+ return -1;
+ }
+
+ r = SSL_CTX_use_PrivateKey(ctx, pkey);
+ EVP_PKEY_free(pkey);
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to use ssl.keystore.location "
+ "private key: ");
+ return -1;
+ }
+
+ check_pkey = rd_true;
+ }
+
+#if WITH_SSL_ENGINE
+ /*
+ * If applicable, use OpenSSL engine to fetch SSL certificate.
+ */
+ if (rk->rk_conf.ssl.engine) {
+ STACK_OF(X509_NAME) *cert_names = sk_X509_NAME_new_null();
+ STACK_OF(X509_OBJECT) *roots =
+ X509_STORE_get0_objects(SSL_CTX_get_cert_store(ctx));
+ X509 *x509 = NULL;
+ EVP_PKEY *pkey = NULL;
+ int i = 0;
+ for (i = 0; i < sk_X509_OBJECT_num(roots); i++) {
+ x509 = X509_OBJECT_get0_X509(
+ sk_X509_OBJECT_value(roots, i));
+
+ if (x509)
+ sk_X509_NAME_push(cert_names,
+ X509_get_subject_name(x509));
+ }
+
+ if (cert_names)
+ sk_X509_NAME_free(cert_names);
+
+ x509 = NULL;
+ r = ENGINE_load_ssl_client_cert(
+ rk->rk_conf.ssl.engine, NULL, cert_names, &x509, &pkey,
+ NULL, NULL, rk->rk_conf.ssl.engine_callback_data);
+
+ sk_X509_NAME_free(cert_names);
+ if (r == -1 || !x509 || !pkey) {
+ X509_free(x509);
+ EVP_PKEY_free(pkey);
+ if (r == -1)
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL "
+ "ENGINE_load_ssl_client_cert "
+ "failed: ");
+ else if (!x509)
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL engine failed to "
+ "load certificate: ");
+ else
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL engine failed to "
+ "load private key: ");
+
+ return -1;
+ }
+
+ r = SSL_CTX_use_certificate(ctx, x509);
+ X509_free(x509);
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to use SSL_CTX_use_certificate "
+ "with engine: ");
+ EVP_PKEY_free(pkey);
+ return -1;
+ }
+
+ r = SSL_CTX_use_PrivateKey(ctx, pkey);
+ EVP_PKEY_free(pkey);
+ if (r != 1) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to use SSL_CTX_use_PrivateKey "
+ "with engine: ");
+ return -1;
+ }
+
+ check_pkey = rd_true;
+ }
+#endif /*WITH_SSL_ENGINE*/
+
+ /* Check that a valid private/public key combo was set. */
+ if (check_pkey && SSL_CTX_check_private_key(ctx) != 1) {
+ rd_snprintf(errstr, errstr_size, "Private key check failed: ");
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief Once per rd_kafka_t handle cleanup of OpenSSL
+ *
+ * @locality any thread
+ *
+ * @locks rd_kafka_wrlock() MUST be held
+ */
+void rd_kafka_ssl_ctx_term(rd_kafka_t *rk) {
+ SSL_CTX_free(rk->rk_conf.ssl.ctx);
+ rk->rk_conf.ssl.ctx = NULL;
+
+#if WITH_SSL_ENGINE
+ RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free);
+#endif
+}
+
+
+#if WITH_SSL_ENGINE
+/**
+ * @brief Initialize and load OpenSSL engine, if configured.
+ *
+ * @returns true on success, false on error.
+ */
+static rd_bool_t
+rd_kafka_ssl_ctx_init_engine(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
+ ENGINE *engine;
+
+ /* OpenSSL loads an engine as dynamic id and stores it in
+ * internal list, as per LIST_ADD command below. If engine
+ * already exists in internal list, it is supposed to be
+ * fetched using engine id.
+ */
+ engine = ENGINE_by_id(rk->rk_conf.ssl.engine_id);
+ if (!engine) {
+ engine = ENGINE_by_id("dynamic");
+ if (!engine) {
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL engine initialization failed in"
+ " ENGINE_by_id: ");
+ return rd_false;
+ }
+ }
+
+ if (!ENGINE_ctrl_cmd_string(engine, "SO_PATH",
+ rk->rk_conf.ssl.engine_location, 0)) {
+ ENGINE_free(engine);
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL engine initialization failed in"
+ " ENGINE_ctrl_cmd_string SO_PATH: ");
+ return rd_false;
+ }
+
+ if (!ENGINE_ctrl_cmd_string(engine, "LIST_ADD", "1", 0)) {
+ ENGINE_free(engine);
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL engine initialization failed in"
+ " ENGINE_ctrl_cmd_string LIST_ADD: ");
+ return rd_false;
+ }
+
+ if (!ENGINE_ctrl_cmd_string(engine, "LOAD", NULL, 0)) {
+ ENGINE_free(engine);
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL engine initialization failed in"
+ " ENGINE_ctrl_cmd_string LOAD: ");
+ return rd_false;
+ }
+
+ if (!ENGINE_init(engine)) {
+ ENGINE_free(engine);
+ rd_snprintf(errstr, errstr_size,
+ "OpenSSL engine initialization failed in"
+ " ENGINE_init: ");
+ return rd_false;
+ }
+
+ rk->rk_conf.ssl.engine = engine;
+
+ return rd_true;
+}
+#endif
+
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+/**
+ * @brief Wrapper around OSSL_PROVIDER_unload() to expose a free(void*) API
+ * suitable for rd_list_t's free_cb.
+ */
+static void rd_kafka_ssl_OSSL_PROVIDER_free(void *ptr) {
+ OSSL_PROVIDER *prov = ptr;
+ (void)OSSL_PROVIDER_unload(prov);
+}
+
+
+/**
+ * @brief Load OpenSSL 3.0.x providers specified in comma-separated string.
+ *
+ * @remark Only the error preamble/prefix is written here, the actual
+ * OpenSSL error is retrieved from the OpenSSL error stack by
+ * the caller.
+ *
+ * @returns rd_false on failure (errstr will be written to), or rd_true
+ * on successs.
+ */
+static rd_bool_t rd_kafka_ssl_ctx_load_providers(rd_kafka_t *rk,
+ const char *providers_csv,
+ char *errstr,
+ size_t errstr_size) {
+ size_t provider_cnt, i;
+ char **providers = rd_string_split(
+ providers_csv, ',', rd_true /*skip empty*/, &provider_cnt);
+
+
+ if (!providers || !provider_cnt) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.providers expects a comma-separated "
+ "list of OpenSSL 3.0.x providers");
+ if (providers)
+ rd_free(providers);
+ return rd_false;
+ }
+
+ rd_list_init(&rk->rk_conf.ssl.loaded_providers, (int)provider_cnt,
+ rd_kafka_ssl_OSSL_PROVIDER_free);
+
+ for (i = 0; i < provider_cnt; i++) {
+ const char *provider = providers[i];
+ OSSL_PROVIDER *prov;
+ const char *buildinfo = NULL;
+ OSSL_PARAM request[] = {{"buildinfo", OSSL_PARAM_UTF8_PTR,
+ (void *)&buildinfo, 0, 0},
+ {NULL, 0, NULL, 0, 0}};
+
+ prov = OSSL_PROVIDER_load(NULL, provider);
+ if (!prov) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to load OpenSSL provider \"%s\": ",
+ provider);
+ rd_free(providers);
+ return rd_false;
+ }
+
+ if (!OSSL_PROVIDER_get_params(prov, request))
+ buildinfo = "no buildinfo";
+
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "OpenSSL provider \"%s\" loaded (%s)", provider,
+ buildinfo);
+
+ rd_list_add(&rk->rk_conf.ssl.loaded_providers, prov);
+ }
+
+ rd_free(providers);
+
+ return rd_true;
+}
+#endif
+
+
+
+/**
+ * @brief Once per rd_kafka_t handle initialization of OpenSSL
+ *
+ * @locality application thread
+ *
+ * @locks rd_kafka_wrlock() MUST be held
+ */
+int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size) {
+ int r;
+ SSL_CTX *ctx = NULL;
+ const char *linking =
+#if WITH_STATIC_LIB_libcrypto
+ "statically linked "
+#else
+ ""
+#endif
+ ;
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000
+ rd_kafka_dbg(rk, SECURITY, "OPENSSL",
+ "Using %sOpenSSL version %s "
+ "(0x%lx, librdkafka built with 0x%lx)",
+ linking, OpenSSL_version(OPENSSL_VERSION),
+ OpenSSL_version_num(), OPENSSL_VERSION_NUMBER);
+#else
+ rd_kafka_dbg(rk, SECURITY, "OPENSSL",
+ "librdkafka built with %sOpenSSL version 0x%lx", linking,
+ OPENSSL_VERSION_NUMBER);
+#endif
+
+ if (errstr_size > 0)
+ errstr[0] = '\0';
+
+#if OPENSSL_VERSION_NUMBER >= 0x30000000
+ if (rk->rk_conf.ssl.providers &&
+ !rd_kafka_ssl_ctx_load_providers(rk, rk->rk_conf.ssl.providers,
+ errstr, errstr_size))
+ goto fail;
+#endif
+
+#if WITH_SSL_ENGINE
+ if (rk->rk_conf.ssl.engine_location && !rk->rk_conf.ssl.engine) {
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Loading OpenSSL engine from \"%s\"",
+ rk->rk_conf.ssl.engine_location);
+ if (!rd_kafka_ssl_ctx_init_engine(rk, errstr, errstr_size))
+ goto fail;
+ }
+#endif
+
+#if OPENSSL_VERSION_NUMBER >= 0x10100000
+ ctx = SSL_CTX_new(TLS_client_method());
+#else
+ ctx = SSL_CTX_new(SSLv23_client_method());
+#endif
+ if (!ctx) {
+ rd_snprintf(errstr, errstr_size, "SSL_CTX_new() failed: ");
+ goto fail;
+ }
+
+#ifdef SSL_OP_NO_SSLv3
+ /* Disable SSLv3 (unsafe) */
+ SSL_CTX_set_options(ctx, SSL_OP_NO_SSLv3);
+#endif
+
+ /* Key file password callback */
+ SSL_CTX_set_default_passwd_cb(ctx, rd_kafka_transport_ssl_passwd_cb);
+ SSL_CTX_set_default_passwd_cb_userdata(ctx, rk);
+
+ /* Ciphers */
+ if (rk->rk_conf.ssl.cipher_suites) {
+ rd_kafka_dbg(rk, SECURITY, "SSL", "Setting cipher list: %s",
+ rk->rk_conf.ssl.cipher_suites);
+ if (!SSL_CTX_set_cipher_list(ctx,
+ rk->rk_conf.ssl.cipher_suites)) {
+ /* Set a string that will prefix the
+ * the OpenSSL error message (which is lousy)
+ * to make it more meaningful. */
+ rd_snprintf(errstr, errstr_size,
+ "ssl.cipher.suites failed: ");
+ goto fail;
+ }
+ }
+
+ /* Set up broker certificate verification. */
+ SSL_CTX_set_verify(ctx,
+ rk->rk_conf.ssl.enable_verify ? SSL_VERIFY_PEER
+ : SSL_VERIFY_NONE,
+ rk->rk_conf.ssl.cert_verify_cb
+ ? rd_kafka_transport_ssl_cert_verify_cb
+ : NULL);
+
+#if OPENSSL_VERSION_NUMBER >= 0x1000200fL && !defined(LIBRESSL_VERSION_NUMBER)
+ /* Curves */
+ if (rk->rk_conf.ssl.curves_list) {
+ rd_kafka_dbg(rk, SECURITY, "SSL", "Setting curves list: %s",
+ rk->rk_conf.ssl.curves_list);
+ if (!SSL_CTX_set1_curves_list(ctx,
+ rk->rk_conf.ssl.curves_list)) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.curves.list failed: ");
+ goto fail;
+ }
+ }
+
+ /* Certificate signature algorithms */
+ if (rk->rk_conf.ssl.sigalgs_list) {
+ rd_kafka_dbg(rk, SECURITY, "SSL",
+ "Setting signature algorithms list: %s",
+ rk->rk_conf.ssl.sigalgs_list);
+ if (!SSL_CTX_set1_sigalgs_list(ctx,
+ rk->rk_conf.ssl.sigalgs_list)) {
+ rd_snprintf(errstr, errstr_size,
+ "ssl.sigalgs.list failed: ");
+ goto fail;
+ }
+ }
+#endif
+
+ /* Register certificates, keys, etc. */
+ if (rd_kafka_ssl_set_certs(rk, ctx, errstr, errstr_size) == -1)
+ goto fail;
+
+
+ SSL_CTX_set_mode(ctx, SSL_MODE_ENABLE_PARTIAL_WRITE);
+
+ rk->rk_conf.ssl.ctx = ctx;
+
+ return 0;
+
+fail:
+ r = (int)strlen(errstr);
+ /* If only the error preamble is provided in errstr and ending with
+ * "....: ", then retrieve the last error from the OpenSSL error stack,
+ * else treat the errstr as complete. */
+ if (r > 2 && !strcmp(&errstr[r - 2], ": "))
+ rd_kafka_ssl_error(rk, NULL, errstr + r,
+ (int)errstr_size > r ? (int)errstr_size - r
+ : 0);
+ RD_IF_FREE(ctx, SSL_CTX_free);
+#if WITH_SSL_ENGINE
+ RD_IF_FREE(rk->rk_conf.ssl.engine, ENGINE_free);
+#endif
+ rd_list_destroy(&rk->rk_conf.ssl.loaded_providers);
+
+ return -1;
+}
+
+
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+static RD_UNUSED void
+rd_kafka_transport_ssl_lock_cb(int mode, int i, const char *file, int line) {
+ if (mode & CRYPTO_LOCK)
+ mtx_lock(&rd_kafka_ssl_locks[i]);
+ else
+ mtx_unlock(&rd_kafka_ssl_locks[i]);
+}
+#endif
+
+static RD_UNUSED unsigned long rd_kafka_transport_ssl_threadid_cb(void) {
+#ifdef _WIN32
+ /* Windows makes a distinction between thread handle
+ * and thread id, which means we can't use the
+ * thrd_current() API that returns the handle. */
+ return (unsigned long)GetCurrentThreadId();
+#else
+ return (unsigned long)(intptr_t)thrd_current();
+#endif
+}
+
+#ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK
+static void
+rd_kafka_transport_libcrypto_THREADID_callback(CRYPTO_THREADID *id) {
+ unsigned long thread_id = rd_kafka_transport_ssl_threadid_cb();
+
+ CRYPTO_THREADID_set_numeric(id, thread_id);
+}
+#endif
+
+/**
+ * @brief Global OpenSSL cleanup.
+ */
+void rd_kafka_ssl_term(void) {
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+ int i;
+
+ if (CRYPTO_get_locking_callback() == &rd_kafka_transport_ssl_lock_cb) {
+ CRYPTO_set_locking_callback(NULL);
+#ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK
+ CRYPTO_THREADID_set_callback(NULL);
+#else
+ CRYPTO_set_id_callback(NULL);
+#endif
+
+ for (i = 0; i < rd_kafka_ssl_locks_cnt; i++)
+ mtx_destroy(&rd_kafka_ssl_locks[i]);
+
+ rd_free(rd_kafka_ssl_locks);
+ }
+#endif
+}
+
+
+/**
+ * @brief Global (once per process) OpenSSL init.
+ */
+void rd_kafka_ssl_init(void) {
+#if OPENSSL_VERSION_NUMBER < 0x10100000L
+ int i;
+
+ if (!CRYPTO_get_locking_callback()) {
+ rd_kafka_ssl_locks_cnt = CRYPTO_num_locks();
+ rd_kafka_ssl_locks = rd_malloc(rd_kafka_ssl_locks_cnt *
+ sizeof(*rd_kafka_ssl_locks));
+ for (i = 0; i < rd_kafka_ssl_locks_cnt; i++)
+ mtx_init(&rd_kafka_ssl_locks[i], mtx_plain);
+
+ CRYPTO_set_locking_callback(rd_kafka_transport_ssl_lock_cb);
+
+#ifdef HAVE_OPENSSL_CRYPTO_THREADID_SET_CALLBACK
+ CRYPTO_THREADID_set_callback(
+ rd_kafka_transport_libcrypto_THREADID_callback);
+#else
+ CRYPTO_set_id_callback(rd_kafka_transport_ssl_threadid_cb);
+#endif
+ }
+
+ /* OPENSSL_init_ssl(3) and OPENSSL_init_crypto(3) say:
+ * "As of version 1.1.0 OpenSSL will automatically allocate
+ * all resources that it needs so no explicit initialisation
+ * is required. Similarly it will also automatically
+ * deinitialise as required."
+ */
+ SSL_load_error_strings();
+ SSL_library_init();
+
+ ERR_load_BIO_strings();
+ ERR_load_crypto_strings();
+ OpenSSL_add_all_algorithms();
+#endif
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h
new file mode 100644
index 000000000..325abbe1d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_ssl.h
@@ -0,0 +1,57 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDKAFKA_SSL_H_
+#define _RDKAFKA_SSL_H_
+
+void rd_kafka_transport_ssl_close(rd_kafka_transport_t *rktrans);
+int rd_kafka_transport_ssl_connect(rd_kafka_broker_t *rkb,
+ rd_kafka_transport_t *rktrans,
+ char *errstr,
+ size_t errstr_size);
+int rd_kafka_transport_ssl_handshake(rd_kafka_transport_t *rktrans);
+ssize_t rd_kafka_transport_ssl_send(rd_kafka_transport_t *rktrans,
+ rd_slice_t *slice,
+ char *errstr,
+ size_t errstr_size);
+ssize_t rd_kafka_transport_ssl_recv(rd_kafka_transport_t *rktrans,
+ rd_buf_t *rbuf,
+ char *errstr,
+ size_t errstr_size);
+
+
+void rd_kafka_ssl_ctx_term(rd_kafka_t *rk);
+int rd_kafka_ssl_ctx_init(rd_kafka_t *rk, char *errstr, size_t errstr_size);
+
+void rd_kafka_ssl_term(void);
+void rd_kafka_ssl_init(void);
+
+const char *rd_kafka_ssl_last_error_str(void);
+
+#endif /* _RDKAFKA_SSL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c
new file mode 100644
index 000000000..8e76ddb14
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_sticky_assignor.c
@@ -0,0 +1,3428 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rdkafka_int.h"
+#include "rdkafka_assignor.h"
+#include "rdkafka_request.h"
+#include "rdmap.h"
+#include "rdunittest.h"
+
+#include <stdarg.h>
+#include <math.h> /* abs() */
+
+/**
+ * @name KIP-54 and KIP-341 Sticky assignor.
+ *
+ * Closely mimicking the official Apache Kafka AbstractStickyAssignor
+ * implementation.
+ */
+
+/** FIXME
+ * Remaining:
+ * isSticky() -- used by tests
+ */
+
+
+/** @brief Assignor state from last rebalance */
+typedef struct rd_kafka_sticky_assignor_state_s {
+ rd_kafka_topic_partition_list_t *prev_assignment;
+ int32_t generation_id;
+} rd_kafka_sticky_assignor_state_t;
+
+
+
+/**
+ * Auxilliary glue types
+ */
+
+/**
+ * @struct ConsumerPair_t represents a pair of consumer member ids involved in
+ * a partition reassignment, indicating a source consumer a partition
+ * is moving from and a destination partition the same partition is
+ * moving to.
+ *
+ * @sa PartitionMovements_t
+ */
+typedef struct ConsumerPair_s {
+ const char *src; /**< Source member id */
+ const char *dst; /**< Destination member id */
+} ConsumerPair_t;
+
+
+static ConsumerPair_t *ConsumerPair_new(const char *src, const char *dst) {
+ ConsumerPair_t *cpair;
+
+ cpair = rd_malloc(sizeof(*cpair));
+ cpair->src = src ? rd_strdup(src) : NULL;
+ cpair->dst = dst ? rd_strdup(dst) : NULL;
+
+ return cpair;
+}
+
+
+static void ConsumerPair_free(void *p) {
+ ConsumerPair_t *cpair = p;
+ if (cpair->src)
+ rd_free((void *)cpair->src);
+ if (cpair->dst)
+ rd_free((void *)cpair->dst);
+ rd_free(cpair);
+}
+
+static int ConsumerPair_cmp(const void *_a, const void *_b) {
+ const ConsumerPair_t *a = _a, *b = _b;
+ int r = strcmp(a->src ? a->src : "", b->src ? b->src : "");
+ if (r)
+ return r;
+ return strcmp(a->dst ? a->dst : "", b->dst ? b->dst : "");
+}
+
+
+static unsigned int ConsumerPair_hash(const void *_a) {
+ const ConsumerPair_t *a = _a;
+ return 31 * (a->src ? rd_map_str_hash(a->src) : 1) +
+ (a->dst ? rd_map_str_hash(a->dst) : 1);
+}
+
+
+
+typedef struct ConsumerGenerationPair_s {
+ const char *consumer; /**< Memory owned by caller */
+ int generation;
+} ConsumerGenerationPair_t;
+
+static void ConsumerGenerationPair_destroy(void *ptr) {
+ ConsumerGenerationPair_t *cgpair = ptr;
+ rd_free(cgpair);
+}
+
+/**
+ * @param consumer This memory will be referenced, not copied, and thus must
+ * outlive the ConsumerGenerationPair_t object.
+ */
+static ConsumerGenerationPair_t *
+ConsumerGenerationPair_new(const char *consumer, int generation) {
+ ConsumerGenerationPair_t *cgpair = rd_malloc(sizeof(*cgpair));
+ cgpair->consumer = consumer;
+ cgpair->generation = generation;
+ return cgpair;
+}
+
+static int ConsumerGenerationPair_cmp_generation(const void *_a,
+ const void *_b) {
+ const ConsumerGenerationPair_t *a = _a, *b = _b;
+ return a->generation - b->generation;
+}
+
+
+
+/**
+ * Hash map types.
+ *
+ * Naming convention is:
+ * map_<keytype>_<valuetype>_t
+ *
+ * Where the keytype and valuetype are spoken names of the types and
+ * not the specific C types (since that'd be too long).
+ */
+typedef RD_MAP_TYPE(const char *,
+ rd_kafka_topic_partition_list_t *) map_str_toppar_list_t;
+
+typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
+ const char *) map_toppar_str_t;
+
+typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
+ rd_list_t *) map_toppar_list_t;
+
+typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
+ ConsumerGenerationPair_t *) map_toppar_cgpair_t;
+
+typedef RD_MAP_TYPE(const rd_kafka_topic_partition_t *,
+ ConsumerPair_t *) map_toppar_cpair_t;
+
+typedef RD_MAP_TYPE(const ConsumerPair_t *,
+ rd_kafka_topic_partition_list_t *) map_cpair_toppar_list_t;
+
+/* map<string, map<ConsumerPair*, topic_partition_list_t*>> */
+typedef RD_MAP_TYPE(const char *,
+ map_cpair_toppar_list_t *) map_str_map_cpair_toppar_list_t;
+
+
+
+/** Glue type helpers */
+
+static map_cpair_toppar_list_t *map_cpair_toppar_list_t_new(void) {
+ map_cpair_toppar_list_t *map = rd_calloc(1, sizeof(*map));
+
+ RD_MAP_INIT(map, 0, ConsumerPair_cmp, ConsumerPair_hash, NULL,
+ rd_kafka_topic_partition_list_destroy_free);
+
+ return map;
+}
+
+static void map_cpair_toppar_list_t_free(void *ptr) {
+ map_cpair_toppar_list_t *map = ptr;
+ RD_MAP_DESTROY(map);
+ rd_free(map);
+}
+
+
+
+/**
+ * @struct Provides current state of partition movements between consumers
+ * for each topic, and possible movements for each partition.
+ */
+typedef struct PartitionMovements_s {
+ map_toppar_cpair_t partitionMovements;
+ map_str_map_cpair_toppar_list_t partitionMovementsByTopic;
+} PartitionMovements_t;
+
+
+static void PartitionMovements_init(PartitionMovements_t *pmov,
+ size_t topic_cnt) {
+ RD_MAP_INIT(&pmov->partitionMovements, topic_cnt * 3,
+ rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
+ NULL, ConsumerPair_free);
+
+ RD_MAP_INIT(&pmov->partitionMovementsByTopic, topic_cnt, rd_map_str_cmp,
+ rd_map_str_hash, NULL, map_cpair_toppar_list_t_free);
+}
+
+static void PartitionMovements_destroy(PartitionMovements_t *pmov) {
+ RD_MAP_DESTROY(&pmov->partitionMovementsByTopic);
+ RD_MAP_DESTROY(&pmov->partitionMovements);
+}
+
+
+static ConsumerPair_t *PartitionMovements_removeMovementRecordOfPartition(
+ PartitionMovements_t *pmov,
+ const rd_kafka_topic_partition_t *toppar) {
+
+ ConsumerPair_t *cpair;
+ map_cpair_toppar_list_t *partitionMovementsForThisTopic;
+ rd_kafka_topic_partition_list_t *plist;
+
+ cpair = RD_MAP_GET(&pmov->partitionMovements, toppar);
+ rd_assert(cpair);
+
+ partitionMovementsForThisTopic =
+ RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic);
+
+ plist = RD_MAP_GET(partitionMovementsForThisTopic, cpair);
+ rd_assert(plist);
+
+ rd_kafka_topic_partition_list_del(plist, toppar->topic,
+ toppar->partition);
+ if (plist->cnt == 0)
+ RD_MAP_DELETE(partitionMovementsForThisTopic, cpair);
+ if (RD_MAP_IS_EMPTY(partitionMovementsForThisTopic))
+ RD_MAP_DELETE(&pmov->partitionMovementsByTopic, toppar->topic);
+
+ return cpair;
+}
+
+static void PartitionMovements_addPartitionMovementRecord(
+ PartitionMovements_t *pmov,
+ const rd_kafka_topic_partition_t *toppar,
+ ConsumerPair_t *cpair) {
+ map_cpair_toppar_list_t *partitionMovementsForThisTopic;
+ rd_kafka_topic_partition_list_t *plist;
+
+ RD_MAP_SET(&pmov->partitionMovements, toppar, cpair);
+
+ partitionMovementsForThisTopic =
+ RD_MAP_GET_OR_SET(&pmov->partitionMovementsByTopic, toppar->topic,
+ map_cpair_toppar_list_t_new());
+
+ plist = RD_MAP_GET_OR_SET(partitionMovementsForThisTopic, cpair,
+ rd_kafka_topic_partition_list_new(16));
+
+ rd_kafka_topic_partition_list_add(plist, toppar->topic,
+ toppar->partition);
+}
+
+static void
+PartitionMovements_movePartition(PartitionMovements_t *pmov,
+ const rd_kafka_topic_partition_t *toppar,
+ const char *old_consumer,
+ const char *new_consumer) {
+
+ if (RD_MAP_GET(&pmov->partitionMovements, toppar)) {
+ /* This partition has previously moved */
+ ConsumerPair_t *existing_cpair;
+
+ existing_cpair =
+ PartitionMovements_removeMovementRecordOfPartition(pmov,
+ toppar);
+
+ rd_assert(!rd_strcmp(existing_cpair->dst, old_consumer));
+
+ if (rd_strcmp(existing_cpair->src, new_consumer)) {
+ /* Partition is not moving back to its
+ * previous consumer */
+ PartitionMovements_addPartitionMovementRecord(
+ pmov, toppar,
+ ConsumerPair_new(existing_cpair->src,
+ new_consumer));
+ }
+ } else {
+ PartitionMovements_addPartitionMovementRecord(
+ pmov, toppar, ConsumerPair_new(old_consumer, new_consumer));
+ }
+}
+
+static const rd_kafka_topic_partition_t *
+PartitionMovements_getTheActualPartitionToBeMoved(
+ PartitionMovements_t *pmov,
+ const rd_kafka_topic_partition_t *toppar,
+ const char *oldConsumer,
+ const char *newConsumer) {
+
+ ConsumerPair_t *cpair;
+ ConsumerPair_t reverse_cpair = {.src = newConsumer, .dst = oldConsumer};
+ map_cpair_toppar_list_t *partitionMovementsForThisTopic;
+ rd_kafka_topic_partition_list_t *plist;
+
+ if (!RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic))
+ return toppar;
+
+ cpair = RD_MAP_GET(&pmov->partitionMovements, toppar);
+ if (cpair) {
+ /* This partition has previously moved */
+ rd_assert(!rd_strcmp(oldConsumer, cpair->dst));
+
+ oldConsumer = cpair->src;
+ }
+
+ partitionMovementsForThisTopic =
+ RD_MAP_GET(&pmov->partitionMovementsByTopic, toppar->topic);
+
+ plist = RD_MAP_GET(partitionMovementsForThisTopic, &reverse_cpair);
+ if (!plist)
+ return toppar;
+
+ return &plist->elems[0];
+}
+
+#if FIXME
+
+static rd_bool_t hasCycles(map_cpair_toppar_list_t *pairs) {
+ return rd_true; // FIXME
+}
+
+/**
+ * @remark This method is only used by the AbstractStickyAssignorTest
+ * in the Java client.
+ */
+static rd_bool_t PartitionMovements_isSticky(rd_kafka_t *rk,
+ PartitionMovements_t *pmov) {
+ const char *topic;
+ map_cpair_toppar_list_t *topicMovementPairs;
+
+ RD_MAP_FOREACH(topic, topicMovementPairs,
+ &pmov->partitionMovementsByTopic) {
+ if (hasCycles(topicMovementPairs)) {
+ const ConsumerPair_t *cpair;
+ const rd_kafka_topic_partition_list_t *partitions;
+
+ rd_kafka_log(
+ rk, LOG_ERR, "STICKY",
+ "Sticky assignor: Stickiness is violated for "
+ "topic %s: partition movements for this topic "
+ "occurred among the following consumers: ",
+ topic);
+ RD_MAP_FOREACH(cpair, partitions, topicMovementPairs) {
+ rd_kafka_log(rk, LOG_ERR, "STICKY", " %s -> %s",
+ cpair->src, cpair->dst);
+ }
+
+ if (partitions)
+ ; /* Avoid unused warning */
+
+ return rd_false;
+ }
+ }
+
+ return rd_true;
+}
+#endif
+
+
+/**
+ * @brief Comparator to sort ascendingly by rd_map_elem_t object value as
+ * topic partition list count, or by member id if the list count is
+ * identical.
+ * Used to sort sortedCurrentSubscriptions list.
+ *
+ * elem.key is the consumer member id string,
+ * elem.value is the partition list.
+ */
+static int sort_by_map_elem_val_toppar_list_cnt(const void *_a,
+ const void *_b) {
+ const rd_map_elem_t *a = _a, *b = _b;
+ const rd_kafka_topic_partition_list_t *al = a->value, *bl = b->value;
+ int r = al->cnt - bl->cnt;
+ if (r)
+ return r;
+ return strcmp((const char *)a->key, (const char *)b->key);
+}
+
+
+/**
+ * @brief Assign partition to the most eligible consumer.
+ *
+ * The assignment should improve the overall balance of the partition
+ * assignments to consumers.
+ */
+static void
+assignPartition(const rd_kafka_topic_partition_t *partition,
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
+ map_str_toppar_list_t *currentAssignment,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
+ map_toppar_str_t *currentPartitionConsumer) {
+ const rd_map_elem_t *elem;
+ int i;
+
+ RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
+ const char *consumer = (const char *)elem->key;
+ const rd_kafka_topic_partition_list_t *partitions;
+
+ partitions =
+ RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
+ if (!rd_kafka_topic_partition_list_find(
+ partitions, partition->topic, partition->partition))
+ continue;
+
+ rd_kafka_topic_partition_list_add(
+ RD_MAP_GET(currentAssignment, consumer), partition->topic,
+ partition->partition);
+
+ RD_MAP_SET(currentPartitionConsumer,
+ rd_kafka_topic_partition_copy(partition), consumer);
+
+ /* Re-sort sortedCurrentSubscriptions since this consumer's
+ * assignment count has increased.
+ * This is an O(N) operation since it is a single shuffle. */
+ rd_list_sort(sortedCurrentSubscriptions,
+ sort_by_map_elem_val_toppar_list_cnt);
+ return;
+ }
+}
+
+/**
+ * @returns true if the partition has two or more potential consumers.
+ */
+static RD_INLINE rd_bool_t partitionCanParticipateInReassignment(
+ const rd_kafka_topic_partition_t *partition,
+ map_toppar_list_t *partition2AllPotentialConsumers) {
+ rd_list_t *consumers;
+
+ if (!(consumers =
+ RD_MAP_GET(partition2AllPotentialConsumers, partition)))
+ return rd_false;
+
+ return rd_list_cnt(consumers) >= 2;
+}
+
+
+/**
+ * @returns true if consumer can participate in reassignment based on
+ * its current assignment.
+ */
+static RD_INLINE rd_bool_t consumerCanParticipateInReassignment(
+ rd_kafka_t *rk,
+ const char *consumer,
+ map_str_toppar_list_t *currentAssignment,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
+ map_toppar_list_t *partition2AllPotentialConsumers) {
+ const rd_kafka_topic_partition_list_t *currentPartitions =
+ RD_MAP_GET(currentAssignment, consumer);
+ int currentAssignmentSize = currentPartitions->cnt;
+ int maxAssignmentSize =
+ RD_MAP_GET(consumer2AllPotentialPartitions, consumer)->cnt;
+ int i;
+
+ /* FIXME: And then what? Is this a local error? If so, assert. */
+ if (currentAssignmentSize > maxAssignmentSize)
+ rd_kafka_log(rk, LOG_ERR, "STICKY",
+ "Sticky assignor error: "
+ "Consumer %s is assigned more partitions (%d) "
+ "than the maximum possible (%d)",
+ consumer, currentAssignmentSize,
+ maxAssignmentSize);
+
+ /* If a consumer is not assigned all its potential partitions it is
+ * subject to reassignment. */
+ if (currentAssignmentSize < maxAssignmentSize)
+ return rd_true;
+
+ /* If any of the partitions assigned to a consumer is subject to
+ * reassignment the consumer itself is subject to reassignment. */
+ for (i = 0; i < currentPartitions->cnt; i++) {
+ const rd_kafka_topic_partition_t *partition =
+ &currentPartitions->elems[i];
+
+ if (partitionCanParticipateInReassignment(
+ partition, partition2AllPotentialConsumers))
+ return rd_true;
+ }
+
+ return rd_false;
+}
+
+
+/**
+ * @brief Process moving partition from old consumer to new consumer.
+ */
+static void processPartitionMovement(
+ rd_kafka_t *rk,
+ PartitionMovements_t *partitionMovements,
+ const rd_kafka_topic_partition_t *partition,
+ const char *newConsumer,
+ map_str_toppar_list_t *currentAssignment,
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
+ map_toppar_str_t *currentPartitionConsumer) {
+
+ const char *oldConsumer =
+ RD_MAP_GET(currentPartitionConsumer, partition);
+
+ PartitionMovements_movePartition(partitionMovements, partition,
+ oldConsumer, newConsumer);
+
+ rd_kafka_topic_partition_list_add(
+ RD_MAP_GET(currentAssignment, newConsumer), partition->topic,
+ partition->partition);
+
+ rd_kafka_topic_partition_list_del(
+ RD_MAP_GET(currentAssignment, oldConsumer), partition->topic,
+ partition->partition);
+
+ RD_MAP_SET(currentPartitionConsumer,
+ rd_kafka_topic_partition_copy(partition), newConsumer);
+
+ /* Re-sort after assignment count has changed. */
+ rd_list_sort(sortedCurrentSubscriptions,
+ sort_by_map_elem_val_toppar_list_cnt);
+
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
+ "%s [%" PRId32 "] %sassigned to %s (from %s)",
+ partition->topic, partition->partition,
+ oldConsumer ? "re" : "", newConsumer,
+ oldConsumer ? oldConsumer : "(none)");
+}
+
+
+/**
+ * @brief Reassign \p partition to \p newConsumer
+ */
+static void reassignPartitionToConsumer(
+ rd_kafka_t *rk,
+ PartitionMovements_t *partitionMovements,
+ const rd_kafka_topic_partition_t *partition,
+ map_str_toppar_list_t *currentAssignment,
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
+ map_toppar_str_t *currentPartitionConsumer,
+ const char *newConsumer) {
+
+ const char *consumer = RD_MAP_GET(currentPartitionConsumer, partition);
+ const rd_kafka_topic_partition_t *partitionToBeMoved;
+
+ /* Find the correct partition movement considering
+ * the stickiness requirement. */
+ partitionToBeMoved = PartitionMovements_getTheActualPartitionToBeMoved(
+ partitionMovements, partition, consumer, newConsumer);
+
+ processPartitionMovement(rk, partitionMovements, partitionToBeMoved,
+ newConsumer, currentAssignment,
+ sortedCurrentSubscriptions,
+ currentPartitionConsumer);
+}
+
+/**
+ * @brief Reassign \p partition to an eligible new consumer.
+ */
+static void
+reassignPartition(rd_kafka_t *rk,
+ PartitionMovements_t *partitionMovements,
+ const rd_kafka_topic_partition_t *partition,
+ map_str_toppar_list_t *currentAssignment,
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
+ map_toppar_str_t *currentPartitionConsumer,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions) {
+
+ const rd_map_elem_t *elem;
+ int i;
+
+ /* Find the new consumer */
+ RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
+ const char *newConsumer = (const char *)elem->key;
+
+ if (rd_kafka_topic_partition_list_find(
+ RD_MAP_GET(consumer2AllPotentialPartitions,
+ newConsumer),
+ partition->topic, partition->partition)) {
+ reassignPartitionToConsumer(
+ rk, partitionMovements, partition,
+ currentAssignment, sortedCurrentSubscriptions,
+ currentPartitionConsumer, newConsumer);
+
+ return;
+ }
+ }
+
+ rd_assert(!*"reassignPartition(): no new consumer found");
+}
+
+
+
+/**
+ * @brief Determine if the current assignment is balanced.
+ *
+ * @param currentAssignment the assignment whose balance needs to be checked
+ * @param sortedCurrentSubscriptions an ascending sorted set of consumers based
+ * on how many topic partitions are already
+ * assigned to them
+ * @param consumer2AllPotentialPartitions a mapping of all consumers to all
+ * potential topic partitions that can be
+ * assigned to them.
+ * This parameter is called
+ * allSubscriptions in the Java
+ * implementation, but we choose this
+ * name to be more consistent with its
+ * use elsewhere in the code.
+ * @param partition2AllPotentialConsumers a mapping of all partitions to
+ * all potential consumers.
+ *
+ * @returns true if the given assignment is balanced; false otherwise
+ */
+static rd_bool_t
+isBalanced(rd_kafka_t *rk,
+ map_str_toppar_list_t *currentAssignment,
+ const rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
+ map_toppar_list_t *partition2AllPotentialConsumers) {
+
+ int minimum = ((const rd_kafka_topic_partition_list_t
+ *)((const rd_map_elem_t *)rd_list_first(
+ sortedCurrentSubscriptions))
+ ->value)
+ ->cnt;
+ int maximum = ((const rd_kafka_topic_partition_list_t
+ *)((const rd_map_elem_t *)rd_list_last(
+ sortedCurrentSubscriptions))
+ ->value)
+ ->cnt;
+
+ /* Mapping from partitions to the consumer assigned to them */
+ // FIXME: don't create prior to min/max check below */
+ map_toppar_str_t allPartitions = RD_MAP_INITIALIZER(
+ RD_MAP_CNT(partition2AllPotentialConsumers),
+ rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
+ NULL /* references currentAssignment */,
+ NULL /* references currentAssignment */);
+
+ /* Iterators */
+ const rd_kafka_topic_partition_list_t *partitions;
+ const char *consumer;
+ const rd_map_elem_t *elem;
+ int i;
+
+ /* The assignment is balanced if minimum and maximum numbers of
+ * partitions assigned to consumers differ by at most one. */
+ if (minimum >= maximum - 1) {
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
+ "Assignment is balanced: "
+ "minimum %d and maximum %d partitions assigned "
+ "to each consumer",
+ minimum, maximum);
+ RD_MAP_DESTROY(&allPartitions);
+ return rd_true;
+ }
+
+ /* Create a mapping from partitions to the consumer assigned to them */
+ RD_MAP_FOREACH(consumer, partitions, currentAssignment) {
+
+ for (i = 0; i < partitions->cnt; i++) {
+ const rd_kafka_topic_partition_t *partition =
+ &partitions->elems[i];
+ const char *existing;
+ if ((existing = RD_MAP_GET(&allPartitions, partition)))
+ rd_kafka_log(rk, LOG_ERR, "STICKY",
+ "Sticky assignor: %s [%" PRId32
+ "] "
+ "is assigned to more than one "
+ "consumer (%s and %s)",
+ partition->topic,
+ partition->partition, existing,
+ consumer);
+
+ RD_MAP_SET(&allPartitions, partition, consumer);
+ }
+ }
+
+
+ /* For each consumer that does not have all the topic partitions it
+ * can get make sure none of the topic partitions it could but did
+ * not get cannot be moved to it, because that would break the balance.
+ *
+ * Note: Since sortedCurrentSubscriptions elements are pointers to
+ * currentAssignment's element we get both the consumer
+ * and partition list in elem here. */
+ RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
+ const char *consumer = (const char *)elem->key;
+ const rd_kafka_topic_partition_list_t *potentialTopicPartitions;
+ const rd_kafka_topic_partition_list_t *consumerPartitions;
+
+ consumerPartitions =
+ (const rd_kafka_topic_partition_list_t *)elem->value;
+
+ potentialTopicPartitions =
+ RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
+
+ /* Skip if this consumer already has all the topic partitions
+ * it can get. */
+ if (consumerPartitions->cnt == potentialTopicPartitions->cnt)
+ continue;
+
+ /* Otherwise make sure it can't get any more partitions */
+
+ for (i = 0; i < potentialTopicPartitions->cnt; i++) {
+ const rd_kafka_topic_partition_t *partition =
+ &potentialTopicPartitions->elems[i];
+ const char *otherConsumer;
+ int otherConsumerPartitionCount;
+
+ if (rd_kafka_topic_partition_list_find(
+ consumerPartitions, partition->topic,
+ partition->partition))
+ continue;
+
+ otherConsumer = RD_MAP_GET(&allPartitions, partition);
+ otherConsumerPartitionCount =
+ RD_MAP_GET(currentAssignment, otherConsumer)->cnt;
+
+ if (consumerPartitions->cnt <
+ otherConsumerPartitionCount) {
+ rd_kafka_dbg(
+ rk, ASSIGNOR, "STICKY",
+ "%s [%" PRId32
+ "] can be moved from "
+ "consumer %s (%d partition(s)) to "
+ "consumer %s (%d partition(s)) "
+ "for a more balanced assignment",
+ partition->topic, partition->partition,
+ otherConsumer, otherConsumerPartitionCount,
+ consumer, consumerPartitions->cnt);
+ RD_MAP_DESTROY(&allPartitions);
+ return rd_false;
+ }
+ }
+ }
+
+ RD_MAP_DESTROY(&allPartitions);
+ return rd_true;
+}
+
+
+/**
+ * @brief Perform reassignment.
+ *
+ * @returns true if reassignment was performed.
+ */
+static rd_bool_t
+performReassignments(rd_kafka_t *rk,
+ PartitionMovements_t *partitionMovements,
+ rd_kafka_topic_partition_list_t *reassignablePartitions,
+ map_str_toppar_list_t *currentAssignment,
+ map_toppar_cgpair_t *prevAssignment,
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
+ map_toppar_list_t *partition2AllPotentialConsumers,
+ map_toppar_str_t *currentPartitionConsumer) {
+ rd_bool_t reassignmentPerformed = rd_false;
+ rd_bool_t modified, saveIsBalanced = rd_false;
+ int iterations = 0;
+
+ /* Repeat reassignment until no partition can be moved to
+ * improve the balance. */
+ do {
+ int i;
+
+ iterations++;
+
+ modified = rd_false;
+
+ /* Reassign all reassignable partitions (starting from the
+ * partition with least potential consumers and if needed)
+ * until the full list is processed or a balance is achieved. */
+
+ for (i = 0; i < reassignablePartitions->cnt &&
+ !isBalanced(rk, currentAssignment,
+ sortedCurrentSubscriptions,
+ consumer2AllPotentialPartitions,
+ partition2AllPotentialConsumers);
+ i++) {
+ const rd_kafka_topic_partition_t *partition =
+ &reassignablePartitions->elems[i];
+ const rd_list_t *consumers = RD_MAP_GET(
+ partition2AllPotentialConsumers, partition);
+ const char *consumer, *otherConsumer;
+ const ConsumerGenerationPair_t *prevcgp;
+ const rd_kafka_topic_partition_list_t *currAssignment;
+ int j;
+
+ /* FIXME: Is this a local error/bug? If so, assert */
+ if (rd_list_cnt(consumers) <= 1)
+ rd_kafka_log(
+ rk, LOG_ERR, "STICKY",
+ "Sticky assignor: expected more than "
+ "one potential consumer for partition "
+ "%s [%" PRId32 "]",
+ partition->topic, partition->partition);
+
+ /* The partition must have a current consumer */
+ consumer =
+ RD_MAP_GET(currentPartitionConsumer, partition);
+ rd_assert(consumer);
+
+ currAssignment =
+ RD_MAP_GET(currentAssignment, consumer);
+ prevcgp = RD_MAP_GET(prevAssignment, partition);
+
+ if (prevcgp &&
+ currAssignment->cnt >
+ RD_MAP_GET(currentAssignment, prevcgp->consumer)
+ ->cnt +
+ 1) {
+ reassignPartitionToConsumer(
+ rk, partitionMovements, partition,
+ currentAssignment,
+ sortedCurrentSubscriptions,
+ currentPartitionConsumer,
+ prevcgp->consumer);
+ reassignmentPerformed = rd_true;
+ modified = rd_true;
+ continue;
+ }
+
+ /* Check if a better-suited consumer exists for the
+ * partition; if so, reassign it. */
+ RD_LIST_FOREACH(otherConsumer, consumers, j) {
+ if (consumer == otherConsumer)
+ continue;
+
+ if (currAssignment->cnt <=
+ RD_MAP_GET(currentAssignment, otherConsumer)
+ ->cnt +
+ 1)
+ continue;
+
+ reassignPartition(
+ rk, partitionMovements, partition,
+ currentAssignment,
+ sortedCurrentSubscriptions,
+ currentPartitionConsumer,
+ consumer2AllPotentialPartitions);
+
+ reassignmentPerformed = rd_true;
+ modified = rd_true;
+ break;
+ }
+ }
+
+ if (i < reassignablePartitions->cnt)
+ saveIsBalanced = rd_true;
+
+ } while (modified);
+
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
+ "Reassignment %sperformed after %d iteration(s) of %d "
+ "reassignable partition(s)%s",
+ reassignmentPerformed ? "" : "not ", iterations,
+ reassignablePartitions->cnt,
+ saveIsBalanced ? ": assignment is balanced" : "");
+
+ return reassignmentPerformed;
+}
+
+
+/**
+ * @returns the balance score of the given assignment, as the sum of assigned
+ * partitions size difference of all consumer pairs.
+ *
+ * A perfectly balanced assignment (with all consumers getting the same number
+ * of partitions) has a balance score of 0.
+ *
+ * Lower balance score indicates a more balanced assignment.
+ * FIXME: should be called imbalance score then?
+ */
+static int getBalanceScore(map_str_toppar_list_t *assignment) {
+ const char *consumer;
+ const rd_kafka_topic_partition_list_t *partitions;
+ int *sizes;
+ int cnt = 0;
+ int score = 0;
+ int i, next;
+
+ /* If there is just a single consumer the assignment will be balanced */
+ if (RD_MAP_CNT(assignment) < 2)
+ return 0;
+
+ sizes = rd_malloc(sizeof(*sizes) * RD_MAP_CNT(assignment));
+
+ RD_MAP_FOREACH(consumer, partitions, assignment)
+ sizes[cnt++] = partitions->cnt;
+
+ for (next = 0; next < cnt; next++)
+ for (i = next + 1; i < cnt; i++)
+ score += abs(sizes[next] - sizes[i]);
+
+ rd_free(sizes);
+
+ if (consumer)
+ ; /* Avoid unused warning */
+
+ return score;
+}
+
+
+
+/**
+ * @brief Balance the current assignment using the data structures
+ * created in assign_cb(). */
+static void balance(rd_kafka_t *rk,
+ PartitionMovements_t *partitionMovements,
+ map_str_toppar_list_t *currentAssignment,
+ map_toppar_cgpair_t *prevAssignment,
+ rd_kafka_topic_partition_list_t *sortedPartitions,
+ rd_kafka_topic_partition_list_t *unassignedPartitions,
+ rd_list_t *sortedCurrentSubscriptions /*rd_map_elem_t*/,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
+ map_toppar_list_t *partition2AllPotentialConsumers,
+ map_toppar_str_t *currentPartitionConsumer,
+ rd_bool_t revocationRequired) {
+
+ /* If the consumer with most assignments (thus the last element
+ * in the ascendingly ordered sortedCurrentSubscriptions list) has
+ * zero partitions assigned it means there is no current assignment
+ * for any consumer and the group is thus initializing for the first
+ * time. */
+ rd_bool_t initializing = ((const rd_kafka_topic_partition_list_t
+ *)((const rd_map_elem_t *)rd_list_last(
+ sortedCurrentSubscriptions))
+ ->value)
+ ->cnt == 0;
+ rd_bool_t reassignmentPerformed = rd_false;
+
+ map_str_toppar_list_t fixedAssignments =
+ RD_MAP_INITIALIZER(RD_MAP_CNT(partition2AllPotentialConsumers),
+ rd_map_str_cmp,
+ rd_map_str_hash,
+ NULL,
+ NULL /* Will transfer ownership of the list
+ * to currentAssignment at the end of
+ * this function. */);
+
+ map_str_toppar_list_t preBalanceAssignment = RD_MAP_INITIALIZER(
+ RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash,
+ NULL /* references currentAssignment */,
+ rd_kafka_topic_partition_list_destroy_free);
+ map_toppar_str_t preBalancePartitionConsumers = RD_MAP_INITIALIZER(
+ RD_MAP_CNT(partition2AllPotentialConsumers),
+ rd_kafka_topic_partition_cmp, rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free,
+ NULL /* refs currentPartitionConsumer */);
+ int newScore, oldScore;
+ /* Iterator variables */
+ const rd_kafka_topic_partition_t *partition;
+ const void *ignore;
+ const rd_map_elem_t *elem;
+ int i;
+
+ /* Assign all unassigned partitions */
+ for (i = 0; i < unassignedPartitions->cnt; i++) {
+ partition = &unassignedPartitions->elems[i];
+
+ /* Skip if there is no potential consumer for the partition.
+ * FIXME: How could this be? */
+ if (rd_list_empty(RD_MAP_GET(partition2AllPotentialConsumers,
+ partition))) {
+ rd_dassert(!*"sticky assignor bug");
+ continue;
+ }
+
+ assignPartition(
+ partition, sortedCurrentSubscriptions, currentAssignment,
+ consumer2AllPotentialPartitions, currentPartitionConsumer);
+ }
+
+
+ /* Narrow down the reassignment scope to only those partitions that can
+ * actually be reassigned. */
+ RD_MAP_FOREACH(partition, ignore, partition2AllPotentialConsumers) {
+ if (partitionCanParticipateInReassignment(
+ partition, partition2AllPotentialConsumers))
+ continue;
+
+ rd_kafka_topic_partition_list_del(
+ sortedPartitions, partition->topic, partition->partition);
+ rd_kafka_topic_partition_list_del(unassignedPartitions,
+ partition->topic,
+ partition->partition);
+ }
+
+ if (ignore)
+ ; /* Avoid unused warning */
+
+
+ /* Narrow down the reassignment scope to only those consumers that are
+ * subject to reassignment. */
+ RD_LIST_FOREACH(elem, sortedCurrentSubscriptions, i) {
+ const char *consumer = (const char *)elem->key;
+ rd_kafka_topic_partition_list_t *partitions;
+
+ if (consumerCanParticipateInReassignment(
+ rk, consumer, currentAssignment,
+ consumer2AllPotentialPartitions,
+ partition2AllPotentialConsumers))
+ continue;
+
+ rd_list_remove_elem(sortedCurrentSubscriptions, i);
+ i--; /* Since the current element is removed we need
+ * to rewind the iterator. */
+
+ partitions = rd_kafka_topic_partition_list_copy(
+ RD_MAP_GET(currentAssignment, consumer));
+ RD_MAP_DELETE(currentAssignment, consumer);
+
+ RD_MAP_SET(&fixedAssignments, consumer, partitions);
+ }
+
+
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
+ "Prepared balanced reassignment for %d consumers, "
+ "%d available partition(s) where of %d are unassigned "
+ "(initializing=%s, revocationRequired=%s, "
+ "%d fixed assignments)",
+ (int)RD_MAP_CNT(consumer2AllPotentialPartitions),
+ sortedPartitions->cnt, unassignedPartitions->cnt,
+ initializing ? "true" : "false",
+ revocationRequired ? "true" : "false",
+ (int)RD_MAP_CNT(&fixedAssignments));
+
+ /* Create a deep copy of the current assignment so we can revert to it
+ * if we do not get a more balanced assignment later. */
+ RD_MAP_COPY(&preBalanceAssignment, currentAssignment,
+ NULL /* just reference the key */,
+ (rd_map_copy_t *)rd_kafka_topic_partition_list_copy);
+ RD_MAP_COPY(&preBalancePartitionConsumers, currentPartitionConsumer,
+ rd_kafka_topic_partition_copy_void,
+ NULL /* references assign_cb(members) fields */);
+
+
+ /* If we don't already need to revoke something due to subscription
+ * changes, first try to balance by only moving newly added partitions.
+ */
+ if (!revocationRequired && unassignedPartitions->cnt > 0)
+ performReassignments(
+ rk, partitionMovements, unassignedPartitions,
+ currentAssignment, prevAssignment,
+ sortedCurrentSubscriptions, consumer2AllPotentialPartitions,
+ partition2AllPotentialConsumers, currentPartitionConsumer);
+
+ reassignmentPerformed = performReassignments(
+ rk, partitionMovements, sortedPartitions, currentAssignment,
+ prevAssignment, sortedCurrentSubscriptions,
+ consumer2AllPotentialPartitions, partition2AllPotentialConsumers,
+ currentPartitionConsumer);
+
+ /* If we are not preserving existing assignments and we have made
+ * changes to the current assignment make sure we are getting a more
+ * balanced assignment; otherwise, revert to previous assignment. */
+
+ if (!initializing && reassignmentPerformed &&
+ (newScore = getBalanceScore(currentAssignment)) >=
+ (oldScore = getBalanceScore(&preBalanceAssignment))) {
+
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
+ "Reassignment performed but keeping previous "
+ "assignment since balance score did not improve: "
+ "new score %d (%d consumers) vs "
+ "old score %d (%d consumers): "
+ "lower score is better",
+ newScore, (int)RD_MAP_CNT(currentAssignment),
+ oldScore, (int)RD_MAP_CNT(&preBalanceAssignment));
+
+ RD_MAP_COPY(
+ currentAssignment, &preBalanceAssignment,
+ NULL /* just reference the key */,
+ (rd_map_copy_t *)rd_kafka_topic_partition_list_copy);
+
+ RD_MAP_CLEAR(currentPartitionConsumer);
+ RD_MAP_COPY(currentPartitionConsumer,
+ &preBalancePartitionConsumers,
+ rd_kafka_topic_partition_copy_void,
+ NULL /* references assign_cb(members) fields */);
+ }
+
+ RD_MAP_DESTROY(&preBalancePartitionConsumers);
+ RD_MAP_DESTROY(&preBalanceAssignment);
+
+ /* Add the fixed assignments (those that could not change) back. */
+ if (!RD_MAP_IS_EMPTY(&fixedAssignments)) {
+ const rd_map_elem_t *elem;
+
+ RD_MAP_FOREACH_ELEM(elem, &fixedAssignments.rmap) {
+ const char *consumer = elem->key;
+ rd_kafka_topic_partition_list_t *partitions =
+ (rd_kafka_topic_partition_list_t *)elem->value;
+
+ RD_MAP_SET(currentAssignment, consumer, partitions);
+
+ rd_list_add(sortedCurrentSubscriptions, (void *)elem);
+ }
+
+ /* Re-sort */
+ rd_list_sort(sortedCurrentSubscriptions,
+ sort_by_map_elem_val_toppar_list_cnt);
+ }
+
+ RD_MAP_DESTROY(&fixedAssignments);
+}
+
+
+
+/**
+ * @brief Populate subscriptions, current and previous assignments based on the
+ * \p members assignments.
+ */
+static void prepopulateCurrentAssignments(
+ rd_kafka_t *rk,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ map_str_toppar_list_t *subscriptions,
+ map_str_toppar_list_t *currentAssignment,
+ map_toppar_cgpair_t *prevAssignment,
+ map_toppar_str_t *currentPartitionConsumer,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
+ size_t estimated_partition_cnt) {
+
+ /* We need to process subscriptions' user data with each consumer's
+ * reported generation in mind.
+ * Higher generations overwrite lower generations in case of a conflict.
+ * Conflicts will only exist if user data is for different generations.
+ */
+
+ /* For each partition we create a sorted list (by generation) of
+ * its consumers. */
+ RD_MAP_LOCAL_INITIALIZER(
+ sortedPartitionConsumersByGeneration, member_cnt * 10 /* FIXME */,
+ const rd_kafka_topic_partition_t *,
+ /* List of ConsumerGenerationPair_t */
+ rd_list_t *, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash, NULL, rd_list_destroy_free);
+ const rd_kafka_topic_partition_t *partition;
+ rd_list_t *consumers;
+ int i;
+
+ /* For each partition that is currently assigned to the group members
+ * add the member and its generation to
+ * sortedPartitionConsumersByGeneration (which is sorted afterwards)
+ * indexed by the partition. */
+ for (i = 0; i < (int)member_cnt; i++) {
+ rd_kafka_group_member_t *consumer = &members[i];
+ int j;
+
+ RD_MAP_SET(subscriptions, consumer->rkgm_member_id->str,
+ consumer->rkgm_subscription);
+
+ RD_MAP_SET(currentAssignment, consumer->rkgm_member_id->str,
+ rd_kafka_topic_partition_list_new(10));
+
+ RD_MAP_SET(consumer2AllPotentialPartitions,
+ consumer->rkgm_member_id->str,
+ rd_kafka_topic_partition_list_new(
+ (int)estimated_partition_cnt));
+
+ if (!consumer->rkgm_owned)
+ continue;
+
+ for (j = 0; j < (int)consumer->rkgm_owned->cnt; j++) {
+ partition = &consumer->rkgm_owned->elems[j];
+
+ consumers = RD_MAP_GET_OR_SET(
+ &sortedPartitionConsumersByGeneration, partition,
+ rd_list_new(10, ConsumerGenerationPair_destroy));
+
+ if (consumer->rkgm_generation != -1 &&
+ rd_list_find(
+ consumers, &consumer->rkgm_generation,
+ ConsumerGenerationPair_cmp_generation)) {
+ rd_kafka_log(
+ rk, LOG_WARNING, "STICKY",
+ "Sticky assignor: "
+ "%s [%" PRId32
+ "] is assigned to "
+ "multiple consumers with same "
+ "generation %d: "
+ "skipping member %.*s",
+ partition->topic, partition->partition,
+ consumer->rkgm_generation,
+ RD_KAFKAP_STR_PR(consumer->rkgm_member_id));
+ continue;
+ }
+
+ rd_list_add(consumers,
+ ConsumerGenerationPair_new(
+ consumer->rkgm_member_id->str,
+ consumer->rkgm_generation));
+
+ RD_MAP_SET(currentPartitionConsumer,
+ rd_kafka_topic_partition_copy(partition),
+ consumer->rkgm_member_id->str);
+ }
+ }
+
+ /* Populate currentAssignment and prevAssignment.
+ * prevAssignment holds the prior ConsumerGenerationPair_t
+ * (before current) of each partition. */
+ RD_MAP_FOREACH(partition, consumers,
+ &sortedPartitionConsumersByGeneration) {
+ /* current and previous are the last two consumers
+ * of each partition. */
+ ConsumerGenerationPair_t *current, *previous;
+ rd_kafka_topic_partition_list_t *partitions;
+
+ /* Sort the per-partition consumers list by generation */
+ rd_list_sort(consumers, ConsumerGenerationPair_cmp_generation);
+
+ /* Add current (highest generation) consumer
+ * to currentAssignment. */
+ current = rd_list_elem(consumers, 0);
+ partitions = RD_MAP_GET(currentAssignment, current->consumer);
+ rd_kafka_topic_partition_list_add(partitions, partition->topic,
+ partition->partition);
+
+ /* Add previous (next highest generation) consumer, if any,
+ * to prevAssignment. */
+ previous = rd_list_elem(consumers, 1);
+ if (previous)
+ RD_MAP_SET(
+ prevAssignment,
+ rd_kafka_topic_partition_copy(partition),
+ ConsumerGenerationPair_new(previous->consumer,
+ previous->generation));
+ }
+
+ RD_MAP_DESTROY(&sortedPartitionConsumersByGeneration);
+}
+
+
+/**
+ * @brief Populate maps for potential partitions per consumer and vice-versa.
+ */
+static void
+populatePotentialMaps(const rd_kafka_assignor_topic_t *atopic,
+ map_toppar_list_t *partition2AllPotentialConsumers,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions,
+ size_t estimated_partition_cnt) {
+ int i;
+ const rd_kafka_group_member_t *rkgm;
+
+ /* for each eligible (subscribed and available) topic (\p atopic):
+ * for each member subscribing to that topic:
+ * and for each partition of that topic:
+ * add consumer and partition to:
+ * partition2AllPotentialConsumers
+ * consumer2AllPotentialPartitions
+ */
+
+ RD_LIST_FOREACH(rkgm, &atopic->members, i) {
+ const char *consumer = rkgm->rkgm_member_id->str;
+ rd_kafka_topic_partition_list_t *partitions =
+ RD_MAP_GET(consumer2AllPotentialPartitions, consumer);
+ int j;
+
+ rd_assert(partitions != NULL);
+
+ for (j = 0; j < atopic->metadata->partition_cnt; j++) {
+ rd_kafka_topic_partition_t *partition;
+ rd_list_t *consumers;
+
+ /* consumer2AllPotentialPartitions[consumer] += part */
+ partition = rd_kafka_topic_partition_list_add(
+ partitions, atopic->metadata->topic,
+ atopic->metadata->partitions[j].id);
+
+ /* partition2AllPotentialConsumers[part] += consumer */
+ if (!(consumers =
+ RD_MAP_GET(partition2AllPotentialConsumers,
+ partition))) {
+ consumers = rd_list_new(
+ RD_MAX(2, (int)estimated_partition_cnt / 2),
+ NULL);
+ RD_MAP_SET(
+ partition2AllPotentialConsumers,
+ rd_kafka_topic_partition_copy(partition),
+ consumers);
+ }
+ rd_list_add(consumers, (void *)consumer);
+ }
+ }
+}
+
+
+/**
+ * @returns true if all consumers have identical subscriptions based on
+ * the currently available topics and partitions.
+ *
+ * @remark The Java code checks both partition2AllPotentialConsumers and
+ * and consumer2AllPotentialPartitions but since these maps
+ * are symmetrical we only check one of them.
+ * ^ FIXME, but we do.
+ */
+static rd_bool_t areSubscriptionsIdentical(
+ map_toppar_list_t *partition2AllPotentialConsumers,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions) {
+ const void *ignore;
+ const rd_list_t *lcurr, *lprev = NULL;
+ const rd_kafka_topic_partition_list_t *pcurr, *pprev = NULL;
+
+ RD_MAP_FOREACH(ignore, lcurr, partition2AllPotentialConsumers) {
+ if (lprev && rd_list_cmp(lcurr, lprev, rd_map_str_cmp))
+ return rd_false;
+ lprev = lcurr;
+ }
+
+ RD_MAP_FOREACH(ignore, pcurr, consumer2AllPotentialPartitions) {
+ if (pprev && rd_kafka_topic_partition_list_cmp(
+ pcurr, pprev, rd_kafka_topic_partition_cmp))
+ return rd_false;
+ pprev = pcurr;
+ }
+
+ if (ignore) /* Avoid unused warning */
+ ;
+
+ return rd_true;
+}
+
+
+/**
+ * @brief Comparator to sort an rd_kafka_topic_partition_list_t in ascending
+ * order by the number of list elements in the .opaque field, or
+ * secondarily by the topic name.
+ * Used by sortPartitions().
+ */
+static int
+toppar_sort_by_list_cnt(const void *_a, const void *_b, void *opaque) {
+ const rd_kafka_topic_partition_t *a = _a, *b = _b;
+ const rd_list_t *al = a->opaque, *bl = b->opaque;
+ int r = rd_list_cnt(al) - rd_list_cnt(bl); /* ascending order */
+ if (r)
+ return r;
+ return rd_kafka_topic_partition_cmp(a, b);
+}
+
+
+/**
+ * @brief Sort valid partitions so they are processed in the potential
+ * reassignment phase in the proper order that causes minimal partition
+ * movement among consumers (hence honouring maximal stickiness).
+ *
+ * @returns The result of the partitions sort.
+ */
+static rd_kafka_topic_partition_list_t *
+sortPartitions(rd_kafka_t *rk,
+ map_str_toppar_list_t *currentAssignment,
+ map_toppar_cgpair_t *prevAssignment,
+ rd_bool_t isFreshAssignment,
+ map_toppar_list_t *partition2AllPotentialConsumers,
+ map_str_toppar_list_t *consumer2AllPotentialPartitions) {
+
+ rd_kafka_topic_partition_list_t *sortedPartitions;
+ map_str_toppar_list_t assignments = RD_MAP_INITIALIZER(
+ RD_MAP_CNT(currentAssignment), rd_map_str_cmp, rd_map_str_hash,
+ NULL, rd_kafka_topic_partition_list_destroy_free);
+ rd_kafka_topic_partition_list_t *partitions;
+ const rd_kafka_topic_partition_t *partition;
+ const rd_list_t *consumers;
+ const char *consumer;
+ rd_list_t sortedConsumers; /* element is the (rd_map_elem_t *) from
+ * assignments. */
+ const rd_map_elem_t *elem;
+ rd_bool_t wasEmpty;
+ int i;
+
+ sortedPartitions = rd_kafka_topic_partition_list_new(
+ (int)RD_MAP_CNT(partition2AllPotentialConsumers));
+ ;
+
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
+ "Sort %d partitions in %s assignment",
+ (int)RD_MAP_CNT(partition2AllPotentialConsumers),
+ isFreshAssignment ? "fresh" : "existing");
+
+ if (isFreshAssignment ||
+ !areSubscriptionsIdentical(partition2AllPotentialConsumers,
+ consumer2AllPotentialPartitions)) {
+ /* Create an ascending sorted list of partitions based on
+ * how many consumers can potentially use them. */
+ RD_MAP_FOREACH(partition, consumers,
+ partition2AllPotentialConsumers) {
+ rd_kafka_topic_partition_list_add(sortedPartitions,
+ partition->topic,
+ partition->partition)
+ ->opaque = (void *)consumers;
+ }
+
+ rd_kafka_topic_partition_list_sort(
+ sortedPartitions, toppar_sort_by_list_cnt, NULL);
+
+ RD_MAP_DESTROY(&assignments);
+
+ return sortedPartitions;
+ }
+
+ /* If this is a reassignment and the subscriptions are identical
+ * then we just need to list partitions in a round robin fashion
+ * (from consumers with most assigned partitions to those
+ * with least assigned partitions). */
+
+ /* Create an ascending sorted list of consumers by valid
+ * partition count. The list element is the `rd_map_elem_t *`
+ * of the assignments map. This allows us to get a sorted list
+ * of consumers without too much data duplication. */
+ rd_list_init(&sortedConsumers, (int)RD_MAP_CNT(currentAssignment),
+ NULL);
+
+ RD_MAP_FOREACH(consumer, partitions, currentAssignment) {
+ rd_kafka_topic_partition_list_t *partitions2;
+
+ /* Sort assigned partitions for consistency (during tests) */
+ rd_kafka_topic_partition_list_sort(partitions, NULL, NULL);
+
+ partitions2 =
+ rd_kafka_topic_partition_list_new(partitions->cnt);
+
+ for (i = 0; i < partitions->cnt; i++) {
+ partition = &partitions->elems[i];
+
+ /* Only add partitions from the current assignment
+ * that still exist. */
+ if (RD_MAP_GET(partition2AllPotentialConsumers,
+ partition))
+ rd_kafka_topic_partition_list_add(
+ partitions2, partition->topic,
+ partition->partition);
+ }
+
+ if (partitions2->cnt > 0) {
+ elem = RD_MAP_SET(&assignments, consumer, partitions2);
+ rd_list_add(&sortedConsumers, (void *)elem);
+ } else
+ rd_kafka_topic_partition_list_destroy(partitions2);
+ }
+
+ /* Sort consumers */
+ rd_list_sort(&sortedConsumers, sort_by_map_elem_val_toppar_list_cnt);
+
+ /* At this point sortedConsumers contains an ascending-sorted list
+ * of consumers based on how many valid partitions are currently
+ * assigned to them. */
+
+ while (!rd_list_empty(&sortedConsumers)) {
+ /* Take consumer with most partitions */
+ const rd_map_elem_t *elem = rd_list_last(&sortedConsumers);
+ const char *consumer = (const char *)elem->key;
+ /* Currently assigned partitions to this consumer */
+ rd_kafka_topic_partition_list_t *remainingPartitions =
+ RD_MAP_GET(&assignments, consumer);
+ /* Partitions that were assigned to a different consumer
+ * last time */
+ rd_kafka_topic_partition_list_t *prevPartitions =
+ rd_kafka_topic_partition_list_new(
+ (int)RD_MAP_CNT(prevAssignment));
+ rd_bool_t reSort = rd_true;
+
+ /* From the partitions that had a different consumer before,
+ * keep only those that are assigned to this consumer now. */
+ for (i = 0; i < remainingPartitions->cnt; i++) {
+ partition = &remainingPartitions->elems[i];
+ if (RD_MAP_GET(prevAssignment, partition))
+ rd_kafka_topic_partition_list_add(
+ prevPartitions, partition->topic,
+ partition->partition);
+ }
+
+ if (prevPartitions->cnt > 0) {
+ /* If there is a partition of this consumer that was
+ * assigned to another consumer before, then mark
+ * it as a good option for reassignment. */
+ partition = &prevPartitions->elems[0];
+
+ rd_kafka_topic_partition_list_del(remainingPartitions,
+ partition->topic,
+ partition->partition);
+
+ rd_kafka_topic_partition_list_add(sortedPartitions,
+ partition->topic,
+ partition->partition);
+
+ rd_kafka_topic_partition_list_del_by_idx(prevPartitions,
+ 0);
+
+ } else if (remainingPartitions->cnt > 0) {
+ /* Otherwise mark any other one of the current
+ * partitions as a reassignment candidate. */
+ partition = &remainingPartitions->elems[0];
+
+ rd_kafka_topic_partition_list_add(sortedPartitions,
+ partition->topic,
+ partition->partition);
+
+ rd_kafka_topic_partition_list_del_by_idx(
+ remainingPartitions, 0);
+ } else {
+ rd_list_remove_elem(&sortedConsumers,
+ rd_list_cnt(&sortedConsumers) - 1);
+ /* No need to re-sort the list (below) */
+ reSort = rd_false;
+ }
+
+ rd_kafka_topic_partition_list_destroy(prevPartitions);
+
+ if (reSort) {
+ /* Re-sort the list to keep the consumer with the most
+ * partitions at the end of the list.
+ * This should be an O(N) operation given it is at most
+ * a single shuffle. */
+ rd_list_sort(&sortedConsumers,
+ sort_by_map_elem_val_toppar_list_cnt);
+ }
+ }
+
+
+ wasEmpty = !sortedPartitions->cnt;
+
+ RD_MAP_FOREACH(partition, consumers, partition2AllPotentialConsumers)
+ rd_kafka_topic_partition_list_upsert(sortedPartitions, partition->topic,
+ partition->partition);
+
+ /* If all partitions were added in the foreach loop just above
+ * it means there is no order to retain from the sorderConsumer loop
+ * below and we sort the partitions according to their topic+partition
+ * to get consistent results (mainly in tests). */
+ if (wasEmpty)
+ rd_kafka_topic_partition_list_sort(sortedPartitions, NULL,
+ NULL);
+
+ rd_list_destroy(&sortedConsumers);
+ RD_MAP_DESTROY(&assignments);
+
+ return sortedPartitions;
+}
+
+
+/**
+ * @brief Transfer currentAssignment to members array.
+ */
+static void assignToMembers(map_str_toppar_list_t *currentAssignment,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt) {
+ size_t i;
+
+ for (i = 0; i < member_cnt; i++) {
+ rd_kafka_group_member_t *rkgm = &members[i];
+ const rd_kafka_topic_partition_list_t *partitions =
+ RD_MAP_GET(currentAssignment, rkgm->rkgm_member_id->str);
+ if (rkgm->rkgm_assignment)
+ rd_kafka_topic_partition_list_destroy(
+ rkgm->rkgm_assignment);
+ rkgm->rkgm_assignment =
+ rd_kafka_topic_partition_list_copy(partitions);
+ }
+}
+
+
+/**
+ * @brief KIP-54 and KIP-341/FIXME sticky assignor.
+ *
+ * This code is closely mimicking the AK Java AbstractStickyAssignor.assign().
+ */
+rd_kafka_resp_err_t
+rd_kafka_sticky_assignor_assign_cb(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas,
+ const char *member_id,
+ const rd_kafka_metadata_t *metadata,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ rd_kafka_assignor_topic_t **eligible_topics,
+ size_t eligible_topic_cnt,
+ char *errstr,
+ size_t errstr_size,
+ void *opaque) {
+ /* FIXME: Let the cgrp pass the actual eligible partition count */
+ size_t partition_cnt = member_cnt * 10; /* FIXME */
+
+ /* Map of subscriptions. This is \p member turned into a map. */
+ map_str_toppar_list_t subscriptions =
+ RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
+ NULL /* refs members.rkgm_member_id */,
+ NULL /* refs members.rkgm_subscription */);
+
+ /* Map member to current assignment */
+ map_str_toppar_list_t currentAssignment =
+ RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
+ NULL /* refs members.rkgm_member_id */,
+ rd_kafka_topic_partition_list_destroy_free);
+
+ /* Map partition to ConsumerGenerationPair */
+ map_toppar_cgpair_t prevAssignment =
+ RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free,
+ ConsumerGenerationPair_destroy);
+
+ /* Partition assignment movements between consumers */
+ PartitionMovements_t partitionMovements;
+
+ rd_bool_t isFreshAssignment;
+
+ /* Mapping of all topic partitions to all consumers that can be
+ * assigned to them.
+ * Value is an rd_list_t* with elements referencing the \p members
+ * \c rkgm_member_id->str. */
+ map_toppar_list_t partition2AllPotentialConsumers = RD_MAP_INITIALIZER(
+ partition_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free, rd_list_destroy_free);
+
+ /* Mapping of all consumers to all potential topic partitions that
+ * can be assigned to them. */
+ map_str_toppar_list_t consumer2AllPotentialPartitions =
+ RD_MAP_INITIALIZER(member_cnt, rd_map_str_cmp, rd_map_str_hash,
+ NULL,
+ rd_kafka_topic_partition_list_destroy_free);
+
+ /* Mapping of partition to current consumer. */
+ map_toppar_str_t currentPartitionConsumer =
+ RD_MAP_INITIALIZER(partition_cnt, rd_kafka_topic_partition_cmp,
+ rd_kafka_topic_partition_hash,
+ rd_kafka_topic_partition_destroy_free,
+ NULL /* refs members.rkgm_member_id->str */);
+
+ rd_kafka_topic_partition_list_t *sortedPartitions;
+ rd_kafka_topic_partition_list_t *unassignedPartitions;
+ rd_list_t sortedCurrentSubscriptions;
+
+ rd_bool_t revocationRequired = rd_false;
+
+ /* Iteration variables */
+ const char *consumer;
+ rd_kafka_topic_partition_list_t *partitions;
+ const rd_map_elem_t *elem;
+ int i;
+
+ /* Initialize PartitionMovements */
+ PartitionMovements_init(&partitionMovements, eligible_topic_cnt);
+
+ /* Prepopulate current and previous assignments */
+ prepopulateCurrentAssignments(
+ rk, members, member_cnt, &subscriptions, &currentAssignment,
+ &prevAssignment, &currentPartitionConsumer,
+ &consumer2AllPotentialPartitions, partition_cnt);
+
+ isFreshAssignment = RD_MAP_IS_EMPTY(&currentAssignment);
+
+ /* Populate partition2AllPotentialConsumers and
+ * consumer2AllPotentialPartitions maps by each eligible topic. */
+ for (i = 0; i < (int)eligible_topic_cnt; i++)
+ populatePotentialMaps(
+ eligible_topics[i], &partition2AllPotentialConsumers,
+ &consumer2AllPotentialPartitions, partition_cnt);
+
+
+ /* Sort valid partitions to minimize partition movements. */
+ sortedPartitions = sortPartitions(
+ rk, &currentAssignment, &prevAssignment, isFreshAssignment,
+ &partition2AllPotentialConsumers, &consumer2AllPotentialPartitions);
+
+
+ /* All partitions that need to be assigned (initially set to all
+ * partitions but adjusted in the following loop) */
+ unassignedPartitions =
+ rd_kafka_topic_partition_list_copy(sortedPartitions);
+
+ RD_MAP_FOREACH(consumer, partitions, &currentAssignment) {
+ if (!RD_MAP_GET(&subscriptions, consumer)) {
+ /* If a consumer that existed before
+ * (and had some partition assignments) is now removed,
+ * remove it from currentAssignment and its
+ * partitions from currentPartitionConsumer */
+
+ rd_kafka_dbg(rk, ASSIGNOR, "STICKY",
+ "Removing now non-existent consumer %s "
+ "with %d previously assigned partitions",
+ consumer, partitions->cnt);
+
+
+ for (i = 0; i < partitions->cnt; i++) {
+ const rd_kafka_topic_partition_t *partition =
+ &partitions->elems[i];
+ RD_MAP_DELETE(&currentPartitionConsumer,
+ partition);
+ }
+
+ /* FIXME: The delete could be optimized by passing the
+ * underlying elem_t. */
+ RD_MAP_DELETE(&currentAssignment, consumer);
+
+ } else {
+ /* Otherwise (the consumer still exists) */
+
+ for (i = 0; i < partitions->cnt; i++) {
+ const rd_kafka_topic_partition_t *partition =
+ &partitions->elems[i];
+ rd_bool_t remove_part = rd_false;
+
+ if (!RD_MAP_GET(
+ &partition2AllPotentialConsumers,
+ partition)) {
+ /* If this partition of this consumer
+ * no longer exists remove it from
+ * currentAssignment of the consumer */
+ remove_part = rd_true;
+ RD_MAP_DELETE(&currentPartitionConsumer,
+ partition);
+
+ } else if (!rd_kafka_topic_partition_list_find(
+ RD_MAP_GET(&subscriptions,
+ consumer),
+ partition->topic,
+ RD_KAFKA_PARTITION_UA)) {
+ /* If this partition cannot remain
+ * assigned to its current consumer
+ * because the consumer is no longer
+ * subscribed to its topic, remove it
+ * from the currentAssignment of the
+ * consumer. */
+ remove_part = rd_true;
+ revocationRequired = rd_true;
+ } else {
+ /* Otherwise, remove the topic partition
+ * from those that need to be assigned
+ * only if its current consumer is still
+ * subscribed to its topic (because it
+ * is already assigned and we would want
+ * to preserve that assignment as much
+ * as possible). */
+ rd_kafka_topic_partition_list_del(
+ unassignedPartitions,
+ partition->topic,
+ partition->partition);
+ }
+
+ if (remove_part) {
+ rd_kafka_topic_partition_list_del_by_idx(
+ partitions, i);
+ i--; /* Since the current element was
+ * removed we need the next for
+ * loop iteration to stay at the
+ * same index. */
+ }
+ }
+ }
+ }
+
+
+ /* At this point we have preserved all valid topic partition to consumer
+ * assignments and removed all invalid topic partitions and invalid
+ * consumers.
+ * Now we need to assign unassignedPartitions to consumers so that the
+ * topic partition assignments are as balanced as possible. */
+
+ /* An ascending sorted list of consumers based on how many topic
+ * partitions are already assigned to them. The list element is
+ * referencing the rd_map_elem_t* from the currentAssignment map. */
+ rd_list_init(&sortedCurrentSubscriptions,
+ (int)RD_MAP_CNT(&currentAssignment), NULL);
+
+ RD_MAP_FOREACH_ELEM(elem, &currentAssignment.rmap)
+ rd_list_add(&sortedCurrentSubscriptions, (void *)elem);
+
+ rd_list_sort(&sortedCurrentSubscriptions,
+ sort_by_map_elem_val_toppar_list_cnt);
+
+ /* Balance the available partitions across consumers */
+ balance(rk, &partitionMovements, &currentAssignment, &prevAssignment,
+ sortedPartitions, unassignedPartitions,
+ &sortedCurrentSubscriptions, &consumer2AllPotentialPartitions,
+ &partition2AllPotentialConsumers, &currentPartitionConsumer,
+ revocationRequired);
+
+ /* Transfer currentAssignment (now updated) to each member's
+ * assignment. */
+ assignToMembers(&currentAssignment, members, member_cnt);
+
+
+ rd_list_destroy(&sortedCurrentSubscriptions);
+
+ PartitionMovements_destroy(&partitionMovements);
+
+ rd_kafka_topic_partition_list_destroy(unassignedPartitions);
+ rd_kafka_topic_partition_list_destroy(sortedPartitions);
+
+ RD_MAP_DESTROY(&currentPartitionConsumer);
+ RD_MAP_DESTROY(&consumer2AllPotentialPartitions);
+ RD_MAP_DESTROY(&partition2AllPotentialConsumers);
+ RD_MAP_DESTROY(&prevAssignment);
+ RD_MAP_DESTROY(&currentAssignment);
+ RD_MAP_DESTROY(&subscriptions);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/** @brief FIXME docstring */
+static void rd_kafka_sticky_assignor_on_assignment_cb(
+ const rd_kafka_assignor_t *rkas,
+ void **assignor_state,
+ const rd_kafka_topic_partition_list_t *partitions,
+ const rd_kafkap_bytes_t *assignment_userdata,
+ const rd_kafka_consumer_group_metadata_t *rkcgm) {
+ rd_kafka_sticky_assignor_state_t *state =
+ (rd_kafka_sticky_assignor_state_t *)*assignor_state;
+
+ if (!state)
+ state = rd_calloc(1, sizeof(*state));
+ else
+ rd_kafka_topic_partition_list_destroy(state->prev_assignment);
+
+ state->prev_assignment = rd_kafka_topic_partition_list_copy(partitions);
+ state->generation_id = rkcgm->generation_id;
+
+ *assignor_state = state;
+}
+
+/** @brief FIXME docstring */
+static rd_kafkap_bytes_t *rd_kafka_sticky_assignor_get_metadata(
+ const rd_kafka_assignor_t *rkas,
+ void *assignor_state,
+ const rd_list_t *topics,
+ const rd_kafka_topic_partition_list_t *owned_partitions) {
+ rd_kafka_sticky_assignor_state_t *state;
+ rd_kafka_buf_t *rkbuf;
+ rd_kafkap_bytes_t *metadata;
+ rd_kafkap_bytes_t *kbytes;
+ size_t len;
+
+ /*
+ * UserData (Version: 1) => [previous_assignment] generation
+ * previous_assignment => topic [partitions]
+ * topic => STRING
+ * partitions => partition
+ * partition => INT32
+ * generation => INT32
+ *
+ * If there is no previous assignment, UserData is NULL.
+ */
+
+ if (!assignor_state) {
+ return rd_kafka_consumer_protocol_member_metadata_new(
+ topics, NULL, 0, owned_partitions);
+ }
+
+ state = (rd_kafka_sticky_assignor_state_t *)assignor_state;
+
+ rkbuf = rd_kafka_buf_new(1, 100);
+ rd_assert(state->prev_assignment != NULL);
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ rd_kafka_buf_write_topic_partitions(rkbuf, state->prev_assignment,
+ rd_false /*skip invalid offsets*/,
+ rd_false /*any offset*/, fields);
+ rd_kafka_buf_write_i32(rkbuf, state->generation_id);
+
+ /* Get binary buffer and allocate a new Kafka Bytes with a copy. */
+ rd_slice_init_full(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf);
+ len = rd_slice_remains(&rkbuf->rkbuf_reader);
+ kbytes = rd_kafkap_bytes_new(NULL, (int32_t)len);
+ rd_slice_read(&rkbuf->rkbuf_reader, (void *)kbytes->data, len);
+ rd_kafka_buf_destroy(rkbuf);
+
+ metadata = rd_kafka_consumer_protocol_member_metadata_new(
+ topics, kbytes->data, kbytes->len, owned_partitions);
+
+ rd_kafkap_bytes_destroy(kbytes);
+
+ return metadata;
+}
+
+
+/**
+ * @brief Destroy assignor state
+ */
+static void rd_kafka_sticky_assignor_state_destroy(void *assignor_state) {
+ rd_kafka_sticky_assignor_state_t *state =
+ (rd_kafka_sticky_assignor_state_t *)assignor_state;
+
+ rd_assert(assignor_state);
+
+ rd_kafka_topic_partition_list_destroy(state->prev_assignment);
+ rd_free(state);
+}
+
+
+
+/**
+ * @name Sticky assignor unit tests
+ *
+ *
+ * These are based on AbstractStickyAssignorTest.java
+ *
+ *
+ *
+ */
+
+
+
+/**
+ * @brief Set a member's owned partitions based on its assignment.
+ *
+ * For use between assignor_run(). This is mimicing a consumer receiving
+ * its new assignment and including it in the next rebalance as its
+ * owned-partitions.
+ */
+static void ut_set_owned(rd_kafka_group_member_t *rkgm) {
+ if (rkgm->rkgm_owned)
+ rd_kafka_topic_partition_list_destroy(rkgm->rkgm_owned);
+
+ rkgm->rkgm_owned =
+ rd_kafka_topic_partition_list_copy(rkgm->rkgm_assignment);
+}
+
+
+/**
+ * @brief Verify assignment validity and balance.
+ *
+ * @remark Also updates the members owned partitions to the assignment.
+ */
+
+static int verifyValidityAndBalance0(const char *func,
+ int line,
+ rd_kafka_group_member_t *members,
+ size_t member_cnt,
+ const rd_kafka_metadata_t *metadata) {
+ int fails = 0;
+ int i;
+ rd_bool_t verbose = rd_false; /* Enable for troubleshooting */
+
+ RD_UT_SAY("%s:%d: verifying assignment for %d member(s):", func, line,
+ (int)member_cnt);
+
+ for (i = 0; i < (int)member_cnt; i++) {
+ const char *consumer = members[i].rkgm_member_id->str;
+ const rd_kafka_topic_partition_list_t *partitions =
+ members[i].rkgm_assignment;
+ int p, j;
+
+ if (verbose)
+ RD_UT_SAY(
+ "%s:%d: "
+ "consumer \"%s\", %d subscribed topic(s), "
+ "%d assigned partition(s):",
+ func, line, consumer,
+ members[i].rkgm_subscription->cnt, partitions->cnt);
+
+ for (p = 0; p < partitions->cnt; p++) {
+ const rd_kafka_topic_partition_t *partition =
+ &partitions->elems[p];
+
+ if (verbose)
+ RD_UT_SAY("%s:%d: %s [%" PRId32 "]", func,
+ line, partition->topic,
+ partition->partition);
+
+ if (!rd_kafka_topic_partition_list_find(
+ members[i].rkgm_subscription, partition->topic,
+ RD_KAFKA_PARTITION_UA)) {
+ RD_UT_WARN("%s [%" PRId32
+ "] is assigned to "
+ "%s but it is not subscribed to "
+ "that topic",
+ partition->topic,
+ partition->partition, consumer);
+ fails++;
+ }
+ }
+
+ /* Update the member's owned partitions to match
+ * the assignment. */
+ ut_set_owned(&members[i]);
+
+ if (i == (int)member_cnt - 1)
+ continue;
+
+ for (j = i + 1; j < (int)member_cnt; j++) {
+ const char *otherConsumer =
+ members[j].rkgm_member_id->str;
+ const rd_kafka_topic_partition_list_t *otherPartitions =
+ members[j].rkgm_assignment;
+ rd_bool_t balanced =
+ abs(partitions->cnt - otherPartitions->cnt) <= 1;
+
+ for (p = 0; p < partitions->cnt; p++) {
+ const rd_kafka_topic_partition_t *partition =
+ &partitions->elems[p];
+
+ if (rd_kafka_topic_partition_list_find(
+ otherPartitions, partition->topic,
+ partition->partition)) {
+ RD_UT_WARN(
+ "Consumer %s and %s are both "
+ "assigned %s [%" PRId32 "]",
+ consumer, otherConsumer,
+ partition->topic,
+ partition->partition);
+ fails++;
+ }
+
+
+ /* If assignment is imbalanced and this topic
+ * is also subscribed by the other consumer
+ * it means the assignment strategy failed to
+ * properly balance the partitions. */
+ if (!balanced &&
+ rd_kafka_topic_partition_list_find_topic(
+ otherPartitions, partition->topic)) {
+ RD_UT_WARN(
+ "Some %s partition(s) can be "
+ "moved from "
+ "%s (%d partition(s)) to "
+ "%s (%d partition(s)) to "
+ "achieve a better balance",
+ partition->topic, consumer,
+ partitions->cnt, otherConsumer,
+ otherPartitions->cnt);
+ fails++;
+ }
+ }
+ }
+ }
+
+ RD_UT_ASSERT(!fails, "%s:%d: See %d previous errors", func, line,
+ fails);
+
+ return 0;
+}
+
+
+#define verifyValidityAndBalance(members, member_cnt, metadata) \
+ do { \
+ if (verifyValidityAndBalance0(__FUNCTION__, __LINE__, members, \
+ member_cnt, metadata)) \
+ return 1; \
+ } while (0)
+
+
+/**
+ * @brief Checks that all assigned partitions are fully balanced.
+ *
+ * Only works for symmetrical subscriptions.
+ */
+static int isFullyBalanced0(const char *function,
+ int line,
+ const rd_kafka_group_member_t *members,
+ size_t member_cnt) {
+ int min_assignment = INT_MAX;
+ int max_assignment = -1;
+ size_t i;
+
+ for (i = 0; i < member_cnt; i++) {
+ int size = members[i].rkgm_assignment->cnt;
+ if (size < min_assignment)
+ min_assignment = size;
+ if (size > max_assignment)
+ max_assignment = size;
+ }
+
+ RD_UT_ASSERT(max_assignment - min_assignment <= 1,
+ "%s:%d: Assignment not balanced: min %d, max %d", function,
+ line, min_assignment, max_assignment);
+
+ return 0;
+}
+
+#define isFullyBalanced(members, member_cnt) \
+ do { \
+ if (isFullyBalanced0(__FUNCTION__, __LINE__, members, \
+ member_cnt)) \
+ return 1; \
+ } while (0)
+
+
+static void
+ut_print_toppar_list(const rd_kafka_topic_partition_list_t *partitions) {
+ int i;
+
+ for (i = 0; i < partitions->cnt; i++)
+ RD_UT_SAY(" %s [%" PRId32 "]", partitions->elems[i].topic,
+ partitions->elems[i].partition);
+}
+
+
+
+/**
+ * @brief Verify that member's assignment matches the expected partitions.
+ *
+ * The va-list is a NULL-terminated list of (const char *topic, int partition)
+ * tuples.
+ *
+ * @returns 0 on success, else raises a unittest error and returns 1.
+ */
+static int verifyAssignment0(const char *function,
+ int line,
+ rd_kafka_group_member_t *rkgm,
+ ...) {
+ va_list ap;
+ int cnt = 0;
+ const char *topic;
+ int fails = 0;
+
+ va_start(ap, rkgm);
+ while ((topic = va_arg(ap, const char *))) {
+ int partition = va_arg(ap, int);
+ cnt++;
+
+ if (!rd_kafka_topic_partition_list_find(rkgm->rkgm_assignment,
+ topic, partition)) {
+ RD_UT_WARN(
+ "%s:%d: Expected %s [%d] not found in %s's "
+ "assignment (%d partition(s))",
+ function, line, topic, partition,
+ rkgm->rkgm_member_id->str,
+ rkgm->rkgm_assignment->cnt);
+ fails++;
+ }
+ }
+ va_end(ap);
+
+ if (cnt != rkgm->rkgm_assignment->cnt) {
+ RD_UT_WARN(
+ "%s:%d: "
+ "Expected %d assigned partition(s) for %s, not %d",
+ function, line, cnt, rkgm->rkgm_member_id->str,
+ rkgm->rkgm_assignment->cnt);
+ fails++;
+ }
+
+ if (fails)
+ ut_print_toppar_list(rkgm->rkgm_assignment);
+
+ RD_UT_ASSERT(!fails, "%s:%d: See previous errors", function, line);
+
+ return 0;
+}
+
+#define verifyAssignment(rkgm, ...) \
+ do { \
+ if (verifyAssignment0(__FUNCTION__, __LINE__, rkgm, \
+ __VA_ARGS__)) \
+ return 1; \
+ } while (0)
+
+
+
+/**
+ * @brief Initialize group member struct for testing.
+ *
+ * va-args is a NULL-terminated list of (const char *) topics.
+ *
+ * Use rd_kafka_group_member_clear() to free fields.
+ */
+static void
+ut_init_member(rd_kafka_group_member_t *rkgm, const char *member_id, ...) {
+ va_list ap;
+ const char *topic;
+
+ memset(rkgm, 0, sizeof(*rkgm));
+
+ rkgm->rkgm_member_id = rd_kafkap_str_new(member_id, -1);
+ rkgm->rkgm_group_instance_id = rd_kafkap_str_new(member_id, -1);
+ rd_list_init(&rkgm->rkgm_eligible, 0, NULL);
+
+ rkgm->rkgm_subscription = rd_kafka_topic_partition_list_new(4);
+
+ va_start(ap, member_id);
+ while ((topic = va_arg(ap, const char *)))
+ rd_kafka_topic_partition_list_add(rkgm->rkgm_subscription,
+ topic, RD_KAFKA_PARTITION_UA);
+ va_end(ap);
+
+ rkgm->rkgm_assignment =
+ rd_kafka_topic_partition_list_new(rkgm->rkgm_subscription->size);
+}
+
+
+
+static int ut_testOneConsumerNoTopic(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[1];
+
+ metadata = rd_kafka_metadata_new_topic_mock(NULL, 0);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], NULL);
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testOneConsumerNonexistentTopic(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[1];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 0);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], NULL);
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+
+static int ut_testOneConsumerOneTopic(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[1];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 3,
+ "expected assignment of 3 partitions, got %d partition(s)",
+ members[0].rkgm_assignment->cnt);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
+ NULL);
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testOnlyAssignsPartitionsFromSubscribedTopics(
+ rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[1];
+
+ metadata =
+ rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
+ NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testOneConsumerMultipleTopics(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[1];
+
+ metadata =
+ rd_kafka_metadata_new_topic_mockv(2, "topic1", 1, "topic2", 2);
+ ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic2", 0, "topic2", 1,
+ NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+static int
+ut_testTwoConsumersOneTopicOnePartition(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[2];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 1);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, NULL);
+ verifyAssignment(&members[1], NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_group_member_clear(&members[1]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int
+ut_testTwoConsumersOneTopicTwoPartitions(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[2];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 2);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, NULL);
+ verifyAssignment(&members[1], "topic1", 1, NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_group_member_clear(&members[1]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testMultipleConsumersMixedTopicSubscriptions(
+ rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[3];
+
+ metadata =
+ rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 2);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL);
+ ut_init_member(&members[2], "consumer3", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL);
+ verifyAssignment(&members[1], "topic2", 0, "topic2", 1, NULL);
+ verifyAssignment(&members[2], "topic1", 1, NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_group_member_clear(&members[1]);
+ rd_kafka_group_member_clear(&members[2]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int
+ut_testTwoConsumersTwoTopicsSixPartitions(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[2];
+
+ metadata =
+ rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3);
+ ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
+ NULL);
+ verifyAssignment(&members[1], "topic1", 1, "topic2", 0, "topic2", 2,
+ NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_group_member_clear(&members[1]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testAddRemoveConsumerOneTopic(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[2];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1,
+ errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
+ NULL);
+
+ verifyValidityAndBalance(members, 1, metadata);
+ isFullyBalanced(members, 1);
+
+ /* Add consumer2 */
+ ut_init_member(&members[1], "consumer2", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 1, "topic1", 2, NULL);
+ verifyAssignment(&members[1], "topic1", 0, NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+ // FIXME: isSticky();
+
+
+ /* Remove consumer1 */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 1,
+ errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2,
+ NULL);
+
+ verifyValidityAndBalance(&members[1], 1, metadata);
+ isFullyBalanced(&members[1], 1);
+ // FIXME: isSticky();
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_group_member_clear(&members[1]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+/**
+ * This unit test performs sticky assignment for a scenario that round robin
+ * assignor handles poorly.
+ * Topics (partitions per topic):
+ * - topic1 (2), topic2 (1), topic3 (2), topic4 (1), topic5 (2)
+ * Subscriptions:
+ * - consumer1: topic1, topic2, topic3, topic4, topic5
+ * - consumer2: topic1, topic3, topic5
+ * - consumer3: topic1, topic3, topic5
+ * - consumer4: topic1, topic2, topic3, topic4, topic5
+ * Round Robin Assignment Result:
+ * - consumer1: topic1-0, topic3-0, topic5-0
+ * - consumer2: topic1-1, topic3-1, topic5-1
+ * - consumer3:
+ * - consumer4: topic2-0, topic4-0
+ * Sticky Assignment Result:
+ * - consumer1: topic2-0, topic3-0
+ * - consumer2: topic1-0, topic3-1
+ * - consumer3: topic1-1, topic5-0
+ * - consumer4: topic4-0, topic5-1
+ */
+static int
+ut_testPoorRoundRobinAssignmentScenario(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[4];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(
+ 5, "topic1", 2, "topic2", 1, "topic3", 2, "topic4", 1, "topic5", 2);
+
+ ut_init_member(&members[0], "consumer1", "topic1", "topic2", "topic3",
+ "topic4", "topic5", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", "topic3", "topic5",
+ NULL);
+ ut_init_member(&members[2], "consumer3", "topic1", "topic3", "topic5",
+ NULL);
+ ut_init_member(&members[3], "consumer4", "topic1", "topic2", "topic3",
+ "topic4", "topic5", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic2", 0, "topic3", 0, NULL);
+ verifyAssignment(&members[1], "topic1", 0, "topic3", 1, NULL);
+ verifyAssignment(&members[2], "topic1", 1, "topic5", 0, NULL);
+ verifyAssignment(&members[3], "topic4", 0, "topic5", 1, NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_group_member_clear(&members[1]);
+ rd_kafka_group_member_clear(&members[2]);
+ rd_kafka_group_member_clear(&members[3]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+
+static int ut_testAddRemoveTopicTwoConsumers(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[2];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
+ ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", "topic2", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, NULL);
+ verifyAssignment(&members[1], "topic1", 1, NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ /*
+ * Add topic2
+ */
+ RD_UT_SAY("Adding topic2");
+ rd_kafka_metadata_destroy(metadata);
+ metadata =
+ rd_kafka_metadata_new_topic_mockv(2, "topic1", 3, "topic2", 3);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 2, "topic2", 1,
+ NULL);
+ verifyAssignment(&members[1], "topic1", 1, "topic2", 2, "topic2", 0,
+ NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+ // FIXME: isSticky();
+
+
+ /*
+ * Remove topic1
+ */
+ RD_UT_SAY("Removing topic1");
+ rd_kafka_metadata_destroy(metadata);
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic2", 3);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyAssignment(&members[0], "topic2", 1, NULL);
+ verifyAssignment(&members[1], "topic2", 0, "topic2", 2, NULL);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+ // FIXME: isSticky();
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_group_member_clear(&members[1]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int
+ut_testReassignmentAfterOneConsumerLeaves(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[19];
+ int member_cnt = RD_ARRAYSIZE(members);
+ rd_kafka_metadata_topic_t mt[19];
+ int topic_cnt = RD_ARRAYSIZE(mt);
+ int i;
+
+ for (i = 0; i < topic_cnt; i++) {
+ char topic[10];
+ rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
+ rd_strdupa(&mt[i].topic, topic);
+ mt[i].partition_cnt = i + 1;
+ }
+
+ metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt);
+
+
+ for (i = 1; i <= member_cnt; i++) {
+ char name[20];
+ rd_kafka_topic_partition_list_t *subscription =
+ rd_kafka_topic_partition_list_new(i);
+ int j;
+ for (j = 1; j <= i; j++) {
+ char topic[16];
+ rd_snprintf(topic, sizeof(topic), "topic%d", j);
+ rd_kafka_topic_partition_list_add(
+ subscription, topic, RD_KAFKA_PARTITION_UA);
+ }
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
+ ut_init_member(&members[i - 1], name, NULL);
+ rd_kafka_topic_partition_list_destroy(
+ members[i - 1].rkgm_subscription);
+ members[i - 1].rkgm_subscription = subscription;
+ }
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+
+
+ /*
+ * Remove consumer10.
+ */
+ rd_kafka_group_member_clear(&members[9]);
+ memmove(&members[9], &members[10],
+ sizeof(*members) * (member_cnt - 10));
+ member_cnt--;
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+ // FIXME: isSticky();
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int
+ut_testReassignmentAfterOneConsumerAdded(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[9];
+ int member_cnt = RD_ARRAYSIZE(members);
+ int i;
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 20);
+
+ for (i = 1; i <= member_cnt; i++) {
+ char name[20];
+ rd_kafka_topic_partition_list_t *subscription =
+ rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(subscription, "topic1",
+ RD_KAFKA_PARTITION_UA);
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
+ ut_init_member(&members[i - 1], name, NULL);
+ rd_kafka_topic_partition_list_destroy(
+ members[i - 1].rkgm_subscription);
+ members[i - 1].rkgm_subscription = subscription;
+ }
+
+ member_cnt--; /* Skip one consumer */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+
+
+ /*
+ * Add consumer.
+ */
+ member_cnt++;
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+ // FIXME: isSticky();
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testSameSubscriptions(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[9];
+ int member_cnt = RD_ARRAYSIZE(members);
+ rd_kafka_metadata_topic_t mt[15];
+ int topic_cnt = RD_ARRAYSIZE(mt);
+ rd_kafka_topic_partition_list_t *subscription =
+ rd_kafka_topic_partition_list_new(topic_cnt);
+ int i;
+
+ for (i = 0; i < topic_cnt; i++) {
+ char topic[10];
+ rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
+ rd_strdupa(&mt[i].topic, topic);
+ mt[i].partition_cnt = i + 1;
+ rd_kafka_topic_partition_list_add(subscription, topic,
+ RD_KAFKA_PARTITION_UA);
+ }
+
+ metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt);
+
+ for (i = 1; i <= member_cnt; i++) {
+ char name[16];
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
+ ut_init_member(&members[i - 1], name, NULL);
+ rd_kafka_topic_partition_list_destroy(
+ members[i - 1].rkgm_subscription);
+ members[i - 1].rkgm_subscription =
+ rd_kafka_topic_partition_list_copy(subscription);
+ }
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+
+ /*
+ * Remove consumer5
+ */
+ rd_kafka_group_member_clear(&members[5]);
+ memmove(&members[5], &members[6], sizeof(*members) * (member_cnt - 6));
+ member_cnt--;
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+ // FIXME: isSticky();
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+ rd_kafka_topic_partition_list_destroy(subscription);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testLargeAssignmentWithMultipleConsumersLeaving(
+ rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[200];
+ int member_cnt = RD_ARRAYSIZE(members);
+ rd_kafka_metadata_topic_t mt[40];
+ int topic_cnt = RD_ARRAYSIZE(mt);
+ int i;
+
+ for (i = 0; i < topic_cnt; i++) {
+ char topic[10];
+ rd_snprintf(topic, sizeof(topic), "topic%d", i + 1);
+ rd_strdupa(&mt[i].topic, topic);
+ mt[i].partition_cnt = i + 1;
+ }
+
+ metadata = rd_kafka_metadata_new_topic_mock(mt, topic_cnt);
+
+ for (i = 0; i < member_cnt; i++) {
+ /* Java tests use a random set, this is more deterministic. */
+ int sub_cnt = ((i + 1) * 17) % topic_cnt;
+ rd_kafka_topic_partition_list_t *subscription =
+ rd_kafka_topic_partition_list_new(sub_cnt);
+ char name[16];
+ int j;
+
+ /* Subscribe to a subset of topics */
+ for (j = 0; j < sub_cnt; j++)
+ rd_kafka_topic_partition_list_add(
+ subscription, metadata->topics[j].topic,
+ RD_KAFKA_PARTITION_UA);
+
+ rd_snprintf(name, sizeof(name), "consumer%d", i + 1);
+ ut_init_member(&members[i], name, NULL);
+ rd_kafka_topic_partition_list_destroy(
+ members[i].rkgm_subscription);
+ members[i].rkgm_subscription = subscription;
+ }
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+
+ /*
+ * Remove every 4th consumer (~50)
+ */
+ for (i = member_cnt - 1; i >= 0; i -= 4) {
+ rd_kafka_group_member_clear(&members[i]);
+ memmove(&members[i], &members[i + 1],
+ sizeof(*members) * (member_cnt - (i + 1)));
+ member_cnt--;
+ }
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+ // FIXME: isSticky();
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testNewSubscription(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[3];
+ int member_cnt = RD_ARRAYSIZE(members);
+ int i;
+
+ metadata = rd_kafka_metadata_new_topic_mockv(
+ 5, "topic1", 1, "topic2", 2, "topic3", 3, "topic4", 4, "topic5", 5);
+
+ for (i = 0; i < member_cnt; i++) {
+ char name[16];
+ int j;
+
+ rd_snprintf(name, sizeof(name), "consumer%d", i);
+ ut_init_member(&members[i], name, NULL);
+
+ rd_kafka_topic_partition_list_destroy(
+ members[i].rkgm_subscription);
+ members[i].rkgm_subscription =
+ rd_kafka_topic_partition_list_new(5);
+
+ for (j = metadata->topic_cnt - (1 + i); j >= 0; j--)
+ rd_kafka_topic_partition_list_add(
+ members[i].rkgm_subscription,
+ metadata->topics[j].topic, RD_KAFKA_PARTITION_UA);
+ }
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ /*
+ * Add topic1 to consumer1's subscription
+ */
+ RD_UT_SAY("Adding topic1 to consumer1");
+ rd_kafka_topic_partition_list_add(members[0].rkgm_subscription,
+ "topic1", RD_KAFKA_PARTITION_UA);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+ // FIXME: isSticky();
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testMoveExistingAssignments(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[4];
+ int member_cnt = RD_ARRAYSIZE(members);
+ rd_kafka_topic_partition_list_t *assignments[4] = RD_ZERO_INIT;
+ int i;
+ int fails = 0;
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
+
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", NULL);
+ ut_init_member(&members[2], "consumer3", "topic1", NULL);
+ ut_init_member(&members[3], "consumer4", "topic1", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, member_cnt, metadata);
+
+ for (i = 0; i < member_cnt; i++) {
+ if (members[i].rkgm_assignment->cnt > 1) {
+ RD_UT_WARN("%s assigned %d partitions, expected <= 1",
+ members[i].rkgm_member_id->str,
+ members[i].rkgm_assignment->cnt);
+ fails++;
+ } else if (members[i].rkgm_assignment->cnt == 1) {
+ assignments[i] = rd_kafka_topic_partition_list_copy(
+ members[i].rkgm_assignment);
+ }
+ }
+
+ /*
+ * Remove potential group leader consumer1
+ */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1],
+ member_cnt - 1, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(&members[1], member_cnt - 1, metadata);
+ // FIXME: isSticky()
+
+ for (i = 1; i < member_cnt; i++) {
+ if (members[i].rkgm_assignment->cnt != 1) {
+ RD_UT_WARN("%s assigned %d partitions, expected 1",
+ members[i].rkgm_member_id->str,
+ members[i].rkgm_assignment->cnt);
+ fails++;
+ } else if (assignments[i] &&
+ !rd_kafka_topic_partition_list_find(
+ assignments[i],
+ members[i].rkgm_assignment->elems[0].topic,
+ members[i]
+ .rkgm_assignment->elems[0]
+ .partition)) {
+ RD_UT_WARN(
+ "Stickiness was not honored for %s, "
+ "%s [%" PRId32 "] not in previous assignment",
+ members[i].rkgm_member_id->str,
+ members[i].rkgm_assignment->elems[0].topic,
+ members[i].rkgm_assignment->elems[0].partition);
+ fails++;
+ }
+ }
+
+ RD_UT_ASSERT(!fails, "See previous errors");
+
+
+ for (i = 0; i < member_cnt; i++) {
+ rd_kafka_group_member_clear(&members[i]);
+ if (assignments[i])
+ rd_kafka_topic_partition_list_destroy(assignments[i]);
+ }
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+
+static int ut_testStickiness(rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[3];
+ int member_cnt = RD_ARRAYSIZE(members);
+ int i;
+
+ metadata = rd_kafka_metadata_new_topic_mockv(
+ 6, "topic1", 1, "topic2", 1, "topic3", 1, "topic4", 1, "topic5", 1,
+ "topic6", 1);
+
+ ut_init_member(&members[0], "consumer1", "topic1", "topic2", NULL);
+ rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment);
+ members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
+ 0);
+
+ ut_init_member(&members[1], "consumer2", "topic1", "topic2", "topic3",
+ "topic4", NULL);
+ rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment);
+ members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic2",
+ 0);
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic3",
+ 0);
+
+ ut_init_member(&members[2], "consumer3", "topic4", "topic5", "topic6",
+ NULL);
+ rd_kafka_topic_partition_list_destroy(members[2].rkgm_assignment);
+ members[2].rkgm_assignment = rd_kafka_topic_partition_list_new(3);
+ rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic4",
+ 0);
+ rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic5",
+ 0);
+ rd_kafka_topic_partition_list_add(members[2].rkgm_assignment, "topic6",
+ 0);
+
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+/**
+ * @brief Verify stickiness across three rebalances.
+ */
+static int ut_testStickiness2(rd_kafka_t *rk, const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[3];
+ int member_cnt = RD_ARRAYSIZE(members);
+ int i;
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 6);
+
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+ ut_init_member(&members[1], "consumer2", "topic1", NULL);
+ ut_init_member(&members[2], "consumer3", "topic1", NULL);
+
+ /* Just consumer1 */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 1,
+ errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, 1, metadata);
+ isFullyBalanced(members, 1);
+ verifyAssignment(&members[0], "topic1", 0, "topic1", 1, "topic1", 2,
+ "topic1", 3, "topic1", 4, "topic1", 5, NULL);
+
+ /* consumer1 and consumer2 */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members, 2,
+ errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, 2, metadata);
+ isFullyBalanced(members, 2);
+ verifyAssignment(&members[0], "topic1", 3, "topic1", 4, "topic1", 5,
+ NULL);
+ verifyAssignment(&members[1], "topic1", 0, "topic1", 1, "topic1", 2,
+ NULL);
+
+ /* Run it twice, should be stable. */
+ for (i = 0; i < 2; i++) {
+ /* consumer1, consumer2, and consumer3 */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata,
+ members, 3, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, 3, metadata);
+ isFullyBalanced(members, 3);
+ verifyAssignment(&members[0], "topic1", 4, "topic1", 5, NULL);
+ verifyAssignment(&members[1], "topic1", 1, "topic1", 2, NULL);
+ verifyAssignment(&members[2], "topic1", 0, "topic1", 3, NULL);
+ }
+
+ /* Remove consumer1 */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[1], 2,
+ errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(&members[1], 2, metadata);
+ isFullyBalanced(&members[1], 2);
+ verifyAssignment(&members[1], "topic1", 1, "topic1", 2, "topic1", 5,
+ NULL);
+ verifyAssignment(&members[2], "topic1", 0, "topic1", 3, "topic1", 4,
+ NULL);
+
+ /* Remove consumer2 */
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, &members[2], 1,
+ errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(&members[2], 1, metadata);
+ isFullyBalanced(&members[2], 1);
+ verifyAssignment(&members[2], "topic1", 0, "topic1", 1, "topic1", 2,
+ "topic1", 3, "topic1", 4, "topic1", 5, NULL);
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int
+ut_testAssignmentUpdatedForDeletedTopic(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[1];
+
+ metadata =
+ rd_kafka_metadata_new_topic_mockv(2, "topic1", 1, "topic3", 100);
+ ut_init_member(&members[0], "consumer1", "topic1", "topic2", "topic3",
+ NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 + 100,
+ "Expected %d assigned partitions, not %d", 1 + 100,
+ members[0].rkgm_assignment->cnt);
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted(
+ rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[1];
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 3);
+
+ ut_init_member(&members[0], "consumer1", "topic", NULL);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ /*
+ * Remove topic
+ */
+ rd_kafka_metadata_destroy(metadata);
+ metadata = rd_kafka_metadata_new_topic_mock(NULL, 0);
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ RD_ARRAYSIZE(members), errstr,
+ sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+
+ rd_kafka_group_member_clear(&members[0]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+
+static int
+ut_testConflictingPreviousAssignments(rd_kafka_t *rk,
+ const rd_kafka_assignor_t *rkas) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_metadata_t *metadata;
+ rd_kafka_group_member_t members[2];
+ int member_cnt = RD_ARRAYSIZE(members);
+ int i;
+
+ // FIXME: removed from Java test suite, and fails for us, why, why?
+ RD_UT_PASS();
+
+ metadata = rd_kafka_metadata_new_topic_mockv(1, "topic1", 2);
+
+ /* Both consumer and consumer2 have both partitions assigned */
+ ut_init_member(&members[0], "consumer1", "topic1", NULL);
+ rd_kafka_topic_partition_list_destroy(members[0].rkgm_assignment);
+ members[0].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
+ rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
+ 0);
+ rd_kafka_topic_partition_list_add(members[0].rkgm_assignment, "topic1",
+ 1);
+
+ ut_init_member(&members[1], "consumer2", "topic1", NULL);
+ rd_kafka_topic_partition_list_destroy(members[1].rkgm_assignment);
+ members[1].rkgm_assignment = rd_kafka_topic_partition_list_new(2);
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
+ 0);
+ rd_kafka_topic_partition_list_add(members[1].rkgm_assignment, "topic1",
+ 1);
+
+
+ err = rd_kafka_assignor_run(rk->rk_cgrp, rkas, metadata, members,
+ member_cnt, errstr, sizeof(errstr));
+ RD_UT_ASSERT(!err, "assignor run failed: %s", errstr);
+
+ RD_UT_ASSERT(members[0].rkgm_assignment->cnt == 1 &&
+ members[1].rkgm_assignment->cnt == 1,
+ "Expected consumers to have 1 partition each, "
+ "not %d and %d",
+ members[0].rkgm_assignment->cnt,
+ members[1].rkgm_assignment->cnt);
+ RD_UT_ASSERT(members[0].rkgm_assignment->elems[0].partition !=
+ members[1].rkgm_assignment->elems[0].partition,
+ "Expected consumers to have different partitions "
+ "assigned, not same partition %" PRId32,
+ members[0].rkgm_assignment->elems[0].partition);
+
+ verifyValidityAndBalance(members, RD_ARRAYSIZE(members), metadata);
+ isFullyBalanced(members, RD_ARRAYSIZE(members));
+ /* FIXME: isSticky() */
+
+ for (i = 0; i < member_cnt; i++)
+ rd_kafka_group_member_clear(&members[i]);
+ rd_kafka_metadata_destroy(metadata);
+
+ RD_UT_PASS();
+}
+
+/* testReassignmentWithRandomSubscriptionsAndChanges is not ported
+ * from Java since random tests don't provide meaningful test coverage. */
+
+
+static int rd_kafka_sticky_assignor_unittest(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ int fails = 0;
+ char errstr[256];
+ rd_kafka_assignor_t *rkas;
+ static int (*tests[])(rd_kafka_t *, const rd_kafka_assignor_t *) = {
+ ut_testOneConsumerNoTopic,
+ ut_testOneConsumerNonexistentTopic,
+ ut_testOneConsumerOneTopic,
+ ut_testOnlyAssignsPartitionsFromSubscribedTopics,
+ ut_testOneConsumerMultipleTopics,
+ ut_testTwoConsumersOneTopicOnePartition,
+ ut_testTwoConsumersOneTopicTwoPartitions,
+ ut_testMultipleConsumersMixedTopicSubscriptions,
+ ut_testTwoConsumersTwoTopicsSixPartitions,
+ ut_testAddRemoveConsumerOneTopic,
+ ut_testPoorRoundRobinAssignmentScenario,
+ ut_testAddRemoveTopicTwoConsumers,
+ ut_testReassignmentAfterOneConsumerLeaves,
+ ut_testReassignmentAfterOneConsumerAdded,
+ ut_testSameSubscriptions,
+ ut_testLargeAssignmentWithMultipleConsumersLeaving,
+ ut_testNewSubscription,
+ ut_testMoveExistingAssignments,
+ ut_testStickiness,
+ ut_testStickiness2,
+ ut_testAssignmentUpdatedForDeletedTopic,
+ ut_testNoExceptionThrownWhenOnlySubscribedTopicDeleted,
+ ut_testConflictingPreviousAssignments,
+ NULL,
+ };
+ int i;
+
+
+ conf = rd_kafka_conf_new();
+ if (rd_kafka_conf_set(conf, "group.id", "test", errstr,
+ sizeof(errstr)) ||
+ rd_kafka_conf_set(conf, "partition.assignment.strategy",
+ "cooperative-sticky", errstr, sizeof(errstr)))
+ RD_UT_FAIL("sticky assignor conf failed: %s", errstr);
+
+ rd_kafka_conf_set(conf, "debug", rd_getenv("TEST_DEBUG", NULL), NULL,
+ 0);
+
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ RD_UT_ASSERT(rk, "sticky assignor client instantiation failed: %s",
+ errstr);
+
+ rkas = rd_kafka_assignor_find(rk, "cooperative-sticky");
+ RD_UT_ASSERT(rkas, "sticky assignor not found");
+
+ for (i = 0; tests[i]; i++) {
+ rd_ts_t ts = rd_clock();
+ int r;
+
+ RD_UT_SAY("[ Test #%d ]", i);
+ r = tests[i](rk, rkas);
+ RD_UT_SAY("[ Test #%d ran for %.3fms ]", i,
+ (double)(rd_clock() - ts) / 1000.0);
+
+ RD_UT_ASSERT(!r, "^ failed");
+
+ fails += r;
+ }
+
+ rd_kafka_destroy(rk);
+
+ return fails;
+}
+
+
+/**
+ * @brief Initialzie and add sticky assignor.
+ */
+rd_kafka_resp_err_t rd_kafka_sticky_assignor_init(rd_kafka_t *rk) {
+ return rd_kafka_assignor_add(rk, "consumer", "cooperative-sticky",
+ RD_KAFKA_REBALANCE_PROTOCOL_COOPERATIVE,
+ rd_kafka_sticky_assignor_assign_cb,
+ rd_kafka_sticky_assignor_get_metadata,
+ rd_kafka_sticky_assignor_on_assignment_cb,
+ rd_kafka_sticky_assignor_state_destroy,
+ rd_kafka_sticky_assignor_unittest, NULL);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c
new file mode 100644
index 000000000..080589358
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_subscription.c
@@ -0,0 +1,278 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * This is the high level consumer API which is mutually exclusive
+ * with the old legacy simple consumer.
+ * Only one of these interfaces may be used on a given rd_kafka_t handle.
+ */
+
+#include "rdkafka_int.h"
+
+
+rd_kafka_resp_err_t rd_kafka_unsubscribe(rd_kafka_t *rk) {
+ rd_kafka_cgrp_t *rkcg;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_SUBSCRIBE));
+}
+
+
+/** @returns 1 if the topic is invalid (bad regex, empty), else 0 if valid. */
+static size_t _invalid_topic_cb(const rd_kafka_topic_partition_t *rktpar,
+ void *opaque) {
+ rd_regex_t *re;
+ char errstr[1];
+
+ if (!*rktpar->topic)
+ return 1;
+
+ if (*rktpar->topic != '^')
+ return 0;
+
+ if (!(re = rd_regex_comp(rktpar->topic, errstr, sizeof(errstr))))
+ return 1;
+
+ rd_regex_destroy(re);
+
+ return 0;
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_subscribe(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *topics) {
+
+ rd_kafka_op_t *rko;
+ rd_kafka_cgrp_t *rkcg;
+ rd_kafka_topic_partition_list_t *topics_cpy;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ /* Validate topics */
+ if (topics->cnt == 0 || rd_kafka_topic_partition_list_sum(
+ topics, _invalid_topic_cb, NULL) > 0)
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ topics_cpy = rd_kafka_topic_partition_list_copy(topics);
+ if (rd_kafka_topic_partition_list_has_duplicates(
+ topics_cpy, rd_true /*ignore partition field*/)) {
+ rd_kafka_topic_partition_list_destroy(topics_cpy);
+ return RD_KAFKA_RESP_ERR__INVALID_ARG;
+ }
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_SUBSCRIBE);
+ rko->rko_u.subscribe.topics = topics_cpy;
+
+ return rd_kafka_op_err_destroy(
+ rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
+}
+
+
+rd_kafka_error_t *
+rd_kafka_assign0(rd_kafka_t *rk,
+ rd_kafka_assign_method_t assign_method,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ rd_kafka_op_t *rko;
+ rd_kafka_cgrp_t *rkcg;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__UNKNOWN_GROUP,
+ "Requires a consumer with group.id "
+ "configured");
+
+ rko = rd_kafka_op_new(RD_KAFKA_OP_ASSIGN);
+
+ rko->rko_u.assign.method = assign_method;
+
+ if (partitions)
+ rko->rko_u.assign.partitions =
+ rd_kafka_topic_partition_list_copy(partitions);
+
+ return rd_kafka_op_error_destroy(
+ rd_kafka_op_req(rkcg->rkcg_ops, rko, RD_POLL_INFINITE));
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_assign(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+
+ error = rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_ASSIGN, partitions);
+
+ if (!error)
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ else {
+ err = rd_kafka_error_code(error);
+ rd_kafka_error_destroy(error);
+ }
+
+ return err;
+}
+
+
+rd_kafka_error_t *
+rd_kafka_incremental_assign(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ if (!partitions)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "partitions must not be NULL");
+
+ return rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_INCR_ASSIGN,
+ partitions);
+}
+
+
+rd_kafka_error_t *rd_kafka_incremental_unassign(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *partitions) {
+ if (!partitions)
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "partitions must not be NULL");
+
+ return rd_kafka_assign0(rk, RD_KAFKA_ASSIGN_METHOD_INCR_UNASSIGN,
+ partitions);
+}
+
+
+int rd_kafka_assignment_lost(rd_kafka_t *rk) {
+ rd_kafka_cgrp_t *rkcg;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return 0;
+
+ return rd_kafka_cgrp_assignment_is_lost(rkcg) == rd_true;
+}
+
+
+const char *rd_kafka_rebalance_protocol(rd_kafka_t *rk) {
+ rd_kafka_op_t *rko;
+ rd_kafka_cgrp_t *rkcg;
+ const char *result;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return NULL;
+
+ rko = rd_kafka_op_req2(rkcg->rkcg_ops,
+ RD_KAFKA_OP_GET_REBALANCE_PROTOCOL);
+
+ if (!rko)
+ return NULL;
+ else if (rko->rko_err) {
+ rd_kafka_op_destroy(rko);
+ return NULL;
+ }
+
+ result = rko->rko_u.rebalance_protocol.str;
+
+ rd_kafka_op_destroy(rko);
+
+ return result;
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_assignment(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t **partitions) {
+ rd_kafka_op_t *rko;
+ rd_kafka_resp_err_t err;
+ rd_kafka_cgrp_t *rkcg;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_ASSIGNMENT);
+ if (!rko)
+ return RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ err = rko->rko_err;
+
+ *partitions = rko->rko_u.assign.partitions;
+ rko->rko_u.assign.partitions = NULL;
+ rd_kafka_op_destroy(rko);
+
+ if (!*partitions && !err) {
+ /* Create an empty list for convenience of the caller */
+ *partitions = rd_kafka_topic_partition_list_new(0);
+ }
+
+ return err;
+}
+
+rd_kafka_resp_err_t
+rd_kafka_subscription(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t **topics) {
+ rd_kafka_op_t *rko;
+ rd_kafka_resp_err_t err;
+ rd_kafka_cgrp_t *rkcg;
+
+ if (!(rkcg = rd_kafka_cgrp_get(rk)))
+ return RD_KAFKA_RESP_ERR__UNKNOWN_GROUP;
+
+ rko = rd_kafka_op_req2(rkcg->rkcg_ops, RD_KAFKA_OP_GET_SUBSCRIPTION);
+ if (!rko)
+ return RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ err = rko->rko_err;
+
+ *topics = rko->rko_u.subscribe.topics;
+ rko->rko_u.subscribe.topics = NULL;
+ rd_kafka_op_destroy(rko);
+
+ if (!*topics && !err) {
+ /* Create an empty list for convenience of the caller */
+ *topics = rd_kafka_topic_partition_list_new(0);
+ }
+
+ return err;
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_pause_partitions(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions) {
+ return rd_kafka_toppars_pause_resume(rk, rd_true /*pause*/, RD_SYNC,
+ RD_KAFKA_TOPPAR_F_APP_PAUSE,
+ partitions);
+}
+
+
+rd_kafka_resp_err_t
+rd_kafka_resume_partitions(rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions) {
+ return rd_kafka_toppars_pause_resume(rk, rd_false /*resume*/, RD_SYNC,
+ RD_KAFKA_TOPPAR_F_APP_PAUSE,
+ partitions);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c
new file mode 100644
index 000000000..5240af785
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.c
@@ -0,0 +1,384 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rd.h"
+#include "rdtime.h"
+#include "rdsysqueue.h"
+
+#include "rdkafka_queue.h"
+
+static RD_INLINE void rd_kafka_timers_lock(rd_kafka_timers_t *rkts) {
+ mtx_lock(&rkts->rkts_lock);
+}
+
+static RD_INLINE void rd_kafka_timers_unlock(rd_kafka_timers_t *rkts) {
+ mtx_unlock(&rkts->rkts_lock);
+}
+
+
+static RD_INLINE int rd_kafka_timer_started(const rd_kafka_timer_t *rtmr) {
+ return rtmr->rtmr_interval ? 1 : 0;
+}
+
+
+static RD_INLINE int rd_kafka_timer_scheduled(const rd_kafka_timer_t *rtmr) {
+ return rtmr->rtmr_next ? 1 : 0;
+}
+
+
+static int rd_kafka_timer_cmp(const void *_a, const void *_b) {
+ const rd_kafka_timer_t *a = _a, *b = _b;
+ return RD_CMP(a->rtmr_next, b->rtmr_next);
+}
+
+static void rd_kafka_timer_unschedule(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr) {
+ TAILQ_REMOVE(&rkts->rkts_timers, rtmr, rtmr_link);
+ rtmr->rtmr_next = 0;
+}
+
+
+/**
+ * @brief Schedule the next firing of the timer at \p abs_time.
+ *
+ * @remark Will not update rtmr_interval, only rtmr_next.
+ *
+ * @locks_required timers_lock()
+ */
+static void rd_kafka_timer_schedule_next(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ rd_ts_t abs_time) {
+ rd_kafka_timer_t *first;
+
+ rtmr->rtmr_next = abs_time;
+
+ if (!(first = TAILQ_FIRST(&rkts->rkts_timers)) ||
+ first->rtmr_next > rtmr->rtmr_next) {
+ TAILQ_INSERT_HEAD(&rkts->rkts_timers, rtmr, rtmr_link);
+ cnd_signal(&rkts->rkts_cond);
+ if (rkts->rkts_wakeq)
+ rd_kafka_q_yield(rkts->rkts_wakeq);
+ } else
+ TAILQ_INSERT_SORTED(&rkts->rkts_timers, rtmr,
+ rd_kafka_timer_t *, rtmr_link,
+ rd_kafka_timer_cmp);
+}
+
+
+/**
+ * @brief Schedule the next firing of the timer according to the timer's
+ * interval plus an optional \p extra_us.
+ *
+ * @locks_required timers_lock()
+ */
+static void rd_kafka_timer_schedule(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ int extra_us) {
+
+ /* Timer has been stopped */
+ if (!rtmr->rtmr_interval)
+ return;
+
+ /* Timers framework is terminating */
+ if (unlikely(!rkts->rkts_enabled))
+ return;
+
+ rd_kafka_timer_schedule_next(
+ rkts, rtmr, rd_clock() + rtmr->rtmr_interval + extra_us);
+}
+
+/**
+ * @brief Stop a timer that may be started.
+ * If called from inside a timer callback 'lock' must be 0, else 1.
+ *
+ * @returns 1 if the timer was started (before being stopped), else 0.
+ */
+int rd_kafka_timer_stop(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ int lock) {
+ if (lock)
+ rd_kafka_timers_lock(rkts);
+
+ if (!rd_kafka_timer_started(rtmr)) {
+ if (lock)
+ rd_kafka_timers_unlock(rkts);
+ return 0;
+ }
+
+ if (rd_kafka_timer_scheduled(rtmr))
+ rd_kafka_timer_unschedule(rkts, rtmr);
+
+ rtmr->rtmr_interval = 0;
+
+ if (lock)
+ rd_kafka_timers_unlock(rkts);
+
+ return 1;
+}
+
+
+/**
+ * @returns true if timer is started, else false.
+ */
+rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts,
+ const rd_kafka_timer_t *rtmr) {
+ rd_bool_t ret;
+ rd_kafka_timers_lock(rkts);
+ ret = rtmr->rtmr_interval != 0;
+ rd_kafka_timers_unlock(rkts);
+ return ret;
+}
+
+
+/**
+ * @brief Start the provided timer with the given interval.
+ *
+ * Upon expiration of the interval (us) the callback will be called in the
+ * main rdkafka thread, after callback return the timer will be restarted.
+ *
+ * @param oneshot just fire the timer once.
+ * @param restart if timer is already started, restart it.
+ *
+ * Use rd_kafka_timer_stop() to stop a timer.
+ */
+void rd_kafka_timer_start0(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ rd_ts_t interval,
+ rd_bool_t oneshot,
+ rd_bool_t restart,
+ void (*callback)(rd_kafka_timers_t *rkts, void *arg),
+ void *arg) {
+ rd_kafka_timers_lock(rkts);
+
+ if (!restart && rd_kafka_timer_scheduled(rtmr)) {
+ rd_kafka_timers_unlock(rkts);
+ return;
+ }
+
+ rd_kafka_timer_stop(rkts, rtmr, 0 /*!lock*/);
+
+ /* Make sure the timer interval is non-zero or the timer
+ * won't be scheduled, which is not what the caller of .._start*()
+ * would expect. */
+ rtmr->rtmr_interval = interval == 0 ? 1 : interval;
+ rtmr->rtmr_callback = callback;
+ rtmr->rtmr_arg = arg;
+ rtmr->rtmr_oneshot = oneshot;
+
+ rd_kafka_timer_schedule(rkts, rtmr, 0);
+
+ rd_kafka_timers_unlock(rkts);
+}
+
+/**
+ * Delay the next timer invocation by '2 * rtmr->rtmr_interval'
+ */
+void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr) {
+ rd_kafka_timers_lock(rkts);
+ if (rd_kafka_timer_scheduled(rtmr)) {
+ rtmr->rtmr_interval *= 2;
+ rd_kafka_timer_unschedule(rkts, rtmr);
+ }
+ rd_kafka_timer_schedule(rkts, rtmr, 0);
+ rd_kafka_timers_unlock(rkts);
+}
+
+/**
+ * @brief Override the interval once for the next firing of the timer.
+ *
+ * @locks_required none
+ * @locks_acquired timers_lock
+ */
+void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ rd_ts_t interval) {
+ rd_kafka_timers_lock(rkts);
+ if (rd_kafka_timer_scheduled(rtmr))
+ rd_kafka_timer_unschedule(rkts, rtmr);
+ rd_kafka_timer_schedule_next(rkts, rtmr, rd_clock() + interval);
+ rd_kafka_timers_unlock(rkts);
+}
+
+
+/**
+ * @returns the delta time to the next time (>=0) this timer fires, or -1
+ * if timer is stopped.
+ */
+rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ int do_lock) {
+ rd_ts_t now = rd_clock();
+ rd_ts_t delta = -1;
+
+ if (do_lock)
+ rd_kafka_timers_lock(rkts);
+
+ if (rd_kafka_timer_scheduled(rtmr)) {
+ delta = rtmr->rtmr_next - now;
+ if (delta < 0)
+ delta = 0;
+ }
+
+ if (do_lock)
+ rd_kafka_timers_unlock(rkts);
+
+ return delta;
+}
+
+
+/**
+ * Interrupt rd_kafka_timers_run().
+ * Used for termination.
+ */
+void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts) {
+ rd_kafka_timers_lock(rkts);
+ cnd_signal(&rkts->rkts_cond);
+ rd_kafka_timers_unlock(rkts);
+}
+
+
+/**
+ * Returns the delta time to the next timer to fire, capped by 'timeout_ms'.
+ */
+rd_ts_t
+rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_us, int do_lock) {
+ rd_ts_t now = rd_clock();
+ rd_ts_t sleeptime = 0;
+ rd_kafka_timer_t *rtmr;
+
+ if (do_lock)
+ rd_kafka_timers_lock(rkts);
+
+ if (likely((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) != NULL)) {
+ sleeptime = rtmr->rtmr_next - now;
+ if (sleeptime < 0)
+ sleeptime = 0;
+ else if (sleeptime > (rd_ts_t)timeout_us)
+ sleeptime = (rd_ts_t)timeout_us;
+ } else
+ sleeptime = (rd_ts_t)timeout_us;
+
+ if (do_lock)
+ rd_kafka_timers_unlock(rkts);
+
+ return sleeptime;
+}
+
+
+/**
+ * Dispatch timers.
+ * Will block up to 'timeout' microseconds before returning.
+ */
+void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us) {
+ rd_ts_t now = rd_clock();
+ rd_ts_t end = now + timeout_us;
+
+ rd_kafka_timers_lock(rkts);
+
+ while (!rd_kafka_terminating(rkts->rkts_rk) && now <= end) {
+ int64_t sleeptime;
+ rd_kafka_timer_t *rtmr;
+
+ if (timeout_us != RD_POLL_NOWAIT) {
+ sleeptime = rd_kafka_timers_next(rkts, timeout_us,
+ 0 /*no-lock*/);
+
+ if (sleeptime > 0) {
+ cnd_timedwait_ms(&rkts->rkts_cond,
+ &rkts->rkts_lock,
+ (int)(sleeptime / 1000));
+ }
+ }
+
+ now = rd_clock();
+
+ while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)) &&
+ rtmr->rtmr_next <= now) {
+ rd_bool_t oneshot;
+
+ rd_kafka_timer_unschedule(rkts, rtmr);
+
+ /* If timer must only be fired once,
+ * disable it now prior to callback.
+ *
+ * NOTE: Oneshot timers are never touched again after
+ * the callback has been called to avoid use-after-free.
+ */
+ if ((oneshot = rtmr->rtmr_oneshot))
+ rtmr->rtmr_interval = 0;
+
+ rd_kafka_timers_unlock(rkts);
+
+ rtmr->rtmr_callback(rkts, rtmr->rtmr_arg);
+
+ rd_kafka_timers_lock(rkts);
+
+ /* Restart timer, unless it has been stopped, or
+ * already reschedueld (start()ed) from callback. */
+ if (!oneshot && rd_kafka_timer_started(rtmr) &&
+ !rd_kafka_timer_scheduled(rtmr))
+ rd_kafka_timer_schedule(rkts, rtmr, 0);
+ }
+
+ if (timeout_us == RD_POLL_NOWAIT) {
+ /* Only iterate once, even if rd_clock doesn't change */
+ break;
+ }
+ }
+
+ rd_kafka_timers_unlock(rkts);
+}
+
+
+void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts) {
+ rd_kafka_timer_t *rtmr;
+
+ rd_kafka_timers_lock(rkts);
+ rkts->rkts_enabled = 0;
+ while ((rtmr = TAILQ_FIRST(&rkts->rkts_timers)))
+ rd_kafka_timer_stop(rkts, rtmr, 0);
+ rd_kafka_assert(rkts->rkts_rk, TAILQ_EMPTY(&rkts->rkts_timers));
+ rd_kafka_timers_unlock(rkts);
+
+ cnd_destroy(&rkts->rkts_cond);
+ mtx_destroy(&rkts->rkts_lock);
+}
+
+void rd_kafka_timers_init(rd_kafka_timers_t *rkts,
+ rd_kafka_t *rk,
+ struct rd_kafka_q_s *wakeq) {
+ memset(rkts, 0, sizeof(*rkts));
+ rkts->rkts_rk = rk;
+ TAILQ_INIT(&rkts->rkts_timers);
+ mtx_init(&rkts->rkts_lock, mtx_plain);
+ cnd_init(&rkts->rkts_cond);
+ rkts->rkts_enabled = 1;
+ rkts->rkts_wakeq = wakeq;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h
new file mode 100644
index 000000000..e3cadd7b9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_timer.h
@@ -0,0 +1,114 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_TIMER_H_
+#define _RDKAFKA_TIMER_H_
+
+#include "rd.h"
+
+struct rd_kafka_q_s; /**< Forward decl */
+
+/* A timer engine. */
+typedef struct rd_kafka_timers_s {
+
+ TAILQ_HEAD(, rd_kafka_timer_s) rkts_timers;
+
+ struct rd_kafka_s *rkts_rk;
+
+ mtx_t rkts_lock;
+ cnd_t rkts_cond;
+
+ /** Optional wake-up (q_yield()) to wake up when a new timer
+ * is scheduled that will fire prior to any existing timers.
+ * This is used to wake up blocking IO or queue polls that run
+ * in the same loop as timers_run(). */
+ struct rd_kafka_q_s *rkts_wakeq;
+
+ int rkts_enabled;
+} rd_kafka_timers_t;
+
+
+typedef struct rd_kafka_timer_s {
+ TAILQ_ENTRY(rd_kafka_timer_s) rtmr_link;
+
+ rd_ts_t rtmr_next;
+ rd_ts_t rtmr_interval; /* interval in microseconds */
+ rd_bool_t rtmr_oneshot; /**< Only fire once. */
+
+ void (*rtmr_callback)(rd_kafka_timers_t *rkts, void *arg);
+ void *rtmr_arg;
+} rd_kafka_timer_t;
+
+
+
+int rd_kafka_timer_stop(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ int lock);
+void rd_kafka_timer_start0(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ rd_ts_t interval,
+ rd_bool_t oneshot,
+ rd_bool_t restart,
+ void (*callback)(rd_kafka_timers_t *rkts, void *arg),
+ void *arg);
+#define rd_kafka_timer_start(rkts, rtmr, interval, callback, arg) \
+ rd_kafka_timer_start0(rkts, rtmr, interval, rd_false, rd_true, \
+ callback, arg)
+#define rd_kafka_timer_start_oneshot(rkts, rtmr, restart, interval, callback, \
+ arg) \
+ rd_kafka_timer_start0(rkts, rtmr, interval, rd_true, restart, \
+ callback, arg)
+
+void rd_kafka_timer_exp_backoff(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr);
+rd_ts_t rd_kafka_timer_next(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ int do_lock);
+
+void rd_kafka_timer_override_once(rd_kafka_timers_t *rkts,
+ rd_kafka_timer_t *rtmr,
+ rd_ts_t interval);
+
+/**
+ * @returns true if timer is started.
+ *
+ * @remark Must only be called in the timer's thread (not thread-safe)
+ */
+rd_bool_t rd_kafka_timer_is_started(rd_kafka_timers_t *rkts,
+ const rd_kafka_timer_t *rtmr);
+
+void rd_kafka_timers_interrupt(rd_kafka_timers_t *rkts);
+rd_ts_t
+rd_kafka_timers_next(rd_kafka_timers_t *rkts, int timeout_ms, int do_lock);
+void rd_kafka_timers_run(rd_kafka_timers_t *rkts, int timeout_us);
+void rd_kafka_timers_destroy(rd_kafka_timers_t *rkts);
+void rd_kafka_timers_init(rd_kafka_timers_t *rkte,
+ rd_kafka_t *rk,
+ struct rd_kafka_q_s *wakeq);
+
+#endif /* _RDKAFKA_TIMER_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c
new file mode 100644
index 000000000..89bfa092d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.c
@@ -0,0 +1,1900 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_msg.h"
+#include "rdkafka_topic.h"
+#include "rdkafka_partition.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_cgrp.h"
+#include "rdkafka_metadata.h"
+#include "rdkafka_offset.h"
+#include "rdlog.h"
+#include "rdsysqueue.h"
+#include "rdtime.h"
+#include "rdregex.h"
+
+#if WITH_ZSTD
+#include <zstd.h>
+#endif
+
+
+const char *rd_kafka_topic_state_names[] = {"unknown", "exists", "notexists",
+ "error"};
+
+
+static int rd_kafka_topic_metadata_update(
+ rd_kafka_topic_t *rkt,
+ const struct rd_kafka_metadata_topic *mdt,
+ const rd_kafka_partition_leader_epoch_t *leader_epochs,
+ rd_ts_t ts_age);
+
+
+/**
+ * @brief Increases the app's topic reference count.
+ *
+ * The app refcounts are implemented separately from the librdkafka refcounts,
+ * they are increased/decreased in a separate rkt_app_refcnt to keep track of
+ * its use.
+ *
+ * This only covers topic_new() & topic_destroy().
+ * The topic_t exposed in rd_kafka_message_t is NOT covered and is handled
+ * like a standard internal -> app pointer conversion (keep_a()).
+ */
+static void rd_kafka_topic_keep_app(rd_kafka_topic_t *rkt) {
+ if (rd_refcnt_add(&rkt->rkt_app_refcnt) == 1)
+ rd_kafka_topic_keep(rkt);
+}
+
+/**
+ * @brief drop rkt app reference
+ */
+static void rd_kafka_topic_destroy_app(rd_kafka_topic_t *app_rkt) {
+ rd_kafka_topic_t *rkt = app_rkt;
+
+ rd_assert(!rd_kafka_rkt_is_lw(app_rkt));
+
+ if (unlikely(rd_refcnt_sub(&rkt->rkt_app_refcnt) == 0))
+ rd_kafka_topic_destroy0(rkt); /* final app reference lost,
+ * loose reference from
+ * keep_app() */
+}
+
+
+/**
+ * Final destructor for topic. Refcnt must be 0.
+ */
+void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt) {
+ rd_kafka_partition_msgid_t *partmsgid, *partmsgid_tmp;
+
+ rd_kafka_assert(rkt->rkt_rk, rd_refcnt_get(&rkt->rkt_refcnt) == 0);
+
+ rd_kafka_wrlock(rkt->rkt_rk);
+ TAILQ_REMOVE(&rkt->rkt_rk->rk_topics, rkt, rkt_link);
+ rkt->rkt_rk->rk_topic_cnt--;
+ rd_kafka_wrunlock(rkt->rkt_rk);
+
+ TAILQ_FOREACH_SAFE(partmsgid, &rkt->rkt_saved_partmsgids, link,
+ partmsgid_tmp) {
+ rd_free(partmsgid);
+ }
+
+ rd_kafka_assert(rkt->rkt_rk, rd_list_empty(&rkt->rkt_desp));
+ rd_list_destroy(&rkt->rkt_desp);
+
+ rd_avg_destroy(&rkt->rkt_avg_batchsize);
+ rd_avg_destroy(&rkt->rkt_avg_batchcnt);
+
+ if (rkt->rkt_topic)
+ rd_kafkap_str_destroy(rkt->rkt_topic);
+
+ rd_kafka_anyconf_destroy(_RK_TOPIC, &rkt->rkt_conf);
+
+ rwlock_destroy(&rkt->rkt_lock);
+ rd_refcnt_destroy(&rkt->rkt_app_refcnt);
+ rd_refcnt_destroy(&rkt->rkt_refcnt);
+
+ rd_free(rkt);
+}
+
+/**
+ * @brief Application topic object destroy.
+ * @warning MUST ONLY BE CALLED BY THE APPLICATION.
+ * Use rd_kafka_topic_destroy0() for all internal use.
+ */
+void rd_kafka_topic_destroy(rd_kafka_topic_t *app_rkt) {
+ rd_kafka_lwtopic_t *lrkt;
+ if (unlikely((lrkt = rd_kafka_rkt_get_lw(app_rkt)) != NULL))
+ rd_kafka_lwtopic_destroy(lrkt);
+ else
+ rd_kafka_topic_destroy_app(app_rkt);
+}
+
+
+/**
+ * Finds and returns a topic based on its name, or NULL if not found.
+ * The 'rkt' refcount is increased by one and the caller must call
+ * rd_kafka_topic_destroy() when it is done with the topic to decrease
+ * the refcount.
+ *
+ * Locality: any thread
+ */
+rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ const char *topic,
+ int do_lock) {
+ rd_kafka_topic_t *rkt;
+
+ if (do_lock)
+ rd_kafka_rdlock(rk);
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ if (!rd_kafkap_str_cmp_str(rkt->rkt_topic, topic)) {
+ rd_kafka_topic_keep(rkt);
+ break;
+ }
+ }
+ if (do_lock)
+ rd_kafka_rdunlock(rk);
+
+ return rkt;
+}
+
+/**
+ * Same semantics as ..find() but takes a Kafka protocol string instead.
+ */
+rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ const rd_kafkap_str_t *topic) {
+ rd_kafka_topic_t *rkt;
+
+ rd_kafka_rdlock(rk);
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ if (!rd_kafkap_str_cmp(rkt->rkt_topic, topic)) {
+ rd_kafka_topic_keep(rkt);
+ break;
+ }
+ }
+ rd_kafka_rdunlock(rk);
+
+ return rkt;
+}
+
+
+/**
+ * @brief rd_kafka_topic_t comparator.
+ */
+int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b) {
+ rd_kafka_topic_t *rkt_a = (void *)_a, *rkt_b = (void *)_b;
+
+ if (rkt_a == rkt_b)
+ return 0;
+
+ return rd_kafkap_str_cmp(rkt_a->rkt_topic, rkt_b->rkt_topic);
+}
+
+
+/**
+ * @brief Destroy/free a light-weight topic object.
+ */
+void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt) {
+ rd_assert(rd_kafka_rkt_is_lw((const rd_kafka_topic_t *)lrkt));
+ if (rd_refcnt_sub(&lrkt->lrkt_refcnt) > 0)
+ return;
+
+ rd_refcnt_destroy(&lrkt->lrkt_refcnt);
+ rd_free(lrkt);
+}
+
+
+/**
+ * @brief Create a new light-weight topic name-only handle.
+ *
+ * This type of object is a light-weight non-linked alternative
+ * to the proper rd_kafka_itopic_t for outgoing APIs
+ * (such as rd_kafka_message_t) when there is no full topic object available.
+ */
+rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic) {
+ rd_kafka_lwtopic_t *lrkt;
+ size_t topic_len = strlen(topic);
+
+ lrkt = rd_malloc(sizeof(*lrkt) + topic_len + 1);
+
+ memcpy(lrkt->lrkt_magic, "LRKT", 4);
+ lrkt->lrkt_rk = rk;
+ rd_refcnt_init(&lrkt->lrkt_refcnt, 1);
+ lrkt->lrkt_topic = (char *)(lrkt + 1);
+ memcpy(lrkt->lrkt_topic, topic, topic_len + 1);
+
+ return lrkt;
+}
+
+
+/**
+ * @returns a proper rd_kafka_topic_t object (not light-weight)
+ * based on the input rd_kafka_topic_t app object which may
+ * either be a proper topic (which is then returned) or a light-weight
+ * topic in which case it will look up or create the proper topic
+ * object.
+ *
+ * This allows the application to (unknowingly) pass a light-weight
+ * topic object to any proper-aware public API.
+ */
+rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt) {
+ rd_kafka_lwtopic_t *lrkt;
+
+ if (likely(!(lrkt = rd_kafka_rkt_get_lw(app_rkt))))
+ return app_rkt;
+
+ /* Create proper topic object */
+ return rd_kafka_topic_new0(lrkt->lrkt_rk, lrkt->lrkt_topic, NULL, NULL,
+ 0);
+}
+
+
+/**
+ * @brief Create new topic handle.
+ *
+ * @locality any
+ */
+rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk,
+ const char *topic,
+ rd_kafka_topic_conf_t *conf,
+ int *existing,
+ int do_lock) {
+ rd_kafka_topic_t *rkt;
+ const struct rd_kafka_metadata_cache_entry *rkmce;
+ const char *conf_err;
+ const char *used_conf_str;
+
+ /* Verify configuration.
+ * Maximum topic name size + headers must never exceed message.max.bytes
+ * which is min-capped to 1000.
+ * See rd_kafka_broker_produce_toppar() and rdkafka_conf.c */
+ if (!topic || strlen(topic) > 512) {
+ if (conf)
+ rd_kafka_topic_conf_destroy(conf);
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
+ return NULL;
+ }
+
+ if (do_lock)
+ rd_kafka_wrlock(rk);
+ if ((rkt = rd_kafka_topic_find(rk, topic, 0 /*no lock*/))) {
+ if (do_lock)
+ rd_kafka_wrunlock(rk);
+ if (conf)
+ rd_kafka_topic_conf_destroy(conf);
+ if (existing)
+ *existing = 1;
+ return rkt;
+ }
+
+ if (!conf) {
+ if (rk->rk_conf.topic_conf) {
+ conf = rd_kafka_topic_conf_dup(rk->rk_conf.topic_conf);
+ used_conf_str = "default_topic_conf";
+ } else {
+ conf = rd_kafka_topic_conf_new();
+ used_conf_str = "empty";
+ }
+ } else {
+ used_conf_str = "user-supplied";
+ }
+
+
+ /* Verify and finalize topic configuration */
+ if ((conf_err = rd_kafka_topic_conf_finalize(rk->rk_type, &rk->rk_conf,
+ conf))) {
+ if (do_lock)
+ rd_kafka_wrunlock(rk);
+ /* Incompatible configuration settings */
+ rd_kafka_log(rk, LOG_ERR, "TOPICCONF",
+ "Incompatible configuration settings "
+ "for topic \"%s\": %s",
+ topic, conf_err);
+ rd_kafka_topic_conf_destroy(conf);
+ rd_kafka_set_last_error(RD_KAFKA_RESP_ERR__INVALID_ARG, EINVAL);
+ return NULL;
+ }
+
+ if (existing)
+ *existing = 0;
+
+ rkt = rd_calloc(1, sizeof(*rkt));
+
+ memcpy(rkt->rkt_magic, "IRKT", 4);
+
+ rkt->rkt_topic = rd_kafkap_str_new(topic, -1);
+ rkt->rkt_rk = rk;
+
+ rkt->rkt_ts_create = rd_clock();
+
+ rkt->rkt_conf = *conf;
+ rd_free(conf); /* explicitly not rd_kafka_topic_destroy()
+ * since we dont want to rd_free internal members,
+ * just the placeholder. The internal members
+ * were copied on the line above. */
+
+ /* Partitioner */
+ if (!rkt->rkt_conf.partitioner) {
+ const struct {
+ const char *str;
+ void *part;
+ } part_map[] = {
+ {"random", (void *)rd_kafka_msg_partitioner_random},
+ {"consistent", (void *)rd_kafka_msg_partitioner_consistent},
+ {"consistent_random",
+ (void *)rd_kafka_msg_partitioner_consistent_random},
+ {"murmur2", (void *)rd_kafka_msg_partitioner_murmur2},
+ {"murmur2_random",
+ (void *)rd_kafka_msg_partitioner_murmur2_random},
+ {"fnv1a", (void *)rd_kafka_msg_partitioner_fnv1a},
+ {"fnv1a_random",
+ (void *)rd_kafka_msg_partitioner_fnv1a_random},
+ {NULL}};
+ int i;
+
+ /* Use "partitioner" configuration property string, if set */
+ for (i = 0; rkt->rkt_conf.partitioner_str && part_map[i].str;
+ i++) {
+ if (!strcmp(rkt->rkt_conf.partitioner_str,
+ part_map[i].str)) {
+ rkt->rkt_conf.partitioner = part_map[i].part;
+ break;
+ }
+ }
+
+ /* Default partitioner: consistent_random */
+ if (!rkt->rkt_conf.partitioner) {
+ /* Make sure part_map matched something, otherwise
+ * there is a discreprency between this code
+ * and the validator in rdkafka_conf.c */
+ assert(!rkt->rkt_conf.partitioner_str);
+
+ rkt->rkt_conf.partitioner =
+ rd_kafka_msg_partitioner_consistent_random;
+ }
+ }
+
+ if (rkt->rkt_rk->rk_conf.sticky_partition_linger_ms > 0 &&
+ rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_consistent &&
+ rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_murmur2 &&
+ rkt->rkt_conf.partitioner != rd_kafka_msg_partitioner_fnv1a) {
+ rkt->rkt_conf.random_partitioner = rd_false;
+ } else {
+ rkt->rkt_conf.random_partitioner = rd_true;
+ }
+
+ /* Sticky partition assignment interval */
+ rd_interval_init(&rkt->rkt_sticky_intvl);
+
+ if (rkt->rkt_conf.queuing_strategy == RD_KAFKA_QUEUE_FIFO)
+ rkt->rkt_conf.msg_order_cmp = rd_kafka_msg_cmp_msgid;
+ else
+ rkt->rkt_conf.msg_order_cmp = rd_kafka_msg_cmp_msgid_lifo;
+
+ if (rkt->rkt_conf.compression_codec == RD_KAFKA_COMPRESSION_INHERIT)
+ rkt->rkt_conf.compression_codec = rk->rk_conf.compression_codec;
+
+ /* Translate compression level to library-specific level and check
+ * upper bound */
+ switch (rkt->rkt_conf.compression_codec) {
+#if WITH_ZLIB
+ case RD_KAFKA_COMPRESSION_GZIP:
+ if (rkt->rkt_conf.compression_level ==
+ RD_KAFKA_COMPLEVEL_DEFAULT)
+ rkt->rkt_conf.compression_level = Z_DEFAULT_COMPRESSION;
+ else if (rkt->rkt_conf.compression_level >
+ RD_KAFKA_COMPLEVEL_GZIP_MAX)
+ rkt->rkt_conf.compression_level =
+ RD_KAFKA_COMPLEVEL_GZIP_MAX;
+ break;
+#endif
+ case RD_KAFKA_COMPRESSION_LZ4:
+ if (rkt->rkt_conf.compression_level ==
+ RD_KAFKA_COMPLEVEL_DEFAULT)
+ /* LZ4 has no notion of system-wide default compression
+ * level, use zero in this case */
+ rkt->rkt_conf.compression_level = 0;
+ else if (rkt->rkt_conf.compression_level >
+ RD_KAFKA_COMPLEVEL_LZ4_MAX)
+ rkt->rkt_conf.compression_level =
+ RD_KAFKA_COMPLEVEL_LZ4_MAX;
+ break;
+#if WITH_ZSTD
+ case RD_KAFKA_COMPRESSION_ZSTD:
+ if (rkt->rkt_conf.compression_level ==
+ RD_KAFKA_COMPLEVEL_DEFAULT)
+ rkt->rkt_conf.compression_level = 3;
+ else if (rkt->rkt_conf.compression_level >
+ RD_KAFKA_COMPLEVEL_ZSTD_MAX)
+ rkt->rkt_conf.compression_level =
+ RD_KAFKA_COMPLEVEL_ZSTD_MAX;
+ break;
+#endif
+ case RD_KAFKA_COMPRESSION_SNAPPY:
+ default:
+ /* Compression level has no effect in this case */
+ rkt->rkt_conf.compression_level = RD_KAFKA_COMPLEVEL_DEFAULT;
+ }
+
+ rd_avg_init(&rkt->rkt_avg_batchsize, RD_AVG_GAUGE, 0,
+ rk->rk_conf.max_msg_size, 2,
+ rk->rk_conf.stats_interval_ms ? 1 : 0);
+ rd_avg_init(&rkt->rkt_avg_batchcnt, RD_AVG_GAUGE, 0,
+ rk->rk_conf.batch_num_messages, 2,
+ rk->rk_conf.stats_interval_ms ? 1 : 0);
+
+ rd_kafka_dbg(rk, TOPIC, "TOPIC", "New local topic: %.*s",
+ RD_KAFKAP_STR_PR(rkt->rkt_topic));
+
+ rd_list_init(&rkt->rkt_desp, 16, NULL);
+ rd_interval_init(&rkt->rkt_desp_refresh_intvl);
+ TAILQ_INIT(&rkt->rkt_saved_partmsgids);
+ rd_refcnt_init(&rkt->rkt_refcnt, 0);
+ rd_refcnt_init(&rkt->rkt_app_refcnt, 0);
+
+ rd_kafka_topic_keep(rkt);
+
+ rwlock_init(&rkt->rkt_lock);
+
+ /* Create unassigned partition */
+ rkt->rkt_ua = rd_kafka_toppar_new(rkt, RD_KAFKA_PARTITION_UA);
+
+ TAILQ_INSERT_TAIL(&rk->rk_topics, rkt, rkt_link);
+ rk->rk_topic_cnt++;
+
+ /* Populate from metadata cache. */
+ if ((rkmce = rd_kafka_metadata_cache_find(rk, topic, 1 /*valid*/)) &&
+ !rkmce->rkmce_mtopic.err) {
+ if (existing)
+ *existing = 1;
+
+ rd_kafka_topic_metadata_update(rkt, &rkmce->rkmce_mtopic, NULL,
+ rkmce->rkmce_ts_insert);
+ }
+
+ if (do_lock)
+ rd_kafka_wrunlock(rk);
+
+ if (rk->rk_conf.debug & RD_KAFKA_DBG_CONF) {
+ char desc[256];
+ rd_snprintf(desc, sizeof(desc),
+ "Topic \"%s\" configuration (%s)", topic,
+ used_conf_str);
+ rd_kafka_anyconf_dump_dbg(rk, _RK_TOPIC, &rkt->rkt_conf, desc);
+ }
+
+ return rkt;
+}
+
+
+
+/**
+ * @brief Create new app topic handle.
+ *
+ * @locality application thread
+ */
+rd_kafka_topic_t *rd_kafka_topic_new(rd_kafka_t *rk,
+ const char *topic,
+ rd_kafka_topic_conf_t *conf) {
+ rd_kafka_topic_t *rkt;
+ int existing;
+
+ rkt = rd_kafka_topic_new0(rk, topic, conf, &existing, 1 /*lock*/);
+ if (!rkt)
+ return NULL;
+
+ /* Increase application refcount. */
+ rd_kafka_topic_keep_app(rkt);
+
+ /* Query for the topic leader (async) */
+ if (!existing)
+ rd_kafka_topic_leader_query(rk, rkt);
+
+ /* Drop our reference since there is already/now an app refcnt */
+ rd_kafka_topic_destroy0(rkt);
+
+ return rkt;
+}
+
+
+
+/**
+ * Sets the state for topic.
+ * NOTE: rd_kafka_topic_wrlock(rkt) MUST be held
+ */
+static void rd_kafka_topic_set_state(rd_kafka_topic_t *rkt, int state) {
+
+ if ((int)rkt->rkt_state == state)
+ return;
+
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "STATE",
+ "Topic %s changed state %s -> %s", rkt->rkt_topic->str,
+ rd_kafka_topic_state_names[rkt->rkt_state],
+ rd_kafka_topic_state_names[state]);
+
+ if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR)
+ rkt->rkt_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rkt->rkt_state = state;
+}
+
+/**
+ * Returns the name of a topic.
+ * NOTE:
+ * The topic Kafka String representation is crafted with an extra byte
+ * at the end for the Nul that is not included in the length, this way
+ * we can use the topic's String directly.
+ * This is not true for Kafka Strings read from the network.
+ */
+const char *rd_kafka_topic_name(const rd_kafka_topic_t *app_rkt) {
+ if (rd_kafka_rkt_is_lw(app_rkt))
+ return rd_kafka_rkt_lw_const(app_rkt)->lrkt_topic;
+ else
+ return app_rkt->rkt_topic->str;
+}
+
+
+/**
+ * @brief Update the broker that a topic+partition is delegated to.
+ *
+ * @param broker_id The id of the broker to associate the toppar with.
+ * @param rkb A reference to the broker to delegate to (must match
+ * broker_id) or NULL if the toppar should be undelegated for
+ * any reason.
+ * @param reason Human-readable reason for the update, included in debug log.
+ *
+ * @returns 1 if the broker delegation was changed, -1 if the broker
+ * delegation was changed and is now undelegated, else 0.
+ *
+ * @locks caller must have rd_kafka_toppar_lock(rktp)
+ * @locality any
+ */
+int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp,
+ int32_t broker_id,
+ rd_kafka_broker_t *rkb,
+ const char *reason) {
+
+ rktp->rktp_broker_id = broker_id;
+
+ if (!rkb) {
+ int had_broker = rktp->rktp_broker ? 1 : 0;
+ rd_kafka_toppar_broker_delegate(rktp, NULL);
+ return had_broker ? -1 : 0;
+ }
+
+ if (rktp->rktp_broker) {
+ if (rktp->rktp_broker == rkb) {
+ /* No change in broker */
+ return 0;
+ }
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_FETCH,
+ "TOPICUPD",
+ "Topic %s [%" PRId32
+ "]: migrating from "
+ "broker %" PRId32 " to %" PRId32
+ " (leader is "
+ "%" PRId32 "): %s",
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ rktp->rktp_broker->rkb_nodeid, rkb->rkb_nodeid,
+ rktp->rktp_leader_id, reason);
+ }
+
+ rd_kafka_toppar_broker_delegate(rktp, rkb);
+
+ return 1;
+}
+
+
+/**
+ * @brief Update a topic+partition for a new leader.
+ *
+ * @remark If a toppar is currently delegated to a preferred replica,
+ * it will not be delegated to the leader broker unless there
+ * has been a leader change.
+ *
+ * @param leader_id The id of the new leader broker.
+ * @param leader A reference to the leader broker or NULL if the
+ * toppar should be undelegated for any reason.
+ * @param leader_epoch Partition leader's epoch (KIP-320), or -1 if not known.
+ *
+ * @returns 1 if the broker delegation was changed, -1 if the broker
+ * delegation was changed and is now undelegated, else 0.
+ *
+ * @locks caller must have rd_kafka_topic_wrlock(rkt)
+ * AND NOT rd_kafka_toppar_lock(rktp)
+ * @locality any
+ */
+static int rd_kafka_toppar_leader_update(rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int32_t leader_id,
+ rd_kafka_broker_t *leader,
+ int32_t leader_epoch) {
+ rd_kafka_toppar_t *rktp;
+ rd_bool_t fetching_from_follower, need_epoch_validation = rd_false;
+ int r = 0;
+
+ rktp = rd_kafka_toppar_get(rkt, partition, 0);
+ if (unlikely(!rktp)) {
+ /* Have only seen this in issue #132.
+ * Probably caused by corrupt broker state. */
+ rd_kafka_log(rkt->rkt_rk, LOG_WARNING, "BROKER",
+ "%s [%" PRId32
+ "] is unknown "
+ "(partition_cnt %i): "
+ "ignoring leader (%" PRId32 ") update",
+ rkt->rkt_topic->str, partition,
+ rkt->rkt_partition_cnt, leader_id);
+ return -1;
+ }
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (leader_epoch < rktp->rktp_leader_epoch) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
+ "%s [%" PRId32
+ "]: ignoring outdated metadata update with "
+ "leader epoch %" PRId32
+ " which is older than "
+ "our cached epoch %" PRId32,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, leader_epoch,
+ rktp->rktp_leader_epoch);
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_ACTIVE) {
+ rd_kafka_toppar_unlock(rktp);
+ return 0;
+ }
+ }
+
+ if (rktp->rktp_fetch_state == RD_KAFKA_TOPPAR_FETCH_VALIDATE_EPOCH_WAIT)
+ need_epoch_validation = rd_true;
+ else if (leader_epoch > rktp->rktp_leader_epoch) {
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
+ "%s [%" PRId32 "]: leader %" PRId32
+ " epoch %" PRId32 " -> leader %" PRId32
+ " epoch %" PRId32,
+ rktp->rktp_rkt->rkt_topic->str,
+ rktp->rktp_partition, rktp->rktp_leader_id,
+ rktp->rktp_leader_epoch, leader_id, leader_epoch);
+ rktp->rktp_leader_epoch = leader_epoch;
+ need_epoch_validation = rd_true;
+ }
+
+ fetching_from_follower =
+ leader != NULL && rktp->rktp_broker != NULL &&
+ rktp->rktp_broker->rkb_source != RD_KAFKA_INTERNAL &&
+ rktp->rktp_broker != leader;
+
+ if (fetching_from_follower && rktp->rktp_leader_id == leader_id) {
+ rd_kafka_dbg(
+ rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
+ "Topic %s [%" PRId32 "]: leader %" PRId32
+ " unchanged, "
+ "not migrating away from preferred replica %" PRId32,
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ leader_id, rktp->rktp_broker_id);
+ r = 0;
+
+ } else {
+
+ if (rktp->rktp_leader_id != leader_id ||
+ rktp->rktp_leader != leader) {
+ /* Update leader if it has changed */
+ rktp->rktp_leader_id = leader_id;
+ if (rktp->rktp_leader)
+ rd_kafka_broker_destroy(rktp->rktp_leader);
+ if (leader)
+ rd_kafka_broker_keep(leader);
+ rktp->rktp_leader = leader;
+ }
+
+ /* Update handling broker */
+ r = rd_kafka_toppar_broker_update(rktp, leader_id, leader,
+ "leader updated");
+ }
+
+ if (need_epoch_validation) {
+ /* Update next fetch position, that could be stale since last
+ * fetch start. Only if the app pos is real. */
+ if (rktp->rktp_app_pos.offset > 0) {
+ rd_kafka_toppar_set_next_fetch_position(
+ rktp, rktp->rktp_app_pos);
+ }
+ rd_kafka_offset_validate(rktp, "epoch updated from metadata");
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp); /* from get() */
+
+ return r;
+}
+
+
+/**
+ * @brief Revert the topic+partition delegation to the leader from
+ * a preferred replica.
+ *
+ * @returns 1 if the broker delegation was changed, -1 if the broker
+ * delegation was changed and is now undelegated, else 0.
+ *
+ * @locks none
+ * @locality any
+ */
+int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp) {
+ rd_kafka_broker_t *leader;
+ int r;
+
+ rd_kafka_rdlock(rktp->rktp_rkt->rkt_rk);
+ rd_kafka_toppar_lock(rktp);
+
+ rd_assert(rktp->rktp_leader_id != rktp->rktp_broker_id);
+
+ rd_kafka_dbg(rktp->rktp_rkt->rkt_rk, TOPIC, "BROKER",
+ "Topic %s [%" PRId32
+ "]: Reverting from preferred "
+ "replica %" PRId32 " to leader %" PRId32,
+ rktp->rktp_rkt->rkt_topic->str, rktp->rktp_partition,
+ rktp->rktp_broker_id, rktp->rktp_leader_id);
+
+ leader = rd_kafka_broker_find_by_nodeid(rktp->rktp_rkt->rkt_rk,
+ rktp->rktp_leader_id);
+
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_rdunlock(rktp->rktp_rkt->rkt_rk);
+
+ rd_kafka_toppar_lock(rktp);
+ r = rd_kafka_toppar_broker_update(
+ rktp, rktp->rktp_leader_id, leader,
+ "reverting from preferred replica to leader");
+ rd_kafka_toppar_unlock(rktp);
+
+ if (leader)
+ rd_kafka_broker_destroy(leader);
+
+ return r;
+}
+
+
+
+/**
+ * @brief Save idempotent producer state for a partition that is about to
+ * be removed.
+ *
+ * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp)
+ */
+static void rd_kafka_toppar_idemp_msgid_save(rd_kafka_topic_t *rkt,
+ const rd_kafka_toppar_t *rktp) {
+ rd_kafka_partition_msgid_t *partmsgid = rd_malloc(sizeof(*partmsgid));
+ partmsgid->partition = rktp->rktp_partition;
+ partmsgid->msgid = rktp->rktp_msgid;
+ partmsgid->pid = rktp->rktp_eos.pid;
+ partmsgid->epoch_base_msgid = rktp->rktp_eos.epoch_base_msgid;
+ partmsgid->ts = rd_clock();
+
+ TAILQ_INSERT_TAIL(&rkt->rkt_saved_partmsgids, partmsgid, link);
+}
+
+
+/**
+ * @brief Restore idempotent producer state for a new/resurfacing partition.
+ *
+ * @locks_required rd_kafka_wrlock(rkt), rd_kafka_toppar_lock(rktp)
+ */
+static void rd_kafka_toppar_idemp_msgid_restore(rd_kafka_topic_t *rkt,
+ rd_kafka_toppar_t *rktp) {
+ rd_kafka_partition_msgid_t *partmsgid;
+
+ TAILQ_FOREACH(partmsgid, &rkt->rkt_saved_partmsgids, link) {
+ if (partmsgid->partition == rktp->rktp_partition)
+ break;
+ }
+
+ if (!partmsgid)
+ return;
+
+ rktp->rktp_msgid = partmsgid->msgid;
+ rktp->rktp_eos.pid = partmsgid->pid;
+ rktp->rktp_eos.epoch_base_msgid = partmsgid->epoch_base_msgid;
+
+ rd_kafka_dbg(rkt->rkt_rk, EOS | RD_KAFKA_DBG_TOPIC, "MSGID",
+ "Topic %s [%" PRId32 "]: restored %s with MsgId %" PRIu64
+ " and "
+ "epoch base MsgId %" PRIu64
+ " that was saved upon removal %dms ago",
+ rkt->rkt_topic->str, rktp->rktp_partition,
+ rd_kafka_pid2str(partmsgid->pid), partmsgid->msgid,
+ partmsgid->epoch_base_msgid,
+ (int)((rd_clock() - partmsgid->ts) / 1000));
+
+ TAILQ_REMOVE(&rkt->rkt_saved_partmsgids, partmsgid, link);
+ rd_free(partmsgid);
+}
+
+
+/**
+ * @brief Update the number of partitions for a topic and takes actions
+ * accordingly.
+ *
+ * @returns 1 if the partition count changed, else 0.
+ *
+ * @locks rd_kafka_topic_wrlock(rkt) MUST be held.
+ */
+static int rd_kafka_topic_partition_cnt_update(rd_kafka_topic_t *rkt,
+ int32_t partition_cnt) {
+ rd_kafka_t *rk = rkt->rkt_rk;
+ rd_kafka_toppar_t **rktps;
+ rd_kafka_toppar_t *rktp;
+ rd_bool_t is_idempodent = rd_kafka_is_idempotent(rk);
+ int32_t i;
+
+ if (likely(rkt->rkt_partition_cnt == partition_cnt))
+ return 0; /* No change in partition count */
+
+ if (unlikely(rkt->rkt_partition_cnt != 0 &&
+ !rd_kafka_terminating(rkt->rkt_rk)))
+ rd_kafka_log(rk, LOG_NOTICE, "PARTCNT",
+ "Topic %s partition count changed "
+ "from %" PRId32 " to %" PRId32,
+ rkt->rkt_topic->str, rkt->rkt_partition_cnt,
+ partition_cnt);
+ else
+ rd_kafka_dbg(rk, TOPIC, "PARTCNT",
+ "Topic %s partition count changed "
+ "from %" PRId32 " to %" PRId32,
+ rkt->rkt_topic->str, rkt->rkt_partition_cnt,
+ partition_cnt);
+
+
+ /* Create and assign new partition list */
+ if (partition_cnt > 0)
+ rktps = rd_calloc(partition_cnt, sizeof(*rktps));
+ else
+ rktps = NULL;
+
+ for (i = 0; i < partition_cnt; i++) {
+ if (i >= rkt->rkt_partition_cnt) {
+ /* New partition. Check if its in the list of
+ * desired partitions first. */
+
+ rktp = rd_kafka_toppar_desired_get(rkt, i);
+ if (rktp) {
+ rd_kafka_toppar_lock(rktp);
+ rktp->rktp_flags &=
+ ~(RD_KAFKA_TOPPAR_F_UNKNOWN |
+ RD_KAFKA_TOPPAR_F_REMOVE);
+
+ /* Remove from desp list since the
+ * partition is now known. */
+ rd_kafka_toppar_desired_unlink(rktp);
+ } else {
+ rktp = rd_kafka_toppar_new(rkt, i);
+
+ rd_kafka_toppar_lock(rktp);
+ rktp->rktp_flags &=
+ ~(RD_KAFKA_TOPPAR_F_UNKNOWN |
+ RD_KAFKA_TOPPAR_F_REMOVE);
+ }
+ rktps[i] = rktp;
+
+ if (is_idempodent)
+ /* Restore idempotent producer state for
+ * this partition, if any. */
+ rd_kafka_toppar_idemp_msgid_restore(rkt, rktp);
+
+ rd_kafka_toppar_unlock(rktp);
+
+ } else {
+ /* Existing partition, grab our own reference. */
+ rktps[i] = rd_kafka_toppar_keep(rkt->rkt_p[i]);
+ /* Loose previous ref */
+ rd_kafka_toppar_destroy(rkt->rkt_p[i]);
+ }
+ }
+
+ /* Propagate notexist errors for desired partitions */
+ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i) {
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED",
+ "%s [%" PRId32
+ "]: "
+ "desired partition does not exist in cluster",
+ rkt->rkt_topic->str, rktp->rktp_partition);
+ rd_kafka_toppar_enq_error(
+ rktp,
+ rkt->rkt_err ? rkt->rkt_err
+ : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
+ "desired partition is not available");
+ }
+
+ /* Remove excessive partitions */
+ for (i = partition_cnt; i < rkt->rkt_partition_cnt; i++) {
+ rktp = rkt->rkt_p[i];
+
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "REMOVE",
+ "%s [%" PRId32 "] no longer reported in metadata",
+ rkt->rkt_topic->str, rktp->rktp_partition);
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Idempotent/Transactional producer:
+ * We need to save each removed partition's base msgid for
+ * the (rare) chance the partition comes back,
+ * in which case we must continue with the correct msgid
+ * in future ProduceRequests.
+ *
+ * These base msgsid are restored (above) if/when partitions
+ * come back and the PID,Epoch hasn't changed.
+ *
+ * One situation where this might happen is if a broker goes
+ * out of sync and starts to wrongfully report an existing
+ * topic as non-existent, triggering the removal of partitions
+ * on the producer client. When metadata is eventually correct
+ * again and the topic is "re-created" on the producer, it
+ * must continue with the next msgid/baseseq. */
+ if (is_idempodent && rd_kafka_pid_valid(rktp->rktp_eos.pid))
+ rd_kafka_toppar_idemp_msgid_save(rkt, rktp);
+
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_UNKNOWN;
+
+ if (rktp->rktp_flags & RD_KAFKA_TOPPAR_F_DESIRED) {
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "DESIRED",
+ "Topic %s [%" PRId32
+ "] is desired "
+ "but no longer known: "
+ "moving back on desired list",
+ rkt->rkt_topic->str, rktp->rktp_partition);
+
+ /* If this is a desired partition move it back on to
+ * the desired list since partition is no longer known*/
+ rd_kafka_toppar_desired_link(rktp);
+
+ if (!rd_kafka_terminating(rkt->rkt_rk))
+ rd_kafka_toppar_enq_error(
+ rktp,
+ rkt->rkt_err
+ ? rkt->rkt_err
+ : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION,
+ "desired partition is no longer "
+ "available");
+
+ rd_kafka_toppar_broker_delegate(rktp, NULL);
+
+ } else {
+ /* Tell handling broker to let go of the toppar */
+ rd_kafka_toppar_broker_leave_for_remove(rktp);
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp);
+ }
+
+ if (rkt->rkt_p)
+ rd_free(rkt->rkt_p);
+
+ rkt->rkt_p = rktps;
+
+ rkt->rkt_partition_cnt = partition_cnt;
+
+ return 1;
+}
+
+
+
+/**
+ * Topic 'rkt' does not exist: propagate to interested parties.
+ * The topic's state must have been set to NOTEXISTS and
+ * rd_kafka_topic_partition_cnt_update() must have been called prior to
+ * calling this function.
+ *
+ * Locks: rd_kafka_topic_*lock() must be held.
+ */
+static void rd_kafka_topic_propagate_notexists(rd_kafka_topic_t *rkt,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_toppar_t *rktp;
+ int i;
+
+ if (rkt->rkt_rk->rk_type != RD_KAFKA_CONSUMER)
+ return;
+
+
+ /* Notify consumers that the topic doesn't exist. */
+ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
+ rd_kafka_toppar_enq_error(rktp, err, "topic does not exist");
+}
+
+
+/**
+ * Assign messages on the UA partition to available partitions.
+ * Locks: rd_kafka_topic_*lock() must be held.
+ */
+static void rd_kafka_topic_assign_uas(rd_kafka_topic_t *rkt,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_t *rk = rkt->rkt_rk;
+ rd_kafka_toppar_t *rktp_ua;
+ rd_kafka_msg_t *rkm, *tmp;
+ rd_kafka_msgq_t uas = RD_KAFKA_MSGQ_INITIALIZER(uas);
+ rd_kafka_msgq_t failed = RD_KAFKA_MSGQ_INITIALIZER(failed);
+ rd_kafka_resp_err_t err_all = RD_KAFKA_RESP_ERR_NO_ERROR;
+ int cnt;
+
+ if (rkt->rkt_rk->rk_type != RD_KAFKA_PRODUCER)
+ return;
+
+ rktp_ua = rd_kafka_toppar_get(rkt, RD_KAFKA_PARTITION_UA, 0);
+ if (unlikely(!rktp_ua)) {
+ rd_kafka_dbg(rk, TOPIC, "ASSIGNUA",
+ "No UnAssigned partition available for %s",
+ rkt->rkt_topic->str);
+ return;
+ }
+
+ /* Assign all unassigned messages to new topics. */
+ rd_kafka_toppar_lock(rktp_ua);
+
+ if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR) {
+ err_all = rkt->rkt_err;
+ rd_kafka_dbg(rk, TOPIC, "PARTCNT",
+ "Failing all %i unassigned messages in "
+ "topic %.*s due to permanent topic error: %s",
+ rktp_ua->rktp_msgq.rkmq_msg_cnt,
+ RD_KAFKAP_STR_PR(rkt->rkt_topic),
+ rd_kafka_err2str(err_all));
+ } else if (rkt->rkt_state == RD_KAFKA_TOPIC_S_NOTEXISTS) {
+ err_all = err;
+ rd_kafka_dbg(rk, TOPIC, "PARTCNT",
+ "Failing all %i unassigned messages in "
+ "topic %.*s since topic does not exist: %s",
+ rktp_ua->rktp_msgq.rkmq_msg_cnt,
+ RD_KAFKAP_STR_PR(rkt->rkt_topic),
+ rd_kafka_err2str(err_all));
+ } else {
+ rd_kafka_dbg(rk, TOPIC, "PARTCNT",
+ "Partitioning %i unassigned messages in "
+ "topic %.*s to %" PRId32 " partitions",
+ rktp_ua->rktp_msgq.rkmq_msg_cnt,
+ RD_KAFKAP_STR_PR(rkt->rkt_topic),
+ rkt->rkt_partition_cnt);
+ }
+
+ rd_kafka_msgq_move(&uas, &rktp_ua->rktp_msgq);
+ cnt = uas.rkmq_msg_cnt;
+ rd_kafka_toppar_unlock(rktp_ua);
+
+ TAILQ_FOREACH_SAFE(rkm, &uas.rkmq_msgs, rkm_link, tmp) {
+ /* Fast-path for failing messages with forced partition or
+ * when all messages are to fail. */
+ if (err_all || (rkm->rkm_partition != RD_KAFKA_PARTITION_UA &&
+ rkm->rkm_partition >= rkt->rkt_partition_cnt &&
+ rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN)) {
+ rd_kafka_msgq_enq(&failed, rkm);
+ continue;
+ }
+
+ if (unlikely(rd_kafka_msg_partitioner(rkt, rkm, 0) != 0)) {
+ /* Desired partition not available */
+ rd_kafka_msgq_enq(&failed, rkm);
+ }
+ }
+
+ rd_kafka_dbg(rk, TOPIC, "UAS",
+ "%i/%i messages were partitioned in topic %s",
+ cnt - failed.rkmq_msg_cnt, cnt, rkt->rkt_topic->str);
+
+ if (failed.rkmq_msg_cnt > 0) {
+ /* Fail the messages */
+ rd_kafka_dbg(rk, TOPIC, "UAS",
+ "%" PRId32
+ "/%i messages failed partitioning "
+ "in topic %s",
+ failed.rkmq_msg_cnt, cnt, rkt->rkt_topic->str);
+ rd_kafka_dr_msgq(
+ rkt, &failed,
+ err_all ? err_all : RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION);
+ }
+
+ rd_kafka_toppar_destroy(rktp_ua); /* from get() */
+}
+
+
+/**
+ * @brief Mark topic as non-existent, unless metadata propagation configuration
+ * disallows it.
+ *
+ * @param err Propagate non-existent topic using this error code.
+ * If \p err is RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION it means the
+ * topic is invalid and no propagation delay will be used.
+ *
+ * @returns true if the topic was marked as non-existent, else false.
+ *
+ * @locks topic_wrlock() MUST be held.
+ */
+rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt,
+ rd_kafka_resp_err_t err) {
+ rd_ts_t remains_us;
+ rd_bool_t permanent = err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION;
+
+ if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) {
+ /* Dont update metadata while terminating. */
+ return rd_false;
+ }
+
+ rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR);
+
+ remains_us =
+ (rkt->rkt_ts_create +
+ (rkt->rkt_rk->rk_conf.metadata_propagation_max_ms * 1000)) -
+ rkt->rkt_ts_metadata;
+
+ if (!permanent && rkt->rkt_state == RD_KAFKA_TOPIC_S_UNKNOWN &&
+ remains_us > 0) {
+ /* Still allowing topic metadata to propagate. */
+ rd_kafka_dbg(
+ rkt->rkt_rk, TOPIC | RD_KAFKA_DBG_METADATA, "TOPICPROP",
+ "Topic %.*s does not exist, allowing %dms "
+ "for metadata propagation before marking topic "
+ "as non-existent",
+ RD_KAFKAP_STR_PR(rkt->rkt_topic), (int)(remains_us / 1000));
+ return rd_false;
+ }
+
+ rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_NOTEXISTS);
+
+ rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
+
+ /* Update number of partitions */
+ rd_kafka_topic_partition_cnt_update(rkt, 0);
+
+ /* Purge messages with forced partition */
+ rd_kafka_topic_assign_uas(rkt, err);
+
+ /* Propagate nonexistent topic info */
+ rd_kafka_topic_propagate_notexists(rkt, err);
+
+ return rd_true;
+}
+
+/**
+ * @brief Mark topic as errored, such as when topic authorization fails.
+ *
+ * @param err Propagate error using this error code.
+ *
+ * @returns true if the topic was marked as errored, else false.
+ *
+ * @locality any
+ * @locks topic_wrlock() MUST be held.
+ */
+rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt,
+ rd_kafka_resp_err_t err) {
+
+ if (unlikely(rd_kafka_terminating(rkt->rkt_rk))) {
+ /* Dont update metadata while terminating. */
+ return rd_false;
+ }
+
+ rd_assert(err != RD_KAFKA_RESP_ERR_NO_ERROR);
+
+ /* Same error, ignore. */
+ if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR && rkt->rkt_err == err)
+ return rd_true;
+
+ rd_kafka_dbg(rkt->rkt_rk, TOPIC, "TOPICERROR",
+ "Topic %s has permanent error: %s", rkt->rkt_topic->str,
+ rd_kafka_err2str(err));
+
+ rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_ERROR);
+
+ rkt->rkt_err = err;
+
+ /* Update number of partitions */
+ rd_kafka_topic_partition_cnt_update(rkt, 0);
+
+ /* Purge messages with forced partition */
+ rd_kafka_topic_assign_uas(rkt, err);
+
+ return rd_true;
+}
+
+
+
+/**
+ * @brief Update a topic from metadata.
+ *
+ * @param mdt Topic metadata.
+ * @param leader_epochs Array of per-partition leader epochs, or NULL.
+ * The array size is identical to the partition count in
+ * \p mdt.
+ * @param ts_age absolute age (timestamp) of metadata.
+ * @returns 1 if the number of partitions changed, 0 if not, and -1 if the
+ * topic is unknown.
+
+ *
+ * @locks_required rd_kafka_*lock() MUST be held.
+ */
+static int rd_kafka_topic_metadata_update(
+ rd_kafka_topic_t *rkt,
+ const struct rd_kafka_metadata_topic *mdt,
+ const rd_kafka_partition_leader_epoch_t *leader_epochs,
+ rd_ts_t ts_age) {
+ rd_kafka_t *rk = rkt->rkt_rk;
+ int upd = 0;
+ int j;
+ rd_kafka_broker_t **partbrokers;
+ int leader_cnt = 0;
+ int old_state;
+
+ if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA",
+ "Error in metadata reply for "
+ "topic %s (PartCnt %i): %s",
+ rkt->rkt_topic->str, mdt->partition_cnt,
+ rd_kafka_err2str(mdt->err));
+
+ if (unlikely(rd_kafka_terminating(rk))) {
+ /* Dont update metadata while terminating, do this
+ * after acquiring lock for proper synchronisation */
+ return -1;
+ }
+
+ /* Look up brokers before acquiring rkt lock to preserve lock order */
+ partbrokers = rd_malloc(mdt->partition_cnt * sizeof(*partbrokers));
+
+ for (j = 0; j < mdt->partition_cnt; j++) {
+ if (mdt->partitions[j].leader == -1) {
+ partbrokers[j] = NULL;
+ continue;
+ }
+
+ partbrokers[j] = rd_kafka_broker_find_by_nodeid(
+ rk, mdt->partitions[j].leader);
+ }
+
+
+ rd_kafka_topic_wrlock(rkt);
+
+ old_state = rkt->rkt_state;
+ rkt->rkt_ts_metadata = ts_age;
+
+ /* Set topic state.
+ * UNKNOWN_TOPIC_OR_PART may indicate that auto.create.topics failed */
+ if (mdt->err == RD_KAFKA_RESP_ERR_TOPIC_EXCEPTION /*invalid topic*/ ||
+ mdt->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+ rd_kafka_topic_set_notexists(rkt, mdt->err);
+ else if (mdt->partition_cnt > 0)
+ rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_EXISTS);
+ else if (mdt->err)
+ rd_kafka_topic_set_error(rkt, mdt->err);
+
+ /* Update number of partitions, but not if there are
+ * (possibly intermittent) errors (e.g., "Leader not available"). */
+ if (mdt->err == RD_KAFKA_RESP_ERR_NO_ERROR) {
+ upd += rd_kafka_topic_partition_cnt_update(rkt,
+ mdt->partition_cnt);
+
+ /* If the metadata times out for a topic (because all brokers
+ * are down) the state will transition to S_UNKNOWN.
+ * When updated metadata is eventually received there might
+ * not be any change to partition count or leader,
+ * but there may still be messages in the UA partition that
+ * needs to be assigned, so trigger an update for this case too.
+ * Issue #1985. */
+ if (old_state == RD_KAFKA_TOPIC_S_UNKNOWN)
+ upd++;
+ }
+
+ /* Update leader for each partition */
+ for (j = 0; j < mdt->partition_cnt; j++) {
+ int r;
+ rd_kafka_broker_t *leader;
+ int32_t leader_epoch =
+ leader_epochs ? leader_epochs[j].leader_epoch : -1;
+
+ rd_kafka_dbg(rk, TOPIC | RD_KAFKA_DBG_METADATA, "METADATA",
+ " Topic %s partition %i Leader %" PRId32
+ " Epoch %" PRId32,
+ rkt->rkt_topic->str, mdt->partitions[j].id,
+ mdt->partitions[j].leader, leader_epoch);
+
+ leader = partbrokers[j];
+ partbrokers[j] = NULL;
+
+ /* Update leader for partition */
+ r = rd_kafka_toppar_leader_update(rkt, mdt->partitions[j].id,
+ mdt->partitions[j].leader,
+ leader, leader_epoch);
+
+ upd += (r != 0 ? 1 : 0);
+
+ if (leader) {
+ if (r != -1)
+ leader_cnt++;
+ /* Drop reference to broker (from find()) */
+ rd_kafka_broker_destroy(leader);
+ }
+ }
+
+ /* If all partitions have leaders we can turn off fast leader query. */
+ if (mdt->partition_cnt > 0 && leader_cnt == mdt->partition_cnt)
+ rkt->rkt_flags &= ~RD_KAFKA_TOPIC_F_LEADER_UNAVAIL;
+
+ if (mdt->err != RD_KAFKA_RESP_ERR_NO_ERROR && rkt->rkt_partition_cnt) {
+ /* (Possibly intermittent) topic-wide error:
+ * remove leaders for partitions */
+
+ for (j = 0; j < rkt->rkt_partition_cnt; j++) {
+ rd_kafka_toppar_t *rktp;
+ if (!rkt->rkt_p[j])
+ continue;
+
+ rktp = rkt->rkt_p[j];
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_toppar_broker_delegate(rktp, NULL);
+ rd_kafka_toppar_unlock(rktp);
+ }
+ }
+
+ /* If there was an update to the partitions try to assign
+ * unassigned messages to new partitions, or fail them */
+ if (upd > 0)
+ rd_kafka_topic_assign_uas(
+ rkt,
+ mdt->err ? mdt->err : RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC);
+
+ rd_kafka_topic_wrunlock(rkt);
+
+ /* Loose broker references */
+ for (j = 0; j < mdt->partition_cnt; j++)
+ if (partbrokers[j])
+ rd_kafka_broker_destroy(partbrokers[j]);
+
+ rd_free(partbrokers);
+
+ return upd;
+}
+
+/**
+ * @brief Update topic by metadata, if topic is locally known.
+ * @sa rd_kafka_topic_metadata_update()
+ * @locks none
+ */
+int rd_kafka_topic_metadata_update2(
+ rd_kafka_broker_t *rkb,
+ const struct rd_kafka_metadata_topic *mdt,
+ const rd_kafka_partition_leader_epoch_t *leader_epochs) {
+ rd_kafka_topic_t *rkt;
+ int r;
+
+ rd_kafka_wrlock(rkb->rkb_rk);
+ if (!(rkt =
+ rd_kafka_topic_find(rkb->rkb_rk, mdt->topic, 0 /*!lock*/))) {
+ rd_kafka_wrunlock(rkb->rkb_rk);
+ return -1; /* Ignore topics that we dont have locally. */
+ }
+
+ r = rd_kafka_topic_metadata_update(rkt, mdt, leader_epochs, rd_clock());
+
+ rd_kafka_wrunlock(rkb->rkb_rk);
+
+ rd_kafka_topic_destroy0(rkt); /* from find() */
+
+ return r;
+}
+
+
+
+/**
+ * @returns a list of all partitions (rktp's) for a topic.
+ * @remark rd_kafka_topic_*lock() MUST be held.
+ */
+static rd_list_t *rd_kafka_topic_get_all_partitions(rd_kafka_topic_t *rkt) {
+ rd_list_t *list;
+ rd_kafka_toppar_t *rktp;
+ int i;
+
+ list = rd_list_new(rkt->rkt_partition_cnt +
+ rd_list_cnt(&rkt->rkt_desp) + 1 /*ua*/,
+ NULL);
+
+ for (i = 0; i < rkt->rkt_partition_cnt; i++)
+ rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_p[i]));
+
+ RD_LIST_FOREACH(rktp, &rkt->rkt_desp, i)
+ rd_list_add(list, rd_kafka_toppar_keep(rktp));
+
+ if (rkt->rkt_ua)
+ rd_list_add(list, rd_kafka_toppar_keep(rkt->rkt_ua));
+
+ return list;
+}
+
+
+
+/**
+ * Remove all partitions from a topic, including the ua.
+ * Must only be called during rd_kafka_t termination.
+ *
+ * Locality: main thread
+ */
+void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt) {
+ rd_kafka_toppar_t *rktp;
+ rd_list_t *partitions;
+ int i;
+
+ /* Purge messages for all partitions outside the topic_wrlock since
+ * a message can hold a reference to the topic_t and thus
+ * would trigger a recursive lock dead-lock. */
+ rd_kafka_topic_rdlock(rkt);
+ partitions = rd_kafka_topic_get_all_partitions(rkt);
+ rd_kafka_topic_rdunlock(rkt);
+
+ RD_LIST_FOREACH(rktp, partitions, i) {
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_msgq_purge(rkt->rkt_rk, &rktp->rktp_msgq);
+ rd_kafka_toppar_purge_and_disable_queues(rktp);
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp);
+ }
+ rd_list_destroy(partitions);
+
+ rd_kafka_topic_keep(rkt);
+ rd_kafka_topic_wrlock(rkt);
+
+ /* Setting the partition count to 0 moves all partitions to
+ * the desired list (rktp_desp). */
+ rd_kafka_topic_partition_cnt_update(rkt, 0);
+
+ /* Now clean out the desired partitions list.
+ * Use reverse traversal to avoid excessive memory shuffling
+ * in rd_list_remove() */
+ RD_LIST_FOREACH_REVERSE(rktp, &rkt->rkt_desp, i) {
+ /* Keep a reference while deleting from desired list */
+ rd_kafka_toppar_keep(rktp);
+
+ rd_kafka_toppar_lock(rktp);
+ rd_kafka_toppar_desired_del(rktp);
+ rd_kafka_toppar_unlock(rktp);
+
+ rd_kafka_toppar_destroy(rktp);
+ }
+
+ rd_kafka_assert(rkt->rkt_rk, rkt->rkt_partition_cnt == 0);
+
+ if (rkt->rkt_p)
+ rd_free(rkt->rkt_p);
+
+ rkt->rkt_p = NULL;
+ rkt->rkt_partition_cnt = 0;
+
+ if ((rktp = rkt->rkt_ua)) {
+ rkt->rkt_ua = NULL;
+ rd_kafka_toppar_destroy(rktp);
+ }
+
+ rd_kafka_topic_wrunlock(rkt);
+
+ rd_kafka_topic_destroy0(rkt);
+}
+
+
+
+/**
+ * @returns the broker state (as a human readable string) if a query
+ * for the partition leader is necessary, else NULL.
+ * @locality any
+ * @locks rd_kafka_toppar_lock MUST be held
+ */
+static const char *rd_kafka_toppar_needs_query(rd_kafka_t *rk,
+ rd_kafka_toppar_t *rktp) {
+ int broker_state;
+
+ if (!rktp->rktp_broker)
+ return "not delegated";
+
+ if (rktp->rktp_broker->rkb_source == RD_KAFKA_INTERNAL)
+ return "internal";
+
+ broker_state = rd_kafka_broker_get_state(rktp->rktp_broker);
+
+ if (broker_state >= RD_KAFKA_BROKER_STATE_UP)
+ return NULL;
+
+ if (!rk->rk_conf.sparse_connections)
+ return "down";
+
+ /* Partition assigned to broker but broker does not
+ * need a persistent connection, this typically means
+ * the partition is not being fetched or not being produced to,
+ * so there is no need to re-query the leader. */
+ if (broker_state == RD_KAFKA_BROKER_STATE_INIT)
+ return NULL;
+
+ /* This is most likely a persistent broker,
+ * which means the partition leader should probably
+ * be re-queried to see if it needs changing. */
+ return "down";
+}
+
+
+
+/**
+ * @brief Scan all topics and partitions for:
+ * - timed out messages in UA partitions.
+ * - topics that needs to be created on the broker.
+ * - topics who's metadata is too old.
+ * - partitions with unknown leaders that require leader query.
+ *
+ * @locality rdkafka main thread
+ */
+void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now) {
+ rd_kafka_topic_t *rkt;
+ rd_kafka_toppar_t *rktp;
+ rd_list_t query_topics;
+
+ rd_list_init(&query_topics, 0, rd_free);
+
+ rd_kafka_rdlock(rk);
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link) {
+ int p;
+ int query_this = 0;
+ rd_kafka_msgq_t timedout = RD_KAFKA_MSGQ_INITIALIZER(timedout);
+
+ rd_kafka_topic_wrlock(rkt);
+
+ /* Check if metadata information has timed out. */
+ if (rkt->rkt_state != RD_KAFKA_TOPIC_S_UNKNOWN &&
+ !rd_kafka_metadata_cache_topic_get(rk, rkt->rkt_topic->str,
+ 1 /*only valid*/)) {
+ rd_kafka_dbg(rk, TOPIC, "NOINFO",
+ "Topic %s metadata information timed out "
+ "(%" PRId64 "ms old)",
+ rkt->rkt_topic->str,
+ (rd_clock() - rkt->rkt_ts_metadata) /
+ 1000);
+ rd_kafka_topic_set_state(rkt, RD_KAFKA_TOPIC_S_UNKNOWN);
+
+ query_this = 1;
+ } else if (rkt->rkt_state == RD_KAFKA_TOPIC_S_UNKNOWN) {
+ rd_kafka_dbg(rk, TOPIC, "NOINFO",
+ "Topic %s metadata information unknown",
+ rkt->rkt_topic->str);
+ query_this = 1;
+ }
+
+ /* Just need a read-lock from here on. */
+ rd_kafka_topic_wrunlock(rkt);
+ rd_kafka_topic_rdlock(rkt);
+
+ if (rkt->rkt_partition_cnt == 0) {
+ /* If this topic is unknown by brokers try
+ * to create it by sending a topic-specific
+ * metadata request.
+ * This requires "auto.create.topics.enable=true"
+ * on the brokers. */
+ rd_kafka_dbg(rk, TOPIC, "NOINFO",
+ "Topic %s partition count is zero: "
+ "should refresh metadata",
+ rkt->rkt_topic->str);
+
+ query_this = 1;
+
+ } else if (!rd_list_empty(&rkt->rkt_desp) &&
+ rd_interval_immediate(&rkt->rkt_desp_refresh_intvl,
+ 10 * 1000 * 1000, 0) > 0) {
+ /* Query topic metadata if there are
+ * desired (non-existent) partitions.
+ * At most every 10 seconds. */
+ rd_kafka_dbg(rk, TOPIC, "DESIRED",
+ "Topic %s has %d desired partition(s): "
+ "should refresh metadata",
+ rkt->rkt_topic->str,
+ rd_list_cnt(&rkt->rkt_desp));
+
+ query_this = 1;
+ }
+
+ for (p = RD_KAFKA_PARTITION_UA; p < rkt->rkt_partition_cnt;
+ p++) {
+
+ if (!(rktp = rd_kafka_toppar_get(
+ rkt, p,
+ p == RD_KAFKA_PARTITION_UA ? rd_true
+ : rd_false)))
+ continue;
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Check that partition is delegated to a broker that
+ * is up, else add topic to query list. */
+ if (p != RD_KAFKA_PARTITION_UA) {
+ const char *leader_reason =
+ rd_kafka_toppar_needs_query(rk, rktp);
+
+ if (leader_reason) {
+ rd_kafka_dbg(rk, TOPIC, "QRYLEADER",
+ "Topic %s [%" PRId32
+ "]: "
+ "broker is %s: re-query",
+ rkt->rkt_topic->str,
+ rktp->rktp_partition,
+ leader_reason);
+ query_this = 1;
+ }
+ } else {
+ if (rk->rk_type == RD_KAFKA_PRODUCER) {
+ /* Scan UA partition for message
+ * timeouts.
+ * Proper partitions are scanned by
+ * their toppar broker thread. */
+ rd_kafka_msgq_age_scan(
+ rktp, &rktp->rktp_msgq, &timedout,
+ now, NULL);
+ }
+ }
+
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_toppar_destroy(rktp);
+ }
+
+ rd_kafka_topic_rdunlock(rkt);
+
+ /* Propagate delivery reports for timed out messages */
+ if (rd_kafka_msgq_len(&timedout) > 0) {
+ rd_kafka_dbg(
+ rk, MSG, "TIMEOUT", "%s: %d message(s) timed out",
+ rkt->rkt_topic->str, rd_kafka_msgq_len(&timedout));
+ rd_kafka_dr_msgq(rkt, &timedout,
+ RD_KAFKA_RESP_ERR__MSG_TIMED_OUT);
+ }
+
+ /* Need to re-query this topic's leader. */
+ if (query_this &&
+ !rd_list_find(&query_topics, rkt->rkt_topic->str,
+ (void *)strcmp))
+ rd_list_add(&query_topics,
+ rd_strdup(rkt->rkt_topic->str));
+ }
+ rd_kafka_rdunlock(rk);
+
+ if (!rd_list_empty(&query_topics))
+ rd_kafka_metadata_refresh_topics(
+ rk, NULL, &query_topics, rd_true /*force even if cached
+ * info exists*/
+ ,
+ rk->rk_conf.allow_auto_create_topics,
+ rd_false /*!cgrp_update*/, "refresh unavailable topics");
+ rd_list_destroy(&query_topics);
+}
+
+
+/**
+ * Locks: rd_kafka_topic_*lock() must be held.
+ */
+int rd_kafka_topic_partition_available(const rd_kafka_topic_t *app_rkt,
+ int32_t partition) {
+ int avail;
+ rd_kafka_toppar_t *rktp;
+ rd_kafka_broker_t *rkb;
+
+ /* This API must only be called from a partitioner and the
+ * partitioner is always passed a proper topic */
+ rd_assert(!rd_kafka_rkt_is_lw(app_rkt));
+
+ rktp = rd_kafka_toppar_get(app_rkt, partition, 0 /*no ua-on-miss*/);
+ if (unlikely(!rktp))
+ return 0;
+
+ rkb = rd_kafka_toppar_broker(rktp, 1 /*proper broker*/);
+ avail = rkb ? 1 : 0;
+ if (rkb)
+ rd_kafka_broker_destroy(rkb);
+ rd_kafka_toppar_destroy(rktp);
+ return avail;
+}
+
+
+void *rd_kafka_topic_opaque(const rd_kafka_topic_t *app_rkt) {
+ const rd_kafka_lwtopic_t *lrkt;
+
+ lrkt = rd_kafka_rkt_get_lw((rd_kafka_topic_t *)app_rkt);
+ if (unlikely(lrkt != NULL)) {
+ void *opaque;
+ rd_kafka_topic_t *rkt;
+
+ if (!(rkt = rd_kafka_topic_find(lrkt->lrkt_rk, lrkt->lrkt_topic,
+ 1 /*lock*/)))
+ return NULL;
+
+ opaque = rkt->rkt_conf.opaque;
+
+ rd_kafka_topic_destroy0(rkt); /* loose refcnt from find() */
+
+ return opaque;
+ }
+
+ return app_rkt->rkt_conf.opaque;
+}
+
+
+int rd_kafka_topic_info_cmp(const void *_a, const void *_b) {
+ const rd_kafka_topic_info_t *a = _a, *b = _b;
+ int r;
+
+ if ((r = strcmp(a->topic, b->topic)))
+ return r;
+
+ return RD_CMP(a->partition_cnt, b->partition_cnt);
+}
+
+
+/**
+ * @brief string compare two topics.
+ *
+ * @param _a topic string (type char *)
+ * @param _b rd_kafka_topic_info_t * pointer.
+ */
+int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b) {
+ const char *a = _a;
+ const rd_kafka_topic_info_t *b = _b;
+ return strcmp(a, b->topic);
+}
+
+
+/**
+ * Allocate new topic_info.
+ * \p topic is copied.
+ */
+rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic,
+ int partition_cnt) {
+ rd_kafka_topic_info_t *ti;
+ size_t tlen = strlen(topic) + 1;
+
+ /* Allocate space for the topic along with the struct */
+ ti = rd_malloc(sizeof(*ti) + tlen);
+ ti->topic = (char *)(ti + 1);
+ memcpy((char *)ti->topic, topic, tlen);
+ ti->partition_cnt = partition_cnt;
+
+ return ti;
+}
+
+/**
+ * Destroy/free topic_info
+ */
+void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti) {
+ rd_free(ti);
+}
+
+
+/**
+ * @brief Match \p topic to \p pattern.
+ *
+ * If pattern begins with "^" it is considered a regexp,
+ * otherwise a simple string comparison is performed.
+ *
+ * @returns 1 on match, else 0.
+ */
+int rd_kafka_topic_match(rd_kafka_t *rk,
+ const char *pattern,
+ const char *topic) {
+ char errstr[128];
+
+ if (*pattern == '^') {
+ int r = rd_regex_match(pattern, topic, errstr, sizeof(errstr));
+ if (unlikely(r == -1))
+ rd_kafka_dbg(rk, TOPIC, "TOPICREGEX",
+ "Topic \"%s\" regex \"%s\" "
+ "matching failed: %s",
+ topic, pattern, errstr);
+ return r == 1;
+ } else
+ return !strcmp(pattern, topic);
+}
+
+
+
+/**
+ * @brief Trigger broker metadata query for topic leader.
+ *
+ * @locks none
+ */
+void rd_kafka_topic_leader_query0(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ int do_rk_lock,
+ rd_bool_t force) {
+ rd_list_t topics;
+
+ rd_list_init(&topics, 1, rd_free);
+ rd_list_add(&topics, rd_strdup(rkt->rkt_topic->str));
+
+ rd_kafka_metadata_refresh_topics(
+ rk, NULL, &topics, force, rk->rk_conf.allow_auto_create_topics,
+ rd_false /*!cgrp_update*/, "leader query");
+
+ rd_list_destroy(&topics);
+}
+
+
+
+/**
+ * @brief Populate list \p topics with the topic names (strdupped char *) of
+ * all locally known or cached topics.
+ *
+ * @param cache_cntp is an optional pointer to an int that will be set to the
+ * number of entries added to \p topics from the
+ * metadata cache.
+ * @remark \p rk lock MUST NOT be held
+ */
+void rd_kafka_local_topics_to_list(rd_kafka_t *rk,
+ rd_list_t *topics,
+ int *cache_cntp) {
+ rd_kafka_topic_t *rkt;
+ int cache_cnt;
+
+ rd_kafka_rdlock(rk);
+ rd_list_grow(topics, rk->rk_topic_cnt);
+ TAILQ_FOREACH(rkt, &rk->rk_topics, rkt_link)
+ rd_list_add(topics, rd_strdup(rkt->rkt_topic->str));
+ cache_cnt = rd_kafka_metadata_cache_topics_to_list(rk, topics);
+ if (cache_cntp)
+ *cache_cntp = cache_cnt;
+ rd_kafka_rdunlock(rk);
+}
+
+
+/**
+ * @brief Unit test helper to set a topic's state to EXISTS
+ * with the given number of partitions.
+ */
+void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt,
+ int partition_cnt,
+ int32_t leader_id) {
+ struct rd_kafka_metadata_topic mdt = {.topic =
+ (char *)rkt->rkt_topic->str,
+ .partition_cnt = partition_cnt};
+ int i;
+
+ mdt.partitions = rd_alloca(sizeof(*mdt.partitions) * partition_cnt);
+
+ for (i = 0; i < partition_cnt; i++) {
+ memset(&mdt.partitions[i], 0, sizeof(mdt.partitions[i]));
+ mdt.partitions[i].id = i;
+ mdt.partitions[i].leader = leader_id;
+ }
+
+ rd_kafka_wrlock(rkt->rkt_rk);
+ rd_kafka_metadata_cache_topic_update(rkt->rkt_rk, &mdt, rd_true);
+ rd_kafka_topic_metadata_update(rkt, &mdt, NULL, rd_clock());
+ rd_kafka_wrunlock(rkt->rkt_rk);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h
new file mode 100644
index 000000000..cbed9308a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_topic.h
@@ -0,0 +1,311 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012,2013 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_TOPIC_H_
+#define _RDKAFKA_TOPIC_H_
+
+#include "rdlist.h"
+
+extern const char *rd_kafka_topic_state_names[];
+
+
+/**
+ * @struct Light-weight topic object which only contains the topic name.
+ *
+ * For use in outgoing APIs (like rd_kafka_message_t) when there is
+ * no proper topic object available.
+ *
+ * @remark lrkt_magic[4] MUST be the first field and be set to "LRKT".
+ */
+struct rd_kafka_lwtopic_s {
+ char lrkt_magic[4]; /**< "LRKT" */
+ rd_kafka_t *lrkt_rk; /**< Pointer to the client instance. */
+ rd_refcnt_t lrkt_refcnt; /**< Refcount */
+ char *lrkt_topic; /**< Points past this struct, allocated
+ * along with the struct. */
+};
+
+/** Casts a topic_t to a light-weight lwtopic_t */
+#define rd_kafka_rkt_lw(rkt) ((rd_kafka_lwtopic_t *)rkt)
+
+#define rd_kafka_rkt_lw_const(rkt) ((const rd_kafka_lwtopic_t *)rkt)
+
+/**
+ * @returns true if the topic object is a light-weight topic, else false.
+ */
+static RD_UNUSED RD_INLINE rd_bool_t
+rd_kafka_rkt_is_lw(const rd_kafka_topic_t *app_rkt) {
+ const rd_kafka_lwtopic_t *lrkt = rd_kafka_rkt_lw_const(app_rkt);
+ return !memcmp(lrkt->lrkt_magic, "LRKT", 4);
+}
+
+/** @returns the lwtopic_t if \p rkt is a light-weight topic, else NULL. */
+static RD_UNUSED RD_INLINE rd_kafka_lwtopic_t *
+rd_kafka_rkt_get_lw(rd_kafka_topic_t *rkt) {
+ if (rd_kafka_rkt_is_lw(rkt))
+ return rd_kafka_rkt_lw(rkt);
+ return NULL;
+}
+
+void rd_kafka_lwtopic_destroy(rd_kafka_lwtopic_t *lrkt);
+rd_kafka_lwtopic_t *rd_kafka_lwtopic_new(rd_kafka_t *rk, const char *topic);
+
+static RD_UNUSED RD_INLINE void
+rd_kafka_lwtopic_keep(rd_kafka_lwtopic_t *lrkt) {
+ rd_refcnt_add(&lrkt->lrkt_refcnt);
+}
+
+
+
+/**
+ * @struct Holds partition + transactional PID + base sequence msgid.
+ *
+ * Used in rkt_saved_partmsgids to restore transactional/idempotency state
+ * for a partition that is lost from metadata for some time and then returns.
+ */
+typedef struct rd_kafka_partition_msgid_s {
+ TAILQ_ENTRY(rd_kafka_partition_msgid_s) link;
+ int32_t partition;
+ rd_kafka_pid_t pid;
+ uint64_t msgid;
+ uint64_t epoch_base_msgid;
+ rd_ts_t ts;
+} rd_kafka_partition_msgid_t;
+
+
+/**
+ * @struct Aux struct that holds a partition id and a leader epoch.
+ * Used as temporary holding space for per-partition leader epochs
+ * while parsing MetadataResponse.
+ */
+typedef struct rd_kafka_partition_leader_epoch_s {
+ int32_t partition_id;
+ int32_t leader_epoch;
+} rd_kafka_partition_leader_epoch_t;
+
+
+/*
+ * @struct Internal representation of a topic.
+ *
+ * @remark rkt_magic[4] MUST be the first field and be set to "IRKT".
+ */
+struct rd_kafka_topic_s {
+ char rkt_magic[4]; /**< "IRKT" */
+
+ TAILQ_ENTRY(rd_kafka_topic_s) rkt_link;
+
+ rd_refcnt_t rkt_refcnt;
+
+ rwlock_t rkt_lock;
+ rd_kafkap_str_t *rkt_topic;
+
+ rd_kafka_toppar_t *rkt_ua; /**< Unassigned partition (-1) */
+ rd_kafka_toppar_t **rkt_p; /**< Partition array */
+ int32_t rkt_partition_cnt;
+
+ int32_t rkt_sticky_partition; /**< Current sticky partition.
+ * @locks rkt_lock */
+ rd_interval_t rkt_sticky_intvl; /**< Interval to assign new
+ * sticky partition. */
+
+ rd_list_t rkt_desp; /* Desired partitions
+ * that are not yet seen
+ * in the cluster. */
+ rd_interval_t rkt_desp_refresh_intvl; /**< Rate-limiter for
+ * desired partition
+ * metadata refresh. */
+
+ rd_ts_t rkt_ts_create; /**< Topic object creation time. */
+ rd_ts_t rkt_ts_metadata; /* Timestamp of last metadata
+ * update for this topic. */
+
+ rd_refcnt_t rkt_app_refcnt; /**< Number of active rkt's new()ed
+ * by application. */
+
+ enum { RD_KAFKA_TOPIC_S_UNKNOWN, /* No cluster information yet */
+ RD_KAFKA_TOPIC_S_EXISTS, /* Topic exists in cluster */
+ RD_KAFKA_TOPIC_S_NOTEXISTS, /* Topic is not known in cluster */
+ RD_KAFKA_TOPIC_S_ERROR, /* Topic exists but is in an errored
+ * state, such as auth failure. */
+ } rkt_state;
+
+ int rkt_flags;
+#define RD_KAFKA_TOPIC_F_LEADER_UNAVAIL \
+ 0x1 /* Leader lost/unavailable \
+ * for at least one partition. */
+
+ rd_kafka_resp_err_t rkt_err; /**< Permanent error. */
+
+ rd_kafka_t *rkt_rk;
+
+ rd_avg_t rkt_avg_batchsize; /**< Average batch size */
+ rd_avg_t rkt_avg_batchcnt; /**< Average batch message count */
+
+ rd_kafka_topic_conf_t rkt_conf;
+
+ /** Idempotent/Txn producer:
+ * The PID,Epoch,base Msgid state for removed partitions. */
+ TAILQ_HEAD(, rd_kafka_partition_msgid_s) rkt_saved_partmsgids;
+};
+
+#define rd_kafka_topic_rdlock(rkt) rwlock_rdlock(&(rkt)->rkt_lock)
+#define rd_kafka_topic_wrlock(rkt) rwlock_wrlock(&(rkt)->rkt_lock)
+#define rd_kafka_topic_rdunlock(rkt) rwlock_rdunlock(&(rkt)->rkt_lock)
+#define rd_kafka_topic_wrunlock(rkt) rwlock_wrunlock(&(rkt)->rkt_lock)
+
+
+
+/**
+ * @brief Increase refcount and return topic object.
+ */
+static RD_INLINE RD_UNUSED rd_kafka_topic_t *
+rd_kafka_topic_keep(rd_kafka_topic_t *rkt) {
+ rd_kafka_lwtopic_t *lrkt;
+ if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL))
+ rd_kafka_lwtopic_keep(lrkt);
+ else
+ rd_refcnt_add(&rkt->rkt_refcnt);
+ return rkt;
+}
+
+void rd_kafka_topic_destroy_final(rd_kafka_topic_t *rkt);
+
+rd_kafka_topic_t *rd_kafka_topic_proper(rd_kafka_topic_t *app_rkt);
+
+
+
+/**
+ * @brief Loose reference to topic object as increased by ..topic_keep().
+ */
+static RD_INLINE RD_UNUSED void rd_kafka_topic_destroy0(rd_kafka_topic_t *rkt) {
+ rd_kafka_lwtopic_t *lrkt;
+ if (unlikely((lrkt = rd_kafka_rkt_get_lw(rkt)) != NULL))
+ rd_kafka_lwtopic_destroy(lrkt);
+ else if (unlikely(rd_refcnt_sub(&rkt->rkt_refcnt) == 0))
+ rd_kafka_topic_destroy_final(rkt);
+}
+
+
+rd_kafka_topic_t *rd_kafka_topic_new0(rd_kafka_t *rk,
+ const char *topic,
+ rd_kafka_topic_conf_t *conf,
+ int *existing,
+ int do_lock);
+
+rd_kafka_topic_t *rd_kafka_topic_find_fl(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ const char *topic,
+ int do_lock);
+rd_kafka_topic_t *rd_kafka_topic_find0_fl(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ const rd_kafkap_str_t *topic);
+#define rd_kafka_topic_find(rk, topic, do_lock) \
+ rd_kafka_topic_find_fl(__FUNCTION__, __LINE__, rk, topic, do_lock)
+#define rd_kafka_topic_find0(rk, topic) \
+ rd_kafka_topic_find0_fl(__FUNCTION__, __LINE__, rk, topic)
+int rd_kafka_topic_cmp_rkt(const void *_a, const void *_b);
+
+void rd_kafka_topic_partitions_remove(rd_kafka_topic_t *rkt);
+
+rd_bool_t rd_kafka_topic_set_notexists(rd_kafka_topic_t *rkt,
+ rd_kafka_resp_err_t err);
+rd_bool_t rd_kafka_topic_set_error(rd_kafka_topic_t *rkt,
+ rd_kafka_resp_err_t err);
+
+/**
+ * @returns the topic's permanent error, if any.
+ *
+ * @locality any
+ * @locks_acquired rd_kafka_topic_rdlock(rkt)
+ */
+static RD_INLINE RD_UNUSED rd_kafka_resp_err_t
+rd_kafka_topic_get_error(rd_kafka_topic_t *rkt) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_topic_rdlock(rkt);
+ if (rkt->rkt_state == RD_KAFKA_TOPIC_S_ERROR)
+ err = rkt->rkt_err;
+ rd_kafka_topic_rdunlock(rkt);
+ return err;
+}
+
+int rd_kafka_topic_metadata_update2(
+ rd_kafka_broker_t *rkb,
+ const struct rd_kafka_metadata_topic *mdt,
+ const rd_kafka_partition_leader_epoch_t *leader_epochs);
+
+void rd_kafka_topic_scan_all(rd_kafka_t *rk, rd_ts_t now);
+
+
+typedef struct rd_kafka_topic_info_s {
+ const char *topic; /**< Allocated along with struct */
+ int partition_cnt;
+} rd_kafka_topic_info_t;
+
+int rd_kafka_topic_info_topic_cmp(const void *_a, const void *_b);
+int rd_kafka_topic_info_cmp(const void *_a, const void *_b);
+rd_kafka_topic_info_t *rd_kafka_topic_info_new(const char *topic,
+ int partition_cnt);
+void rd_kafka_topic_info_destroy(rd_kafka_topic_info_t *ti);
+
+int rd_kafka_topic_match(rd_kafka_t *rk,
+ const char *pattern,
+ const char *topic);
+
+int rd_kafka_toppar_broker_update(rd_kafka_toppar_t *rktp,
+ int32_t broker_id,
+ rd_kafka_broker_t *rkb,
+ const char *reason);
+
+int rd_kafka_toppar_delegate_to_leader(rd_kafka_toppar_t *rktp);
+
+rd_kafka_resp_err_t rd_kafka_topics_leader_query_sync(rd_kafka_t *rk,
+ int all_topics,
+ const rd_list_t *topics,
+ int timeout_ms);
+void rd_kafka_topic_leader_query0(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ int do_rk_lock,
+ rd_bool_t force);
+#define rd_kafka_topic_leader_query(rk, rkt) \
+ rd_kafka_topic_leader_query0(rk, rkt, 1 /*lock*/, \
+ rd_false /*dont force*/)
+
+#define rd_kafka_topic_fast_leader_query(rk) \
+ rd_kafka_metadata_fast_leader_query(rk)
+
+void rd_kafka_local_topics_to_list(rd_kafka_t *rk,
+ rd_list_t *topics,
+ int *cache_cntp);
+
+void rd_ut_kafka_topic_set_topic_exists(rd_kafka_topic_t *rkt,
+ int partition_cnt,
+ int32_t leader_id);
+
+#endif /* _RDKAFKA_TOPIC_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c
new file mode 100644
index 000000000..ae5895b29
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.c
@@ -0,0 +1,1295 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifdef _WIN32
+#pragma comment(lib, "ws2_32.lib")
+#endif
+
+#define __need_IOV_MAX
+
+#define _DARWIN_C_SOURCE /* MSG_DONTWAIT */
+
+#include "rdkafka_int.h"
+#include "rdaddr.h"
+#include "rdkafka_transport.h"
+#include "rdkafka_transport_int.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_interceptor.h"
+
+#include <errno.h>
+
+/* AIX doesn't have MSG_DONTWAIT */
+#ifndef MSG_DONTWAIT
+#define MSG_DONTWAIT MSG_NONBLOCK
+#endif
+
+#if WITH_SSL
+#include "rdkafka_ssl.h"
+#endif
+
+/**< Current thread's rd_kafka_transport_t instance.
+ * This pointer is set up when calling any OpenSSL APIs that might
+ * trigger SSL callbacks, and is used to retrieve the SSL object's
+ * corresponding rd_kafka_transport_t instance.
+ * There is an set/get_ex_data() API in OpenSSL, but it requires storing
+ * a unique index somewhere, which we can't do without having a singleton
+ * object, so instead we cut out the middle man and store the
+ * rd_kafka_transport_t pointer directly in the thread-local memory. */
+RD_TLS rd_kafka_transport_t *rd_kafka_curr_transport;
+
+
+static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout);
+
+
+/**
+ * Low-level socket close
+ */
+static void rd_kafka_transport_close0(rd_kafka_t *rk, rd_socket_t s) {
+ if (rk->rk_conf.closesocket_cb)
+ rk->rk_conf.closesocket_cb((int)s, rk->rk_conf.opaque);
+ else
+ rd_socket_close(s);
+}
+
+/**
+ * Close and destroy a transport handle
+ */
+void rd_kafka_transport_close(rd_kafka_transport_t *rktrans) {
+#if WITH_SSL
+ rd_kafka_curr_transport = rktrans;
+ if (rktrans->rktrans_ssl)
+ rd_kafka_transport_ssl_close(rktrans);
+#endif
+
+ rd_kafka_sasl_close(rktrans);
+
+ if (rktrans->rktrans_recv_buf)
+ rd_kafka_buf_destroy(rktrans->rktrans_recv_buf);
+
+#ifdef _WIN32
+ WSACloseEvent(rktrans->rktrans_wsaevent);
+#endif
+
+ if (rktrans->rktrans_s != -1)
+ rd_kafka_transport_close0(rktrans->rktrans_rkb->rkb_rk,
+ rktrans->rktrans_s);
+
+ rd_free(rktrans);
+}
+
+/**
+ * @brief shutdown(2) a transport's underlying socket.
+ *
+ * This will prohibit further sends and receives.
+ * rd_kafka_transport_close() must still be called to close the socket.
+ */
+void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans) {
+ shutdown(rktrans->rktrans_s,
+#ifdef _WIN32
+ SD_BOTH
+#else
+ SHUT_RDWR
+#endif
+ );
+}
+
+
+#ifndef _WIN32
+/**
+ * @brief sendmsg() abstraction, converting a list of segments to iovecs.
+ * @remark should only be called if the number of segments is > 1.
+ */
+static ssize_t rd_kafka_transport_socket_sendmsg(rd_kafka_transport_t *rktrans,
+ rd_slice_t *slice,
+ char *errstr,
+ size_t errstr_size) {
+ struct iovec iov[IOV_MAX];
+ struct msghdr msg = {.msg_iov = iov};
+ size_t iovlen;
+ ssize_t r;
+ size_t r2;
+
+ rd_slice_get_iov(slice, msg.msg_iov, &iovlen, IOV_MAX,
+ /* FIXME: Measure the effects of this */
+ rktrans->rktrans_sndbuf_size);
+ msg.msg_iovlen = (int)iovlen;
+
+#ifdef __sun
+ /* See recvmsg() comment. Setting it here to be safe. */
+ rd_socket_errno = EAGAIN;
+#endif
+
+ r = sendmsg(rktrans->rktrans_s, &msg,
+ MSG_DONTWAIT
+#ifdef MSG_NOSIGNAL
+ | MSG_NOSIGNAL
+#endif
+ );
+
+ if (r == -1) {
+ if (rd_socket_errno == EAGAIN)
+ return 0;
+ rd_snprintf(errstr, errstr_size, "%s", rd_strerror(errno));
+ return -1;
+ }
+
+ /* Update buffer read position */
+ r2 = rd_slice_read(slice, NULL, (size_t)r);
+ rd_assert((size_t)r == r2 &&
+ *"BUG: wrote more bytes than available in slice");
+
+ return r;
+}
+#endif
+
+
+/**
+ * @brief Plain send() abstraction
+ */
+static ssize_t rd_kafka_transport_socket_send0(rd_kafka_transport_t *rktrans,
+ rd_slice_t *slice,
+ char *errstr,
+ size_t errstr_size) {
+ ssize_t sum = 0;
+ const void *p;
+ size_t rlen;
+
+ while ((rlen = rd_slice_peeker(slice, &p))) {
+ ssize_t r;
+ size_t r2;
+
+ r = send(rktrans->rktrans_s, p,
+#ifdef _WIN32
+ (int)rlen, (int)0
+#else
+ rlen, 0
+#endif
+ );
+
+#ifdef _WIN32
+ if (unlikely(r == RD_SOCKET_ERROR)) {
+ if (sum > 0 || rd_socket_errno == WSAEWOULDBLOCK) {
+ rktrans->rktrans_blocked = rd_true;
+ return sum;
+ } else {
+ rd_snprintf(
+ errstr, errstr_size, "%s",
+ rd_socket_strerror(rd_socket_errno));
+ return -1;
+ }
+ }
+
+ rktrans->rktrans_blocked = rd_false;
+#else
+ if (unlikely(r <= 0)) {
+ if (r == 0 || rd_socket_errno == EAGAIN)
+ return 0;
+ rd_snprintf(errstr, errstr_size, "%s",
+ rd_socket_strerror(rd_socket_errno));
+ return -1;
+ }
+#endif
+
+ /* Update buffer read position */
+ r2 = rd_slice_read(slice, NULL, (size_t)r);
+ rd_assert((size_t)r == r2 &&
+ *"BUG: wrote more bytes than available in slice");
+
+
+ sum += r;
+
+ /* FIXME: remove this and try again immediately and let
+ * the next write() call fail instead? */
+ if ((size_t)r < rlen)
+ break;
+ }
+
+ return sum;
+}
+
+
+static ssize_t rd_kafka_transport_socket_send(rd_kafka_transport_t *rktrans,
+ rd_slice_t *slice,
+ char *errstr,
+ size_t errstr_size) {
+#ifndef _WIN32
+ /* FIXME: Use sendmsg() with iovecs if there's more than one segment
+ * remaining, otherwise (or if platform does not have sendmsg)
+ * use plain send(). */
+ return rd_kafka_transport_socket_sendmsg(rktrans, slice, errstr,
+ errstr_size);
+#endif
+ return rd_kafka_transport_socket_send0(rktrans, slice, errstr,
+ errstr_size);
+}
+
+
+
+#ifndef _WIN32
+/**
+ * @brief recvmsg() abstraction, converting a list of segments to iovecs.
+ * @remark should only be called if the number of segments is > 1.
+ */
+static ssize_t rd_kafka_transport_socket_recvmsg(rd_kafka_transport_t *rktrans,
+ rd_buf_t *rbuf,
+ char *errstr,
+ size_t errstr_size) {
+ ssize_t r;
+ struct iovec iov[IOV_MAX];
+ struct msghdr msg = {.msg_iov = iov};
+ size_t iovlen;
+
+ rd_buf_get_write_iov(rbuf, msg.msg_iov, &iovlen, IOV_MAX,
+ /* FIXME: Measure the effects of this */
+ rktrans->rktrans_rcvbuf_size);
+ msg.msg_iovlen = (int)iovlen;
+
+#ifdef __sun
+ /* SunOS doesn't seem to set errno when recvmsg() fails
+ * due to no data and MSG_DONTWAIT is set. */
+ rd_socket_errno = EAGAIN;
+#endif
+ r = recvmsg(rktrans->rktrans_s, &msg, MSG_DONTWAIT);
+ if (unlikely(r <= 0)) {
+ if (r == -1 && rd_socket_errno == EAGAIN)
+ return 0;
+ else if (r == 0 || (r == -1 && rd_socket_errno == ECONNRESET)) {
+ /* Receive 0 after POLLIN event means
+ * connection closed. */
+ rd_snprintf(errstr, errstr_size, "Disconnected");
+ return -1;
+ } else if (r == -1) {
+ rd_snprintf(errstr, errstr_size, "%s",
+ rd_strerror(errno));
+ return -1;
+ }
+ }
+
+ /* Update buffer write position */
+ rd_buf_write(rbuf, NULL, (size_t)r);
+
+ return r;
+}
+#endif
+
+
+/**
+ * @brief Plain recv()
+ */
+static ssize_t rd_kafka_transport_socket_recv0(rd_kafka_transport_t *rktrans,
+ rd_buf_t *rbuf,
+ char *errstr,
+ size_t errstr_size) {
+ ssize_t sum = 0;
+ void *p;
+ size_t len;
+
+ while ((len = rd_buf_get_writable(rbuf, &p))) {
+ ssize_t r;
+
+ r = recv(rktrans->rktrans_s, p,
+#ifdef _WIN32
+ (int)
+#endif
+ len,
+ 0);
+
+ if (unlikely(r == RD_SOCKET_ERROR)) {
+ if (rd_socket_errno == EAGAIN
+#ifdef _WIN32
+ || rd_socket_errno == WSAEWOULDBLOCK
+#endif
+ )
+ return sum;
+ else {
+ rd_snprintf(
+ errstr, errstr_size, "%s",
+ rd_socket_strerror(rd_socket_errno));
+ return -1;
+ }
+ } else if (unlikely(r == 0)) {
+ /* Receive 0 after POLLIN event means
+ * connection closed. */
+ rd_snprintf(errstr, errstr_size, "Disconnected");
+ return -1;
+ }
+
+ /* Update buffer write position */
+ rd_buf_write(rbuf, NULL, (size_t)r);
+
+ sum += r;
+
+ /* FIXME: remove this and try again immediately and let
+ * the next recv() call fail instead? */
+ if ((size_t)r < len)
+ break;
+ }
+ return sum;
+}
+
+
+static ssize_t rd_kafka_transport_socket_recv(rd_kafka_transport_t *rktrans,
+ rd_buf_t *buf,
+ char *errstr,
+ size_t errstr_size) {
+#ifndef _WIN32
+ return rd_kafka_transport_socket_recvmsg(rktrans, buf, errstr,
+ errstr_size);
+#endif
+ return rd_kafka_transport_socket_recv0(rktrans, buf, errstr,
+ errstr_size);
+}
+
+
+
+/**
+ * CONNECT state is failed (errstr!=NULL) or done (TCP is up, SSL is working..).
+ * From this state we either hand control back to the broker code,
+ * or if authentication is configured we ente the AUTH state.
+ */
+void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans,
+ char *errstr) {
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+
+ rd_kafka_curr_transport = rktrans;
+
+ rd_kafka_broker_connect_done(rkb, errstr);
+}
+
+
+
+ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans,
+ rd_slice_t *slice,
+ char *errstr,
+ size_t errstr_size) {
+ ssize_t r;
+#if WITH_SSL
+ if (rktrans->rktrans_ssl) {
+ rd_kafka_curr_transport = rktrans;
+ r = rd_kafka_transport_ssl_send(rktrans, slice, errstr,
+ errstr_size);
+ } else
+#endif
+ r = rd_kafka_transport_socket_send(rktrans, slice, errstr,
+ errstr_size);
+
+ return r;
+}
+
+
+ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans,
+ rd_buf_t *rbuf,
+ char *errstr,
+ size_t errstr_size) {
+ ssize_t r;
+
+#if WITH_SSL
+ if (rktrans->rktrans_ssl) {
+ rd_kafka_curr_transport = rktrans;
+ r = rd_kafka_transport_ssl_recv(rktrans, rbuf, errstr,
+ errstr_size);
+ } else
+#endif
+ r = rd_kafka_transport_socket_recv(rktrans, rbuf, errstr,
+ errstr_size);
+
+ return r;
+}
+
+
+
+/**
+ * @brief Notify transport layer of full request sent.
+ */
+void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf) {
+ rd_kafka_transport_t *rktrans = rkb->rkb_transport;
+
+ /* Call on_request_sent interceptors */
+ rd_kafka_interceptors_on_request_sent(
+ rkb->rkb_rk, (int)rktrans->rktrans_s, rkb->rkb_name,
+ rkb->rkb_nodeid, rkbuf->rkbuf_reqhdr.ApiKey,
+ rkbuf->rkbuf_reqhdr.ApiVersion, rkbuf->rkbuf_corrid,
+ rd_slice_size(&rkbuf->rkbuf_reader));
+}
+
+
+
+/**
+ * Length framed receive handling.
+ * Currently only supports a the following framing:
+ * [int32_t:big_endian_length_of_payload][payload]
+ *
+ * To be used on POLLIN event, will return:
+ * -1: on fatal error (errstr will be updated, *rkbufp remains unset)
+ * 0: still waiting for data (*rkbufp remains unset)
+ * 1: data complete, (buffer returned in *rkbufp)
+ */
+int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans,
+ rd_kafka_buf_t **rkbufp,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_buf_t *rkbuf = rktrans->rktrans_recv_buf;
+ ssize_t r;
+ const int log_decode_errors = LOG_ERR;
+
+ /* States:
+ * !rktrans_recv_buf: initial state; set up buf to receive header.
+ * rkbuf_totlen == 0: awaiting header
+ * rkbuf_totlen > 0: awaiting payload
+ */
+
+ if (!rkbuf) {
+ rkbuf = rd_kafka_buf_new(1, 4 /*length field's length*/);
+ /* Set up buffer reader for the length field */
+ rd_buf_write_ensure(&rkbuf->rkbuf_buf, 4, 4);
+ rktrans->rktrans_recv_buf = rkbuf;
+ }
+
+
+ r = rd_kafka_transport_recv(rktrans, &rkbuf->rkbuf_buf, errstr,
+ errstr_size);
+ if (r == 0)
+ return 0;
+ else if (r == -1)
+ return -1;
+
+ if (rkbuf->rkbuf_totlen == 0) {
+ /* Frame length not known yet. */
+ int32_t frame_len;
+
+ if (rd_buf_write_pos(&rkbuf->rkbuf_buf) < sizeof(frame_len)) {
+ /* Wait for entire frame header. */
+ return 0;
+ }
+
+ /* Initialize reader */
+ rd_slice_init(&rkbuf->rkbuf_reader, &rkbuf->rkbuf_buf, 0, 4);
+
+ /* Reader header: payload length */
+ rd_kafka_buf_read_i32(rkbuf, &frame_len);
+
+ if (frame_len < 0 ||
+ frame_len > rktrans->rktrans_rkb->rkb_rk->rk_conf
+ .recv_max_msg_size) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid frame size %" PRId32, frame_len);
+ return -1;
+ }
+
+ rkbuf->rkbuf_totlen = 4 + frame_len;
+ if (frame_len == 0) {
+ /* Payload is empty, we're done. */
+ rktrans->rktrans_recv_buf = NULL;
+ *rkbufp = rkbuf;
+ return 1;
+ }
+
+ /* Allocate memory to hold entire frame payload in contigious
+ * memory. */
+ rd_buf_write_ensure_contig(&rkbuf->rkbuf_buf, frame_len);
+
+ /* Try reading directly, there is probably more data available*/
+ return rd_kafka_transport_framed_recv(rktrans, rkbufp, errstr,
+ errstr_size);
+ }
+
+ if (rd_buf_write_pos(&rkbuf->rkbuf_buf) == rkbuf->rkbuf_totlen) {
+ /* Payload is complete. */
+ rktrans->rktrans_recv_buf = NULL;
+ *rkbufp = rkbuf;
+ return 1;
+ }
+
+ /* Wait for more data */
+ return 0;
+
+err_parse:
+ rd_snprintf(errstr, errstr_size, "Frame header parsing failed: %s",
+ rd_kafka_err2str(rkbuf->rkbuf_err));
+ return -1;
+}
+
+
+/**
+ * @brief Final socket setup after a connection has been established
+ */
+void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans) {
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+ unsigned int slen;
+
+ /* Set socket send & receive buffer sizes if configuerd */
+ if (rkb->rkb_rk->rk_conf.socket_sndbuf_size != 0) {
+ if (setsockopt(
+ rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
+ (void *)&rkb->rkb_rk->rk_conf.socket_sndbuf_size,
+ sizeof(rkb->rkb_rk->rk_conf.socket_sndbuf_size)) ==
+ RD_SOCKET_ERROR)
+ rd_rkb_log(rkb, LOG_WARNING, "SNDBUF",
+ "Failed to set socket send "
+ "buffer size to %i: %s",
+ rkb->rkb_rk->rk_conf.socket_sndbuf_size,
+ rd_socket_strerror(rd_socket_errno));
+ }
+
+ if (rkb->rkb_rk->rk_conf.socket_rcvbuf_size != 0) {
+ if (setsockopt(
+ rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
+ (void *)&rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
+ sizeof(rkb->rkb_rk->rk_conf.socket_rcvbuf_size)) ==
+ RD_SOCKET_ERROR)
+ rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
+ "Failed to set socket receive "
+ "buffer size to %i: %s",
+ rkb->rkb_rk->rk_conf.socket_rcvbuf_size,
+ rd_socket_strerror(rd_socket_errno));
+ }
+
+ /* Get send and receive buffer sizes to allow limiting
+ * the total number of bytes passed with iovecs to sendmsg()
+ * and recvmsg(). */
+ slen = sizeof(rktrans->rktrans_rcvbuf_size);
+ if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_RCVBUF,
+ (void *)&rktrans->rktrans_rcvbuf_size,
+ &slen) == RD_SOCKET_ERROR) {
+ rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
+ "Failed to get socket receive "
+ "buffer size: %s: assuming 1MB",
+ rd_socket_strerror(rd_socket_errno));
+ rktrans->rktrans_rcvbuf_size = 1024 * 1024;
+ } else if (rktrans->rktrans_rcvbuf_size < 1024 * 64)
+ rktrans->rktrans_rcvbuf_size =
+ 1024 * 64; /* Use at least 64KB */
+
+ slen = sizeof(rktrans->rktrans_sndbuf_size);
+ if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_SNDBUF,
+ (void *)&rktrans->rktrans_sndbuf_size,
+ &slen) == RD_SOCKET_ERROR) {
+ rd_rkb_log(rkb, LOG_WARNING, "RCVBUF",
+ "Failed to get socket send "
+ "buffer size: %s: assuming 1MB",
+ rd_socket_strerror(rd_socket_errno));
+ rktrans->rktrans_sndbuf_size = 1024 * 1024;
+ } else if (rktrans->rktrans_sndbuf_size < 1024 * 64)
+ rktrans->rktrans_sndbuf_size =
+ 1024 * 64; /* Use at least 64KB */
+
+
+#ifdef TCP_NODELAY
+ if (rkb->rkb_rk->rk_conf.socket_nagle_disable) {
+ int one = 1;
+ if (setsockopt(rktrans->rktrans_s, IPPROTO_TCP, TCP_NODELAY,
+ (void *)&one, sizeof(one)) == RD_SOCKET_ERROR)
+ rd_rkb_log(rkb, LOG_WARNING, "NAGLE",
+ "Failed to disable Nagle (TCP_NODELAY) "
+ "on socket: %s",
+ rd_socket_strerror(rd_socket_errno));
+ }
+#endif
+}
+
+
+/**
+ * TCP connection established.
+ * Set up socket options, SSL, etc.
+ *
+ * Locality: broker thread
+ */
+static void rd_kafka_transport_connected(rd_kafka_transport_t *rktrans) {
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+
+ rd_rkb_dbg(
+ rkb, BROKER, "CONNECT", "Connected to %s",
+ rd_sockaddr2str(rkb->rkb_addr_last,
+ RD_SOCKADDR2STR_F_PORT | RD_SOCKADDR2STR_F_FAMILY));
+
+ rd_kafka_transport_post_connect_setup(rktrans);
+
+#if WITH_SSL
+ if (rkb->rkb_proto == RD_KAFKA_PROTO_SSL ||
+ rkb->rkb_proto == RD_KAFKA_PROTO_SASL_SSL) {
+ char errstr[512];
+
+ rd_kafka_broker_lock(rkb);
+ rd_kafka_broker_set_state(rkb,
+ RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE);
+ rd_kafka_broker_unlock(rkb);
+
+ /* Set up SSL connection.
+ * This is also an asynchronous operation so dont
+ * propagate to broker_connect_done() just yet. */
+ if (rd_kafka_transport_ssl_connect(rkb, rktrans, errstr,
+ sizeof(errstr)) == -1) {
+ rd_kafka_transport_connect_done(rktrans, errstr);
+ return;
+ }
+ return;
+ }
+#endif
+
+ /* Propagate connect success */
+ rd_kafka_transport_connect_done(rktrans, NULL);
+}
+
+
+
+/**
+ * @brief the kernel SO_ERROR in \p errp for the given transport.
+ * @returns 0 if getsockopt() was succesful (and \p and errp can be trusted),
+ * else -1 in which case \p errp 's value is undefined.
+ */
+static int rd_kafka_transport_get_socket_error(rd_kafka_transport_t *rktrans,
+ int *errp) {
+ socklen_t intlen = sizeof(*errp);
+
+ if (getsockopt(rktrans->rktrans_s, SOL_SOCKET, SO_ERROR, (void *)errp,
+ &intlen) == -1) {
+ rd_rkb_dbg(rktrans->rktrans_rkb, BROKER, "SO_ERROR",
+ "Failed to get socket error: %s",
+ rd_socket_strerror(rd_socket_errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+
+/**
+ * IO event handler.
+ *
+ * @param socket_errstr Is an optional (else NULL) error string from the
+ * socket layer.
+ *
+ * Locality: broker thread
+ */
+static void rd_kafka_transport_io_event(rd_kafka_transport_t *rktrans,
+ int events,
+ const char *socket_errstr) {
+ char errstr[512];
+ int r;
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+
+ switch (rkb->rkb_state) {
+ case RD_KAFKA_BROKER_STATE_CONNECT:
+ /* Asynchronous connect finished, read status. */
+ if (!(events & (POLLOUT | POLLERR | POLLHUP)))
+ return;
+
+ if (socket_errstr)
+ rd_kafka_broker_fail(
+ rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
+ "Connect to %s failed: %s",
+ rd_sockaddr2str(rkb->rkb_addr_last,
+ RD_SOCKADDR2STR_F_PORT |
+ RD_SOCKADDR2STR_F_FAMILY),
+ socket_errstr);
+ else if (rd_kafka_transport_get_socket_error(rktrans, &r) ==
+ -1) {
+ rd_kafka_broker_fail(
+ rkb, LOG_ERR, RD_KAFKA_RESP_ERR__TRANSPORT,
+ "Connect to %s failed: "
+ "unable to get status from "
+ "socket %d: %s",
+ rd_sockaddr2str(rkb->rkb_addr_last,
+ RD_SOCKADDR2STR_F_PORT |
+ RD_SOCKADDR2STR_F_FAMILY),
+ rktrans->rktrans_s, rd_strerror(rd_socket_errno));
+ } else if (r != 0) {
+ /* Connect failed */
+ rd_snprintf(
+ errstr, sizeof(errstr), "Connect to %s failed: %s",
+ rd_sockaddr2str(rkb->rkb_addr_last,
+ RD_SOCKADDR2STR_F_PORT |
+ RD_SOCKADDR2STR_F_FAMILY),
+ rd_strerror(r));
+
+ rd_kafka_transport_connect_done(rktrans, errstr);
+ } else {
+ /* Connect succeeded */
+ rd_kafka_transport_connected(rktrans);
+ }
+ break;
+
+ case RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE:
+#if WITH_SSL
+ rd_assert(rktrans->rktrans_ssl);
+
+ /* Currently setting up SSL connection:
+ * perform handshake. */
+ r = rd_kafka_transport_ssl_handshake(rktrans);
+
+ if (r == 0 /* handshake still in progress */ &&
+ (events & POLLHUP)) {
+ rd_kafka_broker_conn_closed(
+ rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected");
+ return;
+ }
+
+#else
+ RD_NOTREACHED();
+#endif
+ break;
+
+ case RD_KAFKA_BROKER_STATE_AUTH_LEGACY:
+ /* SASL authentication.
+ * Prior to broker version v1.0.0 this is performed
+ * directly on the socket without Kafka framing. */
+ if (rd_kafka_sasl_io_event(rktrans, events, errstr,
+ sizeof(errstr)) == -1) {
+ rd_kafka_broker_fail(
+ rkb, LOG_ERR, RD_KAFKA_RESP_ERR__AUTHENTICATION,
+ "SASL authentication failure: %s", errstr);
+ return;
+ }
+
+ if (events & POLLHUP) {
+ rd_kafka_broker_fail(rkb, LOG_ERR,
+ RD_KAFKA_RESP_ERR__AUTHENTICATION,
+ "Disconnected");
+
+ return;
+ }
+
+ break;
+
+ case RD_KAFKA_BROKER_STATE_APIVERSION_QUERY:
+ case RD_KAFKA_BROKER_STATE_AUTH_HANDSHAKE:
+ case RD_KAFKA_BROKER_STATE_AUTH_REQ:
+ case RD_KAFKA_BROKER_STATE_UP:
+ case RD_KAFKA_BROKER_STATE_UPDATE:
+
+ if (events & POLLIN) {
+ while (rkb->rkb_state >= RD_KAFKA_BROKER_STATE_UP &&
+ rd_kafka_recv(rkb) > 0)
+ ;
+
+ /* If connection went down: bail out early */
+ if (rkb->rkb_state == RD_KAFKA_BROKER_STATE_DOWN)
+ return;
+ }
+
+ if (events & POLLHUP) {
+ rd_kafka_broker_conn_closed(
+ rkb, RD_KAFKA_RESP_ERR__TRANSPORT, "Disconnected");
+ return;
+ }
+
+ if (events & POLLOUT) {
+ while (rd_kafka_send(rkb) > 0)
+ ;
+ }
+ break;
+
+ case RD_KAFKA_BROKER_STATE_INIT:
+ case RD_KAFKA_BROKER_STATE_DOWN:
+ case RD_KAFKA_BROKER_STATE_TRY_CONNECT:
+ rd_kafka_assert(rkb->rkb_rk, !*"bad state");
+ }
+}
+
+
+
+#ifdef _WIN32
+/**
+ * @brief Convert WSA FD_.. events to POLL.. events.
+ */
+static RD_INLINE int rd_kafka_transport_wsa2events(long wevents) {
+ int events = 0;
+
+ if (unlikely(wevents == 0))
+ return 0;
+
+ if (wevents & FD_READ)
+ events |= POLLIN;
+ if (wevents & (FD_WRITE | FD_CONNECT))
+ events |= POLLOUT;
+ if (wevents & FD_CLOSE)
+ events |= POLLHUP;
+
+ rd_dassert(events != 0);
+
+ return events;
+}
+
+/**
+ * @brief Convert POLL.. events to WSA FD_.. events.
+ */
+static RD_INLINE int rd_kafka_transport_events2wsa(int events,
+ rd_bool_t is_connecting) {
+ long wevents = FD_CLOSE;
+
+ if (unlikely(is_connecting))
+ return wevents | FD_CONNECT;
+
+ if (events & POLLIN)
+ wevents |= FD_READ;
+ if (events & POLLOUT)
+ wevents |= FD_WRITE;
+
+ return wevents;
+}
+
+
+/**
+ * @returns the WinSocket events (as POLL.. events) for the broker socket.
+ */
+static int rd_kafka_transport_get_wsa_events(rd_kafka_transport_t *rktrans) {
+ const int try_bits[4 * 2] = {FD_READ_BIT, POLLIN, FD_WRITE_BIT,
+ POLLOUT, FD_CONNECT_BIT, POLLOUT,
+ FD_CLOSE_BIT, POLLHUP};
+ int r, i;
+ WSANETWORKEVENTS netevents;
+ int events = 0;
+ const char *socket_errstr = NULL;
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+
+ /* Get Socket event */
+ r = WSAEnumNetworkEvents(rktrans->rktrans_s, rktrans->rktrans_wsaevent,
+ &netevents);
+ if (unlikely(r == SOCKET_ERROR)) {
+ rd_rkb_log(rkb, LOG_ERR, "WSAWAIT",
+ "WSAEnumNetworkEvents() failed: %s",
+ rd_socket_strerror(rd_socket_errno));
+ socket_errstr = rd_socket_strerror(rd_socket_errno);
+ return POLLHUP | POLLERR;
+ }
+
+ /* Get fired events and errors for each event type */
+ for (i = 0; i < RD_ARRAYSIZE(try_bits); i += 2) {
+ const int bit = try_bits[i];
+ const int event = try_bits[i + 1];
+
+ if (!(netevents.lNetworkEvents & (1 << bit)))
+ continue;
+
+ if (unlikely(netevents.iErrorCode[bit])) {
+ socket_errstr =
+ rd_socket_strerror(netevents.iErrorCode[bit]);
+ events |= POLLHUP;
+ } else {
+ events |= event;
+
+ if (bit == FD_WRITE_BIT) {
+ /* Writing no longer blocked */
+ rktrans->rktrans_blocked = rd_false;
+ }
+ }
+ }
+
+ return events;
+}
+
+
+/**
+ * @brief Win32: Poll transport and \p rkq cond events.
+ *
+ * @returns the transport socket POLL.. event bits.
+ */
+static int rd_kafka_transport_io_serve_win32(rd_kafka_transport_t *rktrans,
+ rd_kafka_q_t *rkq,
+ int timeout_ms) {
+ const DWORD wsaevent_cnt = 3;
+ WSAEVENT wsaevents[3] = {
+ rkq->rkq_cond.mEvents[0], /* rkq: cnd_signal */
+ rkq->rkq_cond.mEvents[1], /* rkq: cnd_broadcast */
+ rktrans->rktrans_wsaevent, /* socket */
+ };
+ DWORD r;
+ int events = 0;
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+ rd_bool_t set_pollout = rd_false;
+ rd_bool_t cnd_is_waiting = rd_false;
+
+ /* WSA only sets FD_WRITE (e.g., POLLOUT) when the socket was
+ * previously blocked, unlike BSD sockets that set POLLOUT as long as
+ * the socket isn't blocked. So we need to imitate the BSD behaviour
+ * here and cut the timeout short if a write is wanted and the socket
+ * is not currently blocked. */
+ if (rktrans->rktrans_rkb->rkb_state != RD_KAFKA_BROKER_STATE_CONNECT &&
+ !rktrans->rktrans_blocked &&
+ (rktrans->rktrans_pfd[0].events & POLLOUT)) {
+ timeout_ms = 0;
+ set_pollout = rd_true;
+ } else {
+ /* Check if the queue already has ops enqueued in which case we
+ * cut the timeout short. Else add this thread as waiting on the
+ * queue's condvar so that cnd_signal() (et.al.) will perform
+ * SetEvent() and thus wake up this thread in case a new op is
+ * added to the queue. */
+ mtx_lock(&rkq->rkq_lock);
+ if (rkq->rkq_qlen > 0) {
+ timeout_ms = 0;
+ } else {
+ cnd_is_waiting = rd_true;
+ cnd_wait_enter(&rkq->rkq_cond);
+ }
+ mtx_unlock(&rkq->rkq_lock);
+ }
+
+ /* Wait for IO and queue events */
+ r = WSAWaitForMultipleEvents(wsaevent_cnt, wsaevents, FALSE, timeout_ms,
+ FALSE);
+
+ if (cnd_is_waiting) {
+ mtx_lock(&rkq->rkq_lock);
+ cnd_wait_exit(&rkq->rkq_cond);
+ mtx_unlock(&rkq->rkq_lock);
+ }
+
+ if (unlikely(r == WSA_WAIT_FAILED)) {
+ rd_rkb_log(rkb, LOG_CRIT, "WSAWAIT",
+ "WSAWaitForMultipleEvents failed: %s",
+ rd_socket_strerror(rd_socket_errno));
+ return POLLERR;
+ } else if (r != WSA_WAIT_TIMEOUT) {
+ r -= WSA_WAIT_EVENT_0;
+
+ /* Reset the cond events if any of them were triggered */
+ if (r < 2) {
+ ResetEvent(rkq->rkq_cond.mEvents[0]);
+ ResetEvent(rkq->rkq_cond.mEvents[1]);
+ }
+
+ /* Get the socket events. */
+ events = rd_kafka_transport_get_wsa_events(rktrans);
+ }
+
+ /* As explained above we need to set the POLLOUT flag
+ * in case it is wanted but not triggered by Winsocket so that
+ * io_event() knows it can attempt to send more data. */
+ if (likely(set_pollout && !(events & (POLLHUP | POLLERR | POLLOUT))))
+ events |= POLLOUT;
+
+ return events;
+}
+#endif
+
+
+/**
+ * @brief Poll and serve IOs
+ *
+ * @returns 0 if \p rkq may need additional blocking/timeout polling, else 1.
+ *
+ * @locality broker thread
+ */
+int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans,
+ rd_kafka_q_t *rkq,
+ int timeout_ms) {
+ rd_kafka_broker_t *rkb = rktrans->rktrans_rkb;
+ int events;
+
+ rd_kafka_curr_transport = rktrans;
+
+ if (
+#ifndef _WIN32
+ /* BSD sockets use POLLOUT to indicate success to connect.
+ * Windows has its own flag for this (FD_CONNECT). */
+ rkb->rkb_state == RD_KAFKA_BROKER_STATE_CONNECT ||
+#endif
+ (rkb->rkb_state > RD_KAFKA_BROKER_STATE_SSL_HANDSHAKE &&
+ rd_kafka_bufq_cnt(&rkb->rkb_waitresps) < rkb->rkb_max_inflight &&
+ rd_kafka_bufq_cnt(&rkb->rkb_outbufs) > 0))
+ rd_kafka_transport_poll_set(rkb->rkb_transport, POLLOUT);
+
+#ifdef _WIN32
+ /* BSD sockets use POLLIN and a following recv() returning 0 to
+ * to indicate connection close.
+ * Windows has its own flag for this (FD_CLOSE). */
+ if (rd_kafka_bufq_cnt(&rkb->rkb_waitresps) > 0)
+#endif
+ rd_kafka_transport_poll_set(rkb->rkb_transport, POLLIN);
+
+ /* On Windows we can wait for both IO and condvars (rkq)
+ * simultaneously.
+ *
+ * On *nix/BSD sockets we use a local pipe (pfd[1]) to wake
+ * up the rkq. */
+#ifdef _WIN32
+ events = rd_kafka_transport_io_serve_win32(rktrans, rkq, timeout_ms);
+
+#else
+ if (rd_kafka_transport_poll(rktrans, timeout_ms) < 1)
+ return 0; /* No events, caller can block on \p rkq poll */
+
+ /* Broker socket events */
+ events = rktrans->rktrans_pfd[0].revents;
+#endif
+
+ if (events) {
+ rd_kafka_transport_poll_clear(rktrans, POLLOUT | POLLIN);
+
+ rd_kafka_transport_io_event(rktrans, events, NULL);
+ }
+
+ return 1;
+}
+
+
+/**
+ * @brief Create a new transport object using existing socket \p s.
+ */
+rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb,
+ rd_socket_t s,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_transport_t *rktrans;
+ int on = 1;
+ int r;
+
+#ifdef SO_NOSIGPIPE
+ /* Disable SIGPIPE signalling for this socket on OSX */
+ if (setsockopt(s, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on)) == -1)
+ rd_rkb_dbg(rkb, BROKER, "SOCKET",
+ "Failed to set SO_NOSIGPIPE: %s",
+ rd_socket_strerror(rd_socket_errno));
+#endif
+
+#ifdef SO_KEEPALIVE
+ /* Enable TCP keep-alives, if configured. */
+ if (rkb->rkb_rk->rk_conf.socket_keepalive) {
+ if (setsockopt(s, SOL_SOCKET, SO_KEEPALIVE, (void *)&on,
+ sizeof(on)) == RD_SOCKET_ERROR)
+ rd_rkb_dbg(rkb, BROKER, "SOCKET",
+ "Failed to set SO_KEEPALIVE: %s",
+ rd_socket_strerror(rd_socket_errno));
+ }
+#endif
+
+ /* Set the socket to non-blocking */
+ if ((r = rd_fd_set_nonblocking(s))) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to set socket non-blocking: %s",
+ rd_socket_strerror(r));
+ return NULL;
+ }
+
+
+ rktrans = rd_calloc(1, sizeof(*rktrans));
+ rktrans->rktrans_rkb = rkb;
+ rktrans->rktrans_s = s;
+
+#ifdef _WIN32
+ rktrans->rktrans_wsaevent = WSACreateEvent();
+ rd_assert(rktrans->rktrans_wsaevent != NULL);
+#endif
+
+ return rktrans;
+}
+
+
+/**
+ * Initiate asynchronous connection attempt.
+ *
+ * Locality: broker thread
+ */
+rd_kafka_transport_t *rd_kafka_transport_connect(rd_kafka_broker_t *rkb,
+ const rd_sockaddr_inx_t *sinx,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_transport_t *rktrans;
+ int s = -1;
+ int r;
+
+ rkb->rkb_addr_last = sinx;
+
+ s = rkb->rkb_rk->rk_conf.socket_cb(sinx->in.sin_family, SOCK_STREAM,
+ IPPROTO_TCP,
+ rkb->rkb_rk->rk_conf.opaque);
+ if (s == -1) {
+ rd_snprintf(errstr, errstr_size, "Failed to create socket: %s",
+ rd_socket_strerror(rd_socket_errno));
+ return NULL;
+ }
+
+ rktrans = rd_kafka_transport_new(rkb, s, errstr, errstr_size);
+ if (!rktrans) {
+ rd_kafka_transport_close0(rkb->rkb_rk, s);
+ return NULL;
+ }
+
+ rd_rkb_dbg(rkb, BROKER, "CONNECT",
+ "Connecting to %s (%s) "
+ "with socket %i",
+ rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_FAMILY |
+ RD_SOCKADDR2STR_F_PORT),
+ rd_kafka_secproto_names[rkb->rkb_proto], s);
+
+ /* Connect to broker */
+ if (rkb->rkb_rk->rk_conf.connect_cb) {
+ rd_kafka_broker_lock(rkb); /* for rkb_nodename */
+ r = rkb->rkb_rk->rk_conf.connect_cb(
+ s, (struct sockaddr *)sinx, RD_SOCKADDR_INX_LEN(sinx),
+ rkb->rkb_nodename, rkb->rkb_rk->rk_conf.opaque);
+ rd_kafka_broker_unlock(rkb);
+ } else {
+ if (connect(s, (struct sockaddr *)sinx,
+ RD_SOCKADDR_INX_LEN(sinx)) == RD_SOCKET_ERROR &&
+ (rd_socket_errno != EINPROGRESS
+#ifdef _WIN32
+ && rd_socket_errno != WSAEWOULDBLOCK
+#endif
+ ))
+ r = rd_socket_errno;
+ else
+ r = 0;
+ }
+
+ if (r != 0) {
+ rd_rkb_dbg(rkb, BROKER, "CONNECT",
+ "Couldn't connect to %s: %s (%i)",
+ rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_PORT |
+ RD_SOCKADDR2STR_F_FAMILY),
+ rd_socket_strerror(r), r);
+ rd_snprintf(errstr, errstr_size,
+ "Failed to connect to broker at %s: %s",
+ rd_sockaddr2str(sinx, RD_SOCKADDR2STR_F_NICE),
+ rd_socket_strerror(r));
+
+ rd_kafka_transport_close(rktrans);
+ return NULL;
+ }
+
+ /* Set up transport handle */
+ rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd = s;
+ if (rkb->rkb_wakeup_fd[0] != -1) {
+ rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt].events = POLLIN;
+ rktrans->rktrans_pfd[rktrans->rktrans_pfd_cnt++].fd =
+ rkb->rkb_wakeup_fd[0];
+ }
+
+
+ /* Poll writability to trigger on connection success/failure. */
+ rd_kafka_transport_poll_set(rktrans, POLLOUT);
+
+ return rktrans;
+}
+
+
+#ifdef _WIN32
+/**
+ * @brief Set the WinSocket event poll bit to \p events.
+ */
+static void rd_kafka_transport_poll_set_wsa(rd_kafka_transport_t *rktrans,
+ int events) {
+ int r;
+ r = WSAEventSelect(
+ rktrans->rktrans_s, rktrans->rktrans_wsaevent,
+ rd_kafka_transport_events2wsa(rktrans->rktrans_pfd[0].events,
+ rktrans->rktrans_rkb->rkb_state ==
+ RD_KAFKA_BROKER_STATE_CONNECT));
+ if (unlikely(r != 0)) {
+ rd_rkb_log(rktrans->rktrans_rkb, LOG_CRIT, "WSAEVENT",
+ "WSAEventSelect() failed: %s",
+ rd_socket_strerror(rd_socket_errno));
+ }
+}
+#endif
+
+void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event) {
+ if ((rktrans->rktrans_pfd[0].events & event) == event)
+ return;
+
+ rktrans->rktrans_pfd[0].events |= event;
+
+#ifdef _WIN32
+ rd_kafka_transport_poll_set_wsa(rktrans,
+ rktrans->rktrans_pfd[0].events);
+#endif
+}
+
+void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event) {
+ if (!(rktrans->rktrans_pfd[0].events & event))
+ return;
+
+ rktrans->rktrans_pfd[0].events &= ~event;
+
+#ifdef _WIN32
+ rd_kafka_transport_poll_set_wsa(rktrans,
+ rktrans->rktrans_pfd[0].events);
+#endif
+}
+
+#ifndef _WIN32
+/**
+ * @brief Poll transport fds.
+ *
+ * @returns 1 if an event was raised, else 0, or -1 on error.
+ */
+static int rd_kafka_transport_poll(rd_kafka_transport_t *rktrans, int tmout) {
+ int r;
+
+ r = poll(rktrans->rktrans_pfd, rktrans->rktrans_pfd_cnt, tmout);
+ if (r <= 0)
+ return r;
+
+ if (rktrans->rktrans_pfd[1].revents & POLLIN) {
+ /* Read wake-up fd data and throw away, just used for wake-ups*/
+ char buf[1024];
+ while (rd_socket_read((int)rktrans->rktrans_pfd[1].fd, buf,
+ sizeof(buf)) > 0)
+ ; /* Read all buffered signalling bytes */
+ }
+
+ return 1;
+}
+#endif
+
+#ifdef _WIN32
+/**
+ * @brief A socket write operation would block, flag the socket
+ * as blocked so that POLLOUT events are handled correctly.
+ *
+ * This is really only used on Windows where POLLOUT (FD_WRITE) is
+ * edge-triggered rather than level-triggered.
+ */
+void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans,
+ rd_bool_t blocked) {
+ rktrans->rktrans_blocked = blocked;
+}
+#endif
+
+
+#if 0
+/**
+ * Global cleanup.
+ * This is dangerous and SHOULD NOT be called since it will rip
+ * the rug from under the application if it uses any of this functionality
+ * in its own code. This means we might leak some memory on exit.
+ */
+void rd_kafka_transport_term (void) {
+#ifdef _WIN32
+ (void)WSACleanup(); /* FIXME: dangerous */
+#endif
+}
+#endif
+
+void rd_kafka_transport_init(void) {
+#ifdef _WIN32
+ WSADATA d;
+ (void)WSAStartup(MAKEWORD(2, 2), &d);
+#endif
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h
new file mode 100644
index 000000000..83af5ae90
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport.h
@@ -0,0 +1,94 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_TRANSPORT_H_
+#define _RDKAFKA_TRANSPORT_H_
+
+#ifndef _WIN32
+#include <poll.h>
+#endif
+
+#include "rdbuf.h"
+#include "rdaddr.h"
+
+typedef struct rd_kafka_transport_s rd_kafka_transport_t;
+
+int rd_kafka_transport_io_serve(rd_kafka_transport_t *rktrans,
+ rd_kafka_q_t *rkq,
+ int timeout_ms);
+
+ssize_t rd_kafka_transport_send(rd_kafka_transport_t *rktrans,
+ rd_slice_t *slice,
+ char *errstr,
+ size_t errstr_size);
+ssize_t rd_kafka_transport_recv(rd_kafka_transport_t *rktrans,
+ rd_buf_t *rbuf,
+ char *errstr,
+ size_t errstr_size);
+
+void rd_kafka_transport_request_sent(rd_kafka_broker_t *rkb,
+ rd_kafka_buf_t *rkbuf);
+
+int rd_kafka_transport_framed_recv(rd_kafka_transport_t *rktrans,
+ rd_kafka_buf_t **rkbufp,
+ char *errstr,
+ size_t errstr_size);
+
+rd_kafka_transport_t *rd_kafka_transport_new(rd_kafka_broker_t *rkb,
+ rd_socket_t s,
+ char *errstr,
+ size_t errstr_size);
+struct rd_kafka_broker_s;
+rd_kafka_transport_t *rd_kafka_transport_connect(struct rd_kafka_broker_s *rkb,
+ const rd_sockaddr_inx_t *sinx,
+ char *errstr,
+ size_t errstr_size);
+void rd_kafka_transport_connect_done(rd_kafka_transport_t *rktrans,
+ char *errstr);
+
+void rd_kafka_transport_post_connect_setup(rd_kafka_transport_t *rktrans);
+
+void rd_kafka_transport_close(rd_kafka_transport_t *rktrans);
+void rd_kafka_transport_shutdown(rd_kafka_transport_t *rktrans);
+void rd_kafka_transport_poll_set(rd_kafka_transport_t *rktrans, int event);
+void rd_kafka_transport_poll_clear(rd_kafka_transport_t *rktrans, int event);
+
+#ifdef _WIN32
+void rd_kafka_transport_set_blocked(rd_kafka_transport_t *rktrans,
+ rd_bool_t blocked);
+#else
+/* no-op on other platforms */
+#define rd_kafka_transport_set_blocked(rktrans, blocked) \
+ do { \
+ } while (0)
+#endif
+
+
+void rd_kafka_transport_init(void);
+
+#endif /* _RDKAFKA_TRANSPORT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h
new file mode 100644
index 000000000..4b053b98f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_transport_int.h
@@ -0,0 +1,100 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDKAFKA_TRANSPORT_INT_H_
+#define _RDKAFKA_TRANSPORT_INT_H_
+
+/* This header file is to be used by .c files needing access to the
+ * rd_kafka_transport_t struct internals. */
+
+#include "rdkafka_sasl.h"
+
+#if WITH_SSL
+#include <openssl/ssl.h>
+#include <openssl/err.h>
+#include <openssl/pkcs12.h>
+#endif
+
+#ifndef _WIN32
+#include <sys/socket.h>
+#include <netinet/tcp.h>
+#endif
+
+struct rd_kafka_transport_s {
+ rd_socket_t rktrans_s;
+ rd_kafka_broker_t *rktrans_rkb; /* Not reference counted */
+
+#if WITH_SSL
+ SSL *rktrans_ssl;
+#endif
+
+#ifdef _WIN32
+ WSAEVENT *rktrans_wsaevent;
+ rd_bool_t rktrans_blocked; /* Latest send() returned ..WOULDBLOCK.
+ * We need to poll for FD_WRITE which
+ * is edge-triggered rather than
+ * level-triggered.
+ * This behaviour differs from BSD
+ * sockets. */
+#endif
+
+ struct {
+ void *state; /* SASL implementation
+ * state handle */
+
+ int complete; /* Auth was completed early
+ * from the client's perspective
+ * (but we might still have to
+ * wait for server reply). */
+
+ /* SASL framing buffers */
+ struct msghdr msg;
+ struct iovec iov[2];
+
+ char *recv_buf;
+ int recv_of; /* Received byte count */
+ int recv_len; /* Expected receive length for
+ * current frame. */
+ } rktrans_sasl;
+
+ rd_kafka_buf_t *rktrans_recv_buf; /* Used with framed_recvmsg */
+
+ /* Two pollable fds:
+ * - TCP socket
+ * - wake-up fd (not used on Win32)
+ */
+ rd_pollfd_t rktrans_pfd[2];
+ int rktrans_pfd_cnt;
+
+ size_t rktrans_rcvbuf_size; /**< Socket receive buffer size */
+ size_t rktrans_sndbuf_size; /**< Socket send buffer size */
+};
+
+
+extern RD_TLS rd_kafka_transport_t *rd_kafka_curr_transport;
+
+#endif /* _RDKAFKA_TRANSPORT_INT_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c
new file mode 100644
index 000000000..afbc28b71
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.c
@@ -0,0 +1,3249 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Transaction Manager
+ *
+ */
+
+#include <stdarg.h>
+
+#include "rd.h"
+#include "rdkafka_int.h"
+#include "rdkafka_txnmgr.h"
+#include "rdkafka_idempotence.h"
+#include "rdkafka_request.h"
+#include "rdkafka_error.h"
+#include "rdunittest.h"
+#include "rdrand.h"
+
+
+static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms);
+
+#define rd_kafka_txn_curr_api_set_result(rk, actions, error) \
+ rd_kafka_txn_curr_api_set_result0(__FUNCTION__, __LINE__, rk, actions, \
+ error)
+static void rd_kafka_txn_curr_api_set_result0(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ int actions,
+ rd_kafka_error_t *error);
+
+
+
+/**
+ * @return a normalized error code, this for instance abstracts different
+ * fencing errors to return one single fencing error to the application.
+ */
+static rd_kafka_resp_err_t rd_kafka_txn_normalize_err(rd_kafka_resp_err_t err) {
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+ case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
+ return RD_KAFKA_RESP_ERR__FENCED;
+ case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+ return RD_KAFKA_RESP_ERR__TIMED_OUT;
+ default:
+ return err;
+ }
+}
+
+
+/**
+ * @brief Ensure client is configured as a transactional producer,
+ * else return error.
+ *
+ * @locality application thread
+ * @locks none
+ */
+static RD_INLINE rd_kafka_error_t *
+rd_kafka_ensure_transactional(const rd_kafka_t *rk) {
+ if (unlikely(rk->rk_type != RD_KAFKA_PRODUCER))
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "The Transactional API can only be used "
+ "on producer instances");
+
+ if (unlikely(!rk->rk_conf.eos.transactional_id))
+ return rd_kafka_error_new(RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
+ "The Transactional API requires "
+ "transactional.id to be configured");
+
+ return NULL;
+}
+
+
+
+/**
+ * @brief Ensure transaction state is one of \p states.
+ *
+ * @param the required states, ended by a -1 sentinel.
+ *
+ * @locks_required rd_kafka_*lock(rk) MUST be held
+ * @locality any
+ */
+static RD_INLINE rd_kafka_error_t *
+rd_kafka_txn_require_states0(rd_kafka_t *rk, rd_kafka_txn_state_t states[]) {
+ rd_kafka_error_t *error;
+ size_t i;
+
+ if (unlikely((error = rd_kafka_ensure_transactional(rk)) != NULL))
+ return error;
+
+ for (i = 0; (int)states[i] != -1; i++)
+ if (rk->rk_eos.txn_state == states[i])
+ return NULL;
+
+ /* For fatal and abortable states return the last transactional
+ * error, for all other states just return a state error. */
+ if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_FATAL_ERROR)
+ error = rd_kafka_error_new_fatal(rk->rk_eos.txn_err, "%s",
+ rk->rk_eos.txn_errstr);
+ else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR) {
+ error = rd_kafka_error_new(rk->rk_eos.txn_err, "%s",
+ rk->rk_eos.txn_errstr);
+ rd_kafka_error_set_txn_requires_abort(error);
+ } else
+ error = rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__STATE, "Operation not valid in state %s",
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state));
+
+
+ return error;
+}
+
+/** @brief \p ... is a list of states */
+#define rd_kafka_txn_require_state(rk, ...) \
+ rd_kafka_txn_require_states0( \
+ rk, (rd_kafka_txn_state_t[]) {__VA_ARGS__, -1})
+
+
+
+/**
+ * @param ignore Will be set to true if the state transition should be
+ * completely ignored.
+ * @returns true if the state transition is valid, else false.
+ */
+static rd_bool_t
+rd_kafka_txn_state_transition_is_valid(rd_kafka_txn_state_t curr,
+ rd_kafka_txn_state_t new_state,
+ rd_bool_t *ignore) {
+
+ *ignore = rd_false;
+
+ switch (new_state) {
+ case RD_KAFKA_TXN_STATE_INIT:
+ /* This is the initialized value and this transition will
+ * never happen. */
+ return rd_false;
+
+ case RD_KAFKA_TXN_STATE_WAIT_PID:
+ return curr == RD_KAFKA_TXN_STATE_INIT;
+
+ case RD_KAFKA_TXN_STATE_READY_NOT_ACKED:
+ return curr == RD_KAFKA_TXN_STATE_WAIT_PID;
+
+ case RD_KAFKA_TXN_STATE_READY:
+ return curr == RD_KAFKA_TXN_STATE_READY_NOT_ACKED ||
+ curr == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED ||
+ curr == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED;
+
+ case RD_KAFKA_TXN_STATE_IN_TRANSACTION:
+ return curr == RD_KAFKA_TXN_STATE_READY;
+
+ case RD_KAFKA_TXN_STATE_BEGIN_COMMIT:
+ return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION;
+
+ case RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION:
+ return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT;
+
+ case RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED:
+ return curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT ||
+ curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION;
+
+ case RD_KAFKA_TXN_STATE_BEGIN_ABORT:
+ return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
+ curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION ||
+ curr == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR;
+
+ case RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION:
+ return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT;
+
+ case RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED:
+ return curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT ||
+ curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION;
+
+ case RD_KAFKA_TXN_STATE_ABORTABLE_ERROR:
+ if (curr == RD_KAFKA_TXN_STATE_BEGIN_ABORT ||
+ curr == RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION ||
+ curr == RD_KAFKA_TXN_STATE_FATAL_ERROR) {
+ /* Ignore sub-sequent abortable errors in
+ * these states. */
+ *ignore = rd_true;
+ return 1;
+ }
+
+ return curr == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
+ curr == RD_KAFKA_TXN_STATE_BEGIN_COMMIT ||
+ curr == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION;
+
+ case RD_KAFKA_TXN_STATE_FATAL_ERROR:
+ /* Any state can transition to a fatal error */
+ return rd_true;
+
+ default:
+ RD_BUG("Invalid txn state transition: %s -> %s",
+ rd_kafka_txn_state2str(curr),
+ rd_kafka_txn_state2str(new_state));
+ return rd_false;
+ }
+}
+
+
+/**
+ * @brief Transition the transaction state to \p new_state.
+ *
+ * @returns 0 on success or an error code if the state transition
+ * was invalid.
+ *
+ * @locality rdkafka main thread
+ * @locks_required rd_kafka_wrlock MUST be held
+ */
+static void rd_kafka_txn_set_state(rd_kafka_t *rk,
+ rd_kafka_txn_state_t new_state) {
+ rd_bool_t ignore;
+
+ if (rk->rk_eos.txn_state == new_state)
+ return;
+
+ /* Check if state transition is valid */
+ if (!rd_kafka_txn_state_transition_is_valid(rk->rk_eos.txn_state,
+ new_state, &ignore)) {
+ rd_kafka_log(rk, LOG_CRIT, "TXNSTATE",
+ "BUG: Invalid transaction state transition "
+ "attempted: %s -> %s",
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state),
+ rd_kafka_txn_state2str(new_state));
+
+ rd_assert(!*"BUG: Invalid transaction state transition");
+ }
+
+ if (ignore) {
+ /* Ignore this state change */
+ return;
+ }
+
+ rd_kafka_dbg(rk, EOS, "TXNSTATE", "Transaction state change %s -> %s",
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state),
+ rd_kafka_txn_state2str(new_state));
+
+ /* If transitioning from IN_TRANSACTION, the app is no longer
+ * allowed to enqueue (produce) messages. */
+ if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION)
+ rd_atomic32_set(&rk->rk_eos.txn_may_enq, 0);
+ else if (new_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION)
+ rd_atomic32_set(&rk->rk_eos.txn_may_enq, 1);
+
+ rk->rk_eos.txn_state = new_state;
+}
+
+
+/**
+ * @returns the current transaction timeout, i.e., the time remaining in
+ * the current transaction.
+ *
+ * @remark The remaining timeout is currently not tracked, so this function
+ * will always return the remaining time based on transaction.timeout.ms
+ * and we rely on the broker to enforce the actual remaining timeout.
+ * This is still better than not having a timeout cap at all, which
+ * used to be the case.
+ * It's also tricky knowing exactly what the controller thinks the
+ * remaining transaction time is.
+ *
+ * @locks_required rd_kafka_*lock(rk) MUST be held.
+ */
+static RD_INLINE rd_ts_t rd_kafka_txn_current_timeout(const rd_kafka_t *rk) {
+ return rd_timeout_init(rk->rk_conf.eos.transaction_timeout_ms);
+}
+
+
+/**
+ * @brief An unrecoverable transactional error has occurred.
+ *
+ * @param do_lock RD_DO_LOCK: rd_kafka_wrlock(rk) will be acquired and released,
+ * RD_DONT_LOCK: rd_kafka_wrlock(rk) MUST be held by the caller.
+ * @locality any
+ * @locks rd_kafka_wrlock MUST NOT be held
+ */
+void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk,
+ rd_dolock_t do_lock,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) {
+ char errstr[512];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(errstr, sizeof(errstr), fmt, ap);
+ va_end(ap);
+
+ rd_kafka_log(rk, LOG_ALERT, "TXNERR",
+ "Fatal transaction error: %s (%s)", errstr,
+ rd_kafka_err2name(err));
+
+ if (do_lock)
+ rd_kafka_wrlock(rk);
+ rd_kafka_set_fatal_error0(rk, RD_DONT_LOCK, err, "%s", errstr);
+
+ rk->rk_eos.txn_err = err;
+ if (rk->rk_eos.txn_errstr)
+ rd_free(rk->rk_eos.txn_errstr);
+ rk->rk_eos.txn_errstr = rd_strdup(errstr);
+
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR);
+
+ if (do_lock)
+ rd_kafka_wrunlock(rk);
+
+ /* If application has called a transactional API and
+ * it has now failed, reply to the app.
+ * If there is no currently called API then this is a no-op. */
+ rd_kafka_txn_curr_api_set_result(
+ rk, 0, rd_kafka_error_new_fatal(err, "%s", errstr));
+}
+
+
+/**
+ * @brief An abortable/recoverable transactional error has occured.
+ *
+ * @param requires_epoch_bump If true; abort_transaction() will bump the epoch
+ * on the coordinator (KIP-360).
+
+ * @locality rdkafka main thread
+ * @locks rd_kafka_wrlock MUST NOT be held
+ */
+void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_bool_t requires_epoch_bump,
+ const char *fmt,
+ ...) {
+ char errstr[512];
+ va_list ap;
+
+ if (rd_kafka_fatal_error(rk, NULL, 0)) {
+ rd_kafka_dbg(rk, EOS, "FATAL",
+ "Not propagating abortable transactional "
+ "error (%s) "
+ "since previous fatal error already raised",
+ rd_kafka_err2name(err));
+ return;
+ }
+
+ va_start(ap, fmt);
+ vsnprintf(errstr, sizeof(errstr), fmt, ap);
+ va_end(ap);
+
+ rd_kafka_wrlock(rk);
+
+ if (requires_epoch_bump)
+ rk->rk_eos.txn_requires_epoch_bump = requires_epoch_bump;
+
+ if (rk->rk_eos.txn_err) {
+ rd_kafka_dbg(rk, EOS, "TXNERR",
+ "Ignoring sub-sequent abortable transaction "
+ "error: %s (%s): "
+ "previous error (%s) already raised",
+ errstr, rd_kafka_err2name(err),
+ rd_kafka_err2name(rk->rk_eos.txn_err));
+ rd_kafka_wrunlock(rk);
+ return;
+ }
+
+ rk->rk_eos.txn_err = err;
+ if (rk->rk_eos.txn_errstr)
+ rd_free(rk->rk_eos.txn_errstr);
+ rk->rk_eos.txn_errstr = rd_strdup(errstr);
+
+ rd_kafka_log(rk, LOG_ERR, "TXNERR",
+ "Current transaction failed in state %s: %s (%s%s)",
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state), errstr,
+ rd_kafka_err2name(err),
+ requires_epoch_bump ? ", requires epoch bump" : "");
+
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTABLE_ERROR);
+ rd_kafka_wrunlock(rk);
+
+ /* Purge all messages in queue/flight */
+ rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE | RD_KAFKA_PURGE_F_ABORT_TXN |
+ RD_KAFKA_PURGE_F_NON_BLOCKING);
+}
+
+
+
+/**
+ * @brief Send request-reply op to txnmgr callback, waits for a reply
+ * or timeout, and returns an error object or NULL on success.
+ *
+ * @remark Does not alter the current API state.
+ *
+ * @returns an error object on failure, else NULL.
+ *
+ * @locality application thread
+ *
+ * @locks_acquired rk->rk_eos.txn_curr_api.lock
+ */
+#define rd_kafka_txn_op_req(rk, op_cb, abs_timeout) \
+ rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, \
+ rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN, op_cb), \
+ abs_timeout)
+#define rd_kafka_txn_op_req1(rk, rko, abs_timeout) \
+ rd_kafka_txn_op_req0(__FUNCTION__, __LINE__, rk, rko, abs_timeout)
+static rd_kafka_error_t *rd_kafka_txn_op_req0(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ rd_kafka_op_t *rko,
+ rd_ts_t abs_timeout) {
+ rd_kafka_error_t *error = NULL;
+ rd_bool_t has_result = rd_false;
+
+ mtx_lock(&rk->rk_eos.txn_curr_api.lock);
+
+ /* See if there's already a result, if so return that immediately. */
+ if (rk->rk_eos.txn_curr_api.has_result) {
+ error = rk->rk_eos.txn_curr_api.error;
+ rk->rk_eos.txn_curr_api.error = NULL;
+ rk->rk_eos.txn_curr_api.has_result = rd_false;
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+ rd_kafka_op_destroy(rko);
+ rd_kafka_dbg(rk, EOS, "OPREQ",
+ "%s:%d: %s: returning already set result: %s",
+ func, line, rk->rk_eos.txn_curr_api.name,
+ error ? rd_kafka_error_string(error) : "Success");
+ return error;
+ }
+
+ /* Send one-way op to txnmgr */
+ if (!rd_kafka_q_enq(rk->rk_ops, rko))
+ RD_BUG("rk_ops queue disabled");
+
+ /* Wait for result to be set, or timeout */
+ do {
+ if (cnd_timedwait_ms(&rk->rk_eos.txn_curr_api.cnd,
+ &rk->rk_eos.txn_curr_api.lock,
+ rd_timeout_remains(abs_timeout)) ==
+ thrd_timedout)
+ break;
+ } while (!rk->rk_eos.txn_curr_api.has_result);
+
+
+
+ if ((has_result = rk->rk_eos.txn_curr_api.has_result)) {
+ rk->rk_eos.txn_curr_api.has_result = rd_false;
+ error = rk->rk_eos.txn_curr_api.error;
+ rk->rk_eos.txn_curr_api.error = NULL;
+ }
+
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+
+ /* If there was no reply it means the background operation is still
+ * in progress and its result will be set later, so the application
+ * should call this API again to resume. */
+ if (!has_result) {
+ error = rd_kafka_error_new_retriable(
+ RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Timed out waiting for operation to finish, "
+ "retry call to resume");
+ }
+
+ return error;
+}
+
+
+/**
+ * @brief Begin (or resume) a public API call.
+ *
+ * This function will prevent conflicting calls.
+ *
+ * @returns an error on failure, or NULL on success.
+ *
+ * @locality application thread
+ *
+ * @locks_acquired rk->rk_eos.txn_curr_api.lock
+ */
+static rd_kafka_error_t *rd_kafka_txn_curr_api_begin(rd_kafka_t *rk,
+ const char *api_name,
+ rd_bool_t cap_timeout,
+ int timeout_ms,
+ rd_ts_t *abs_timeoutp) {
+ rd_kafka_error_t *error = NULL;
+
+ if ((error = rd_kafka_ensure_transactional(rk)))
+ return error;
+
+ rd_kafka_rdlock(rk); /* Need lock for retrieving the states */
+ rd_kafka_dbg(rk, EOS, "TXNAPI",
+ "Transactional API called: %s "
+ "(in txn state %s, idemp state %s, API timeout %d)",
+ api_name, rd_kafka_txn_state2str(rk->rk_eos.txn_state),
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state),
+ timeout_ms);
+ rd_kafka_rdunlock(rk);
+
+ mtx_lock(&rk->rk_eos.txn_curr_api.lock);
+
+
+ /* Make sure there is no other conflicting in-progress API call,
+ * and that this same call is not currently under way in another thread.
+ */
+ if (unlikely(*rk->rk_eos.txn_curr_api.name &&
+ strcmp(rk->rk_eos.txn_curr_api.name, api_name))) {
+ /* Another API is being called. */
+ error = rd_kafka_error_new_retriable(
+ RD_KAFKA_RESP_ERR__CONFLICT,
+ "Conflicting %s API call is already in progress",
+ rk->rk_eos.txn_curr_api.name);
+
+ } else if (unlikely(rk->rk_eos.txn_curr_api.calling)) {
+ /* There is an active call to this same API
+ * from another thread. */
+ error = rd_kafka_error_new_retriable(
+ RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS,
+ "Simultaneous %s API calls not allowed",
+ rk->rk_eos.txn_curr_api.name);
+
+ } else if (*rk->rk_eos.txn_curr_api.name) {
+ /* Resumed call */
+ rk->rk_eos.txn_curr_api.calling = rd_true;
+
+ } else {
+ /* New call */
+ rd_snprintf(rk->rk_eos.txn_curr_api.name,
+ sizeof(rk->rk_eos.txn_curr_api.name), "%s",
+ api_name);
+ rk->rk_eos.txn_curr_api.calling = rd_true;
+ rd_assert(!rk->rk_eos.txn_curr_api.error);
+ }
+
+ if (!error && abs_timeoutp) {
+ rd_ts_t abs_timeout = rd_timeout_init(timeout_ms);
+
+ if (cap_timeout) {
+ /* Cap API timeout to remaining transaction timeout */
+ rd_ts_t abs_txn_timeout =
+ rd_kafka_txn_current_timeout(rk);
+ if (abs_timeout > abs_txn_timeout ||
+ abs_timeout == RD_POLL_INFINITE)
+ abs_timeout = abs_txn_timeout;
+ }
+
+ *abs_timeoutp = abs_timeout;
+ }
+
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+
+ return error;
+}
+
+
+
+/**
+ * @brief Return from public API.
+ *
+ * This function updates the current API state and must be used in
+ * all return statements from the public txn API.
+ *
+ * @param resumable If true and the error is retriable, the current API state
+ * will be maintained to allow a future call to the same API
+ * to resume the background operation that is in progress.
+ * @param error The error object, if not NULL, is simply inspected and returned.
+ *
+ * @returns the \p error object as-is.
+ *
+ * @locality application thread
+ * @locks_acquired rk->rk_eos.txn_curr_api.lock
+ */
+#define rd_kafka_txn_curr_api_return(rk, resumable, error) \
+ rd_kafka_txn_curr_api_return0(__FUNCTION__, __LINE__, rk, resumable, \
+ error)
+static rd_kafka_error_t *
+rd_kafka_txn_curr_api_return0(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ rd_bool_t resumable,
+ rd_kafka_error_t *error) {
+
+ mtx_lock(&rk->rk_eos.txn_curr_api.lock);
+
+ rd_kafka_dbg(
+ rk, EOS, "TXNAPI", "Transactional API %s return%s at %s:%d: %s",
+ rk->rk_eos.txn_curr_api.name,
+ resumable && rd_kafka_error_is_retriable(error) ? " resumable" : "",
+ func, line, error ? rd_kafka_error_string(error) : "Success");
+
+ rd_assert(*rk->rk_eos.txn_curr_api.name);
+ rd_assert(rk->rk_eos.txn_curr_api.calling);
+
+ rk->rk_eos.txn_curr_api.calling = rd_false;
+
+ /* Reset the current API call so that other APIs may be called,
+ * unless this is a resumable API and the error is retriable. */
+ if (!resumable || (error && !rd_kafka_error_is_retriable(error))) {
+ *rk->rk_eos.txn_curr_api.name = '\0';
+ /* It is possible for another error to have been set,
+ * typically when a fatal error is raised, so make sure
+ * we're not destroying the error we're supposed to return. */
+ if (rk->rk_eos.txn_curr_api.error != error)
+ rd_kafka_error_destroy(rk->rk_eos.txn_curr_api.error);
+ rk->rk_eos.txn_curr_api.error = NULL;
+ }
+
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+
+ return error;
+}
+
+
+
+/**
+ * @brief Set the (possibly intermediary) result for the current API call.
+ *
+ * The result is \p error NULL for success or \p error object on failure.
+ * If the application is actively blocked on the call the result will be
+ * sent on its replyq, otherwise the result will be stored for future retrieval
+ * the next time the application calls the API again.
+ *
+ * @locality rdkafka main thread
+ * @locks_acquired rk->rk_eos.txn_curr_api.lock
+ */
+static void rd_kafka_txn_curr_api_set_result0(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ int actions,
+ rd_kafka_error_t *error) {
+
+ mtx_lock(&rk->rk_eos.txn_curr_api.lock);
+
+ if (!*rk->rk_eos.txn_curr_api.name) {
+ /* No current API being called, this could happen
+ * if the application thread API deemed the API was done,
+ * or for fatal errors that attempt to set the result
+ * regardless of current API state.
+ * In this case we simply throw away this result. */
+ if (error)
+ rd_kafka_error_destroy(error);
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+ return;
+ }
+
+ rd_kafka_dbg(rk, EOS, "APIRESULT",
+ "Transactional API %s (intermediary%s) result set "
+ "at %s:%d: %s (%sprevious result%s%s)",
+ rk->rk_eos.txn_curr_api.name,
+ rk->rk_eos.txn_curr_api.calling ? ", calling" : "", func,
+ line, error ? rd_kafka_error_string(error) : "Success",
+ rk->rk_eos.txn_curr_api.has_result ? "" : "no ",
+ rk->rk_eos.txn_curr_api.error ? ": " : "",
+ rd_kafka_error_string(rk->rk_eos.txn_curr_api.error));
+
+ rk->rk_eos.txn_curr_api.has_result = rd_true;
+
+
+ if (rk->rk_eos.txn_curr_api.error) {
+ /* If there's already an error it typically means
+ * a fatal error has been raised, so nothing more to do here. */
+ rd_kafka_dbg(
+ rk, EOS, "APIRESULT",
+ "Transactional API %s error "
+ "already set: %s",
+ rk->rk_eos.txn_curr_api.name,
+ rd_kafka_error_string(rk->rk_eos.txn_curr_api.error));
+
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+
+ if (error)
+ rd_kafka_error_destroy(error);
+
+ return;
+ }
+
+ if (error) {
+ if (actions & RD_KAFKA_ERR_ACTION_FATAL)
+ rd_kafka_error_set_fatal(error);
+ else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
+ rd_kafka_error_set_txn_requires_abort(error);
+ else if (actions & RD_KAFKA_ERR_ACTION_RETRY)
+ rd_kafka_error_set_retriable(error);
+ }
+
+ rk->rk_eos.txn_curr_api.error = error;
+ error = NULL;
+ cnd_broadcast(&rk->rk_eos.txn_curr_api.cnd);
+
+
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+}
+
+
+
+/**
+ * @brief The underlying idempotent producer state changed,
+ * see if this affects the transactional operations.
+ *
+ * @locality any thread
+ * @locks rd_kafka_wrlock(rk) MUST be held
+ */
+void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk,
+ rd_kafka_idemp_state_t idemp_state) {
+ rd_bool_t set_result = rd_false;
+
+ if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED &&
+ rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_WAIT_PID) {
+ /* Application is calling (or has called) init_transactions() */
+ RD_UT_COVERAGE(1);
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED);
+ set_result = rd_true;
+
+ } else if (idemp_state == RD_KAFKA_IDEMP_STATE_ASSIGNED &&
+ (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_BEGIN_ABORT ||
+ rk->rk_eos.txn_state ==
+ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION)) {
+ /* Application is calling abort_transaction() as we're
+ * recovering from a fatal idempotence error. */
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
+ set_result = rd_true;
+
+ } else if (idemp_state == RD_KAFKA_IDEMP_STATE_FATAL_ERROR &&
+ rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_FATAL_ERROR) {
+ /* A fatal error has been raised. */
+
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_FATAL_ERROR);
+ }
+
+ if (set_result) {
+ /* Application has called init_transactions() or
+ * abort_transaction() and it is now complete,
+ * reply to the app. */
+ rd_kafka_txn_curr_api_set_result(rk, 0, NULL);
+ }
+}
+
+
+/**
+ * @brief Moves a partition from the pending list to the proper list.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_partition_registered(rd_kafka_toppar_t *rktp) {
+ rd_kafka_t *rk = rktp->rktp_rkt->rkt_rk;
+
+ rd_kafka_toppar_lock(rktp);
+
+ if (unlikely(!(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_PEND_TXN))) {
+ rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_PROTOCOL, "ADDPARTS",
+ "\"%.*s\" [%" PRId32
+ "] is not in pending "
+ "list but returned in AddPartitionsToTxn "
+ "response: ignoring",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+ rd_kafka_toppar_unlock(rktp);
+ return;
+ }
+
+ rd_kafka_dbg(rk, EOS | RD_KAFKA_DBG_TOPIC, "ADDPARTS",
+ "%.*s [%" PRId32 "] registered with transaction",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition);
+
+ rd_assert((rktp->rktp_flags &
+ (RD_KAFKA_TOPPAR_F_PEND_TXN | RD_KAFKA_TOPPAR_F_IN_TXN)) ==
+ RD_KAFKA_TOPPAR_F_PEND_TXN);
+
+ rktp->rktp_flags = (rktp->rktp_flags & ~RD_KAFKA_TOPPAR_F_PEND_TXN) |
+ RD_KAFKA_TOPPAR_F_IN_TXN;
+
+ rd_kafka_toppar_unlock(rktp);
+
+ mtx_lock(&rk->rk_eos.txn_pending_lock);
+ TAILQ_REMOVE(&rk->rk_eos.txn_waitresp_rktps, rktp, rktp_txnlink);
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+
+ /* Not destroy()/keep():ing rktp since it just changes tailq. */
+
+ TAILQ_INSERT_TAIL(&rk->rk_eos.txn_rktps, rktp, rktp_txnlink);
+}
+
+
+
+/**
+ * @brief Handle AddPartitionsToTxnResponse
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_handle_AddPartitionsToTxn(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ int32_t TopicCnt;
+ int actions = 0;
+ int retry_backoff_ms = 500; /* retry backoff */
+ rd_kafka_resp_err_t reset_coord_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_bool_t require_bump = rd_false;
+
+ if (err)
+ goto done;
+
+ rd_kafka_rdlock(rk);
+ rd_assert(rk->rk_eos.txn_state !=
+ RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION);
+
+ if (rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_IN_TRANSACTION &&
+ rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_BEGIN_COMMIT) {
+ /* Response received after aborting transaction */
+ rd_rkb_dbg(rkb, EOS, "ADDPARTS",
+ "Ignoring outdated AddPartitionsToTxn response in "
+ "state %s",
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state));
+ rd_kafka_rdunlock(rk);
+ err = RD_KAFKA_RESP_ERR__OUTDATED;
+ goto done;
+ }
+ rd_kafka_rdunlock(rk);
+
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i32(rkbuf, &TopicCnt);
+
+ while (TopicCnt-- > 0) {
+ rd_kafkap_str_t Topic;
+ rd_kafka_topic_t *rkt;
+ int32_t PartCnt;
+ rd_bool_t request_error = rd_false;
+
+ rd_kafka_buf_read_str(rkbuf, &Topic);
+ rd_kafka_buf_read_i32(rkbuf, &PartCnt);
+
+ rkt = rd_kafka_topic_find0(rk, &Topic);
+ if (rkt)
+ rd_kafka_topic_rdlock(rkt); /* for toppar_get() */
+
+ while (PartCnt-- > 0) {
+ rd_kafka_toppar_t *rktp = NULL;
+ int32_t Partition;
+ int16_t ErrorCode;
+ int p_actions = 0;
+
+ rd_kafka_buf_read_i32(rkbuf, &Partition);
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+ if (rkt)
+ rktp = rd_kafka_toppar_get(rkt, Partition,
+ rd_false);
+
+ if (!rktp) {
+ rd_rkb_dbg(rkb, EOS | RD_KAFKA_DBG_PROTOCOL,
+ "ADDPARTS",
+ "Unknown partition \"%.*s\" "
+ "[%" PRId32
+ "] in AddPartitionsToTxn "
+ "response: ignoring",
+ RD_KAFKAP_STR_PR(&Topic), Partition);
+ continue;
+ }
+
+ switch (ErrorCode) {
+ case RD_KAFKA_RESP_ERR_NO_ERROR:
+ /* Move rktp from pending to proper list */
+ rd_kafka_txn_partition_registered(rktp);
+ break;
+
+ /* Request-level errors.
+ * As soon as any of these errors are seen
+ * the rest of the partitions are ignored
+ * since they will have the same error. */
+ case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+ case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+ reset_coord_err = ErrorCode;
+ p_actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ err = ErrorCode;
+ request_error = rd_true;
+ break;
+
+ case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
+ retry_backoff_ms = 20;
+ /* FALLTHRU */
+ case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
+ case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
+ p_actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ err = ErrorCode;
+ request_error = rd_true;
+ break;
+
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+ case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
+ case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
+ case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+ p_actions |= RD_KAFKA_ERR_ACTION_FATAL;
+ err = ErrorCode;
+ request_error = rd_true;
+ break;
+
+ case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID:
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING:
+ require_bump = rd_true;
+ p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ err = ErrorCode;
+ request_error = rd_true;
+ break;
+
+ /* Partition-level errors.
+ * Continue with rest of partitions. */
+ case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
+ p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ err = ErrorCode;
+ break;
+
+ case RD_KAFKA_RESP_ERR_OPERATION_NOT_ATTEMPTED:
+ /* Partition skipped due to other partition's
+ * error. */
+ p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ if (!err)
+ err = ErrorCode;
+ break;
+
+ default:
+ /* Other partition error */
+ p_actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ err = ErrorCode;
+ break;
+ }
+
+ if (ErrorCode) {
+ actions |= p_actions;
+
+ if (!(p_actions &
+ (RD_KAFKA_ERR_ACTION_FATAL |
+ RD_KAFKA_ERR_ACTION_PERMANENT)))
+ rd_rkb_dbg(
+ rkb, EOS, "ADDPARTS",
+ "AddPartitionsToTxn response: "
+ "partition \"%.*s\": "
+ "[%" PRId32 "]: %s",
+ RD_KAFKAP_STR_PR(&Topic), Partition,
+ rd_kafka_err2str(ErrorCode));
+ else
+ rd_rkb_log(rkb, LOG_ERR, "ADDPARTS",
+ "Failed to add partition "
+ "\"%.*s\" [%" PRId32
+ "] to "
+ "transaction: %s",
+ RD_KAFKAP_STR_PR(&Topic),
+ Partition,
+ rd_kafka_err2str(ErrorCode));
+ }
+
+ rd_kafka_toppar_destroy(rktp);
+
+ if (request_error)
+ break; /* Request-level error seen, bail out */
+ }
+
+ if (rkt) {
+ rd_kafka_topic_rdunlock(rkt);
+ rd_kafka_topic_destroy0(rkt);
+ }
+
+ if (request_error)
+ break; /* Request-level error seen, bail out */
+ }
+
+ if (actions) /* Actions set from encountered errors */
+ goto done;
+
+ /* Since these partitions are now allowed to produce
+ * we wake up all broker threads. */
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "partitions added to transaction");
+
+ goto done;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+
+done:
+ if (err) {
+ rd_assert(rk->rk_eos.txn_req_cnt > 0);
+ rk->rk_eos.txn_req_cnt--;
+ }
+
+ /* Handle local request-level errors */
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_NO_ERROR:
+ break;
+
+ case RD_KAFKA_RESP_ERR__DESTROY:
+ case RD_KAFKA_RESP_ERR__OUTDATED:
+ /* Terminating or outdated, ignore response */
+ return;
+
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ case RD_KAFKA_RESP_ERR__TIMED_OUT:
+ default:
+ /* For these errors we can't be sure if the
+ * request was received by the broker or not,
+ * so increase the txn_req_cnt back up as if
+ * they were received so that and EndTxnRequest
+ * is sent on abort_transaction(). */
+ rk->rk_eos.txn_req_cnt++;
+ actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ break;
+ }
+
+ if (reset_coord_err) {
+ rd_kafka_wrlock(rk);
+ rd_kafka_txn_coord_set(rk, NULL,
+ "AddPartitionsToTxn failed: %s",
+ rd_kafka_err2str(reset_coord_err));
+ rd_kafka_wrunlock(rk);
+ }
+
+ /* Partitions that failed will still be on the waitresp list
+ * and are moved back to the pending list for the next scheduled
+ * AddPartitionsToTxn request.
+ * If this request was successful there will be no remaining partitions
+ * on the waitresp list.
+ */
+ mtx_lock(&rk->rk_eos.txn_pending_lock);
+ TAILQ_CONCAT_SORTED(&rk->rk_eos.txn_pending_rktps,
+ &rk->rk_eos.txn_waitresp_rktps, rd_kafka_toppar_t *,
+ rktp_txnlink, rd_kafka_toppar_topic_cmp);
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+
+ err = rd_kafka_txn_normalize_err(err);
+
+ if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
+ rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
+ "Failed to add partitions to "
+ "transaction: %s",
+ rd_kafka_err2str(err));
+
+ } else if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) {
+ /* Treat all other permanent errors as abortable errors.
+ * If an epoch bump is required let idempo sort it out. */
+ if (require_bump)
+ rd_kafka_idemp_drain_epoch_bump(
+ rk, err,
+ "Failed to add partition(s) to transaction "
+ "on broker %s: %s (after %d ms)",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+ (int)(request->rkbuf_ts_sent / 1000));
+ else
+ rd_kafka_txn_set_abortable_error(
+ rk, err,
+ "Failed to add partition(s) to transaction "
+ "on broker %s: %s (after %d ms)",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+ (int)(request->rkbuf_ts_sent / 1000));
+
+ } else {
+ /* Schedule registration of any new or remaining partitions */
+ rd_kafka_txn_schedule_register_partitions(
+ rk, (actions & RD_KAFKA_ERR_ACTION_RETRY)
+ ? retry_backoff_ms
+ : 1 /*immediate*/);
+ }
+}
+
+
+/**
+ * @brief Send AddPartitionsToTxnRequest to the transaction coordinator.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_register_partitions(rd_kafka_t *rk) {
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+ rd_kafka_pid_t pid;
+
+ /* Require operational state */
+ rd_kafka_rdlock(rk);
+ error =
+ rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION,
+ RD_KAFKA_TXN_STATE_BEGIN_COMMIT);
+
+ if (unlikely(error != NULL)) {
+ rd_kafka_rdunlock(rk);
+ rd_kafka_dbg(rk, EOS, "ADDPARTS",
+ "Not registering partitions: %s",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ return;
+ }
+
+ /* Get pid, checked later */
+ pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
+
+ rd_kafka_rdunlock(rk);
+
+ /* Transaction coordinator needs to be up */
+ if (!rd_kafka_broker_is_up(rk->rk_eos.txn_coord)) {
+ rd_kafka_dbg(rk, EOS, "ADDPARTS",
+ "Not registering partitions: "
+ "coordinator is not available");
+ return;
+ }
+
+ mtx_lock(&rk->rk_eos.txn_pending_lock);
+ if (TAILQ_EMPTY(&rk->rk_eos.txn_pending_rktps)) {
+ /* No pending partitions to register */
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+ return;
+ }
+
+ if (!TAILQ_EMPTY(&rk->rk_eos.txn_waitresp_rktps)) {
+ /* Only allow one outstanding AddPartitionsToTxnRequest */
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+ rd_kafka_dbg(rk, EOS, "ADDPARTS",
+ "Not registering partitions: waiting for "
+ "previous AddPartitionsToTxn request to complete");
+ return;
+ }
+
+ /* Require valid pid */
+ if (unlikely(!rd_kafka_pid_valid(pid))) {
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+ rd_kafka_dbg(rk, EOS, "ADDPARTS",
+ "Not registering partitions: "
+ "No PID available (idempotence state %s)",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
+ rd_dassert(!*"BUG: No PID despite proper transaction state");
+ return;
+ }
+
+
+ /* Send request to coordinator */
+ err = rd_kafka_AddPartitionsToTxnRequest(
+ rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
+ &rk->rk_eos.txn_pending_rktps, errstr, sizeof(errstr),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_txn_handle_AddPartitionsToTxn, NULL);
+ if (err) {
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+ rd_kafka_dbg(rk, EOS, "ADDPARTS",
+ "Not registering partitions: %s", errstr);
+ return;
+ }
+
+ /* Move all pending partitions to wait-response list.
+ * No need to keep waitresp sorted. */
+ TAILQ_CONCAT(&rk->rk_eos.txn_waitresp_rktps,
+ &rk->rk_eos.txn_pending_rktps, rktp_txnlink);
+
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+
+ rk->rk_eos.txn_req_cnt++;
+
+ rd_rkb_dbg(rk->rk_eos.txn_coord, EOS, "ADDPARTS",
+ "Registering partitions with transaction");
+}
+
+
+static void rd_kafka_txn_register_partitions_tmr_cb(rd_kafka_timers_t *rkts,
+ void *arg) {
+ rd_kafka_t *rk = arg;
+ rd_kafka_txn_register_partitions(rk);
+}
+
+
+/**
+ * @brief Schedule register_partitions() as soon as possible.
+ *
+ * @locality any
+ * @locks any
+ */
+void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms) {
+ rd_kafka_timer_start_oneshot(
+ &rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr,
+ rd_false /*dont-restart*/,
+ backoff_ms ? backoff_ms * 1000 : 1 /* immediate */,
+ rd_kafka_txn_register_partitions_tmr_cb, rk);
+}
+
+
+
+/**
+ * @brief Clears \p flag from all rktps and destroys them, emptying
+ * and reinitializing the \p tqh.
+ */
+static void rd_kafka_txn_clear_partitions_flag(rd_kafka_toppar_tqhead_t *tqh,
+ int flag) {
+ rd_kafka_toppar_t *rktp, *tmp;
+
+ TAILQ_FOREACH_SAFE(rktp, tqh, rktp_txnlink, tmp) {
+ rd_kafka_toppar_lock(rktp);
+ rd_dassert(rktp->rktp_flags & flag);
+ rktp->rktp_flags &= ~flag;
+ rd_kafka_toppar_unlock(rktp);
+ rd_kafka_toppar_destroy(rktp);
+ }
+
+ TAILQ_INIT(tqh);
+}
+
+
+/**
+ * @brief Clear all pending partitions.
+ *
+ * @locks txn_pending_lock MUST be held
+ */
+static void rd_kafka_txn_clear_pending_partitions(rd_kafka_t *rk) {
+ rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_pending_rktps,
+ RD_KAFKA_TOPPAR_F_PEND_TXN);
+ rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_waitresp_rktps,
+ RD_KAFKA_TOPPAR_F_PEND_TXN);
+}
+
+/**
+ * @brief Clear all added partitions.
+ *
+ * @locks rd_kafka_wrlock(rk) MUST be held
+ */
+static void rd_kafka_txn_clear_partitions(rd_kafka_t *rk) {
+ rd_kafka_txn_clear_partitions_flag(&rk->rk_eos.txn_rktps,
+ RD_KAFKA_TOPPAR_F_IN_TXN);
+}
+
+
+
+/**
+ * @brief Async handler for init_transactions()
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t rd_kafka_txn_op_init_transactions(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+
+ if ((error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_INIT, RD_KAFKA_TXN_STATE_WAIT_PID,
+ RD_KAFKA_TXN_STATE_READY_NOT_ACKED))) {
+ rd_kafka_wrunlock(rk);
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ } else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_READY_NOT_ACKED) {
+ /* A previous init_transactions() called finished successfully
+ * after timeout, the application has called init_transactions()
+ * again, we do nothin here, ack_init_transactions() will
+ * transition the state from READY_NOT_ACKED to READY. */
+ rd_kafka_wrunlock(rk);
+
+ } else {
+
+ /* Possibly a no-op if already in WAIT_PID state */
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_WAIT_PID);
+
+ rk->rk_eos.txn_init_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rd_kafka_wrunlock(rk);
+
+ /* Start idempotent producer to acquire PID */
+ rd_kafka_idemp_start(rk, rd_true /*immediately*/);
+
+ /* Do not call curr_api_set_result, it will be triggered from
+ * idemp_state_change() when the PID has been retrieved. */
+ }
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Async handler for the application to acknowledge
+ * successful background completion of init_transactions().
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t
+rd_kafka_txn_op_ack_init_transactions(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+
+ if (!(error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_READY_NOT_ACKED)))
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY);
+
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+
+rd_kafka_error_t *rd_kafka_init_transactions(rd_kafka_t *rk, int timeout_ms) {
+ rd_kafka_error_t *error;
+ rd_ts_t abs_timeout;
+
+ /* Cap actual timeout to transaction.timeout.ms * 2 when an infinite
+ * timeout is provided, this is to make sure the call doesn't block
+ * indefinitely in case a coordinator is not available.
+ * This is only needed for init_transactions() since there is no
+ * coordinator to time us out yet. */
+ if (timeout_ms == RD_POLL_INFINITE &&
+ /* Avoid overflow */
+ rk->rk_conf.eos.transaction_timeout_ms < INT_MAX / 2)
+ timeout_ms = rk->rk_conf.eos.transaction_timeout_ms * 2;
+
+ if ((error = rd_kafka_txn_curr_api_begin(rk, "init_transactions",
+ rd_false /* no cap */,
+ timeout_ms, &abs_timeout)))
+ return error;
+
+ /* init_transactions() will continue to operate in the background
+ * if the timeout expires, and the application may call
+ * init_transactions() again to resume the initialization
+ * process.
+ * For this reason we need two states:
+ * - TXN_STATE_READY_NOT_ACKED for when initialization is done
+ * but the API call timed out prior to success, meaning the
+ * application does not know initialization finished and
+ * is thus not allowed to call sub-sequent txn APIs, e.g. begin..()
+ * - TXN_STATE_READY for when initialization is done and this
+ * function has returned successfully to the application.
+ *
+ * And due to the two states we need two calls to the rdkafka main
+ * thread (to keep txn_state synchronization in one place). */
+
+ /* First call is to trigger initialization */
+ if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_init_transactions,
+ abs_timeout))) {
+ if (rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR__TIMED_OUT) {
+ /* See if there's a more meaningful txn_init_err set
+ * by idempo that we can return. */
+ rd_kafka_resp_err_t err;
+ rd_kafka_rdlock(rk);
+ err =
+ rd_kafka_txn_normalize_err(rk->rk_eos.txn_init_err);
+ rd_kafka_rdunlock(rk);
+
+ if (err && err != RD_KAFKA_RESP_ERR__TIMED_OUT) {
+ rd_kafka_error_destroy(error);
+ error = rd_kafka_error_new_retriable(
+ err, "Failed to initialize Producer ID: %s",
+ rd_kafka_err2str(err));
+ }
+ }
+
+ return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+ }
+
+
+ /* Second call is to transition from READY_NOT_ACKED -> READY,
+ * if necessary. */
+ error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_ack_init_transactions,
+ /* Timeout must be infinite since this is
+ * a synchronization point.
+ * The call is immediate though, so this
+ * will not block. */
+ RD_POLL_INFINITE);
+
+ return rd_kafka_txn_curr_api_return(rk,
+ /* not resumable at this point */
+ rd_false, error);
+}
+
+
+
+/**
+ * @brief Handler for begin_transaction()
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t rd_kafka_txn_op_begin_transaction(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+ rd_bool_t wakeup_brokers = rd_false;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+ if (!(error =
+ rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_READY))) {
+ rd_assert(TAILQ_EMPTY(&rk->rk_eos.txn_rktps));
+
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION);
+
+ rd_assert(rk->rk_eos.txn_req_cnt == 0);
+ rd_atomic64_set(&rk->rk_eos.txn_dr_fails, 0);
+ rk->rk_eos.txn_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free);
+ rk->rk_eos.txn_errstr = NULL;
+
+ /* Wake up all broker threads (that may have messages to send
+ * that were waiting for this transaction state.
+ * But needs to be done below with no lock held. */
+ wakeup_brokers = rd_true;
+ }
+ rd_kafka_wrunlock(rk);
+
+ if (wakeup_brokers)
+ rd_kafka_all_brokers_wakeup(rk, RD_KAFKA_BROKER_STATE_INIT,
+ "begin transaction");
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+rd_kafka_error_t *rd_kafka_begin_transaction(rd_kafka_t *rk) {
+ rd_kafka_error_t *error;
+
+ if ((error = rd_kafka_txn_curr_api_begin(rk, "begin_transaction",
+ rd_false, 0, NULL)))
+ return error;
+
+ error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_transaction,
+ RD_POLL_INFINITE);
+
+ return rd_kafka_txn_curr_api_return(rk, rd_false /*not resumable*/,
+ error);
+}
+
+
+static rd_kafka_resp_err_t
+rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_op_t *rko,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *reply_opaque);
+
+/**
+ * @brief Handle TxnOffsetCommitResponse
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_handle_TxnOffsetCommit(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_op_t *rko = opaque;
+ int actions = 0;
+ rd_kafka_topic_partition_list_t *partitions = NULL;
+ char errstr[512];
+
+ *errstr = '\0';
+
+ if (err)
+ goto done;
+
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_ERR,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ partitions = rd_kafka_buf_read_topic_partitions(rkbuf, 0, fields);
+ if (!partitions)
+ goto err_parse;
+
+ err = rd_kafka_topic_partition_list_get_err(partitions);
+ if (err) {
+ char errparts[256];
+ rd_kafka_topic_partition_list_str(partitions, errparts,
+ sizeof(errparts),
+ RD_KAFKA_FMT_F_ONLY_ERR);
+ rd_snprintf(errstr, sizeof(errstr),
+ "Failed to commit offsets to transaction on "
+ "broker %s: %s "
+ "(after %dms)",
+ rd_kafka_broker_name(rkb), errparts,
+ (int)(request->rkbuf_ts_sent / 1000));
+ }
+
+ goto done;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+
+done:
+ if (err) {
+ if (!*errstr) {
+ rd_snprintf(errstr, sizeof(errstr),
+ "Failed to commit offsets to "
+ "transaction on broker %s: %s "
+ "(after %d ms)",
+ rkb ? rd_kafka_broker_name(rkb) : "(none)",
+ rd_kafka_err2str(err),
+ (int)(request->rkbuf_ts_sent / 1000));
+ }
+ }
+
+
+ if (partitions)
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_NO_ERROR:
+ break;
+
+ case RD_KAFKA_RESP_ERR__DESTROY:
+ /* Producer is being terminated, ignore the response. */
+ case RD_KAFKA_RESP_ERR__OUTDATED:
+ /* Set a non-actionable actions flag so that
+ * curr_api_set_result() is called below, without
+ * other side-effects. */
+ actions = RD_KAFKA_ERR_ACTION_SPECIAL;
+ return;
+
+ case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+ case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ case RD_KAFKA_RESP_ERR__TIMED_OUT:
+ case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+ /* Note: this is the group coordinator, not the
+ * transaction coordinator. */
+ rd_kafka_coord_cache_evict(&rk->rk_coord_cache, rkb);
+ actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ break;
+
+ case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
+ case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
+ case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
+ actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ break;
+
+ case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING:
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+ case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
+ case RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT:
+ actions |= RD_KAFKA_ERR_ACTION_FATAL;
+ break;
+
+ case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED:
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ break;
+
+ case RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION:
+ case RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID:
+ case RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID:
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ break;
+
+ default:
+ /* Unhandled error, fail transaction */
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ break;
+ }
+
+ err = rd_kafka_txn_normalize_err(err);
+
+ if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
+ rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err, "%s", errstr);
+
+ } else if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+ int remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout);
+
+ if (!rd_timeout_expired(remains_ms)) {
+ rd_kafka_coord_req(
+ rk, RD_KAFKA_COORD_GROUP,
+ rko->rko_u.txn.cgmetadata->group_id,
+ rd_kafka_txn_send_TxnOffsetCommitRequest, rko,
+ 500 /* 500ms delay before retrying */,
+ rd_timeout_remains_limit0(
+ remains_ms, rk->rk_conf.socket_timeout_ms),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_txn_handle_TxnOffsetCommit, rko);
+ return;
+ } else if (!err)
+ err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ }
+
+ if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
+ rd_kafka_txn_set_abortable_error(rk, err, "%s", errstr);
+
+ if (err)
+ rd_kafka_txn_curr_api_set_result(
+ rk, actions, rd_kafka_error_new(err, "%s", errstr));
+ else
+ rd_kafka_txn_curr_api_set_result(rk, 0, NULL);
+
+ rd_kafka_op_destroy(rko);
+}
+
+
+
+/**
+ * @brief Construct and send TxnOffsetCommitRequest.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static rd_kafka_resp_err_t
+rd_kafka_txn_send_TxnOffsetCommitRequest(rd_kafka_broker_t *rkb,
+ rd_kafka_op_t *rko,
+ rd_kafka_replyq_t replyq,
+ rd_kafka_resp_cb_t *resp_cb,
+ void *reply_opaque) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_buf_t *rkbuf;
+ int16_t ApiVersion;
+ rd_kafka_pid_t pid;
+ const rd_kafka_consumer_group_metadata_t *cgmetadata =
+ rko->rko_u.txn.cgmetadata;
+ int cnt;
+
+ rd_kafka_rdlock(rk);
+ if (rk->rk_eos.txn_state != RD_KAFKA_TXN_STATE_IN_TRANSACTION) {
+ rd_kafka_rdunlock(rk);
+ /* Do not free the rko, it is passed as the reply_opaque
+ * on the reply queue by coord_req_fsm() when we return
+ * an error here. */
+ return RD_KAFKA_RESP_ERR__STATE;
+ }
+
+ pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
+ rd_kafka_rdunlock(rk);
+ if (!rd_kafka_pid_valid(pid)) {
+ /* Do not free the rko, it is passed as the reply_opaque
+ * on the reply queue by coord_req_fsm() when we return
+ * an error here. */
+ return RD_KAFKA_RESP_ERR__STATE;
+ }
+
+ ApiVersion = rd_kafka_broker_ApiVersion_supported(
+ rkb, RD_KAFKAP_TxnOffsetCommit, 0, 3, NULL);
+ if (ApiVersion == -1) {
+ /* Do not free the rko, it is passed as the reply_opaque
+ * on the reply queue by coord_req_fsm() when we return
+ * an error here. */
+ return RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE;
+ }
+
+ rkbuf = rd_kafka_buf_new_flexver_request(
+ rkb, RD_KAFKAP_TxnOffsetCommit, 1, rko->rko_u.txn.offsets->cnt * 50,
+ ApiVersion >= 3);
+
+ /* transactional_id */
+ rd_kafka_buf_write_str(rkbuf, rk->rk_conf.eos.transactional_id, -1);
+
+ /* group_id */
+ rd_kafka_buf_write_str(rkbuf, rko->rko_u.txn.cgmetadata->group_id, -1);
+
+ /* PID */
+ rd_kafka_buf_write_i64(rkbuf, pid.id);
+ rd_kafka_buf_write_i16(rkbuf, pid.epoch);
+
+ if (ApiVersion >= 3) {
+ /* GenerationId */
+ rd_kafka_buf_write_i32(rkbuf, cgmetadata->generation_id);
+ /* MemberId */
+ rd_kafka_buf_write_str(rkbuf, cgmetadata->member_id, -1);
+ /* GroupInstanceId */
+ rd_kafka_buf_write_str(rkbuf, cgmetadata->group_instance_id,
+ -1);
+ }
+
+ /* Write per-partition offsets list */
+ const rd_kafka_topic_partition_field_t fields[] = {
+ RD_KAFKA_TOPIC_PARTITION_FIELD_PARTITION,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_OFFSET,
+ ApiVersion >= 2 ? RD_KAFKA_TOPIC_PARTITION_FIELD_EPOCH
+ : RD_KAFKA_TOPIC_PARTITION_FIELD_NOOP,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_METADATA,
+ RD_KAFKA_TOPIC_PARTITION_FIELD_END};
+ cnt = rd_kafka_buf_write_topic_partitions(
+ rkbuf, rko->rko_u.txn.offsets, rd_true /*skip invalid offsets*/,
+ rd_false /*any offset*/, fields);
+ if (!cnt) {
+ /* No valid partition offsets, don't commit. */
+ rd_kafka_buf_destroy(rkbuf);
+ /* Do not free the rko, it is passed as the reply_opaque
+ * on the reply queue by coord_req_fsm() when we return
+ * an error here. */
+ return RD_KAFKA_RESP_ERR__NO_OFFSET;
+ }
+
+ rd_kafka_buf_ApiVersion_set(rkbuf, ApiVersion, 0);
+
+ rkbuf->rkbuf_max_retries = RD_KAFKA_REQUEST_MAX_RETRIES;
+
+ rd_kafka_broker_buf_enq_replyq(rkb, rkbuf, replyq, resp_cb,
+ reply_opaque);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Handle AddOffsetsToTxnResponse
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_handle_AddOffsetsToTxn(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ rd_kafka_op_t *rko = opaque;
+ int16_t ErrorCode;
+ int actions = 0;
+ int remains_ms;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY) {
+ rd_kafka_op_destroy(rko);
+ return;
+ }
+
+ if (err)
+ goto done;
+
+ rd_kafka_buf_read_throttle_time(rkbuf);
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+ err = ErrorCode;
+ goto done;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+
+done:
+ if (err) {
+ rd_assert(rk->rk_eos.txn_req_cnt > 0);
+ rk->rk_eos.txn_req_cnt--;
+ }
+
+ remains_ms = rd_timeout_remains(rko->rko_u.txn.abs_timeout);
+ if (rd_timeout_expired(remains_ms) && !err)
+ err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_NO_ERROR:
+ break;
+
+ case RD_KAFKA_RESP_ERR__DESTROY:
+ /* Producer is being terminated, ignore the response. */
+ case RD_KAFKA_RESP_ERR__OUTDATED:
+ /* Set a non-actionable actions flag so that
+ * curr_api_set_result() is called below, without
+ * other side-effects. */
+ actions = RD_KAFKA_ERR_ACTION_SPECIAL;
+ break;
+
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ case RD_KAFKA_RESP_ERR__TIMED_OUT:
+ /* For these errors we can't be sure if the
+ * request was received by the broker or not,
+ * so increase the txn_req_cnt back up as if
+ * they were received so that and EndTxnRequest
+ * is sent on abort_transaction(). */
+ rk->rk_eos.txn_req_cnt++;
+ /* FALLTHRU */
+ case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+ case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+ case RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT:
+ actions |=
+ RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH;
+ break;
+
+ case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+ case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
+ case RD_KAFKA_RESP_ERR_UNSUPPORTED_FOR_MESSAGE_FORMAT:
+ actions |= RD_KAFKA_ERR_ACTION_FATAL;
+ break;
+
+ case RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED:
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ break;
+
+ case RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART:
+ case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
+ case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
+ actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ break;
+
+ default:
+ /* All unhandled errors are permanent */
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ break;
+ }
+
+ err = rd_kafka_txn_normalize_err(err);
+
+ rd_kafka_dbg(rk, EOS, "ADDOFFSETS",
+ "AddOffsetsToTxn response from %s: %s (%s)",
+ rkb ? rd_kafka_broker_name(rkb) : "(none)",
+ rd_kafka_err2name(err), rd_kafka_actions2str(actions));
+
+ /* All unhandled errors are considered permanent */
+ if (err && !actions)
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+
+ if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
+ rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
+ "Failed to add offsets to "
+ "transaction: %s",
+ rd_kafka_err2str(err));
+ } else {
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
+ rd_kafka_txn_coord_timer_start(rk, 50);
+
+ if (actions & RD_KAFKA_ERR_ACTION_RETRY) {
+ rd_rkb_dbg(
+ rkb, EOS, "ADDOFFSETS",
+ "Failed to add offsets to transaction on "
+ "broker %s: %s (after %dms, %dms remains): "
+ "error is retriable",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+ (int)(request->rkbuf_ts_sent / 1000), remains_ms);
+
+ if (!rd_timeout_expired(remains_ms) &&
+ rd_kafka_buf_retry(rk->rk_eos.txn_coord, request)) {
+ rk->rk_eos.txn_req_cnt++;
+ return;
+ }
+
+ /* Propagate as retriable error through
+ * api_reply() below */
+ }
+ }
+
+ if (err)
+ rd_rkb_log(rkb, LOG_ERR, "ADDOFFSETS",
+ "Failed to add offsets to transaction on broker %s: "
+ "%s",
+ rkb ? rd_kafka_broker_name(rkb) : "(none)",
+ rd_kafka_err2str(err));
+
+ if (actions & RD_KAFKA_ERR_ACTION_PERMANENT)
+ rd_kafka_txn_set_abortable_error(
+ rk, err,
+ "Failed to add offsets to "
+ "transaction on broker %s: "
+ "%s (after %dms)",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+ (int)(request->rkbuf_ts_sent / 1000));
+
+ if (!err) {
+ /* Step 2: Commit offsets to transaction on the
+ * group coordinator. */
+
+ rd_kafka_coord_req(
+ rk, RD_KAFKA_COORD_GROUP,
+ rko->rko_u.txn.cgmetadata->group_id,
+ rd_kafka_txn_send_TxnOffsetCommitRequest, rko,
+ 0 /* no delay */,
+ rd_timeout_remains_limit0(remains_ms,
+ rk->rk_conf.socket_timeout_ms),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0),
+ rd_kafka_txn_handle_TxnOffsetCommit, rko);
+
+ } else {
+
+ rd_kafka_txn_curr_api_set_result(
+ rk, actions,
+ rd_kafka_error_new(
+ err,
+ "Failed to add offsets to transaction on "
+ "broker %s: %s (after %dms)",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+ (int)(request->rkbuf_ts_sent / 1000)));
+
+ rd_kafka_op_destroy(rko);
+ }
+}
+
+
+/**
+ * @brief Async handler for send_offsets_to_transaction()
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t
+rd_kafka_txn_op_send_offsets_to_transaction(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ char errstr[512];
+ rd_kafka_error_t *error;
+ rd_kafka_pid_t pid;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ *errstr = '\0';
+
+ rd_kafka_wrlock(rk);
+
+ if ((error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION))) {
+ rd_kafka_wrunlock(rk);
+ goto err;
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
+ if (!rd_kafka_pid_valid(pid)) {
+ rd_dassert(!*"BUG: No PID despite proper transaction state");
+ error = rd_kafka_error_new_retriable(
+ RD_KAFKA_RESP_ERR__STATE,
+ "No PID available (idempotence state %s)",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
+ goto err;
+ }
+
+ /* This is a multi-stage operation, consisting of:
+ * 1) send AddOffsetsToTxnRequest to transaction coordinator.
+ * 2) send TxnOffsetCommitRequest to group coordinator. */
+
+ err = rd_kafka_AddOffsetsToTxnRequest(
+ rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
+ rko->rko_u.txn.cgmetadata->group_id, errstr, sizeof(errstr),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_AddOffsetsToTxn,
+ rko);
+
+ if (err) {
+ error = rd_kafka_error_new_retriable(err, "%s", errstr);
+ goto err;
+ }
+
+ rk->rk_eos.txn_req_cnt++;
+
+ return RD_KAFKA_OP_RES_KEEP; /* the rko is passed to AddOffsetsToTxn */
+
+err:
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+/**
+ * error returns:
+ * ERR__TRANSPORT - retryable
+ */
+rd_kafka_error_t *rd_kafka_send_offsets_to_transaction(
+ rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ const rd_kafka_consumer_group_metadata_t *cgmetadata,
+ int timeout_ms) {
+ rd_kafka_error_t *error;
+ rd_kafka_op_t *rko;
+ rd_kafka_topic_partition_list_t *valid_offsets;
+ rd_ts_t abs_timeout;
+
+ if (!cgmetadata || !offsets)
+ return rd_kafka_error_new(
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "cgmetadata and offsets are required parameters");
+
+ if ((error = rd_kafka_txn_curr_api_begin(
+ rk, "send_offsets_to_transaction",
+ /* Cap timeout to txn timeout */
+ rd_true, timeout_ms, &abs_timeout)))
+ return error;
+
+
+ valid_offsets = rd_kafka_topic_partition_list_match(
+ offsets, rd_kafka_topic_partition_match_valid_offset, NULL);
+
+ if (valid_offsets->cnt == 0) {
+ /* No valid offsets, e.g., nothing was consumed,
+ * this is not an error, do nothing. */
+ rd_kafka_topic_partition_list_destroy(valid_offsets);
+ return rd_kafka_txn_curr_api_return(rk, rd_false, NULL);
+ }
+
+ rd_kafka_topic_partition_list_sort_by_topic(valid_offsets);
+
+ rko = rd_kafka_op_new_cb(rk, RD_KAFKA_OP_TXN,
+ rd_kafka_txn_op_send_offsets_to_transaction);
+ rko->rko_u.txn.offsets = valid_offsets;
+ rko->rko_u.txn.cgmetadata =
+ rd_kafka_consumer_group_metadata_dup(cgmetadata);
+ rko->rko_u.txn.abs_timeout = abs_timeout;
+
+ /* Timeout is enforced by op_send_offsets_to_transaction() */
+ error = rd_kafka_txn_op_req1(rk, rko, RD_POLL_INFINITE);
+
+ return rd_kafka_txn_curr_api_return(rk, rd_false, error);
+}
+
+
+
+/**
+ * @brief Successfully complete the transaction.
+ *
+ * Current state must be either COMMIT_NOT_ACKED or ABORT_NOT_ACKED.
+ *
+ * @locality rdkafka main thread
+ * @locks rd_kafka_wrlock(rk) MUST be held
+ */
+static void rd_kafka_txn_complete(rd_kafka_t *rk, rd_bool_t is_commit) {
+ rd_kafka_dbg(rk, EOS, "TXNCOMPLETE", "Transaction successfully %s",
+ is_commit ? "committed" : "aborted");
+
+ /* Clear all transaction partition state */
+ rd_kafka_txn_clear_pending_partitions(rk);
+ rd_kafka_txn_clear_partitions(rk);
+
+ rk->rk_eos.txn_requires_epoch_bump = rd_false;
+ rk->rk_eos.txn_req_cnt = 0;
+
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_READY);
+}
+
+
+/**
+ * @brief EndTxn (commit or abort of transaction on the coordinator) is done,
+ * or was skipped.
+ * Continue with next steps (if any) before completing the local
+ * transaction state.
+ *
+ * @locality rdkafka main thread
+ * @locks_acquired rd_kafka_wrlock(rk), rk->rk_eos.txn_curr_api.lock
+ */
+static void rd_kafka_txn_endtxn_complete(rd_kafka_t *rk) {
+ rd_bool_t is_commit;
+
+ mtx_lock(&rk->rk_eos.txn_curr_api.lock);
+ is_commit = !strcmp(rk->rk_eos.txn_curr_api.name, "commit_transaction");
+ mtx_unlock(&rk->rk_eos.txn_curr_api.lock);
+
+ rd_kafka_wrlock(rk);
+
+ /* If an epoch bump is required, let idempo handle it.
+ * When the bump is finished we'll be notified through
+ * idemp_state_change() and we can complete the local transaction state
+ * and set the final API call result.
+ * If the bumping fails a fatal error will be raised. */
+ if (rk->rk_eos.txn_requires_epoch_bump) {
+ rd_kafka_resp_err_t bump_err = rk->rk_eos.txn_err;
+ rd_dassert(!is_commit);
+
+ rd_kafka_wrunlock(rk);
+
+ /* After the epoch bump is done we'll be transitioned
+ * to the next state. */
+ rd_kafka_idemp_drain_epoch_bump0(
+ rk, rd_false /* don't allow txn abort */, bump_err,
+ "Transaction aborted: %s", rd_kafka_err2str(bump_err));
+ return;
+ }
+
+ if (is_commit)
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED);
+ else
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
+
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, NULL);
+}
+
+
+/**
+ * @brief Handle EndTxnResponse (commit or abort)
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_handle_EndTxn(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode;
+ int actions = 0;
+ rd_bool_t is_commit, may_retry = rd_false, require_bump = rd_false;
+
+ if (err == RD_KAFKA_RESP_ERR__DESTROY)
+ return;
+
+ is_commit = request->rkbuf_u.EndTxn.commit;
+
+ if (err)
+ goto err;
+
+ rd_kafka_buf_read_throttle_time(rkbuf);
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+ err = ErrorCode;
+ goto err;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+ /* FALLTHRU */
+
+err:
+ rd_kafka_wrlock(rk);
+
+ if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) {
+ may_retry = rd_true;
+
+ } else if (rk->rk_eos.txn_state ==
+ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) {
+ may_retry = rd_true;
+
+ } else if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR) {
+ /* Transaction has failed locally, typically due to timeout.
+ * Get the transaction error and return that instead of
+ * this error.
+ * This is a tricky state since the transaction will have
+ * failed locally but the EndTxn(commit) may have succeeded. */
+
+
+ if (err) {
+ rd_kafka_txn_curr_api_set_result(
+ rk, RD_KAFKA_ERR_ACTION_PERMANENT,
+ rd_kafka_error_new(
+ rk->rk_eos.txn_err,
+ "EndTxn failed with %s but transaction "
+ "had already failed due to: %s",
+ rd_kafka_err2name(err), rk->rk_eos.txn_errstr));
+ } else {
+ /* If the transaction has failed locally but
+ * this EndTxn commit succeeded we'll raise
+ * a fatal error. */
+ if (is_commit)
+ rd_kafka_txn_curr_api_set_result(
+ rk, RD_KAFKA_ERR_ACTION_FATAL,
+ rd_kafka_error_new(
+ rk->rk_eos.txn_err,
+ "Transaction commit succeeded on the "
+ "broker but the transaction "
+ "had already failed locally due to: %s",
+ rk->rk_eos.txn_errstr));
+
+ else
+ rd_kafka_txn_curr_api_set_result(
+ rk, RD_KAFKA_ERR_ACTION_PERMANENT,
+ rd_kafka_error_new(
+ rk->rk_eos.txn_err,
+ "Transaction abort succeeded on the "
+ "broker but the transaction"
+ "had already failed locally due to: %s",
+ rk->rk_eos.txn_errstr));
+ }
+
+ rd_kafka_wrunlock(rk);
+
+
+ return;
+
+ } else if (!err) {
+ /* Request is outdated */
+ err = RD_KAFKA_RESP_ERR__OUTDATED;
+ }
+
+
+ rd_kafka_dbg(rk, EOS, "ENDTXN",
+ "EndTxn returned %s in state %s (may_retry=%s)",
+ rd_kafka_err2name(err),
+ rd_kafka_txn_state2str(rk->rk_eos.txn_state),
+ RD_STR_ToF(may_retry));
+
+ rd_kafka_wrunlock(rk);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR_NO_ERROR:
+ break;
+
+ case RD_KAFKA_RESP_ERR__DESTROY:
+ /* Producer is being terminated, ignore the response. */
+ case RD_KAFKA_RESP_ERR__OUTDATED:
+ /* Transactional state no longer relevant for this
+ * outdated response. */
+ break;
+ case RD_KAFKA_RESP_ERR__TIMED_OUT:
+ case RD_KAFKA_RESP_ERR__TIMED_OUT_QUEUE:
+ /* Request timeout */
+ /* FALLTHRU */
+ case RD_KAFKA_RESP_ERR__TRANSPORT:
+ actions |=
+ RD_KAFKA_ERR_ACTION_RETRY | RD_KAFKA_ERR_ACTION_REFRESH;
+ break;
+
+ case RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE:
+ case RD_KAFKA_RESP_ERR_NOT_COORDINATOR:
+ rd_kafka_wrlock(rk);
+ rd_kafka_txn_coord_set(rk, NULL, "EndTxn failed: %s",
+ rd_kafka_err2str(err));
+ rd_kafka_wrunlock(rk);
+ actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ break;
+
+ case RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS:
+ case RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS:
+ actions |= RD_KAFKA_ERR_ACTION_RETRY;
+ break;
+
+ case RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID:
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING:
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ require_bump = rd_true;
+ break;
+
+ case RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH:
+ case RD_KAFKA_RESP_ERR_PRODUCER_FENCED:
+ case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_INVALID_TXN_STATE:
+ actions |= RD_KAFKA_ERR_ACTION_FATAL;
+ break;
+
+ default:
+ /* All unhandled errors are permanent */
+ actions |= RD_KAFKA_ERR_ACTION_PERMANENT;
+ }
+
+ err = rd_kafka_txn_normalize_err(err);
+
+ if (actions & RD_KAFKA_ERR_ACTION_FATAL) {
+ rd_kafka_txn_set_fatal_error(rk, RD_DO_LOCK, err,
+ "Failed to end transaction: %s",
+ rd_kafka_err2str(err));
+ } else {
+ if (actions & RD_KAFKA_ERR_ACTION_REFRESH)
+ rd_kafka_txn_coord_timer_start(rk, 50);
+
+ if (actions & RD_KAFKA_ERR_ACTION_PERMANENT) {
+ if (require_bump && !is_commit) {
+ /* Abort failed to due invalid PID, starting
+ * with KIP-360 we can have idempo sort out
+ * epoch bumping.
+ * When the epoch has been bumped we'll detect
+ * the idemp_state_change and complete the
+ * current API call. */
+ rd_kafka_idemp_drain_epoch_bump0(
+ rk,
+ /* don't allow txn abort */
+ rd_false, err, "EndTxn %s failed: %s",
+ is_commit ? "commit" : "abort",
+ rd_kafka_err2str(err));
+ return;
+ }
+
+ /* For aborts we need to revert the state back to
+ * BEGIN_ABORT so that the abort can be retried from
+ * the beginning in op_abort_transaction(). */
+ rd_kafka_wrlock(rk);
+ if (rk->rk_eos.txn_state ==
+ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION)
+ rd_kafka_txn_set_state(
+ rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT);
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_txn_set_abortable_error0(
+ rk, err, require_bump,
+ "Failed to end transaction: "
+ "%s",
+ rd_kafka_err2str(err));
+
+ } else if (may_retry && actions & RD_KAFKA_ERR_ACTION_RETRY &&
+ rd_kafka_buf_retry(rkb, request))
+ return;
+ }
+
+ if (err)
+ rd_kafka_txn_curr_api_set_result(
+ rk, actions,
+ rd_kafka_error_new(err, "EndTxn %s failed: %s",
+ is_commit ? "commit" : "abort",
+ rd_kafka_err2str(err)));
+ else
+ rd_kafka_txn_endtxn_complete(rk);
+}
+
+
+
+/**
+ * @brief Handler for commit_transaction()
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t
+rd_kafka_txn_op_commit_transaction(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_pid_t pid;
+ int64_t dr_fails;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+
+ if ((error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT,
+ RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION,
+ RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED)))
+ goto done;
+
+ if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED) {
+ /* A previous call to commit_transaction() timed out but the
+ * commit completed since then, we still
+ * need to wait for the application to call commit_transaction()
+ * again to resume the call, and it just did. */
+ goto done;
+ } else if (rk->rk_eos.txn_state ==
+ RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION) {
+ /* A previous call to commit_transaction() timed out but the
+ * commit is still in progress, we still
+ * need to wait for the application to call commit_transaction()
+ * again to resume the call, and it just did. */
+ rd_kafka_wrunlock(rk);
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ /* If any messages failed delivery the transaction must be aborted. */
+ dr_fails = rd_atomic64_get(&rk->rk_eos.txn_dr_fails);
+ if (unlikely(dr_fails > 0)) {
+ error = rd_kafka_error_new_txn_requires_abort(
+ RD_KAFKA_RESP_ERR__INCONSISTENT,
+ "%" PRId64
+ " message(s) failed delivery "
+ "(see individual delivery reports)",
+ dr_fails);
+ goto done;
+ }
+
+ if (!rk->rk_eos.txn_req_cnt) {
+ /* If there were no messages produced, or no send_offsets,
+ * in this transaction, simply complete the transaction
+ * without sending anything to the transaction coordinator
+ * (since it will not have any txn state). */
+ rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+ "No partitions registered: not sending EndTxn");
+ rd_kafka_wrunlock(rk);
+ rd_kafka_txn_endtxn_complete(rk);
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_false);
+ if (!rd_kafka_pid_valid(pid)) {
+ rd_dassert(!*"BUG: No PID despite proper transaction state");
+ error = rd_kafka_error_new_retriable(
+ RD_KAFKA_RESP_ERR__STATE,
+ "No PID available (idempotence state %s)",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
+ goto done;
+ }
+
+ err = rd_kafka_EndTxnRequest(
+ rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
+ rd_true /* commit */, errstr, sizeof(errstr),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL);
+ if (err) {
+ error = rd_kafka_error_new_retriable(err, "%s", errstr);
+ goto done;
+ }
+
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION);
+
+ rd_kafka_wrunlock(rk);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+
+done:
+ rd_kafka_wrunlock(rk);
+
+ /* If the returned error is an abortable error
+ * also set the current transaction state accordingly. */
+ if (rd_kafka_error_txn_requires_abort(error))
+ rd_kafka_txn_set_abortable_error(rk, rd_kafka_error_code(error),
+ "%s",
+ rd_kafka_error_string(error));
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Handler for commit_transaction()'s first phase: begin commit
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t rd_kafka_txn_op_begin_commit(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+
+ rd_kafka_wrlock(rk);
+
+ error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION,
+ RD_KAFKA_TXN_STATE_BEGIN_COMMIT,
+ RD_KAFKA_TXN_STATE_COMMITTING_TRANSACTION,
+ RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED);
+
+ if (!error &&
+ rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION) {
+ /* Transition to BEGIN_COMMIT state if no error and commit not
+ * already started. */
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_COMMIT);
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Handler for last ack of commit_transaction()
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t
+rd_kafka_txn_op_commit_transaction_ack(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+
+ if (!(error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_COMMIT_NOT_ACKED))) {
+ rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+ "Committed transaction now acked by application");
+ rd_kafka_txn_complete(rk, rd_true /*is commit*/);
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+
+rd_kafka_error_t *rd_kafka_commit_transaction(rd_kafka_t *rk, int timeout_ms) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_ts_t abs_timeout;
+
+ /* The commit is in three phases:
+ * - begin commit: wait for outstanding messages to be produced,
+ * disallow new messages from being produced
+ * by application.
+ * - commit: commit transaction.
+ * - commit not acked: commit done, but waiting for application
+ * to acknowledge by completing this API call.
+ */
+
+ if ((error = rd_kafka_txn_curr_api_begin(rk, "commit_transaction",
+ rd_false /* no cap */,
+ timeout_ms, &abs_timeout)))
+ return error;
+
+ /* Begin commit */
+ if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_commit,
+ abs_timeout)))
+ return rd_kafka_txn_curr_api_return(rk,
+ /* not resumable yet */
+ rd_false, error);
+
+ rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+ "Flushing %d outstanding message(s) prior to commit",
+ rd_kafka_outq_len(rk));
+
+ /* Wait for queued messages to be delivered, limited by
+ * the remaining transaction lifetime. */
+ if ((err = rd_kafka_flush(rk, rd_timeout_remains(abs_timeout)))) {
+ rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+ "Flush failed (with %d messages remaining): %s",
+ rd_kafka_outq_len(rk), rd_kafka_err2str(err));
+
+ if (err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ error = rd_kafka_error_new_retriable(
+ err,
+ "Failed to flush all outstanding messages "
+ "within the API timeout: "
+ "%d message(s) remaining%s",
+ rd_kafka_outq_len(rk),
+ /* In case event queue delivery reports
+ * are enabled and there is no dr callback
+ * we instruct the developer to poll
+ * the event queue separately, since we
+ * can't do it for them. */
+ ((rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR) &&
+ !rk->rk_conf.dr_msg_cb && !rk->rk_conf.dr_cb)
+ ? ": the event queue must be polled "
+ "for delivery report events in a separate "
+ "thread or prior to calling commit"
+ : "");
+ else
+ error = rd_kafka_error_new_retriable(
+ err, "Failed to flush outstanding messages: %s",
+ rd_kafka_err2str(err));
+
+ /* The commit operation is in progress in the background
+ * and the application will need to call this API again
+ * to resume. */
+ return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+ }
+
+ rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+ "Transaction commit message flush complete");
+
+ /* Commit transaction */
+ error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction,
+ abs_timeout);
+ if (error)
+ return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+
+ /* Last call is to transition from COMMIT_NOT_ACKED to READY */
+ error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_commit_transaction_ack,
+ /* Timeout must be infinite since this is
+ * a synchronization point.
+ * The call is immediate though, so this
+ * will not block. */
+ RD_POLL_INFINITE);
+
+ return rd_kafka_txn_curr_api_return(rk,
+ /* not resumable at this point */
+ rd_false, error);
+}
+
+
+
+/**
+ * @brief Handler for abort_transaction()'s first phase: begin abort
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t rd_kafka_txn_op_begin_abort(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+ rd_bool_t clear_pending = rd_false;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+
+ error =
+ rd_kafka_txn_require_state(rk, RD_KAFKA_TXN_STATE_IN_TRANSACTION,
+ RD_KAFKA_TXN_STATE_BEGIN_ABORT,
+ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION,
+ RD_KAFKA_TXN_STATE_ABORTABLE_ERROR,
+ RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED);
+
+ if (!error &&
+ (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
+ rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORTABLE_ERROR)) {
+ /* Transition to ABORTING_TRANSACTION state if no error and
+ * abort not already started. */
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT);
+ clear_pending = rd_true;
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ if (clear_pending) {
+ mtx_lock(&rk->rk_eos.txn_pending_lock);
+ rd_kafka_txn_clear_pending_partitions(rk);
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+ }
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Handler for abort_transaction()
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t rd_kafka_txn_op_abort_transaction(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_pid_t pid;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+
+ if ((error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_BEGIN_ABORT,
+ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION,
+ RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED)))
+ goto done;
+
+ if (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED) {
+ /* A previous call to abort_transaction() timed out but
+ * the aborting completed since then, we still need to wait
+ * for the application to call abort_transaction() again
+ * to synchronize state, and it just did. */
+ goto done;
+ } else if (rk->rk_eos.txn_state ==
+ RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION) {
+ /* A previous call to abort_transaction() timed out but
+ * the abort is still in progress, we still need to wait
+ * for the application to call abort_transaction() again
+ * to synchronize state, and it just did. */
+ rd_kafka_wrunlock(rk);
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ if (!rk->rk_eos.txn_req_cnt) {
+ rd_kafka_dbg(rk, EOS, "TXNABORT",
+ "No partitions registered: not sending EndTxn");
+ rd_kafka_wrunlock(rk);
+ rd_kafka_txn_endtxn_complete(rk);
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ /* If the underlying idempotent producer's state indicates it
+ * is re-acquiring its PID we need to wait for that to finish
+ * before allowing a new begin_transaction(), and since that is
+ * not a blocking call we need to perform that wait in this
+ * state instead.
+ * To recover we need to request an epoch bump from the
+ * transaction coordinator. This is handled automatically
+ * by the idempotent producer, so we just need to wait for
+ * the new pid to be assigned.
+ */
+ if (rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_ASSIGNED &&
+ rk->rk_eos.idemp_state != RD_KAFKA_IDEMP_STATE_WAIT_TXN_ABORT) {
+ rd_kafka_dbg(rk, EOS, "TXNABORT",
+ "Waiting for transaction coordinator "
+ "PID bump to complete before aborting "
+ "transaction (idempotent producer state %s)",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
+
+ rd_kafka_wrunlock(rk);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+ }
+
+ pid = rd_kafka_idemp_get_pid0(rk, RD_DONT_LOCK, rd_true);
+ if (!rd_kafka_pid_valid(pid)) {
+ rd_dassert(!*"BUG: No PID despite proper transaction state");
+ error = rd_kafka_error_new_retriable(
+ RD_KAFKA_RESP_ERR__STATE,
+ "No PID available (idempotence state %s)",
+ rd_kafka_idemp_state2str(rk->rk_eos.idemp_state));
+ goto done;
+ }
+
+ err = rd_kafka_EndTxnRequest(
+ rk->rk_eos.txn_coord, rk->rk_conf.eos.transactional_id, pid,
+ rd_false /* abort */, errstr, sizeof(errstr),
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_EndTxn, NULL);
+ if (err) {
+ error = rd_kafka_error_new_retriable(err, "%s", errstr);
+ goto done;
+ }
+
+ rd_kafka_txn_set_state(rk, RD_KAFKA_TXN_STATE_ABORTING_TRANSACTION);
+
+ rd_kafka_wrunlock(rk);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+
+done:
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+/**
+ * @brief Handler for last ack of abort_transaction()
+ *
+ * @locks none
+ * @locality rdkafka main thread
+ */
+static rd_kafka_op_res_t
+rd_kafka_txn_op_abort_transaction_ack(rd_kafka_t *rk,
+ rd_kafka_q_t *rkq,
+ rd_kafka_op_t *rko) {
+ rd_kafka_error_t *error;
+
+ if (rko->rko_err == RD_KAFKA_RESP_ERR__DESTROY)
+ return RD_KAFKA_OP_RES_HANDLED;
+
+ rd_kafka_wrlock(rk);
+
+ if (!(error = rd_kafka_txn_require_state(
+ rk, RD_KAFKA_TXN_STATE_ABORT_NOT_ACKED))) {
+ rd_kafka_dbg(rk, EOS, "TXNABORT",
+ "Aborted transaction now acked by application");
+ rd_kafka_txn_complete(rk, rd_false /*is abort*/);
+ }
+
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_txn_curr_api_set_result(rk, 0, error);
+
+ return RD_KAFKA_OP_RES_HANDLED;
+}
+
+
+
+rd_kafka_error_t *rd_kafka_abort_transaction(rd_kafka_t *rk, int timeout_ms) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_ts_t abs_timeout;
+
+ if ((error = rd_kafka_txn_curr_api_begin(rk, "abort_transaction",
+ rd_false /* no cap */,
+ timeout_ms, &abs_timeout)))
+ return error;
+
+ /* The abort is multi-phase:
+ * - set state to BEGIN_ABORT
+ * - flush() outstanding messages
+ * - send EndTxn
+ */
+
+ /* Begin abort */
+ if ((error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_begin_abort,
+ abs_timeout)))
+ return rd_kafka_txn_curr_api_return(rk,
+ /* not resumable yet */
+ rd_false, error);
+
+ rd_kafka_dbg(rk, EOS, "TXNABORT",
+ "Purging and flushing %d outstanding message(s) prior "
+ "to abort",
+ rd_kafka_outq_len(rk));
+
+ /* Purge all queued messages.
+ * Will need to wait for messages in-flight since purging these
+ * messages may lead to gaps in the idempotent producer sequences. */
+ err = rd_kafka_purge(rk, RD_KAFKA_PURGE_F_QUEUE |
+ RD_KAFKA_PURGE_F_ABORT_TXN);
+
+ /* Serve delivery reports for the purged messages. */
+ if ((err = rd_kafka_flush(rk, rd_timeout_remains(abs_timeout)))) {
+ /* FIXME: Not sure these errors matter that much */
+ if (err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ error = rd_kafka_error_new_retriable(
+ err,
+ "Failed to flush all outstanding messages "
+ "within the API timeout: "
+ "%d message(s) remaining%s",
+ rd_kafka_outq_len(rk),
+ (rk->rk_conf.enabled_events & RD_KAFKA_EVENT_DR)
+ ? ": the event queue must be polled "
+ "for delivery report events in a separate "
+ "thread or prior to calling abort"
+ : "");
+
+ else
+ error = rd_kafka_error_new_retriable(
+ err, "Failed to flush outstanding messages: %s",
+ rd_kafka_err2str(err));
+
+ /* The abort operation is in progress in the background
+ * and the application will need to call this API again
+ * to resume. */
+ return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+ }
+
+ rd_kafka_dbg(rk, EOS, "TXNCOMMIT",
+ "Transaction abort message purge and flush complete");
+
+ error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction,
+ abs_timeout);
+ if (error)
+ return rd_kafka_txn_curr_api_return(rk, rd_true, error);
+
+ /* Last call is to transition from ABORT_NOT_ACKED to READY. */
+ error = rd_kafka_txn_op_req(rk, rd_kafka_txn_op_abort_transaction_ack,
+ /* Timeout must be infinite since this is
+ * a synchronization point.
+ * The call is immediate though, so this
+ * will not block. */
+ RD_POLL_INFINITE);
+
+ return rd_kafka_txn_curr_api_return(rk,
+ /* not resumable at this point */
+ rd_false, error);
+}
+
+
+
+/**
+ * @brief Coordinator query timer
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+
+static void rd_kafka_txn_coord_timer_cb(rd_kafka_timers_t *rkts, void *arg) {
+ rd_kafka_t *rk = arg;
+
+ rd_kafka_wrlock(rk);
+ rd_kafka_txn_coord_query(rk, "Coordinator query timer");
+ rd_kafka_wrunlock(rk);
+}
+
+/**
+ * @brief Start coord query timer if not already started.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_coord_timer_start(rd_kafka_t *rk, int timeout_ms) {
+ rd_assert(rd_kafka_is_transactional(rk));
+ rd_kafka_timer_start_oneshot(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr,
+ /* don't restart if already started */
+ rd_false, 1000 * timeout_ms,
+ rd_kafka_txn_coord_timer_cb, rk);
+}
+
+
+/**
+ * @brief Parses and handles a FindCoordinator response.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+static void rd_kafka_txn_handle_FindCoordinator(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ rd_kafka_resp_err_t err,
+ rd_kafka_buf_t *rkbuf,
+ rd_kafka_buf_t *request,
+ void *opaque) {
+ const int log_decode_errors = LOG_ERR;
+ int16_t ErrorCode;
+ rd_kafkap_str_t Host;
+ int32_t NodeId, Port;
+ char errstr[512];
+
+ *errstr = '\0';
+
+ rk->rk_eos.txn_wait_coord = rd_false;
+
+ if (err)
+ goto err;
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1)
+ rd_kafka_buf_read_throttle_time(rkbuf);
+
+ rd_kafka_buf_read_i16(rkbuf, &ErrorCode);
+
+ if (request->rkbuf_reqhdr.ApiVersion >= 1) {
+ rd_kafkap_str_t ErrorMsg;
+ rd_kafka_buf_read_str(rkbuf, &ErrorMsg);
+ if (ErrorCode)
+ rd_snprintf(errstr, sizeof(errstr), "%.*s",
+ RD_KAFKAP_STR_PR(&ErrorMsg));
+ }
+
+ if ((err = ErrorCode))
+ goto err;
+
+ rd_kafka_buf_read_i32(rkbuf, &NodeId);
+ rd_kafka_buf_read_str(rkbuf, &Host);
+ rd_kafka_buf_read_i32(rkbuf, &Port);
+
+ rd_rkb_dbg(rkb, EOS, "TXNCOORD",
+ "FindCoordinator response: "
+ "Transaction coordinator is broker %" PRId32 " (%.*s:%d)",
+ NodeId, RD_KAFKAP_STR_PR(&Host), (int)Port);
+
+ rd_kafka_rdlock(rk);
+ if (NodeId == -1)
+ err = RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE;
+ else if (!(rkb = rd_kafka_broker_find_by_nodeid(rk, NodeId))) {
+ rd_snprintf(errstr, sizeof(errstr),
+ "Transaction coordinator %" PRId32 " is unknown",
+ NodeId);
+ err = RD_KAFKA_RESP_ERR__UNKNOWN_BROKER;
+ }
+ rd_kafka_rdunlock(rk);
+
+ if (err)
+ goto err;
+
+ rd_kafka_wrlock(rk);
+ rd_kafka_txn_coord_set(rk, rkb, "FindCoordinator response");
+ rd_kafka_wrunlock(rk);
+
+ rd_kafka_broker_destroy(rkb);
+
+ return;
+
+err_parse:
+ err = rkbuf->rkbuf_err;
+err:
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__DESTROY:
+ return;
+
+ case RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED:
+ case RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED:
+ rd_kafka_wrlock(rk);
+ rd_kafka_txn_set_fatal_error(
+ rkb->rkb_rk, RD_DONT_LOCK, err,
+ "Failed to find transaction coordinator: %s: %s%s%s",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err),
+ *errstr ? ": " : "", errstr);
+
+ rd_kafka_idemp_set_state(rk, RD_KAFKA_IDEMP_STATE_FATAL_ERROR);
+ rd_kafka_wrunlock(rk);
+ return;
+
+ case RD_KAFKA_RESP_ERR__UNKNOWN_BROKER:
+ rd_kafka_metadata_refresh_brokers(rk, NULL, errstr);
+ break;
+
+ default:
+ break;
+ }
+
+ rd_kafka_wrlock(rk);
+ rd_kafka_txn_coord_set(
+ rk, NULL, "Failed to find transaction coordinator: %s: %s",
+ rd_kafka_err2name(err), *errstr ? errstr : rd_kafka_err2str(err));
+ rd_kafka_wrunlock(rk);
+}
+
+
+
+/**
+ * @brief Query for the transaction coordinator.
+ *
+ * @returns true if a fatal error was raised, else false.
+ *
+ * @locality rdkafka main thread
+ * @locks rd_kafka_wrlock(rk) MUST be held.
+ */
+rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason) {
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ rd_kafka_broker_t *rkb;
+
+ rd_assert(rd_kafka_is_transactional(rk));
+
+ if (rk->rk_eos.txn_wait_coord) {
+ rd_kafka_dbg(rk, EOS, "TXNCOORD",
+ "Not sending coordinator query (%s): "
+ "waiting for previous query to finish",
+ reason);
+ return rd_false;
+ }
+
+ /* Find usable broker to query for the txn coordinator */
+ rkb = rd_kafka_idemp_broker_any(rk, &err, errstr, sizeof(errstr));
+ if (!rkb) {
+ rd_kafka_dbg(rk, EOS, "TXNCOORD",
+ "Unable to query for transaction coordinator: "
+ "%s: %s",
+ reason, errstr);
+
+ if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false))
+ return rd_true;
+
+ rd_kafka_txn_coord_timer_start(rk, 500);
+
+ return rd_false;
+ }
+
+ rd_kafka_dbg(rk, EOS, "TXNCOORD",
+ "Querying for transaction coordinator: %s", reason);
+
+ /* Send FindCoordinator request */
+ err = rd_kafka_FindCoordinatorRequest(
+ rkb, RD_KAFKA_COORD_TXN, rk->rk_conf.eos.transactional_id,
+ RD_KAFKA_REPLYQ(rk->rk_ops, 0), rd_kafka_txn_handle_FindCoordinator,
+ NULL);
+
+ if (err) {
+ rd_snprintf(errstr, sizeof(errstr),
+ "Failed to send coordinator query to %s: "
+ "%s",
+ rd_kafka_broker_name(rkb), rd_kafka_err2str(err));
+
+ rd_kafka_broker_destroy(rkb);
+
+ if (rd_kafka_idemp_check_error(rk, err, errstr, rd_false))
+ return rd_true; /* Fatal error */
+
+ rd_kafka_txn_coord_timer_start(rk, 500);
+
+ return rd_false;
+ }
+
+ rd_kafka_broker_destroy(rkb);
+
+ rk->rk_eos.txn_wait_coord = rd_true;
+
+ return rd_false;
+}
+
+/**
+ * @brief Sets or clears the current coordinator address.
+ *
+ * @returns true if the coordinator was changed, else false.
+ *
+ * @locality rdkafka main thread
+ * @locks rd_kafka_wrlock(rk) MUST be held
+ */
+rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *fmt,
+ ...) {
+ char buf[256];
+ va_list ap;
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+
+ if (rk->rk_eos.txn_curr_coord == rkb) {
+ if (!rkb) {
+ rd_kafka_dbg(rk, EOS, "TXNCOORD", "%s", buf);
+ /* Keep querying for the coordinator */
+ rd_kafka_txn_coord_timer_start(rk, 500);
+ }
+ return rd_false;
+ }
+
+ rd_kafka_dbg(rk, EOS, "TXNCOORD",
+ "Transaction coordinator changed from %s -> %s: %s",
+ rk->rk_eos.txn_curr_coord
+ ? rd_kafka_broker_name(rk->rk_eos.txn_curr_coord)
+ : "(none)",
+ rkb ? rd_kafka_broker_name(rkb) : "(none)", buf);
+
+ if (rk->rk_eos.txn_curr_coord)
+ rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord);
+
+ rk->rk_eos.txn_curr_coord = rkb;
+ if (rkb)
+ rd_kafka_broker_keep(rkb);
+
+ rd_kafka_broker_set_nodename(rk->rk_eos.txn_coord,
+ rk->rk_eos.txn_curr_coord);
+
+ if (!rkb) {
+ /* Lost the current coordinator, query for new coordinator */
+ rd_kafka_txn_coord_timer_start(rk, 500);
+ } else {
+ /* Trigger PID state machine */
+ rd_kafka_idemp_pid_fsm(rk);
+ }
+
+ return rd_true;
+}
+
+
+/**
+ * @brief Coordinator state monitor callback.
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_txn_coord_monitor_cb(rd_kafka_broker_t *rkb) {
+ rd_kafka_t *rk = rkb->rkb_rk;
+ rd_kafka_broker_state_t state = rd_kafka_broker_get_state(rkb);
+ rd_bool_t is_up;
+
+ rd_assert(rk->rk_eos.txn_coord == rkb);
+
+ is_up = rd_kafka_broker_state_is_up(state);
+ rd_rkb_dbg(rkb, EOS, "COORD", "Transaction coordinator is now %s",
+ is_up ? "up" : "down");
+
+ if (!is_up) {
+ /* Coordinator is down, the connection will be re-established
+ * automatically, but we also trigger a coordinator query
+ * to pick up on coordinator change. */
+ rd_kafka_txn_coord_timer_start(rk, 500);
+
+ } else {
+ /* Coordinator is up. */
+
+ rd_kafka_wrlock(rk);
+ if (rk->rk_eos.idemp_state < RD_KAFKA_IDEMP_STATE_ASSIGNED) {
+ /* See if a idempotence state change is warranted. */
+ rd_kafka_idemp_pid_fsm(rk);
+
+ } else if (rk->rk_eos.idemp_state ==
+ RD_KAFKA_IDEMP_STATE_ASSIGNED) {
+ /* PID is already valid, continue transactional
+ * operations by checking for partitions to register */
+ rd_kafka_txn_schedule_register_partitions(rk,
+ 1 /*ASAP*/);
+ }
+
+ rd_kafka_wrunlock(rk);
+ }
+}
+
+
+
+/**
+ * @brief Transactions manager destructor
+ *
+ * @locality rdkafka main thread
+ * @locks none
+ */
+void rd_kafka_txns_term(rd_kafka_t *rk) {
+
+ RD_IF_FREE(rk->rk_eos.txn_errstr, rd_free);
+ RD_IF_FREE(rk->rk_eos.txn_curr_api.error, rd_kafka_error_destroy);
+
+ mtx_destroy(&rk->rk_eos.txn_curr_api.lock);
+ cnd_destroy(&rk->rk_eos.txn_curr_api.cnd);
+
+ rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_coord_tmr, 1);
+ rd_kafka_timer_stop(&rk->rk_timers, &rk->rk_eos.txn_register_parts_tmr,
+ 1);
+
+ if (rk->rk_eos.txn_curr_coord)
+ rd_kafka_broker_destroy(rk->rk_eos.txn_curr_coord);
+
+ /* Logical coordinator */
+ rd_kafka_broker_persistent_connection_del(
+ rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord);
+ rd_kafka_broker_monitor_del(&rk->rk_eos.txn_coord_mon);
+ rd_kafka_broker_destroy(rk->rk_eos.txn_coord);
+ rk->rk_eos.txn_coord = NULL;
+
+ mtx_lock(&rk->rk_eos.txn_pending_lock);
+ rd_kafka_txn_clear_pending_partitions(rk);
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+ mtx_destroy(&rk->rk_eos.txn_pending_lock);
+
+ rd_kafka_txn_clear_partitions(rk);
+}
+
+
+/**
+ * @brief Initialize transactions manager.
+ *
+ * @locality application thread
+ * @locks none
+ */
+void rd_kafka_txns_init(rd_kafka_t *rk) {
+ rd_atomic32_init(&rk->rk_eos.txn_may_enq, 0);
+ mtx_init(&rk->rk_eos.txn_pending_lock, mtx_plain);
+ TAILQ_INIT(&rk->rk_eos.txn_pending_rktps);
+ TAILQ_INIT(&rk->rk_eos.txn_waitresp_rktps);
+ TAILQ_INIT(&rk->rk_eos.txn_rktps);
+
+ mtx_init(&rk->rk_eos.txn_curr_api.lock, mtx_plain);
+ cnd_init(&rk->rk_eos.txn_curr_api.cnd);
+
+ /* Logical coordinator */
+ rk->rk_eos.txn_coord =
+ rd_kafka_broker_add_logical(rk, "TxnCoordinator");
+
+ rd_kafka_broker_monitor_add(&rk->rk_eos.txn_coord_mon,
+ rk->rk_eos.txn_coord, rk->rk_ops,
+ rd_kafka_txn_coord_monitor_cb);
+
+ rd_kafka_broker_persistent_connection_add(
+ rk->rk_eos.txn_coord, &rk->rk_eos.txn_coord->rkb_persistconn.coord);
+
+ rd_atomic64_init(&rk->rk_eos.txn_dr_fails, 0);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h
new file mode 100644
index 000000000..3c088d09a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_txnmgr.h
@@ -0,0 +1,171 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDKAFKA_TXNMGR_H_
+#define _RDKAFKA_TXNMGR_H_
+
+/**
+ * @returns true if transaction state allows enqueuing new messages
+ * (i.e., produce()), else false.
+ *
+ * @locality application thread
+ * @locks none
+ */
+static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_enq_msg(rd_kafka_t *rk) {
+ return !rd_kafka_is_transactional(rk) ||
+ rd_atomic32_get(&rk->rk_eos.txn_may_enq);
+}
+
+
+/**
+ * @returns true if transaction state allows sending messages to broker,
+ * else false.
+ *
+ * @locality broker thread
+ * @locks none
+ */
+static RD_INLINE RD_UNUSED rd_bool_t rd_kafka_txn_may_send_msg(rd_kafka_t *rk) {
+ rd_bool_t ret;
+
+ rd_kafka_rdlock(rk);
+ ret = (rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_IN_TRANSACTION ||
+ rk->rk_eos.txn_state == RD_KAFKA_TXN_STATE_BEGIN_COMMIT);
+ rd_kafka_rdunlock(rk);
+
+ return ret;
+}
+
+
+/**
+ * @returns true if transaction and partition state allows sending queued
+ * messages to broker, else false.
+ *
+ * @locality any
+ * @locks toppar_lock MUST be held
+ */
+static RD_INLINE RD_UNUSED rd_bool_t
+rd_kafka_txn_toppar_may_send_msg(rd_kafka_toppar_t *rktp) {
+ if (likely(rktp->rktp_flags & RD_KAFKA_TOPPAR_F_IN_TXN))
+ return rd_true;
+
+ return rd_false;
+}
+
+
+
+void rd_kafka_txn_schedule_register_partitions(rd_kafka_t *rk, int backoff_ms);
+
+
+/**
+ * @brief Add partition to transaction (unless already added).
+ *
+ * The partition will first be added to the pending list (txn_pending_rktps)
+ * awaiting registration on the coordinator with AddPartitionsToTxnRequest.
+ * On successful registration the partition is flagged as IN_TXN and removed
+ * from the pending list.
+ *
+ * @locality application thread
+ * @locks none
+ */
+static RD_INLINE RD_UNUSED void
+rd_kafka_txn_add_partition(rd_kafka_toppar_t *rktp) {
+ rd_kafka_t *rk;
+ rd_bool_t schedule = rd_false;
+
+ rd_kafka_toppar_lock(rktp);
+
+ /* Already added or registered */
+ if (likely(rktp->rktp_flags &
+ (RD_KAFKA_TOPPAR_F_PEND_TXN | RD_KAFKA_TOPPAR_F_IN_TXN))) {
+ rd_kafka_toppar_unlock(rktp);
+ return;
+ }
+
+ rktp->rktp_flags |= RD_KAFKA_TOPPAR_F_PEND_TXN;
+
+ rd_kafka_toppar_unlock(rktp);
+
+ rk = rktp->rktp_rkt->rkt_rk;
+
+ mtx_lock(&rk->rk_eos.txn_pending_lock);
+ schedule = TAILQ_EMPTY(&rk->rk_eos.txn_pending_rktps);
+
+ /* List is sorted by topic name since AddPartitionsToTxnRequest()
+ * requires it. */
+ TAILQ_INSERT_SORTED(&rk->rk_eos.txn_pending_rktps, rktp,
+ rd_kafka_toppar_t *, rktp_txnlink,
+ rd_kafka_toppar_topic_cmp);
+ rd_kafka_toppar_keep(rktp);
+ mtx_unlock(&rk->rk_eos.txn_pending_lock);
+
+ rd_kafka_dbg(rk, EOS, "ADDPARTS",
+ "Marked %.*s [%" PRId32
+ "] as part of transaction: "
+ "%sscheduling registration",
+ RD_KAFKAP_STR_PR(rktp->rktp_rkt->rkt_topic),
+ rktp->rktp_partition, schedule ? "" : "not ");
+
+
+ /* Schedule registration of partitions by the rdkafka main thread */
+ if (unlikely(schedule))
+ rd_kafka_txn_schedule_register_partitions(rk, 1 /*immediate*/);
+}
+
+
+
+void rd_kafka_txn_idemp_state_change(rd_kafka_t *rk,
+ rd_kafka_idemp_state_t state);
+
+void rd_kafka_txn_set_abortable_error0(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_bool_t requires_epoch_bump,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 4, 5);
+#define rd_kafka_txn_set_abortable_error(rk, err, ...) \
+ rd_kafka_txn_set_abortable_error0(rk, err, rd_false, __VA_ARGS__)
+
+#define rd_kafka_txn_set_abortable_error_with_bump(rk, err, ...) \
+ rd_kafka_txn_set_abortable_error0(rk, err, rd_true, __VA_ARGS__)
+
+void rd_kafka_txn_set_fatal_error(rd_kafka_t *rk,
+ rd_dolock_t do_lock,
+ rd_kafka_resp_err_t err,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 4, 5);
+
+rd_bool_t rd_kafka_txn_coord_query(rd_kafka_t *rk, const char *reason);
+
+rd_bool_t rd_kafka_txn_coord_set(rd_kafka_t *rk,
+ rd_kafka_broker_t *rkb,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 3, 4);
+
+void rd_kafka_txns_term(rd_kafka_t *rk);
+void rd_kafka_txns_init(rd_kafka_t *rk);
+
+#endif /* _RDKAFKA_TXNMGR_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c
new file mode 100644
index 000000000..68b01a4e1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.c
@@ -0,0 +1,226 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdkafka_zstd.h"
+
+#if WITH_ZSTD_STATIC
+/* Enable advanced/unstable API for initCStream_srcSize */
+#define ZSTD_STATIC_LINKING_ONLY
+#endif
+
+#include <zstd.h>
+#include <zstd_errors.h>
+
+rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb,
+ char *inbuf,
+ size_t inlen,
+ void **outbuf,
+ size_t *outlenp) {
+ unsigned long long out_bufsize = ZSTD_getFrameContentSize(inbuf, inlen);
+
+ switch (out_bufsize) {
+ case ZSTD_CONTENTSIZE_UNKNOWN:
+ /* Decompressed size cannot be determined, make a guess */
+ out_bufsize = inlen * 2;
+ break;
+ case ZSTD_CONTENTSIZE_ERROR:
+ /* Error calculating frame content size */
+ rd_rkb_dbg(rkb, MSG, "ZSTD",
+ "Unable to begin ZSTD decompression "
+ "(out buffer is %llu bytes): %s",
+ out_bufsize, "Error in determining frame size");
+ return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ default:
+ break;
+ }
+
+ /* Increase output buffer until it can fit the entire result,
+ * capped by message.max.bytes */
+ while (out_bufsize <=
+ (unsigned long long)rkb->rkb_rk->rk_conf.recv_max_msg_size) {
+ size_t ret;
+ char *decompressed;
+
+ decompressed = rd_malloc((size_t)out_bufsize);
+ if (!decompressed) {
+ rd_rkb_dbg(rkb, MSG, "ZSTD",
+ "Unable to allocate output buffer "
+ "(%llu bytes for %" PRIusz
+ " compressed bytes): %s",
+ out_bufsize, inlen, rd_strerror(errno));
+ return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ }
+
+
+ ret = ZSTD_decompress(decompressed, (size_t)out_bufsize, inbuf,
+ inlen);
+ if (!ZSTD_isError(ret)) {
+ *outlenp = ret;
+ *outbuf = decompressed;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_free(decompressed);
+
+ /* Check if the destination size is too small */
+ if (ZSTD_getErrorCode(ret) == ZSTD_error_dstSize_tooSmall) {
+
+ /* Grow quadratically */
+ out_bufsize += RD_MAX(out_bufsize * 2, 4000);
+
+ rd_atomic64_add(&rkb->rkb_c.zbuf_grow, 1);
+
+ } else {
+ /* Fail on any other error */
+ rd_rkb_dbg(rkb, MSG, "ZSTD",
+ "Unable to begin ZSTD decompression "
+ "(out buffer is %llu bytes): %s",
+ out_bufsize, ZSTD_getErrorName(ret));
+ return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ }
+ }
+
+ rd_rkb_dbg(rkb, MSG, "ZSTD",
+ "Unable to decompress ZSTD "
+ "(input buffer %" PRIusz
+ ", output buffer %llu): "
+ "output would exceed message.max.bytes (%d)",
+ inlen, out_bufsize, rkb->rkb_rk->rk_conf.max_msg_size);
+
+ return RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+}
+
+
+rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb,
+ int comp_level,
+ rd_slice_t *slice,
+ void **outbuf,
+ size_t *outlenp) {
+ ZSTD_CStream *cctx;
+ size_t r;
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ size_t len = rd_slice_remains(slice);
+ ZSTD_outBuffer out;
+ ZSTD_inBuffer in;
+
+ *outbuf = NULL;
+ out.pos = 0;
+ out.size = ZSTD_compressBound(len);
+ out.dst = rd_malloc(out.size);
+ if (!out.dst) {
+ rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
+ "Unable to allocate output buffer "
+ "(%" PRIusz " bytes): %s",
+ out.size, rd_strerror(errno));
+ return RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ }
+
+
+ cctx = ZSTD_createCStream();
+ if (!cctx) {
+ rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
+ "Unable to create ZSTD compression context");
+ err = RD_KAFKA_RESP_ERR__CRIT_SYS_RESOURCE;
+ goto done;
+ }
+
+#if defined(WITH_ZSTD_STATIC) && \
+ ZSTD_VERSION_NUMBER >= (1 * 100 * 100 + 2 * 100 + 1) /* v1.2.1 */
+ r = ZSTD_initCStream_srcSize(cctx, comp_level, len);
+#else
+ /* libzstd not linked statically (or zstd version < 1.2.1):
+ * decompression in consumer may be more costly due to
+ * decompressed size not included in header by librdkafka producer */
+ r = ZSTD_initCStream(cctx, comp_level);
+#endif
+ if (ZSTD_isError(r)) {
+ rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
+ "Unable to begin ZSTD compression "
+ "(out buffer is %" PRIusz " bytes): %s",
+ out.size, ZSTD_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ while ((in.size = rd_slice_reader(slice, &in.src))) {
+ in.pos = 0;
+ r = ZSTD_compressStream(cctx, &out, &in);
+ if (unlikely(ZSTD_isError(r))) {
+ rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
+ "ZSTD compression failed "
+ "(at of %" PRIusz
+ " bytes, with "
+ "%" PRIusz
+ " bytes remaining in out buffer): "
+ "%s",
+ in.size, out.size - out.pos,
+ ZSTD_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ /* No space left in output buffer,
+ * but input isn't fully consumed */
+ if (in.pos < in.size) {
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+ }
+
+ if (rd_slice_remains(slice) != 0) {
+ rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
+ "Failed to finalize ZSTD compression "
+ "of %" PRIusz " bytes: %s",
+ len, "Unexpected trailing data");
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ r = ZSTD_endStream(cctx, &out);
+ if (unlikely(ZSTD_isError(r) || r > 0)) {
+ rd_rkb_dbg(rkb, MSG, "ZSTDCOMPR",
+ "Failed to finalize ZSTD compression "
+ "of %" PRIusz " bytes: %s",
+ len, ZSTD_getErrorName(r));
+ err = RD_KAFKA_RESP_ERR__BAD_COMPRESSION;
+ goto done;
+ }
+
+ *outbuf = out.dst;
+ *outlenp = out.pos;
+
+done:
+ if (cctx)
+ ZSTD_freeCStream(cctx);
+
+ if (err)
+ rd_free(out.dst);
+
+ return err;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h
new file mode 100644
index 000000000..f87c4c6fb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdkafka_zstd.h
@@ -0,0 +1,57 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDZSTD_H_
+#define _RDZSTD_H_
+
+/**
+ * @brief Decompress ZSTD framed data.
+ *
+ * @returns allocated buffer in \p *outbuf, length in \p *outlenp on success.
+ */
+rd_kafka_resp_err_t rd_kafka_zstd_decompress(rd_kafka_broker_t *rkb,
+ char *inbuf,
+ size_t inlen,
+ void **outbuf,
+ size_t *outlenp);
+
+/**
+ * Allocate space for \p *outbuf and compress all \p iovlen buffers in \p iov.
+ * @param MessageSetSize indicates (at least) full uncompressed data size,
+ * possibly including MessageSet fields that will not
+ * be compressed.
+ *
+ * @returns allocated buffer in \p *outbuf, length in \p *outlenp.
+ */
+rd_kafka_resp_err_t rd_kafka_zstd_compress(rd_kafka_broker_t *rkb,
+ int comp_level,
+ rd_slice_t *slice,
+ void **outbuf,
+ size_t *outlenp);
+
+#endif /* _RDZSTD_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c
new file mode 100644
index 000000000..c71e3004a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.c
@@ -0,0 +1,546 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdlist.h"
+
+
+void rd_list_dump(const char *what, const rd_list_t *rl) {
+ int i;
+ printf("%s: (rd_list_t*)%p cnt %d, size %d, elems %p:\n", what, rl,
+ rl->rl_cnt, rl->rl_size, rl->rl_elems);
+ for (i = 0; i < rl->rl_cnt; i++)
+ printf(" #%d: %p at &%p\n", i, rl->rl_elems[i],
+ &rl->rl_elems[i]);
+}
+
+void rd_list_grow(rd_list_t *rl, size_t size) {
+ rd_assert(!(rl->rl_flags & RD_LIST_F_FIXED_SIZE));
+ rl->rl_size += (int)size;
+ if (unlikely(rl->rl_size == 0))
+ return; /* avoid zero allocations */
+ rl->rl_elems =
+ rd_realloc(rl->rl_elems, sizeof(*rl->rl_elems) * rl->rl_size);
+}
+
+rd_list_t *
+rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *)) {
+ memset(rl, 0, sizeof(*rl));
+
+ if (initial_size > 0)
+ rd_list_grow(rl, initial_size);
+
+ rl->rl_free_cb = free_cb;
+
+ return rl;
+}
+
+rd_list_t *rd_list_init_copy(rd_list_t *dst, const rd_list_t *src) {
+
+ if (src->rl_flags & RD_LIST_F_FIXED_SIZE) {
+ /* Source was preallocated, prealloc new dst list */
+ rd_list_init(dst, 0, src->rl_free_cb);
+
+ rd_list_prealloc_elems(dst, src->rl_elemsize, src->rl_size,
+ 1 /*memzero*/);
+ } else {
+ /* Source is dynamic, initialize dst the same */
+ rd_list_init(dst, rd_list_cnt(src), src->rl_free_cb);
+ }
+
+ return dst;
+}
+
+static RD_INLINE rd_list_t *rd_list_alloc(void) {
+ return rd_malloc(sizeof(rd_list_t));
+}
+
+rd_list_t *rd_list_new(int initial_size, void (*free_cb)(void *)) {
+ rd_list_t *rl = rd_list_alloc();
+ rd_list_init(rl, initial_size, free_cb);
+ rl->rl_flags |= RD_LIST_F_ALLOCATED;
+ return rl;
+}
+
+
+void rd_list_prealloc_elems(rd_list_t *rl,
+ size_t elemsize,
+ size_t cnt,
+ int memzero) {
+ size_t allocsize;
+ char *p;
+ size_t i;
+
+ rd_assert(!rl->rl_elems);
+
+ /* Allocation layout:
+ * void *ptrs[cnt];
+ * elems[elemsize][cnt];
+ */
+
+ allocsize = (sizeof(void *) * cnt) + (elemsize * cnt);
+ if (memzero)
+ rl->rl_elems = rd_calloc(1, allocsize);
+ else
+ rl->rl_elems = rd_malloc(allocsize);
+
+ /* p points to first element's memory, unless elemsize is 0. */
+ if (elemsize > 0)
+ p = rl->rl_p = (char *)&rl->rl_elems[cnt];
+ else
+ p = rl->rl_p = NULL;
+
+ /* Pointer -> elem mapping */
+ for (i = 0; i < cnt; i++, p += elemsize)
+ rl->rl_elems[i] = p;
+
+ rl->rl_size = (int)cnt;
+ rl->rl_cnt = 0;
+ rl->rl_flags |= RD_LIST_F_FIXED_SIZE;
+ rl->rl_elemsize = (int)elemsize;
+}
+
+
+void rd_list_set_cnt(rd_list_t *rl, size_t cnt) {
+ rd_assert(rl->rl_flags & RD_LIST_F_FIXED_SIZE);
+ rd_assert((int)cnt <= rl->rl_size);
+ rl->rl_cnt = (int)cnt;
+}
+
+
+void rd_list_free_cb(rd_list_t *rl, void *ptr) {
+ if (rl->rl_free_cb && ptr)
+ rl->rl_free_cb(ptr);
+}
+
+
+void *rd_list_add(rd_list_t *rl, void *elem) {
+ if (rl->rl_cnt == rl->rl_size)
+ rd_list_grow(rl, rl->rl_size ? rl->rl_size * 2 : 16);
+ rl->rl_flags &= ~RD_LIST_F_SORTED;
+ if (elem)
+ rl->rl_elems[rl->rl_cnt] = elem;
+ return rl->rl_elems[rl->rl_cnt++];
+}
+
+void rd_list_set(rd_list_t *rl, int idx, void *ptr) {
+ if (idx >= rl->rl_size)
+ rd_list_grow(rl, idx + 1);
+
+ if (idx >= rl->rl_cnt) {
+ memset(&rl->rl_elems[rl->rl_cnt], 0,
+ sizeof(*rl->rl_elems) * (idx - rl->rl_cnt));
+ rl->rl_cnt = idx + 1;
+ } else {
+ /* Not allowed to replace existing element. */
+ rd_assert(!rl->rl_elems[idx]);
+ }
+
+ rl->rl_elems[idx] = ptr;
+}
+
+
+
+void rd_list_remove_elem(rd_list_t *rl, int idx) {
+ rd_assert(idx < rl->rl_cnt);
+
+ if (idx + 1 < rl->rl_cnt)
+ memmove(&rl->rl_elems[idx], &rl->rl_elems[idx + 1],
+ sizeof(*rl->rl_elems) * (rl->rl_cnt - (idx + 1)));
+ rl->rl_cnt--;
+}
+
+void *rd_list_remove(rd_list_t *rl, void *match_elem) {
+ void *elem;
+ int i;
+
+ RD_LIST_FOREACH(elem, rl, i) {
+ if (elem == match_elem) {
+ rd_list_remove_elem(rl, i);
+ return elem;
+ }
+ }
+
+ return NULL;
+}
+
+
+void *rd_list_remove_cmp(rd_list_t *rl,
+ void *match_elem,
+ int (*cmp)(void *_a, void *_b)) {
+ void *elem;
+ int i;
+
+ RD_LIST_FOREACH(elem, rl, i) {
+ if (elem == match_elem || !cmp(elem, match_elem)) {
+ rd_list_remove_elem(rl, i);
+ return elem;
+ }
+ }
+
+ return NULL;
+}
+
+
+int rd_list_remove_multi_cmp(rd_list_t *rl,
+ void *match_elem,
+ int (*cmp)(void *_a, void *_b)) {
+
+ void *elem;
+ int i;
+ int cnt = 0;
+
+ /* Scan backwards to minimize memmoves */
+ RD_LIST_FOREACH_REVERSE(elem, rl, i) {
+ if (match_elem == cmp || !cmp(elem, match_elem)) {
+ rd_list_remove_elem(rl, i);
+ cnt++;
+ }
+ }
+
+ return cnt;
+}
+
+
+void *rd_list_pop(rd_list_t *rl) {
+ void *elem;
+ int idx = rl->rl_cnt - 1;
+
+ if (idx < 0)
+ return NULL;
+
+ elem = rl->rl_elems[idx];
+ rd_list_remove_elem(rl, idx);
+
+ return elem;
+}
+
+
+/**
+ * Trampoline to avoid the double pointers in callbacks.
+ *
+ * rl_elems is a **, but to avoid having the application do the cumbersome
+ * ** -> * casting we wrap this here and provide a simple * pointer to the
+ * the callbacks.
+ *
+ * This is true for all list comparator uses, i.e., both sort() and find().
+ */
+static RD_TLS int (*rd_list_cmp_curr)(const void *, const void *);
+
+static RD_INLINE int rd_list_cmp_trampoline(const void *_a, const void *_b) {
+ const void *a = *(const void **)_a, *b = *(const void **)_b;
+
+ return rd_list_cmp_curr(a, b);
+}
+
+void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *)) {
+ if (unlikely(rl->rl_elems == NULL))
+ return;
+
+ rd_list_cmp_curr = cmp;
+ qsort(rl->rl_elems, rl->rl_cnt, sizeof(*rl->rl_elems),
+ rd_list_cmp_trampoline);
+ rl->rl_flags |= RD_LIST_F_SORTED;
+}
+
+static void rd_list_destroy_elems(rd_list_t *rl) {
+ int i;
+
+ if (!rl->rl_elems)
+ return;
+
+ if (rl->rl_free_cb) {
+ /* Free in reverse order to allow deletions */
+ for (i = rl->rl_cnt - 1; i >= 0; i--)
+ if (rl->rl_elems[i])
+ rl->rl_free_cb(rl->rl_elems[i]);
+ }
+
+ rd_free(rl->rl_elems);
+ rl->rl_elems = NULL;
+ rl->rl_cnt = 0;
+ rl->rl_size = 0;
+ rl->rl_flags &= ~RD_LIST_F_SORTED;
+}
+
+
+void rd_list_clear(rd_list_t *rl) {
+ rd_list_destroy_elems(rl);
+}
+
+
+void rd_list_destroy(rd_list_t *rl) {
+ rd_list_destroy_elems(rl);
+ if (rl->rl_flags & RD_LIST_F_ALLOCATED)
+ rd_free(rl);
+}
+
+void rd_list_destroy_free(void *rl) {
+ rd_list_destroy((rd_list_t *)rl);
+}
+
+void *rd_list_elem(const rd_list_t *rl, int idx) {
+ if (likely(idx < rl->rl_cnt))
+ return (void *)rl->rl_elems[idx];
+ return NULL;
+}
+
+int rd_list_index(const rd_list_t *rl,
+ const void *match,
+ int (*cmp)(const void *, const void *)) {
+ int i;
+ const void *elem;
+
+ RD_LIST_FOREACH(elem, rl, i) {
+ if (!cmp(match, elem))
+ return i;
+ }
+
+ return -1;
+}
+
+
+void *rd_list_find(const rd_list_t *rl,
+ const void *match,
+ int (*cmp)(const void *, const void *)) {
+ int i;
+ const void *elem;
+
+ if (rl->rl_flags & RD_LIST_F_SORTED) {
+ void **r;
+ rd_list_cmp_curr = cmp;
+ r = bsearch(&match /*ptrptr to match elems*/, rl->rl_elems,
+ rl->rl_cnt, sizeof(*rl->rl_elems),
+ rd_list_cmp_trampoline);
+ return r ? *r : NULL;
+ }
+
+ RD_LIST_FOREACH(elem, rl, i) {
+ if (!cmp(match, elem))
+ return (void *)elem;
+ }
+
+ return NULL;
+}
+
+
+void *rd_list_first(const rd_list_t *rl) {
+ if (rl->rl_cnt == 0)
+ return NULL;
+ return rl->rl_elems[0];
+}
+
+void *rd_list_last(const rd_list_t *rl) {
+ if (rl->rl_cnt == 0)
+ return NULL;
+ return rl->rl_elems[rl->rl_cnt - 1];
+}
+
+
+void *rd_list_find_duplicate(const rd_list_t *rl,
+ int (*cmp)(const void *, const void *)) {
+ int i;
+
+ rd_assert(rl->rl_flags & RD_LIST_F_SORTED);
+
+ for (i = 1; i < rl->rl_cnt; i++) {
+ if (!cmp(rl->rl_elems[i - 1], rl->rl_elems[i]))
+ return rl->rl_elems[i];
+ }
+
+ return NULL;
+}
+
+int rd_list_cmp(const rd_list_t *a,
+ const rd_list_t *b,
+ int (*cmp)(const void *, const void *)) {
+ int i;
+
+ i = RD_CMP(a->rl_cnt, b->rl_cnt);
+ if (i)
+ return i;
+
+ for (i = 0; i < a->rl_cnt; i++) {
+ int r = cmp(a->rl_elems[i], b->rl_elems[i]);
+ if (r)
+ return r;
+ }
+
+ return 0;
+}
+
+
+/**
+ * @brief Simple element pointer comparator
+ */
+int rd_list_cmp_ptr(const void *a, const void *b) {
+ return RD_CMP(a, b);
+}
+
+int rd_list_cmp_str(const void *a, const void *b) {
+ return strcmp((const char *)a, (const char *)b);
+}
+
+void rd_list_apply(rd_list_t *rl,
+ int (*cb)(void *elem, void *opaque),
+ void *opaque) {
+ void *elem;
+ int i;
+
+ RD_LIST_FOREACH(elem, rl, i) {
+ if (!cb(elem, opaque)) {
+ rd_list_remove_elem(rl, i);
+ i--;
+ }
+ }
+
+ return;
+}
+
+
+/**
+ * @brief Default element copier that simply assigns the original pointer.
+ */
+static void *rd_list_nocopy_ptr(const void *elem, void *opaque) {
+ return (void *)elem;
+}
+
+rd_list_t *
+rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque) {
+ rd_list_t *dst;
+
+ dst = rd_list_new(src->rl_cnt, src->rl_free_cb);
+
+ rd_list_copy_to(dst, src, copy_cb, opaque);
+ return dst;
+}
+
+
+void rd_list_copy_to(rd_list_t *dst,
+ const rd_list_t *src,
+ void *(*copy_cb)(const void *elem, void *opaque),
+ void *opaque) {
+ void *elem;
+ int i;
+
+ rd_assert(dst != src);
+
+ if (!copy_cb)
+ copy_cb = rd_list_nocopy_ptr;
+
+ RD_LIST_FOREACH(elem, src, i) {
+ void *celem = copy_cb(elem, opaque);
+ if (celem)
+ rd_list_add(dst, celem);
+ }
+}
+
+
+/**
+ * @brief Copy elements of preallocated \p src to preallocated \p dst.
+ *
+ * @remark \p dst will be overwritten and initialized, but its
+ * flags will be retained.
+ *
+ * @returns \p dst
+ */
+static rd_list_t *rd_list_copy_preallocated0(rd_list_t *dst,
+ const rd_list_t *src) {
+ int dst_flags = dst->rl_flags & RD_LIST_F_ALLOCATED;
+
+ rd_assert(dst != src);
+
+ rd_list_init_copy(dst, src);
+ dst->rl_flags |= dst_flags;
+
+ rd_assert((dst->rl_flags & RD_LIST_F_FIXED_SIZE));
+ rd_assert((src->rl_flags & RD_LIST_F_FIXED_SIZE));
+ rd_assert(dst->rl_elemsize == src->rl_elemsize &&
+ dst->rl_size == src->rl_size);
+
+ memcpy(dst->rl_p, src->rl_p, src->rl_elemsize * src->rl_size);
+ dst->rl_cnt = src->rl_cnt;
+
+ return dst;
+}
+
+void *rd_list_copy_preallocated(const void *elem, void *opaque) {
+ return rd_list_copy_preallocated0(rd_list_new(0, NULL),
+ (const rd_list_t *)elem);
+}
+
+
+
+void rd_list_move(rd_list_t *dst, rd_list_t *src) {
+ rd_list_init_copy(dst, src);
+
+ if (src->rl_flags & RD_LIST_F_FIXED_SIZE) {
+ rd_list_copy_preallocated0(dst, src);
+ } else {
+ memcpy(dst->rl_elems, src->rl_elems,
+ src->rl_cnt * sizeof(*src->rl_elems));
+ dst->rl_cnt = src->rl_cnt;
+ }
+
+ src->rl_cnt = 0;
+}
+
+
+/**
+ * @name Misc helpers for common list types
+ * @{
+ *
+ */
+rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size) {
+ int rl_flags = rl->rl_flags & RD_LIST_F_ALLOCATED;
+ rd_list_init(rl, 0, NULL);
+ rl->rl_flags |= rl_flags;
+ rd_list_prealloc_elems(rl, sizeof(int32_t), max_size, 1 /*memzero*/);
+ return rl;
+}
+
+void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val) {
+ rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) &&
+ rl->rl_elemsize == sizeof(int32_t));
+ rd_assert(idx < rl->rl_size);
+
+ memcpy(rl->rl_elems[idx], &val, sizeof(int32_t));
+
+ if (rl->rl_cnt <= idx)
+ rl->rl_cnt = idx + 1;
+}
+
+int32_t rd_list_get_int32(const rd_list_t *rl, int idx) {
+ rd_assert((rl->rl_flags & RD_LIST_F_FIXED_SIZE) &&
+ rl->rl_elemsize == sizeof(int32_t) && idx < rl->rl_cnt);
+ return *(int32_t *)rl->rl_elems[idx];
+}
+
+
+
+/**@}*/
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h
new file mode 100644
index 000000000..db5295f6c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdlist.h
@@ -0,0 +1,421 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDLIST_H_
+#define _RDLIST_H_
+
+
+/**
+ *
+ * Simple light-weight append-only list to be used as a collection convenience.
+ *
+ */
+
+typedef struct rd_list_s {
+ int rl_size;
+ int rl_cnt;
+ void **rl_elems;
+ void (*rl_free_cb)(void *);
+ int rl_flags;
+#define RD_LIST_F_ALLOCATED \
+ 0x1 /* The rd_list_t is allocated, \
+ * will be free on destroy() */
+#define RD_LIST_F_SORTED \
+ 0x2 /* Set by sort(), cleared by any mutations. \
+ * When this flag is set bsearch() is used \
+ * by find(), otherwise a linear search. */
+#define RD_LIST_F_FIXED_SIZE 0x4 /* Assert on grow, when prealloc()ed */
+#define RD_LIST_F_UNIQUE \
+ 0x8 /* Don't allow duplicates: \
+ * ONLY ENFORCED BY CALLER. */
+ int rl_elemsize; /**< Element size (when prealloc()ed) */
+ void *rl_p; /**< Start of prealloced elements,
+ * the allocation itself starts at rl_elems
+ */
+} rd_list_t;
+
+
+/**
+ * @brief Initialize a list, prepare for 'initial_size' elements
+ * (optional optimization).
+ * List elements will optionally be freed by \p free_cb.
+ *
+ * @returns \p rl
+ */
+rd_list_t *
+rd_list_init(rd_list_t *rl, int initial_size, void (*free_cb)(void *));
+
+
+/**
+ * @brief Same as rd_list_init() but uses initial_size and free_cb
+ * from the provided \p src list.
+ */
+rd_list_t *rd_list_init_copy(rd_list_t *rl, const rd_list_t *src);
+
+/**
+ * @brief Allocate a new list pointer and initialize
+ * it according to rd_list_init().
+ *
+ * This is the same as calling \c rd_list_init(rd_list_alloc(), ..));
+ *
+ * Use rd_list_destroy() to free.
+ */
+rd_list_t *rd_list_new(int initial_size, void (*free_cb)(void *));
+
+
+/**
+ * @brief Prepare list to for an additional \p size elements.
+ * This is an optimization to avoid incremental grows.
+ */
+void rd_list_grow(rd_list_t *rl, size_t size);
+
+/**
+ * @brief Preallocate elements to avoid having to pass an allocated pointer to
+ * rd_list_add(), instead pass NULL to rd_list_add() and use the returned
+ * pointer as the element.
+ *
+ * @param elemsize element size, or 0 if elements are allocated separately.
+ * @param size number of elements
+ * @param memzero initialize element memory to zeros.
+ *
+ * @remark Preallocated element lists can't grow past \p size.
+ */
+void rd_list_prealloc_elems(rd_list_t *rl,
+ size_t elemsize,
+ size_t size,
+ int memzero);
+
+/**
+ * @brief Set the number of valid elements, this must only be used
+ * with prealloc_elems() to make the preallocated elements directly
+ * usable.
+ */
+void rd_list_set_cnt(rd_list_t *rl, size_t cnt);
+
+
+/**
+ * @brief Free a pointer using the list's free_cb
+ *
+ * @remark If no free_cb is set, or \p ptr is NULL, dont do anything
+ *
+ * Typical use is rd_list_free_cb(rd_list_remove_cmp(....));
+ */
+void rd_list_free_cb(rd_list_t *rl, void *ptr);
+
+
+/**
+ * @brief Append element to list
+ *
+ * @returns \p elem. If \p elem is NULL the default element for that index
+ * will be returned (for use with set_elems).
+ */
+void *rd_list_add(rd_list_t *rl, void *elem);
+
+
+/**
+ * @brief Set element at \p idx to \p ptr.
+ *
+ * @remark MUST NOT overwrite an existing element.
+ * @remark The list will be grown, if needed, any gaps between the current
+ * highest element and \p idx will be set to NULL.
+ */
+void rd_list_set(rd_list_t *rl, int idx, void *ptr);
+
+
+/**
+ * Remove element from list.
+ * This is a slow O(n) + memmove operation.
+ * Returns the removed element.
+ */
+void *rd_list_remove(rd_list_t *rl, void *match_elem);
+
+/**
+ * Remove element from list using comparator.
+ * See rd_list_remove()
+ */
+void *rd_list_remove_cmp(rd_list_t *rl,
+ void *match_elem,
+ int (*cmp)(void *_a, void *_b));
+
+
+/**
+ * @brief Remove element at index \p idx.
+ *
+ * This is a O(1) + memmove operation
+ */
+void rd_list_remove_elem(rd_list_t *rl, int idx);
+
+
+/**
+ * @brief Remove and return the last element in the list.
+ *
+ * @returns the last element, or NULL if list is empty. */
+void *rd_list_pop(rd_list_t *rl);
+
+
+/**
+ * @brief Remove all elements matching comparator.
+ *
+ * @returns the number of elements removed.
+ *
+ * @sa rd_list_remove()
+ */
+int rd_list_remove_multi_cmp(rd_list_t *rl,
+ void *match_elem,
+ int (*cmp)(void *_a, void *_b));
+
+
+/**
+ * @brief Sort list using comparator.
+ *
+ * To sort a list ascendingly the comparator should implement (a - b)
+ * and for descending order implement (b - a).
+ */
+void rd_list_sort(rd_list_t *rl, int (*cmp)(const void *, const void *));
+
+
+/**
+ * Empties the list and frees elements (if there is a free_cb).
+ */
+void rd_list_clear(rd_list_t *rl);
+
+
+/**
+ * Empties the list, frees the element array, and optionally frees
+ * each element using the registered \c rl->rl_free_cb.
+ *
+ * If the list was previously allocated with rd_list_new() it will be freed.
+ */
+void rd_list_destroy(rd_list_t *rl);
+
+/**
+ * @brief Wrapper for rd_list_destroy() that has same signature as free(3),
+ * allowing it to be used as free_cb for nested lists.
+ */
+void rd_list_destroy_free(void *rl);
+
+
+/**
+ * Returns the element at index 'idx', or NULL if out of range.
+ *
+ * Typical iteration is:
+ * int i = 0;
+ * my_type_t *obj;
+ * while ((obj = rd_list_elem(rl, i++)))
+ * do_something(obj);
+ */
+void *rd_list_elem(const rd_list_t *rl, int idx);
+
+#define RD_LIST_FOREACH(elem, listp, idx) \
+ for (idx = 0; (elem = rd_list_elem(listp, idx)); idx++)
+
+#define RD_LIST_FOREACH_REVERSE(elem, listp, idx) \
+ for (idx = (listp)->rl_cnt - 1; \
+ idx >= 0 && (elem = rd_list_elem(listp, idx)); idx--)
+
+/**
+ * Returns the number of elements in list.
+ */
+static RD_INLINE RD_UNUSED int rd_list_cnt(const rd_list_t *rl) {
+ return rl->rl_cnt;
+}
+
+
+/**
+ * Returns true if list is empty
+ */
+#define rd_list_empty(rl) (rd_list_cnt(rl) == 0)
+
+
+/**
+ * @brief Find element index using comparator.
+ *
+ * \p match is the first argument to \p cmp, and each element (up to a match)
+ * is the second argument to \p cmp.
+ *
+ * @remark this is a O(n) scan.
+ * @returns the first matching element or NULL.
+ */
+int rd_list_index(const rd_list_t *rl,
+ const void *match,
+ int (*cmp)(const void *, const void *));
+
+/**
+ * @brief Find element using comparator
+ *
+ * \p match is the first argument to \p cmp, and each element (up to a match)
+ * is the second argument to \p cmp.
+ *
+ * @remark if the list is sorted bsearch() is used, otherwise an O(n) scan.
+ *
+ * @returns the first matching element or NULL.
+ */
+void *rd_list_find(const rd_list_t *rl,
+ const void *match,
+ int (*cmp)(const void *, const void *));
+
+
+
+/**
+ * @returns the first element of the list, or NULL if list is empty.
+ */
+void *rd_list_first(const rd_list_t *rl);
+
+/**
+ * @returns the last element of the list, or NULL if list is empty.
+ */
+void *rd_list_last(const rd_list_t *rl);
+
+
+/**
+ * @returns the first duplicate in the list or NULL if no duplicates.
+ *
+ * @warning The list MUST be sorted.
+ */
+void *rd_list_find_duplicate(const rd_list_t *rl,
+ int (*cmp)(const void *, const void *));
+
+
+/**
+ * @brief Compare list \p a to \p b.
+ *
+ * @returns < 0 if a was "lesser" than b,
+ * > 0 if a was "greater" than b,
+ * 0 if a and b are equal.
+ */
+int rd_list_cmp(const rd_list_t *a,
+ const rd_list_t *b,
+ int (*cmp)(const void *, const void *));
+
+/**
+ * @brief Simple element pointer comparator
+ */
+int rd_list_cmp_ptr(const void *a, const void *b);
+
+/**
+ * @brief strcmp comparator where the list elements are strings.
+ */
+int rd_list_cmp_str(const void *a, const void *b);
+
+
+/**
+ * @brief Apply \p cb to each element in list, if \p cb returns 0
+ * the element will be removed (but not freed).
+ */
+void rd_list_apply(rd_list_t *rl,
+ int (*cb)(void *elem, void *opaque),
+ void *opaque);
+
+
+
+typedef void *(rd_list_copy_cb_t)(const void *elem, void *opaque);
+/**
+ * @brief Copy list \p src, returning a new list,
+ * using optional \p copy_cb (per elem)
+ */
+rd_list_t *
+rd_list_copy(const rd_list_t *src, rd_list_copy_cb_t *copy_cb, void *opaque);
+
+
+/**
+ * @brief Copy list \p src to \p dst using optional \p copy_cb (per elem)
+ * @remark The destination list is not initialized or copied by this function.
+ * @remark copy_cb() may return NULL in which case no element is added,
+ * but the copy callback might have done so itself.
+ */
+void rd_list_copy_to(rd_list_t *dst,
+ const rd_list_t *src,
+ void *(*copy_cb)(const void *elem, void *opaque),
+ void *opaque);
+
+
+/**
+ * @brief Copy callback to copy elements that are preallocated lists.
+ */
+void *rd_list_copy_preallocated(const void *elem, void *opaque);
+
+
+/**
+ * @brief String copier for rd_list_copy()
+ */
+static RD_UNUSED void *rd_list_string_copy(const void *elem, void *opaque) {
+ return rd_strdup((const char *)elem);
+}
+
+
+
+/**
+ * @brief Move elements from \p src to \p dst.
+ *
+ * @remark \p dst will be initialized first.
+ * @remark \p src will be emptied.
+ */
+void rd_list_move(rd_list_t *dst, rd_list_t *src);
+
+
+/**
+ * @name Misc helpers for common list types
+ * @{
+ *
+ */
+
+/**
+ * @brief Init a new list of int32_t's of maximum size \p max_size
+ * where each element is pre-allocated.
+ *
+ * @remark The allocation flag of the original \p rl is retained,
+ * do not pass an uninitialized \p rl to this function.
+ */
+rd_list_t *rd_list_init_int32(rd_list_t *rl, int max_size);
+
+
+/**
+ * Debugging: Print list to stdout.
+ */
+void rd_list_dump(const char *what, const rd_list_t *rl);
+
+
+
+/**
+ * @brief Set element at index \p idx to value \p val.
+ *
+ * @remark Must only be used with preallocated int32_t lists.
+ * @remark Allows values to be overwritten.
+ */
+void rd_list_set_int32(rd_list_t *rl, int idx, int32_t val);
+
+/**
+ * @returns the int32_t element value at index \p idx
+ *
+ * @remark Must only be used with preallocated int32_t lists.
+ */
+int32_t rd_list_get_int32(const rd_list_t *rl, int idx);
+
+/**@}*/
+
+#endif /* _RDLIST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c
new file mode 100644
index 000000000..19fbbb161
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.c
@@ -0,0 +1,89 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdlog.h"
+
+#include <stdarg.h>
+#include <string.h>
+#include <ctype.h>
+
+
+
+void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len) {
+ const char *p = (const char *)ptr;
+ size_t of = 0;
+
+
+ if (name)
+ fprintf(fp, "%s hexdump (%" PRIusz " bytes):\n", name, len);
+
+ for (of = 0; of < len; of += 16) {
+ char hexen[16 * 3 + 1];
+ char charen[16 + 1];
+ int hof = 0;
+
+ int cof = 0;
+ unsigned int i;
+
+ for (i = (unsigned int)of; i < (unsigned int)of + 16 && i < len;
+ i++) {
+ hof += rd_snprintf(hexen + hof, sizeof(hexen) - hof,
+ "%02x ", p[i] & 0xff);
+ cof +=
+ rd_snprintf(charen + cof, sizeof(charen) - cof,
+ "%c", isprint((int)p[i]) ? p[i] : '.');
+ }
+ fprintf(fp, "%08zx: %-48s %-16s\n", of, hexen, charen);
+ }
+}
+
+
+void rd_iov_print(const char *what,
+ int iov_idx,
+ const struct iovec *iov,
+ int hexdump) {
+ printf("%s: iov #%i: %" PRIusz "\n", what, iov_idx,
+ (size_t)iov->iov_len);
+ if (hexdump)
+ rd_hexdump(stdout, what, iov->iov_base, iov->iov_len);
+}
+
+
+void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump) {
+ int i;
+ size_t len = 0;
+
+ printf("%s: iovlen %" PRIusz "\n", what, (size_t)msg->msg_iovlen);
+
+ for (i = 0; i < (int)msg->msg_iovlen; i++) {
+ rd_iov_print(what, i, &msg->msg_iov[i], hexdump);
+ len += msg->msg_iov[i].iov_len;
+ }
+ printf("%s: ^ message was %" PRIusz " bytes in total\n", what, len);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h
new file mode 100644
index 000000000..f360a0b66
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdlog.h
@@ -0,0 +1,41 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDLOG_H_
+#define _RDLOG_H_
+
+void rd_hexdump(FILE *fp, const char *name, const void *ptr, size_t len);
+
+void rd_iov_print(const char *what,
+ int iov_idx,
+ const struct iovec *iov,
+ int hexdump);
+struct msghdr;
+void rd_msghdr_print(const char *what, const struct msghdr *msg, int hexdump);
+
+#endif /* _RDLOG_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c
new file mode 100644
index 000000000..4b8547033
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.c
@@ -0,0 +1,487 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdsysqueue.h"
+#include "rdstring.h"
+#include "rdmap.h"
+
+
+static RD_INLINE int rd_map_elem_cmp(const rd_map_elem_t *a,
+ const rd_map_elem_t *b,
+ const rd_map_t *rmap) {
+ int r = a->hash - b->hash;
+ if (r != 0)
+ return r;
+ return rmap->rmap_cmp(a->key, b->key);
+}
+
+static void rd_map_elem_destroy(rd_map_t *rmap, rd_map_elem_t *elem) {
+ rd_assert(rmap->rmap_cnt > 0);
+ rmap->rmap_cnt--;
+ if (rmap->rmap_destroy_key)
+ rmap->rmap_destroy_key((void *)elem->key);
+ if (rmap->rmap_destroy_value)
+ rmap->rmap_destroy_value((void *)elem->value);
+ LIST_REMOVE(elem, hlink);
+ LIST_REMOVE(elem, link);
+ rd_free(elem);
+}
+
+static rd_map_elem_t *
+rd_map_find(const rd_map_t *rmap, int *bktp, const rd_map_elem_t *skel) {
+ int bkt = skel->hash % rmap->rmap_buckets.cnt;
+ rd_map_elem_t *elem;
+
+ if (bktp)
+ *bktp = bkt;
+
+ LIST_FOREACH(elem, &rmap->rmap_buckets.p[bkt], hlink) {
+ if (!rd_map_elem_cmp(skel, elem, rmap))
+ return elem;
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @brief Create and return new element based on \p skel without value set.
+ */
+static rd_map_elem_t *
+rd_map_insert(rd_map_t *rmap, int bkt, const rd_map_elem_t *skel) {
+ rd_map_elem_t *elem;
+
+ elem = rd_calloc(1, sizeof(*elem));
+ elem->hash = skel->hash;
+ elem->key = skel->key; /* takes ownership of key */
+ LIST_INSERT_HEAD(&rmap->rmap_buckets.p[bkt], elem, hlink);
+ LIST_INSERT_HEAD(&rmap->rmap_iter, elem, link);
+ rmap->rmap_cnt++;
+
+ return elem;
+}
+
+
+rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value) {
+ rd_map_elem_t skel = {.key = key, .hash = rmap->rmap_hash(key)};
+ rd_map_elem_t *elem;
+ int bkt;
+
+ if (!(elem = rd_map_find(rmap, &bkt, &skel))) {
+ elem = rd_map_insert(rmap, bkt, &skel);
+ } else {
+ if (elem->value && rmap->rmap_destroy_value)
+ rmap->rmap_destroy_value((void *)elem->value);
+ if (rmap->rmap_destroy_key)
+ rmap->rmap_destroy_key(key);
+ }
+
+ elem->value = value; /* takes ownership of value */
+
+ return elem;
+}
+
+
+void *rd_map_get(const rd_map_t *rmap, const void *key) {
+ const rd_map_elem_t skel = {.key = (void *)key,
+ .hash = rmap->rmap_hash(key)};
+ rd_map_elem_t *elem;
+
+ if (!(elem = rd_map_find(rmap, NULL, &skel)))
+ return NULL;
+
+ return (void *)elem->value;
+}
+
+
+void rd_map_delete(rd_map_t *rmap, const void *key) {
+ const rd_map_elem_t skel = {.key = (void *)key,
+ .hash = rmap->rmap_hash(key)};
+ rd_map_elem_t *elem;
+ int bkt;
+
+ if (!(elem = rd_map_find(rmap, &bkt, &skel)))
+ return;
+
+ rd_map_elem_destroy(rmap, elem);
+}
+
+
+void rd_map_copy(rd_map_t *dst,
+ const rd_map_t *src,
+ rd_map_copy_t *key_copy,
+ rd_map_copy_t *value_copy) {
+ const rd_map_elem_t *elem;
+
+ RD_MAP_FOREACH_ELEM(elem, src) {
+ rd_map_set(
+ dst, key_copy ? key_copy(elem->key) : (void *)elem->key,
+ value_copy ? value_copy(elem->value) : (void *)elem->value);
+ }
+}
+
+
+void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem) {
+ *elem = LIST_FIRST(&rmap->rmap_iter);
+}
+
+size_t rd_map_cnt(const rd_map_t *rmap) {
+ return (size_t)rmap->rmap_cnt;
+}
+
+rd_bool_t rd_map_is_empty(const rd_map_t *rmap) {
+ return rmap->rmap_cnt == 0;
+}
+
+
+/**
+ * @brief Calculates the number of desired buckets and returns
+ * a struct with pre-allocated buckets.
+ */
+struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt) {
+ static const int max_depth = 15;
+ static const int bucket_sizes[] = {
+ 5, 11, 23, 47, 97, 199, /* default */
+ 409, 823, 1741, 3469, 6949, 14033,
+ 28411, 57557, 116731, 236897, -1};
+ struct rd_map_buckets buckets = RD_ZERO_INIT;
+ int i;
+
+ if (!expected_cnt) {
+ buckets.cnt = 199;
+ } else {
+ /* Strive for an average (at expected element count) depth
+ * of 15 elements per bucket, but limit the maximum
+ * bucket count to the maximum value in bucket_sizes above.
+ * When a real need arise we'll change this to a dynamically
+ * growing hash map instead, but this will do for now. */
+ buckets.cnt = bucket_sizes[0];
+ for (i = 1; bucket_sizes[i] != -1 &&
+ (int)expected_cnt / max_depth > bucket_sizes[i];
+ i++)
+ buckets.cnt = bucket_sizes[i];
+ }
+
+ rd_assert(buckets.cnt > 0);
+
+ buckets.p = rd_calloc(buckets.cnt, sizeof(*buckets.p));
+
+ return buckets;
+}
+
+
+void rd_map_init(rd_map_t *rmap,
+ size_t expected_cnt,
+ int (*cmp)(const void *a, const void *b),
+ unsigned int (*hash)(const void *key),
+ void (*destroy_key)(void *key),
+ void (*destroy_value)(void *value)) {
+
+ memset(rmap, 0, sizeof(*rmap));
+ rmap->rmap_buckets = rd_map_alloc_buckets(expected_cnt);
+ rmap->rmap_cmp = cmp;
+ rmap->rmap_hash = hash;
+ rmap->rmap_destroy_key = destroy_key;
+ rmap->rmap_destroy_value = destroy_value;
+}
+
+void rd_map_clear(rd_map_t *rmap) {
+ rd_map_elem_t *elem;
+
+ while ((elem = LIST_FIRST(&rmap->rmap_iter)))
+ rd_map_elem_destroy(rmap, elem);
+}
+
+void rd_map_destroy(rd_map_t *rmap) {
+ rd_map_clear(rmap);
+ rd_free(rmap->rmap_buckets.p);
+}
+
+
+int rd_map_str_cmp(const void *a, const void *b) {
+ return strcmp((const char *)a, (const char *)b);
+}
+
+/**
+ * @brief A djb2 string hasher.
+ */
+unsigned int rd_map_str_hash(const void *key) {
+ const char *str = key;
+ return rd_string_hash(str, -1);
+}
+
+
+
+/**
+ * @name Unit tests
+ *
+ */
+#include "rdtime.h"
+#include "rdunittest.h"
+#include "rdcrc32.h"
+
+
+/**
+ * Typed hash maps
+ */
+
+/* Complex key type */
+struct mykey {
+ int k;
+ int something_else; /* Ignored by comparator and hasher below */
+};
+
+/* Key comparator */
+static int mykey_cmp(const void *_a, const void *_b) {
+ const struct mykey *a = _a, *b = _b;
+ return a->k - b->k;
+}
+
+/* Key hasher */
+static unsigned int mykey_hash(const void *_key) {
+ const struct mykey *key = _key;
+ return (unsigned int)key->k;
+}
+
+/* Complex value type */
+struct person {
+ char *name;
+ char *surname;
+};
+
+/* Define typed hash map type */
+typedef RD_MAP_TYPE(const struct mykey *,
+ const struct person *) ut_my_typed_map_t;
+
+
+/**
+ * @brief Test typed hash map with pre-defined type.
+ */
+static int unittest_typed_map(void) {
+ ut_my_typed_map_t rmap =
+ RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL);
+ ut_my_typed_map_t dup =
+ RD_MAP_INITIALIZER(0, mykey_cmp, mykey_hash, NULL, NULL);
+ struct mykey k1 = {1};
+ struct mykey k2 = {2};
+ struct person v1 = {"Roy", "McPhearsome"};
+ struct person v2 = {"Hedvig", "Lindahl"};
+ const struct mykey *key;
+ const struct person *value;
+
+ RD_MAP_SET(&rmap, &k1, &v1);
+ RD_MAP_SET(&rmap, &k2, &v2);
+
+ value = RD_MAP_GET(&rmap, &k2);
+ RD_UT_ASSERT(value == &v2, "mismatch");
+
+ RD_MAP_FOREACH(key, value, &rmap) {
+ RD_UT_SAY("enumerated key %d person %s %s", key->k, value->name,
+ value->surname);
+ }
+
+ RD_MAP_COPY(&dup, &rmap, NULL, NULL);
+
+ RD_MAP_DELETE(&rmap, &k1);
+ value = RD_MAP_GET(&rmap, &k1);
+ RD_UT_ASSERT(value == NULL, "expected no k1");
+
+ value = RD_MAP_GET(&dup, &k1);
+ RD_UT_ASSERT(value == &v1, "copied map: k1 mismatch");
+ value = RD_MAP_GET(&dup, &k2);
+ RD_UT_ASSERT(value == &v2, "copied map: k2 mismatch");
+
+ RD_MAP_DESTROY(&rmap);
+ RD_MAP_DESTROY(&dup);
+
+ RD_UT_PASS();
+}
+
+
+static int person_cmp(const void *_a, const void *_b) {
+ const struct person *a = _a, *b = _b;
+ int r;
+ if ((r = strcmp(a->name, b->name)))
+ return r;
+ return strcmp(a->surname, b->surname);
+}
+static unsigned int person_hash(const void *_key) {
+ const struct person *key = _key;
+ return 31 * rd_map_str_hash(key->name) + rd_map_str_hash(key->surname);
+}
+
+/**
+ * @brief Test typed hash map with locally defined type.
+ */
+static int unittest_typed_map2(void) {
+ RD_MAP_LOCAL_INITIALIZER(usermap, 3, const char *,
+ const struct person *, rd_map_str_cmp,
+ rd_map_str_hash, NULL, NULL);
+ RD_MAP_LOCAL_INITIALIZER(personmap, 3, const struct person *,
+ const char *, person_cmp, person_hash, NULL,
+ NULL);
+ struct person p1 = {"Magnus", "Lundstrom"};
+ struct person p2 = {"Peppy", "Popperpappies"};
+ const char *user;
+ const struct person *person;
+
+ /* Populate user -> person map */
+ RD_MAP_SET(&usermap, "user1234", &p1);
+ RD_MAP_SET(&usermap, "user9999999999", &p2);
+
+ person = RD_MAP_GET(&usermap, "user1234");
+
+
+ RD_UT_ASSERT(person == &p1, "mismatch");
+
+ RD_MAP_FOREACH(user, person, &usermap) {
+ /* Populate reverse name -> user map */
+ RD_MAP_SET(&personmap, person, user);
+ }
+
+ RD_MAP_FOREACH(person, user, &personmap) {
+ /* Just reference the memory to catch memory errors.*/
+ RD_UT_ASSERT(strlen(person->name) > 0 &&
+ strlen(person->surname) > 0 &&
+ strlen(user) > 0,
+ "bug");
+ }
+
+ RD_MAP_DESTROY(&usermap);
+ RD_MAP_DESTROY(&personmap);
+
+ return 0;
+}
+
+
+/**
+ * @brief Untyped hash map.
+ *
+ * This is a more thorough test of the underlying hash map implementation.
+ */
+static int unittest_untyped_map(void) {
+ rd_map_t rmap;
+ int pass, i, r;
+ int cnt = 100000;
+ int exp_cnt = 0, get_cnt = 0, iter_cnt = 0;
+ const rd_map_elem_t *elem;
+ rd_ts_t ts = rd_clock();
+ rd_ts_t ts_get = 0;
+
+ rd_map_init(&rmap, cnt, rd_map_str_cmp, rd_map_str_hash, rd_free,
+ rd_free);
+
+ /* pass 0 is set,delete,overwrite,get
+ * pass 1-5 is get */
+ for (pass = 0; pass < 6; pass++) {
+ if (pass == 1)
+ ts_get = rd_clock();
+
+ for (i = 1; i < cnt; i++) {
+ char key[10];
+ char val[64];
+ const char *val2;
+ rd_bool_t do_delete = !(i % 13);
+ rd_bool_t overwrite = !do_delete && !(i % 5);
+
+ rd_snprintf(key, sizeof(key), "key%d", i);
+ rd_snprintf(val, sizeof(val), "VALUE=%d!", i);
+
+ if (pass == 0) {
+ rd_map_set(&rmap, rd_strdup(key),
+ rd_strdup(val));
+
+ if (do_delete)
+ rd_map_delete(&rmap, key);
+ }
+
+ if (overwrite) {
+ rd_snprintf(val, sizeof(val), "OVERWRITE=%d!",
+ i);
+ if (pass == 0)
+ rd_map_set(&rmap, rd_strdup(key),
+ rd_strdup(val));
+ }
+
+ val2 = rd_map_get(&rmap, key);
+
+ if (do_delete)
+ RD_UT_ASSERT(!val2,
+ "map_get pass %d "
+ "returned value %s "
+ "for deleted key %s",
+ pass, val2, key);
+ else
+ RD_UT_ASSERT(val2 && !strcmp(val, val2),
+ "map_get pass %d: "
+ "expected value %s, not %s, "
+ "for key %s",
+ pass, val, val2 ? val2 : "NULL",
+ key);
+
+ if (pass == 0 && !do_delete)
+ exp_cnt++;
+ }
+
+ if (pass >= 1)
+ get_cnt += cnt;
+ }
+
+ ts_get = rd_clock() - ts_get;
+ RD_UT_SAY("%d map_get iterations took %.3fms = %" PRId64 "us/get",
+ get_cnt, (float)ts_get / 1000.0, ts_get / get_cnt);
+
+ RD_MAP_FOREACH_ELEM(elem, &rmap) {
+ iter_cnt++;
+ }
+
+ r = (int)rd_map_cnt(&rmap);
+ RD_UT_ASSERT(r == exp_cnt, "expected %d map entries, not %d", exp_cnt,
+ r);
+
+ RD_UT_ASSERT(r == iter_cnt,
+ "map_cnt() = %d, iteration gave %d elements", r, iter_cnt);
+
+ rd_map_destroy(&rmap);
+
+ ts = rd_clock() - ts;
+ RD_UT_SAY("Total time over %d entries took %.3fms", cnt,
+ (float)ts / 1000.0);
+
+ RD_UT_PASS();
+}
+
+
+int unittest_map(void) {
+ int fails = 0;
+ fails += unittest_untyped_map();
+ fails += unittest_typed_map();
+ fails += unittest_typed_map2();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h
new file mode 100644
index 000000000..a79dcda06
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdmap.h
@@ -0,0 +1,487 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2020 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDMAP_H_
+#define _RDMAP_H_
+
+/**
+ * @name Hash maps.
+ *
+ * Memory of key and value are allocated by the user but owned by the hash map
+ * until elements are deleted or overwritten.
+ *
+ * The lower-case API provides a generic typeless (void *) hash map while
+ * the upper-case API provides a strictly typed hash map implemented as macros
+ * on top of the generic API.
+ *
+ * See rd_map_init(), et.al, for the generic API and RD_MAP_INITIALIZER()
+ * for the typed API.
+ *
+ * @remark Not thread safe.
+ */
+
+
+/**
+ * @struct Map element. This is the internal representation
+ * of the element and exposed to the user for iterating over the hash.
+ */
+typedef struct rd_map_elem_s {
+ LIST_ENTRY(rd_map_elem_s) hlink; /**< Hash bucket link */
+ LIST_ENTRY(rd_map_elem_s) link; /**< Iterator link */
+ unsigned int hash; /**< Key hash value */
+ const void *key; /**< Key (memory owned by map) */
+ const void *value; /**< Value (memory owned by map) */
+} rd_map_elem_t;
+
+
+/**
+ * @struct Hash buckets (internal use).
+ */
+struct rd_map_buckets {
+ LIST_HEAD(, rd_map_elem_s) * p; /**< Hash buckets array */
+ int cnt; /**< Bucket count */
+};
+
+
+/**
+ * @struct Hash map.
+ */
+typedef struct rd_map_s {
+ struct rd_map_buckets rmap_buckets; /**< Hash buckets */
+ int rmap_cnt; /**< Element count */
+
+ LIST_HEAD(, rd_map_elem_s)
+ rmap_iter; /**< Element list for iterating
+ * over all elements. */
+
+ int (*rmap_cmp)(const void *a, const void *b); /**< Key comparator */
+ unsigned int (*rmap_hash)(const void *key); /**< Key hash function */
+ void (*rmap_destroy_key)(void *key); /**< Optional key free */
+ void (*rmap_destroy_value)(void *value); /**< Optional value free */
+
+ void *rmap_opaque;
+} rd_map_t;
+
+
+
+/**
+ * @brief Set/overwrite value in map.
+ *
+ * If an existing entry with the same key already exists its key and value
+ * will be freed with the destroy_key and destroy_value functions
+ * passed to rd_map_init().
+ *
+ * The map assumes memory ownership of both the \p key and \p value and will
+ * use the destroy_key and destroy_value functions (if set) to free
+ * the key and value memory when the map is destroyed or element removed.
+ *
+ * @returns the map element.
+ */
+rd_map_elem_t *rd_map_set(rd_map_t *rmap, void *key, void *value);
+
+
+/**
+ * @brief Look up \p key in the map and return its value, or NULL
+ * if \p key was not found.
+ *
+ * The returned memory is still owned by the map.
+ */
+void *rd_map_get(const rd_map_t *rmap, const void *key);
+
+
+/**
+ * @brief Delete \p key from the map, if it exists.
+ *
+ * The destroy_key and destroy_value functions (if set) will be used
+ * to free the key and value memory.
+ */
+void rd_map_delete(rd_map_t *rmap, const void *key);
+
+
+/** Key or Value Copy function signature. */
+typedef void *(rd_map_copy_t)(const void *key_or_value);
+
+
+/**
+ * @brief Copy all elements from \p src to \p dst.
+ * \p dst must be initialized and compatible with \p src.
+ *
+ * @param dst Destination map to copy to.
+ * @param src Source map to copy from.
+ * @param key_copy Key copy callback. If NULL the \p dst key will just
+ * reference the \p src key.
+ * @param value_copy Value copy callback. If NULL the \p dst value will just
+ * reference the \p src value.
+ */
+void rd_map_copy(rd_map_t *dst,
+ const rd_map_t *src,
+ rd_map_copy_t *key_copy,
+ rd_map_copy_t *value_copy);
+
+
+/**
+ * @returns the current number of elements in the map.
+ */
+size_t rd_map_cnt(const rd_map_t *rmap);
+
+/**
+ * @returns true if map is empty, else false.
+ */
+rd_bool_t rd_map_is_empty(const rd_map_t *rmap);
+
+
+/**
+ * @brief Iterate over all elements in the map.
+ *
+ * @warning The map MUST NOT be modified during the loop.
+ *
+ * @remark This is part of the untyped generic API.
+ */
+#define RD_MAP_FOREACH_ELEM(ELEM, RMAP) \
+ for (rd_map_iter_begin((RMAP), &(ELEM)); rd_map_iter(&(ELEM)); \
+ rd_map_iter_next(&(ELEM)))
+
+
+/**
+ * @brief Begin iterating \p rmap, first element is set in \p *elem.
+ */
+void rd_map_iter_begin(const rd_map_t *rmap, const rd_map_elem_t **elem);
+
+/**
+ * @returns 1 if \p *elem is a valid iteration element, else 0.
+ */
+static RD_INLINE RD_UNUSED int rd_map_iter(const rd_map_elem_t **elem) {
+ return *elem != NULL;
+}
+
+/**
+ * @brief Advances the iteration to the next element.
+ */
+static RD_INLINE RD_UNUSED void rd_map_iter_next(const rd_map_elem_t **elem) {
+ *elem = LIST_NEXT(*elem, link);
+}
+
+
+/**
+ * @brief Initialize a map that is expected to hold \p expected_cnt elements.
+ *
+ * @param expected_cnt Expected number of elements in the map,
+ * this is used to select a suitable bucket count.
+ * Passing a value of 0 will set the bucket count
+ * to a reasonable default.
+ * @param cmp Key comparator that must return 0 if the two keys match.
+ * @param hash Key hashing function that is used to map a key to a bucket.
+ * It must return an integer hash >= 0 of the key.
+ * @param destroy_key (Optional) When an element is deleted or overwritten
+ * this function will be used to free the key memory.
+ * @param destroy_value (Optional) When an element is deleted or overwritten
+ * this function will be used to free the value memory.
+ *
+ * Destroy the map with rd_map_destroy()
+ *
+ * @remarks The map is not thread-safe.
+ */
+void rd_map_init(rd_map_t *rmap,
+ size_t expected_cnt,
+ int (*cmp)(const void *a, const void *b),
+ unsigned int (*hash)(const void *key),
+ void (*destroy_key)(void *key),
+ void (*destroy_value)(void *value));
+
+
+/**
+ * @brief Internal use
+ */
+struct rd_map_buckets rd_map_alloc_buckets(size_t expected_cnt);
+
+
+/**
+ * @brief Empty the map and free all elements.
+ */
+void rd_map_clear(rd_map_t *rmap);
+
+
+/**
+ * @brief Free all elements in the map and free all memory associated
+ * with the map, but not the rd_map_t itself.
+ *
+ * The map is unusable after this call but can be re-initialized using
+ * rd_map_init().
+ *
+ * @sa rd_map_clear()
+ */
+void rd_map_destroy(rd_map_t *rmap);
+
+
+/**
+ * @brief String comparator for (const char *) keys.
+ */
+int rd_map_str_cmp(const void *a, const void *b);
+
+
+/**
+ * @brief String hash function (djb2) for (const char *) keys.
+ */
+unsigned int rd_map_str_hash(const void *a);
+
+
+
+/**
+ * @name Typed hash maps.
+ *
+ * Typed hash maps provides a type-safe layer on top of the standard hash maps.
+ */
+
+/**
+ * @brief Define a typed map type which can later be used with
+ * RD_MAP_INITIALIZER() and typed RD_MAP_*() API.
+ */
+#define RD_MAP_TYPE(KEY_TYPE, VALUE_TYPE) \
+ struct { \
+ rd_map_t rmap; \
+ KEY_TYPE key; \
+ VALUE_TYPE value; \
+ const rd_map_elem_t *elem; \
+ }
+
+/**
+ * @brief Initialize a typed hash map. The left hand side variable must be
+ * a typed hash map defined by RD_MAP_TYPE().
+ *
+ * The typed hash map is a macro layer on top of the rd_map_t implementation
+ * that provides type safety.
+ * The methods are the same as the underlying implementation but in all caps
+ * (to indicate their macro use), e.g., RD_MAP_SET() is the typed version
+ * of rd_map_set().
+ *
+ * @param EXPECTED_CNT Expected number of elements in hash.
+ * @param KEY_TYPE The type of the hash key.
+ * @param VALUE_TYPE The type of the hash value.
+ * @param CMP Comparator function for the key.
+ * @param HASH Hash function for the key.
+ * @param DESTROY_KEY Destructor for the key type.
+ * @param DESTROY_VALUE Destructor for the value type.
+ *
+ * @sa rd_map_init()
+ */
+#define RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \
+ DESTROY_VALUE) \
+ { \
+ .rmap = { \
+ .rmap_buckets = rd_map_alloc_buckets(EXPECTED_CNT), \
+ .rmap_cmp = CMP, \
+ .rmap_hash = HASH, \
+ .rmap_destroy_key = DESTROY_KEY, \
+ .rmap_destroy_value = DESTROY_VALUE \
+ } \
+ }
+
+
+/**
+ * @brief Initialize a locally-defined typed hash map.
+ * This hash map can only be used in the current scope/function
+ * as its type is private to this initializement.
+ *
+ * @param RMAP Hash map variable name.
+ *
+ * For the other parameters, see RD_MAP_INITIALIZER().
+ *
+ * @sa RD_MAP_INITIALIZER()
+ */
+#define RD_MAP_LOCAL_INITIALIZER(RMAP, EXPECTED_CNT, KEY_TYPE, VALUE_TYPE, \
+ CMP, HASH, DESTROY_KEY, DESTROY_VALUE) \
+ struct { \
+ rd_map_t rmap; \
+ KEY_TYPE key; \
+ VALUE_TYPE value; \
+ const rd_map_elem_t *elem; \
+ } RMAP = RD_MAP_INITIALIZER(EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \
+ DESTROY_VALUE)
+
+
+/**
+ * @brief Initialize typed map \p RMAP.
+ *
+ * @sa rd_map_init()
+ */
+#define RD_MAP_INIT(RMAP, EXPECTED_CNT, CMP, HASH, DESTROY_KEY, DESTROY_VALUE) \
+ rd_map_init(&(RMAP)->rmap, EXPECTED_CNT, CMP, HASH, DESTROY_KEY, \
+ DESTROY_VALUE)
+
+
+/**
+ * @brief Allocate and initialize a typed map.
+ */
+
+
+/**
+ * @brief Typed hash map: Set key/value in map.
+ *
+ * @sa rd_map_set()
+ */
+#define RD_MAP_SET(RMAP, KEY, VALUE) \
+ ((RMAP)->key = KEY, (RMAP)->value = VALUE, \
+ rd_map_set(&(RMAP)->rmap, (void *)(RMAP)->key, \
+ (void *)(RMAP)->value))
+
+/**
+ * @brief Typed hash map: Get value for key.
+ *
+ * @sa rd_map_get()
+ */
+#define RD_MAP_GET(RMAP, KEY) \
+ ((RMAP)->key = (KEY), \
+ (RMAP)->value = rd_map_get(&(RMAP)->rmap, (RMAP)->key), \
+ (RMAP)->value)
+
+
+
+/**
+ * @brief Get value for key. If key does not exist in map a new
+ * entry is added using the DEFAULT_CODE.
+ */
+#define RD_MAP_GET_OR_SET(RMAP, KEY, DEFAULT_CODE) \
+ (RD_MAP_GET(RMAP, KEY) \
+ ? (RMAP)->value \
+ : (RD_MAP_SET(RMAP, (RMAP)->key, DEFAULT_CODE), (RMAP)->value))
+
+
+/**
+ * @brief Typed hash map: Delete element by key.
+ *
+ * The destroy_key and destroy_value functions (if set) will be used
+ * to free the key and value memory.
+ *
+ * @sa rd_map_delete()
+ */
+#define RD_MAP_DELETE(RMAP, KEY) \
+ ((RMAP)->key = (KEY), rd_map_delete(&(RMAP)->rmap, (RMAP)->key))
+
+
+/**
+ * @brief Copy all elements from \p SRC to \p DST.
+ * \p DST must be initialized and compatible with \p SRC.
+ *
+ * @param DST Destination map to copy to.
+ * @param SRC Source map to copy from.
+ * @param KEY_COPY Key copy callback. If NULL the \p DST key will just
+ * reference the \p SRC key.
+ * @param VALUE_COPY Value copy callback. If NULL the \p DST value will just
+ * reference the \p SRC value.
+ */
+#define RD_MAP_COPY(DST, SRC, KEY_COPY, VALUE_COPY) \
+ do { \
+ if ((DST) != (SRC)) /*implicit type-check*/ \
+ rd_map_copy(&(DST)->rmap, &(SRC)->rmap, KEY_COPY, \
+ VALUE_COPY); \
+ } while (0)
+
+
+/**
+ * @brief Empty the map and free all elements.
+ *
+ * @sa rd_map_clear()
+ */
+#define RD_MAP_CLEAR(RMAP) rd_map_clear(&(RMAP)->rmap)
+
+
+/**
+ * @brief Typed hash map: Destroy hash map.
+ *
+ * @sa rd_map_destroy()
+ */
+#define RD_MAP_DESTROY(RMAP) rd_map_destroy(&(RMAP)->rmap)
+
+
+/**
+ * @brief Typed hash map: Destroy and free the hash map.
+ *
+ * @sa rd_map_destroy()
+ */
+#define RD_MAP_DESTROY_AND_FREE(RMAP) \
+ do { \
+ rd_map_destroy(&(RMAP)->rmap); \
+ rd_free(RMAP); \
+ } while (0)
+
+
+/**
+ * @brief Typed hash map: Iterate over all elements in the map.
+ *
+ * @warning The current or previous elements may be removed, but the next
+ * element after the current one MUST NOT be modified during the loop.
+ *
+ * @warning RD_MAP_FOREACH() only supports one simultaneous invocation,
+ * that is, special care must be taken not to call FOREACH() from
+ * within a FOREACH() or FOREACH_KEY() loop on the same map.
+ * This is due to how RMAP->elem is used as the iterator.
+ * This restriction is unfortunately not enforced at build or run time.
+ *
+ * @remark The \p RMAP may not be const.
+ */
+#define RD_MAP_FOREACH(K, V, RMAP) \
+ for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL, \
+ (V) = NULL; \
+ rd_map_iter(&(RMAP)->elem) && \
+ ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \
+ (RMAP)->value = (void *)(RMAP)->elem->value, (V) = (RMAP)->value, \
+ rd_map_iter_next(&(RMAP)->elem), rd_true);)
+
+
+/**
+ * @brief Typed hash map: Iterate over all keys in the map.
+ *
+ * @warning The current or previous elements may be removed, but the next
+ * element after the current one MUST NOT be modified during the loop.
+ *
+ * @warning RD_MAP_FOREACH_KEY() only supports one simultaneous invocation,
+ * that is, special care must be taken not to call FOREACH_KEY() from
+ * within a FOREACH() or FOREACH_KEY() loop on the same map.
+ * This is due to how RMAP->elem is used as the iterator.
+ * This restriction is unfortunately not enforced at build or run time.
+ *
+ * @remark The \p RMAP may not be const.
+ */
+#define RD_MAP_FOREACH_KEY(K, RMAP) \
+ for (rd_map_iter_begin(&(RMAP)->rmap, &(RMAP)->elem), (K) = NULL; \
+ rd_map_iter(&(RMAP)->elem) && \
+ ((RMAP)->key = (void *)(RMAP)->elem->key, (K) = (RMAP)->key, \
+ rd_map_iter_next(&(RMAP)->elem), rd_true);)
+
+
+/**
+ * @returns the number of elements in the map.
+ */
+#define RD_MAP_CNT(RMAP) rd_map_cnt(&(RMAP)->rmap)
+
+/**
+ * @returns true if map is empty, else false.
+ */
+#define RD_MAP_IS_EMPTY(RMAP) rd_map_is_empty(&(RMAP)->rmap)
+
+#endif /* _RDMAP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c
new file mode 100644
index 000000000..c3e4095d4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.c
@@ -0,0 +1,167 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdunittest.h"
+#include "rdmurmur2.h"
+#include "rdendian.h"
+
+
+/* MurmurHash2, by Austin Appleby
+ *
+ * With librdkafka modifications combinining aligned/unaligned variants
+ * into the same function.
+ */
+
+#define MM_MIX(h, k, m) \
+ { \
+ k *= m; \
+ k ^= k >> r; \
+ k *= m; \
+ h *= m; \
+ h ^= k; \
+ }
+
+/*-----------------------------------------------------------------------------
+// Based on MurmurHashNeutral2, by Austin Appleby
+//
+// Same as MurmurHash2, but endian- and alignment-neutral.
+// Half the speed though, alas.
+//
+*/
+uint32_t rd_murmur2(const void *key, size_t len) {
+ const uint32_t seed = 0x9747b28c;
+ const uint32_t m = 0x5bd1e995;
+ const int r = 24;
+ uint32_t h = seed ^ (uint32_t)len;
+ const unsigned char *tail;
+
+ if (likely(((intptr_t)key & 0x3) == 0)) {
+ /* Input is 32-bit word aligned. */
+ const uint32_t *data = (const uint32_t *)key;
+
+ while (len >= 4) {
+ uint32_t k = htole32(*(uint32_t *)data);
+
+ MM_MIX(h, k, m);
+
+ data++;
+ len -= 4;
+ }
+
+ tail = (const unsigned char *)data;
+
+ } else {
+ /* Unaligned slower variant */
+ const unsigned char *data = (const unsigned char *)key;
+
+ while (len >= 4) {
+ uint32_t k;
+
+ k = data[0];
+ k |= data[1] << 8;
+ k |= data[2] << 16;
+ k |= data[3] << 24;
+
+ MM_MIX(h, k, m);
+
+ data += 4;
+ len -= 4;
+ }
+
+ tail = data;
+ }
+
+ /* Read remaining sub-word */
+ switch (len) {
+ case 3:
+ h ^= tail[2] << 16;
+ case 2:
+ h ^= tail[1] << 8;
+ case 1:
+ h ^= tail[0];
+ h *= m;
+ };
+
+ h ^= h >> 13;
+ h *= m;
+ h ^= h >> 15;
+
+ /* Last bit is set to 0 because the java implementation uses int_32
+ * and then sets to positive number flipping last bit to 1. */
+ return h;
+}
+
+
+/**
+ * @brief Unittest for rd_murmur2()
+ */
+int unittest_murmur2(void) {
+ const char *short_unaligned = "1234";
+ const char *unaligned = "PreAmbleWillBeRemoved,ThePrePartThatIs";
+ const char *keysToTest[] = {
+ "kafka",
+ "giberish123456789",
+ short_unaligned,
+ short_unaligned + 1,
+ short_unaligned + 2,
+ short_unaligned + 3,
+ unaligned,
+ unaligned + 1,
+ unaligned + 2,
+ unaligned + 3,
+ "",
+ NULL,
+ };
+
+ const int32_t java_murmur2_results[] = {
+ 0xd067cf64, // kafka
+ 0x8f552b0c, // giberish123456789
+ 0x9fc97b14, // short_unaligned
+ 0xe7c009ca, // short_unaligned+1
+ 0x873930da, // short_unaligned+2
+ 0x5a4b5ca1, // short_unaligned+3
+ 0x78424f1c, // unaligned
+ 0x4a62b377, // unaligned+1
+ 0xe0e4e09e, // unaligned+2
+ 0x62b8b43f, // unaligned+3
+ 0x106e08d9, // ""
+ 0x106e08d9, // NULL
+ };
+
+ size_t i;
+ for (i = 0; i < RD_ARRAYSIZE(keysToTest); i++) {
+ uint32_t h = rd_murmur2(
+ keysToTest[i], keysToTest[i] ? strlen(keysToTest[i]) : 0);
+ RD_UT_ASSERT((int32_t)h == java_murmur2_results[i],
+ "Calculated murmur2 hash 0x%x for \"%s\", "
+ "expected 0x%x",
+ h, keysToTest[i], java_murmur2_results[i]);
+ }
+ RD_UT_PASS();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h
new file mode 100644
index 000000000..5991caa50
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdmurmur2.h
@@ -0,0 +1,35 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __RDMURMUR2___H__
+#define __RDMURMUR2___H__
+
+uint32_t rd_murmur2(const void *key, size_t len);
+int unittest_murmur2(void);
+
+#endif // __RDMURMUR2___H__
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c
new file mode 100644
index 000000000..15c57e928
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdports.c
@@ -0,0 +1,61 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * System portability
+ */
+
+#include "rd.h"
+
+
+#include <stdlib.h>
+
+/**
+ * qsort_r substitute
+ * This nicely explains why we wont bother with the native implementation
+ * on Win32 (qsort_s), OSX/FreeBSD (qsort_r with diff args):
+ * http://forum.theorex.tech/t/different-declarations-of-qsort-r-on-mac-and-linux/93/2
+ */
+static RD_TLS int (*rd_qsort_r_cmp)(const void *, const void *, void *);
+static RD_TLS void *rd_qsort_r_arg;
+
+static RD_UNUSED int rd_qsort_r_trampoline(const void *a, const void *b) {
+ return rd_qsort_r_cmp(a, b, rd_qsort_r_arg);
+}
+
+void rd_qsort_r(void *base,
+ size_t nmemb,
+ size_t size,
+ int (*compar)(const void *, const void *, void *),
+ void *arg) {
+ rd_qsort_r_cmp = compar;
+ rd_qsort_r_arg = arg;
+ qsort(base, nmemb, size, rd_qsort_r_trampoline);
+ rd_qsort_r_cmp = NULL;
+ rd_qsort_r_arg = NULL;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h
new file mode 100644
index 000000000..0cdbcd85f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdports.h
@@ -0,0 +1,38 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDPORTS_H_
+#define _RDPORTS_H_
+
+
+void rd_qsort_r(void *base,
+ size_t nmemb,
+ size_t size,
+ int (*compar)(const void *, const void *, void *),
+ void *arg);
+
+#endif /* _RDPORTS_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h
new file mode 100644
index 000000000..7b2376823
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdposix.h
@@ -0,0 +1,250 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * POSIX system support
+ */
+#ifndef _RDPOSIX_H_
+#define _RDPOSIX_H_
+
+#include <unistd.h>
+#include <stdio.h>
+#include <sys/time.h>
+#include <inttypes.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <string.h>
+
+/**
+ * Types
+ */
+
+
+/**
+ * Annotations, attributes, optimizers
+ */
+#ifndef likely
+#define likely(x) __builtin_expect((x), 1)
+#endif
+#ifndef unlikely
+#define unlikely(x) __builtin_expect((x), 0)
+#endif
+
+#define RD_UNUSED __attribute__((unused))
+#define RD_INLINE inline
+#define RD_WARN_UNUSED_RESULT __attribute__((warn_unused_result))
+#define RD_NORETURN __attribute__((noreturn))
+#define RD_IS_CONSTANT(p) __builtin_constant_p((p))
+#define RD_TLS __thread
+
+/**
+ * Allocation
+ */
+#if !defined(__FreeBSD__) && !defined(__OpenBSD__)
+/* alloca(3) is in stdlib on FreeBSD */
+#include <alloca.h>
+#endif
+
+#define rd_alloca(N) alloca(N)
+
+
+/**
+ * Strings, formatting, printf, ..
+ */
+
+/* size_t and ssize_t format strings */
+#define PRIusz "zu"
+#define PRIdsz "zd"
+
+#ifndef RD_FORMAT
+#define RD_FORMAT(...) __attribute__((format(__VA_ARGS__)))
+#endif
+#define rd_snprintf(...) snprintf(__VA_ARGS__)
+#define rd_vsnprintf(...) vsnprintf(__VA_ARGS__)
+
+#define rd_strcasecmp(A, B) strcasecmp(A, B)
+#define rd_strncasecmp(A, B, N) strncasecmp(A, B, N)
+
+
+#ifdef HAVE_STRCASESTR
+#define rd_strcasestr(HAYSTACK, NEEDLE) strcasestr(HAYSTACK, NEEDLE)
+#else
+#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE)
+#endif
+
+
+/**
+ * Errors
+ */
+
+
+#define rd_set_errno(err) (errno = (err))
+
+#if HAVE_STRERROR_R
+static RD_INLINE RD_UNUSED const char *rd_strerror(int err) {
+ static RD_TLS char ret[128];
+
+#if defined(__GLIBC__) && defined(_GNU_SOURCE)
+ return strerror_r(err, ret, sizeof(ret));
+#else /* XSI version */
+ int r;
+ /* The r assignment is to catch the case where
+ * _GNU_SOURCE is not defined but the GNU version is
+ * picked up anyway. */
+ r = strerror_r(err, ret, sizeof(ret));
+ if (unlikely(r))
+ rd_snprintf(ret, sizeof(ret), "strerror_r(%d) failed (ret %d)",
+ err, r);
+ return ret;
+#endif
+}
+#else
+#define rd_strerror(err) strerror(err)
+#endif
+
+
+/**
+ * Atomics
+ */
+#include "rdatomic.h"
+
+/**
+ * Misc
+ */
+
+/**
+ * Microsecond sleep.
+ * Will retry on signal interrupt unless *terminate is true.
+ */
+static RD_INLINE RD_UNUSED void rd_usleep(int usec, rd_atomic32_t *terminate) {
+ struct timespec req = {usec / 1000000, (long)(usec % 1000000) * 1000};
+
+ /* Retry until complete (issue #272), unless terminating. */
+ while (nanosleep(&req, &req) == -1 &&
+ (errno == EINTR && (!terminate || !rd_atomic32_get(terminate))))
+ ;
+}
+
+
+
+#define rd_gettimeofday(tv, tz) gettimeofday(tv, tz)
+
+
+#ifndef __COVERITY__
+#define rd_assert(EXPR) assert(EXPR)
+#else
+extern void __coverity_panic__(void);
+#define rd_assert(EXPR) \
+ do { \
+ if (!(EXPR)) \
+ __coverity_panic__(); \
+ } while (0)
+#endif
+
+
+static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env,
+ const char *def) {
+ const char *tmp;
+ tmp = getenv(env);
+ if (tmp && *tmp)
+ return tmp;
+ return def;
+}
+
+
+/**
+ * Empty struct initializer
+ */
+#define RD_ZERO_INIT \
+ {}
+
+/**
+ * Sockets, IO
+ */
+
+/** @brief Socket type */
+typedef int rd_socket_t;
+
+/** @brief Socket API error return value */
+#define RD_SOCKET_ERROR (-1)
+
+/** @brief Last socket error */
+#define rd_socket_errno errno
+
+
+/** @brief String representation of socket error */
+#define rd_socket_strerror(ERR) rd_strerror(ERR)
+
+/** @brief poll() struct type */
+typedef struct pollfd rd_pollfd_t;
+
+/** @brief poll(2) */
+#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \
+ poll(POLLFD, FDCNT, TIMEOUT_MS)
+
+/**
+ * @brief Set socket to non-blocking
+ * @returns 0 on success or errno on failure.
+ */
+static RD_UNUSED int rd_fd_set_nonblocking(int fd) {
+ int fl = fcntl(fd, F_GETFL, 0);
+ if (fl == -1 || fcntl(fd, F_SETFL, fl | O_NONBLOCK) == -1)
+ return errno;
+ return 0;
+}
+
+/**
+ * @brief Create non-blocking pipe
+ * @returns 0 on success or errno on failure
+ */
+static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) {
+ if (pipe(fds) == -1 || rd_fd_set_nonblocking(fds[0]) == -1 ||
+ rd_fd_set_nonblocking(fds[1]))
+ return errno;
+
+ /* Minimize buffer sizes to avoid a large number
+ * of signaling bytes to accumulate when
+ * io-signalled queue is not being served for a while. */
+#ifdef F_SETPIPE_SZ
+ /* Linux automatically rounds the pipe size up
+ * to the minimum size. */
+ fcntl(fds[0], F_SETPIPE_SZ, 100);
+ fcntl(fds[1], F_SETPIPE_SZ, 100);
+#endif
+ return 0;
+}
+#define rd_socket_read(fd, buf, sz) read(fd, buf, sz)
+#define rd_socket_write(fd, buf, sz) write(fd, buf, sz)
+#define rd_socket_close(fd) close(fd)
+
+/* File IO */
+#define rd_write(fd, buf, sz) write(fd, buf, sz)
+#define rd_open(path, flags, mode) open(path, flags, mode)
+#define rd_close(fd) close(fd)
+
+#endif /* _RDPOSIX_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c
new file mode 100644
index 000000000..e36d79380
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.c
@@ -0,0 +1,70 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rd.h"
+#include "rdrand.h"
+#include "rdtime.h"
+#include "tinycthread.h"
+
+int rd_jitter(int low, int high) {
+ int rand_num;
+#if HAVE_RAND_R
+ static RD_TLS unsigned int seed = 0;
+
+ /* Initial seed with time+thread id */
+ if (unlikely(seed == 0)) {
+ struct timeval tv;
+ rd_gettimeofday(&tv, NULL);
+ seed = (unsigned int)(tv.tv_usec / 1000);
+ seed ^= (unsigned int)(intptr_t)thrd_current();
+ }
+
+ rand_num = rand_r(&seed);
+#else
+ rand_num = rand();
+#endif
+ return (low + (rand_num % ((high - low) + 1)));
+}
+
+void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size) {
+ int i;
+ void *tmp = rd_alloca(entry_size);
+
+ /* FIXME: Optimized version for word-sized entries. */
+
+ for (i = (int)nmemb - 1; i > 0; i--) {
+ int j = rd_jitter(0, i);
+ if (unlikely(i == j))
+ continue;
+
+ memcpy(tmp, (char *)base + (i * entry_size), entry_size);
+ memcpy((char *)base + (i * entry_size),
+ (char *)base + (j * entry_size), entry_size);
+ memcpy((char *)base + (j * entry_size), tmp, entry_size);
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h
new file mode 100644
index 000000000..0e3a927c2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdrand.h
@@ -0,0 +1,43 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDRAND_H_
+#define _RDRAND_H_
+
+
+/**
+ * Returns a random (using rand(3)) number between 'low'..'high' (inclusive).
+ */
+int rd_jitter(int low, int high);
+
+/**
+ * Shuffles (randomizes) an array using the modern Fisher-Yates algorithm.
+ */
+void rd_array_shuffle(void *base, size_t nmemb, size_t entry_size);
+
+#endif /* _RDRAND_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c
new file mode 100644
index 000000000..0c70cb334
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.c
@@ -0,0 +1,156 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rd.h"
+#include "rdstring.h"
+#include "rdregex.h"
+
+#if HAVE_REGEX
+#include <regex.h>
+struct rd_regex_s {
+ regex_t re;
+};
+
+#else
+
+#include "regexp.h"
+struct rd_regex_s {
+ Reprog *re;
+};
+#endif
+
+
+/**
+ * @brief Destroy compiled regex
+ */
+void rd_regex_destroy(rd_regex_t *re) {
+#if HAVE_REGEX
+ regfree(&re->re);
+#else
+ re_regfree(re->re);
+#endif
+ rd_free(re);
+}
+
+
+/**
+ * @brief Compile regex \p pattern
+ * @returns Compiled regex object on success on error.
+ */
+rd_regex_t *
+rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size) {
+ rd_regex_t *re = rd_calloc(1, sizeof(*re));
+#if HAVE_REGEX
+ int r;
+
+ r = regcomp(&re->re, pattern, REG_EXTENDED | REG_NOSUB);
+ if (r) {
+ if (errstr)
+ regerror(r, &re->re, errstr, errstr_size);
+ rd_free(re);
+ return NULL;
+ }
+#else
+ const char *errstr2;
+
+ re->re = re_regcomp(pattern, 0, &errstr2);
+ if (!re->re) {
+ if (errstr)
+ rd_strlcpy(errstr, errstr2, errstr_size);
+ rd_free(re);
+ return NULL;
+ }
+#endif
+
+ return re;
+}
+
+
+/**
+ * @brief Match \p str to pre-compiled regex \p re
+ * @returns 1 on match, else 0
+ */
+int rd_regex_exec(rd_regex_t *re, const char *str) {
+#if HAVE_REGEX
+ return regexec(&re->re, str, 0, NULL, 0) != REG_NOMATCH;
+#else
+ return !re_regexec(re->re, str, NULL, 0);
+#endif
+}
+
+
+/**
+ * @brief Perform regex match of \p str using regex \p pattern.
+ *
+ * @returns 1 on match, 0 on non-match or -1 on regex compilation error
+ * in which case a human readable error string is written to
+ * \p errstr (if not NULL).
+ */
+int rd_regex_match(const char *pattern,
+ const char *str,
+ char *errstr,
+ size_t errstr_size) {
+#if HAVE_REGEX /* use libc regex */
+ regex_t re;
+ int r;
+
+ /* FIXME: cache compiled regex */
+ r = regcomp(&re, pattern, REG_EXTENDED | REG_NOSUB);
+ if (r) {
+ if (errstr)
+ regerror(r, &re, errstr, errstr_size);
+ return 0;
+ }
+
+ r = regexec(&re, str, 0, NULL, 0) != REG_NOMATCH;
+
+ regfree(&re);
+
+ return r;
+
+#else /* Using regexp.h from minilibs (included) */
+ Reprog *re;
+ int r;
+ const char *errstr2;
+
+ /* FIXME: cache compiled regex */
+ re = re_regcomp(pattern, 0, &errstr2);
+ if (!re) {
+ if (errstr)
+ rd_strlcpy(errstr, errstr2, errstr_size);
+ return -1;
+ }
+
+ r = !re_regexec(re, str, NULL, 0);
+
+ re_regfree(re);
+
+ return r;
+#endif
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h
new file mode 100644
index 000000000..135229d62
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdregex.h
@@ -0,0 +1,43 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _RDREGEX_H_
+#define _RDREGEX_H_
+
+typedef struct rd_regex_s rd_regex_t;
+
+void rd_regex_destroy(rd_regex_t *re);
+rd_regex_t *
+rd_regex_comp(const char *pattern, char *errstr, size_t errstr_size);
+int rd_regex_exec(rd_regex_t *re, const char *str);
+
+int rd_regex_match(const char *pattern,
+ const char *str,
+ char *errstr,
+ size_t errstr_size);
+
+#endif /* _RDREGEX_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h
new file mode 100644
index 000000000..a2c0de1b0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdsignal.h
@@ -0,0 +1,57 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDSIGNAL_H_
+#define _RDSIGNAL_H_
+
+#include <signal.h>
+
+#define RD_SIG_ALL -1
+#define RD_SIG_END -2
+
+extern sigset_t rd_intr_sigset;
+extern int rd_intr_blocked;
+
+static __inline void rd_intr_block(void) RD_UNUSED;
+static __inline void rd_intr_block(void) {
+ if (rd_intr_blocked++)
+ return;
+
+ sigprocmask(SIG_BLOCK, &rd_intr_sigset, NULL);
+}
+
+static __inline void rd_intr_unblock(void) RD_UNUSED;
+static __inline void rd_intr_unblock(void) {
+ assert(rd_intr_blocked > 0);
+ if (--rd_intr_blocked)
+ return;
+
+ sigprocmask(SIG_UNBLOCK, &rd_intr_sigset, NULL);
+}
+
+#endif /* _RDSIGNAL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c
new file mode 100644
index 000000000..6a18210c9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.c
@@ -0,0 +1,629 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rd.h"
+#include "rdstring.h"
+#include "rdunittest.h"
+
+#include <ctype.h>
+
+
+/**
+ * @brief Render string \p template using \p callback for key lookups.
+ *
+ * Keys in template follow the %{keyname} syntax.
+ *
+ * The \p callback must not write more than \p size bytes to \p buf, must
+ * should return the number of bytes it wanted to write (which will indicate
+ * a truncated write).
+ * If the key is not found -1 should be returned (which fails the rendering).
+ *
+ * @returns number of written bytes to \p dest,
+ * or -1 on failure (errstr is written)
+ */
+char *rd_string_render(
+ const char *template,
+ char *errstr,
+ size_t errstr_size,
+ ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque),
+ void *opaque) {
+ const char *s = template;
+ const char *tend = template + strlen(template);
+ size_t size = 256;
+ char *buf;
+ size_t of = 0;
+
+ buf = rd_malloc(size);
+
+#define _remain() (size - of - 1)
+#define _assure_space(SZ) \
+ do { \
+ if (of + (SZ) + 1 >= size) { \
+ size = (size + (SZ) + 1) * 2; \
+ buf = rd_realloc(buf, size); \
+ } \
+ } while (0)
+
+#define _do_write(PTR, SZ) \
+ do { \
+ _assure_space(SZ); \
+ memcpy(buf + of, (PTR), (SZ)); \
+ of += (SZ); \
+ } while (0)
+
+
+
+ while (*s) {
+ const char *t;
+ size_t tof = (size_t)(s - template);
+
+ t = strstr(s, "%{");
+ if (t != s) {
+ /* Write "abc%{"
+ * ^^^ */
+ size_t len = (size_t)((t ? t : tend) - s);
+ if (len)
+ _do_write(s, len);
+ }
+
+ if (t) {
+ const char *te;
+ ssize_t r;
+ char *tmpkey;
+
+ /* Find "abc%{key}"
+ * ^ */
+ te = strchr(t + 2, '}');
+ if (!te) {
+ rd_snprintf(errstr, errstr_size,
+ "Missing close-brace } for "
+ "%.*s at %" PRIusz,
+ 15, t, tof);
+ rd_free(buf);
+ return NULL;
+ }
+
+ rd_strndupa(&tmpkey, t + 2, (int)(te - t - 2));
+
+ /* Query callback for length of key's value. */
+ r = callback(tmpkey, NULL, 0, opaque);
+ if (r == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Property not available: \"%s\"",
+ tmpkey);
+ rd_free(buf);
+ return NULL;
+ }
+
+ _assure_space(r);
+
+ /* Call again now providing a large enough buffer. */
+ r = callback(tmpkey, buf + of, _remain(), opaque);
+ if (r == -1) {
+ rd_snprintf(errstr, errstr_size,
+ "Property not available: "
+ "\"%s\"",
+ tmpkey);
+ rd_free(buf);
+ return NULL;
+ }
+
+ assert(r < (ssize_t)_remain());
+ of += r;
+ s = te + 1;
+
+ } else {
+ s = tend;
+ }
+ }
+
+ buf[of] = '\0';
+ return buf;
+}
+
+
+
+void rd_strtup_destroy(rd_strtup_t *strtup) {
+ rd_free(strtup);
+}
+
+void rd_strtup_free(void *strtup) {
+ rd_strtup_destroy((rd_strtup_t *)strtup);
+}
+
+rd_strtup_t *rd_strtup_new0(const char *name,
+ ssize_t name_len,
+ const char *value,
+ ssize_t value_len) {
+ rd_strtup_t *strtup;
+
+ /* Calculate lengths, if needed, and add space for \0 nul */
+
+ if (name_len == -1)
+ name_len = strlen(name);
+
+ if (!value)
+ value_len = 0;
+ else if (value_len == -1)
+ value_len = strlen(value);
+
+
+ strtup = rd_malloc(sizeof(*strtup) + name_len + 1 + value_len + 1 -
+ 1 /*name[1]*/);
+ memcpy(strtup->name, name, name_len);
+ strtup->name[name_len] = '\0';
+ if (value) {
+ strtup->value = &strtup->name[name_len + 1];
+ memcpy(strtup->value, value, value_len);
+ strtup->value[value_len] = '\0';
+ } else {
+ strtup->value = NULL;
+ }
+
+ return strtup;
+}
+
+rd_strtup_t *rd_strtup_new(const char *name, const char *value) {
+ return rd_strtup_new0(name, -1, value, -1);
+}
+
+
+/**
+ * @returns a new copy of \p src
+ */
+rd_strtup_t *rd_strtup_dup(const rd_strtup_t *src) {
+ return rd_strtup_new(src->name, src->value);
+}
+
+/**
+ * @brief Wrapper for rd_strtup_dup() suitable rd_list_copy*() use
+ */
+void *rd_strtup_list_copy(const void *elem, void *opaque) {
+ const rd_strtup_t *src = elem;
+ return (void *)rd_strtup_dup(src);
+}
+
+
+
+/**
+ * @brief Convert bit-flags in \p flags to human-readable CSV string
+ * use the bit-description strings in \p desc.
+ *
+ * \p desc array element N corresponds to bit (1<<N).
+ * \p desc MUST be terminated by a NULL array element.
+ * Empty descriptions are ignored even if the bit is set.
+ *
+ * @returns a null-terminated \p dst
+ */
+char *rd_flags2str(char *dst, size_t size, const char **desc, int flags) {
+ int bit = 0;
+ size_t of = 0;
+
+ for (; *desc; desc++, bit++) {
+ int r;
+
+ if (!(flags & (1 << bit)) || !*desc)
+ continue;
+
+ if (of >= size) {
+ /* Dest buffer too small, indicate truncation */
+ if (size > 3)
+ rd_snprintf(dst + (size - 3), 3, "..");
+ break;
+ }
+
+ r = rd_snprintf(dst + of, size - of, "%s%s", !of ? "" : ",",
+ *desc);
+
+ of += r;
+ }
+
+ if (of == 0 && size > 0)
+ *dst = '\0';
+
+ return dst;
+}
+
+
+
+/**
+ * @returns a djb2 hash of \p str.
+ *
+ * @param len If -1 the \p str will be hashed until nul is encountered,
+ * else up to the \p len.
+ */
+unsigned int rd_string_hash(const char *str, ssize_t len) {
+ unsigned int hash = 5381;
+ ssize_t i;
+
+ if (len == -1) {
+ for (i = 0; str[i] != '\0'; i++)
+ hash = ((hash << 5) + hash) + str[i];
+ } else {
+ for (i = 0; i < len; i++)
+ hash = ((hash << 5) + hash) + str[i];
+ }
+
+ return hash;
+}
+
+
+/**
+ * @brief Same as strcmp() but handles NULL values.
+ */
+int rd_strcmp(const char *a, const char *b) {
+ if (a == b)
+ return 0;
+ else if (!a && b)
+ return -1;
+ else if (!b)
+ return 1;
+ else
+ return strcmp(a, b);
+}
+
+
+
+/**
+ * @brief Case-insensitive strstr() for platforms where strcasestr()
+ * is not available.
+ */
+char *_rd_strcasestr(const char *haystack, const char *needle) {
+ const char *h_rem, *n_last;
+ size_t h_len = strlen(haystack);
+ size_t n_len = strlen(needle);
+
+
+ if (n_len == 0 || n_len > h_len)
+ return NULL;
+ else if (n_len == h_len)
+ return !rd_strcasecmp(haystack, needle) ? (char *)haystack
+ : NULL;
+
+ /*
+ * Scan inspired by Boyer-Moore:
+ *
+ * haystack = "this is a haystack"
+ * needle = "hays"
+ *
+ * "this is a haystack"
+ * ^ ^- h_last
+ * `-h (haystack + strlen(needle) - 1)
+ * `-h_rem
+ *
+ * "hays"
+ * ^-n
+ * ^-n_last
+ */
+ n_last = needle + n_len - 1;
+ h_rem = haystack + n_len - 1;
+
+ while (*h_rem) {
+ const char *h, *n = n_last;
+
+ /* Find first occurrence of last character in the needle
+ in the remaining haystack. */
+ for (h = h_rem; *h && tolower((int)*h) != tolower((int)*n); h++)
+ ;
+
+ if (!*h)
+ return NULL; /* No match */
+
+ /* Backtrack both needle and haystack as long as each character
+ * matches, if the start of the needle is found we have
+ * a full match, else start over from the remaining part of the
+ * haystack. */
+ do {
+ if (n == needle)
+ return (char *)h; /* Full match */
+
+ /* Rewind both n and h */
+ n--;
+ h--;
+
+ } while (tolower((int)*n) == tolower((int)*h));
+
+ /* Mismatch, start over at the next haystack position */
+ h_rem++;
+ }
+
+ return NULL;
+}
+
+
+
+/**
+ * @brief Unittests for rd_strcasestr()
+ */
+static int ut_strcasestr(void) {
+ static const struct {
+ const char *haystack;
+ const char *needle;
+ ssize_t exp;
+ } strs[] = {
+ {"this is a haystack", "hays", 10},
+ {"abc", "a", 0},
+ {"abc", "b", 1},
+ {"abc", "c", 2},
+ {"AbcaBcabC", "ABC", 0},
+ {"abcabcaBC", "BcA", 1},
+ {"abcabcABc", "cAB", 2},
+ {"need to estart stART the tart ReStArT!", "REsTaRt", 30},
+ {"need to estart stART the tart ReStArT!", "?sTaRt", -1},
+ {"aaaabaaAb", "ab", 3},
+ {"0A!", "a", 1},
+ {"a", "A", 0},
+ {".z", "Z", 1},
+ {"", "", -1},
+ {"", "a", -1},
+ {"a", "", -1},
+ {"peRfeCt", "peRfeCt", 0},
+ {"perfect", "perfect", 0},
+ {"PERFECT", "perfect", 0},
+ {NULL},
+ };
+ int i;
+
+ RD_UT_BEGIN();
+
+ for (i = 0; strs[i].haystack; i++) {
+ const char *ret;
+ ssize_t of = -1;
+
+ ret = _rd_strcasestr(strs[i].haystack, strs[i].needle);
+ if (ret)
+ of = ret - strs[i].haystack;
+ RD_UT_ASSERT(of == strs[i].exp,
+ "#%d: '%s' in '%s': expected offset %" PRIdsz
+ ", not %" PRIdsz " (%s)",
+ i, strs[i].needle, strs[i].haystack, strs[i].exp,
+ of, ret ? ret : "(NULL)");
+ }
+
+ RD_UT_PASS();
+}
+
+
+
+/**
+ * @brief Split a character-separated string into an array.
+ *
+ * @remark This is not CSV compliant as CSV uses " for escapes, but this here
+ * uses \.
+ *
+ * @param input Input string to parse.
+ * @param sep The separator character (typically ',')
+ * @param skip_empty Do not include empty fields in output array.
+ * @param cntp Will be set to number of elements in array.
+ *
+ * Supports "\" escapes.
+ * The array and the array elements will be allocated together and must be freed
+ * with a single rd_free(array) call.
+ * The array elements are copied and any "\" escapes are removed.
+ *
+ * @returns the parsed fields in an array. The number of elements in the
+ * array is returned in \p cntp
+ */
+char **rd_string_split(const char *input,
+ char sep,
+ rd_bool_t skip_empty,
+ size_t *cntp) {
+ size_t fieldcnt = 1;
+ rd_bool_t next_esc = rd_false;
+ const char *s;
+ char *p;
+ char **arr;
+ size_t inputlen;
+ size_t i = 0;
+ size_t elen = 0;
+
+ *cntp = 0;
+
+ /* First count the maximum number of fields so we know how large of
+ * an array we need to allocate. Escapes are ignored. */
+ for (s = input; *s; s++) {
+ if (*s == sep)
+ fieldcnt++;
+ }
+
+ inputlen = (size_t)(s - input);
+
+ /* Allocate array and memory for the copied elements in one go. */
+ arr = rd_malloc((sizeof(*arr) * fieldcnt) + inputlen + 1);
+ p = (char *)(&arr[fieldcnt]);
+
+ for (s = input;; s++) {
+ rd_bool_t at_end = *s == '\0';
+ rd_bool_t is_esc = next_esc;
+
+ /* If we've reached the end, jump to done to finish
+ * the current field. */
+ if (at_end)
+ goto done;
+
+ if (unlikely(!is_esc && *s == '\\')) {
+ next_esc = rd_true;
+ continue;
+ }
+
+ next_esc = rd_false;
+
+ /* Strip leading whitespaces for each element */
+ if (!is_esc && elen == 0 && isspace((int)*s))
+ continue;
+
+ if (likely(is_esc || *s != sep)) {
+ char c = *s;
+ if (is_esc) {
+ /* Perform some common escape substitions.
+ * If not known we'll just keep the escaped
+ * character as is (probably the separator). */
+ switch (c) {
+ case 't':
+ c = '\t';
+ break;
+ case 'n':
+ c = '\n';
+ break;
+ case 'r':
+ c = '\r';
+ break;
+ case '0':
+ c = '\0';
+ break;
+ }
+ }
+ p[elen++] = c;
+ continue;
+ }
+
+ done:
+ /* Strip trailing whitespaces */
+ while (elen > 0 && isspace((int)p[elen - 1]))
+ elen--;
+
+ /* End of field */
+ if (elen == 0 && skip_empty) {
+ if (at_end)
+ break;
+ continue;
+ }
+
+ rd_assert(i < fieldcnt);
+
+ /* Nul-terminate the element */
+ p[elen++] = '\0';
+ /* Assign element to array */
+ arr[i] = p;
+ /* Update next element pointer past the written bytes */
+ p += elen;
+ /* Reset element length */
+ elen = 0;
+ /* Advance array element index */
+ i++;
+
+ if (at_end)
+ break;
+ }
+
+ *cntp = i;
+
+ return arr;
+}
+
+/**
+ * @brief Unittest for rd_string_split()
+ */
+static int ut_string_split(void) {
+ static const struct {
+ const char *input;
+ const char sep;
+ rd_bool_t skip_empty;
+ size_t exp_cnt;
+ const char *exp[16];
+ } strs[] = {
+ {"just one field", ',', rd_true, 1, {"just one field"}},
+ /* Empty with skip_empty */
+ {"", ',', rd_true, 0},
+ /* Empty without skip_empty */
+ {"", ',', rd_false, 1, {""}},
+ {
+ ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v",
+ ',',
+ rd_true,
+ 11,
+ {"a", "b", "c", "d", "e", "f", "ghijk", "lmn", "opq",
+ "r s t u", "v"},
+ },
+ {
+ ", a,b ,,c, d, e,f,ghijk, lmn,opq , r s t u, v",
+ ',',
+ rd_false,
+ 13,
+ {"", "a", "b", "", "c", "d", "e", "f", "ghijk", "lmn", "opq",
+ "r s t u", "v"},
+ },
+ {" this is an \\,escaped comma,\\,,\\\\, "
+ "and this is an unbalanced escape: \\\\\\\\\\\\\\",
+ ',',
+ rd_true,
+ 4,
+ {"this is an ,escaped comma", ",", "\\",
+ "and this is an unbalanced escape: \\\\\\"}},
+ {
+ "using|another ||\\|d|elimiter",
+ '|',
+ rd_false,
+ 5,
+ {"using", "another", "", "|d", "elimiter"},
+ },
+ {NULL},
+ };
+ size_t i;
+
+ RD_UT_BEGIN();
+
+ for (i = 0; strs[i].input; i++) {
+ char **ret;
+ size_t cnt = 12345;
+ size_t j;
+
+ ret = rd_string_split(strs[i].input, strs[i].sep,
+ strs[i].skip_empty, &cnt);
+ RD_UT_ASSERT(ret != NULL, "#%" PRIusz ": Did not expect NULL",
+ i);
+ RD_UT_ASSERT(cnt == strs[i].exp_cnt,
+ "#%" PRIusz
+ ": "
+ "Expected %" PRIusz " elements, got %" PRIusz,
+ i, strs[i].exp_cnt, cnt);
+
+ for (j = 0; j < cnt; j++)
+ RD_UT_ASSERT(!strcmp(strs[i].exp[j], ret[j]),
+ "#%" PRIusz ": Expected string %" PRIusz
+ " to be \"%s\", not \"%s\"",
+ i, j, strs[i].exp[j], ret[j]);
+
+ rd_free(ret);
+ }
+
+ RD_UT_PASS();
+}
+
+/**
+ * @brief Unittests for strings
+ */
+int unittest_string(void) {
+ int fails = 0;
+
+ fails += ut_strcasestr();
+ fails += ut_string_split();
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h
new file mode 100644
index 000000000..67ea19401
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdstring.h
@@ -0,0 +1,93 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDSTRING_H_
+#define _RDSTRING_H_
+
+static RD_INLINE RD_UNUSED void
+rd_strlcpy(char *dst, const char *src, size_t dstsize) {
+#if HAVE_STRLCPY
+ (void)strlcpy(dst, src, dstsize);
+#else
+ if (likely(dstsize > 0)) {
+ size_t srclen = strlen(src);
+ size_t copylen = RD_MIN(srclen, dstsize - 1);
+ memcpy(dst, src, copylen);
+ dst[copylen] = '\0';
+ }
+#endif
+}
+
+
+
+char *rd_string_render(
+ const char *templ,
+ char *errstr,
+ size_t errstr_size,
+ ssize_t (*callback)(const char *key, char *buf, size_t size, void *opaque),
+ void *opaque);
+
+
+
+/**
+ * @brief An immutable string tuple (name, value) in a single allocation.
+ * \p value may be NULL.
+ */
+typedef struct rd_strtup_s {
+ char *value;
+ char name[1]; /* Actual allocation of name + val here */
+} rd_strtup_t;
+
+void rd_strtup_destroy(rd_strtup_t *strtup);
+void rd_strtup_free(void *strtup);
+rd_strtup_t *rd_strtup_new0(const char *name,
+ ssize_t name_len,
+ const char *value,
+ ssize_t value_len);
+rd_strtup_t *rd_strtup_new(const char *name, const char *value);
+rd_strtup_t *rd_strtup_dup(const rd_strtup_t *strtup);
+void *rd_strtup_list_copy(const void *elem, void *opaque);
+
+char *rd_flags2str(char *dst, size_t size, const char **desc, int flags);
+
+unsigned int rd_string_hash(const char *str, ssize_t len);
+
+int rd_strcmp(const char *a, const char *b);
+
+char *_rd_strcasestr(const char *haystack, const char *needle);
+
+char **rd_string_split(const char *input,
+ char sep,
+ rd_bool_t skip_empty,
+ size_t *cntp);
+
+/** @returns "true" if EXPR is true, else "false" */
+#define RD_STR_ToF(EXPR) ((EXPR) ? "true" : "false")
+
+#endif /* _RDSTRING_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h
new file mode 100644
index 000000000..ecba4154e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdsysqueue.h
@@ -0,0 +1,404 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * Copyright (c) 2012-2013, Andreas Öman
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/*
+
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDSYSQUEUE_H_
+#define _RDSYSQUEUE_H_
+
+#include "queue.h"
+
+/*
+ * Complete missing LIST-ops
+ */
+
+#ifndef LIST_FOREACH
+#define LIST_FOREACH(var, head, field) \
+ for ((var) = ((head)->lh_first); (var); (var) = ((var)->field.le_next))
+#endif
+
+#ifndef LIST_EMPTY
+#define LIST_EMPTY(head) ((head)->lh_first == NULL)
+#endif
+
+#ifndef LIST_FIRST
+#define LIST_FIRST(head) ((head)->lh_first)
+#endif
+
+#ifndef LIST_NEXT
+#define LIST_NEXT(elm, field) ((elm)->field.le_next)
+#endif
+
+#ifndef LIST_INSERT_BEFORE
+#define LIST_INSERT_BEFORE(listelm, elm, field) \
+ do { \
+ (elm)->field.le_prev = (listelm)->field.le_prev; \
+ (elm)->field.le_next = (listelm); \
+ *(listelm)->field.le_prev = (elm); \
+ (listelm)->field.le_prev = &(elm)->field.le_next; \
+ } while (/*CONSTCOND*/ 0)
+#endif
+
+/*
+ * Complete missing TAILQ-ops
+ */
+
+#ifndef TAILQ_HEAD_INITIALIZER
+#define TAILQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).tqh_first }
+#endif
+
+#ifndef TAILQ_INSERT_BEFORE
+#define TAILQ_INSERT_BEFORE(listelm, elm, field) \
+ do { \
+ (elm)->field.tqe_prev = (listelm)->field.tqe_prev; \
+ (elm)->field.tqe_next = (listelm); \
+ *(listelm)->field.tqe_prev = (elm); \
+ (listelm)->field.tqe_prev = &(elm)->field.tqe_next; \
+ } while (0)
+#endif
+
+#ifndef TAILQ_FOREACH
+#define TAILQ_FOREACH(var, head, field) \
+ for ((var) = ((head)->tqh_first); (var); \
+ (var) = ((var)->field.tqe_next))
+#endif
+
+#ifndef TAILQ_EMPTY
+#define TAILQ_EMPTY(head) ((head)->tqh_first == NULL)
+#endif
+
+#ifndef TAILQ_FIRST
+#define TAILQ_FIRST(head) ((head)->tqh_first)
+#endif
+
+#ifndef TAILQ_NEXT
+#define TAILQ_NEXT(elm, field) ((elm)->field.tqe_next)
+#endif
+
+#ifndef TAILQ_LAST
+#define TAILQ_LAST(head, headname) \
+ (*(((struct headname *)((head)->tqh_last))->tqh_last))
+#endif
+
+#ifndef TAILQ_PREV
+#define TAILQ_PREV(elm, headname, field) \
+ (*(((struct headname *)((elm)->field.tqe_prev))->tqh_last))
+#endif
+
+#ifndef TAILQ_FOREACH_SAFE
+/*
+ * TAILQ_FOREACH_SAFE() provides a traversal where the current iterated element
+ * may be freed or unlinked.
+ * It does not allow freeing or modifying any other element in the list,
+ * at least not the next element.
+ */
+#define TAILQ_FOREACH_SAFE(elm, head, field, tmpelm) \
+ for ((elm) = TAILQ_FIRST(head); \
+ (elm) && ((tmpelm) = TAILQ_NEXT((elm), field), 1); \
+ (elm) = (tmpelm))
+#endif
+
+/*
+ * In Mac OS 10.4 and earlier TAILQ_FOREACH_REVERSE was defined
+ * differently, redefined it.
+ */
+#ifdef __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__
+#if __ENVIRONMENT_MAC_OS_X_VERSION_MIN_REQUIRED__ < 1050
+#undef TAILQ_FOREACH_REVERSE
+#endif
+#endif
+
+#ifndef TAILQ_FOREACH_REVERSE
+#define TAILQ_FOREACH_REVERSE(var, head, headname, field) \
+ for ((var) = (*(((struct headname *)((head)->tqh_last))->tqh_last)); \
+ (var); \
+ (var) = \
+ (*(((struct headname *)((var)->field.tqe_prev))->tqh_last)))
+#endif
+
+
+/**
+ * Treat the TAILQ as a circular list and return the previous/next entry,
+ * possibly wrapping to the end/beginning.
+ */
+#define TAILQ_CIRC_PREV(var, head, headname, field) \
+ ((var) != TAILQ_FIRST(head) ? TAILQ_PREV(var, headname, field) \
+ : TAILQ_LAST(head, headname))
+
+#define TAILQ_CIRC_NEXT(var, head, headname, field) \
+ ((var) != TAILQ_LAST(head, headname) ? TAILQ_NEXT(var, field) \
+ : TAILQ_FIRST(head))
+
+/*
+ * Some extra functions for LIST manipulation
+ */
+
+#define LIST_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \
+ do { \
+ if (LIST_EMPTY(head)) { \
+ LIST_INSERT_HEAD(head, elm, field); \
+ } else { \
+ elmtype _tmp; \
+ LIST_FOREACH(_tmp, head, field) { \
+ if (cmpfunc(elm, _tmp) < 0) { \
+ LIST_INSERT_BEFORE(_tmp, elm, field); \
+ break; \
+ } \
+ if (!LIST_NEXT(_tmp, field)) { \
+ LIST_INSERT_AFTER(_tmp, elm, field); \
+ break; \
+ } \
+ } \
+ } \
+ } while (0)
+
+#ifndef TAILQ_INSERT_SORTED
+#define TAILQ_INSERT_SORTED(head, elm, elmtype, field, cmpfunc) \
+ do { \
+ if (TAILQ_FIRST(head) == NULL) { \
+ TAILQ_INSERT_HEAD(head, elm, field); \
+ } else { \
+ elmtype _tmp; \
+ TAILQ_FOREACH(_tmp, head, field) { \
+ if (cmpfunc(elm, _tmp) < 0) { \
+ TAILQ_INSERT_BEFORE(_tmp, elm, field); \
+ break; \
+ } \
+ if (!TAILQ_NEXT(_tmp, field)) { \
+ TAILQ_INSERT_AFTER(head, _tmp, elm, \
+ field); \
+ break; \
+ } \
+ } \
+ } \
+ } while (0)
+#endif
+
+/**
+ * @brief Add all elements from \p srchead to \p dsthead using sort
+ * comparator \p cmpfunc.
+ * \p src will be re-initialized on completion.
+ */
+#define TAILQ_CONCAT_SORTED(dsthead, srchead, elmtype, field, cmpfunc) \
+ do { \
+ elmtype _cstmp; \
+ elmtype _cstmp2; \
+ if (TAILQ_EMPTY(dsthead)) { \
+ TAILQ_CONCAT(dsthead, srchead, field); \
+ break; \
+ } \
+ TAILQ_FOREACH_SAFE(_cstmp, srchead, field, _cstmp2) { \
+ TAILQ_INSERT_SORTED(dsthead, _cstmp, elmtype, field, \
+ cmpfunc); \
+ } \
+ TAILQ_INIT(srchead); \
+ } while (0)
+
+#define TAILQ_MOVE(newhead, oldhead, field) \
+ do { \
+ if (TAILQ_FIRST(oldhead)) { \
+ TAILQ_FIRST(oldhead)->field.tqe_prev = \
+ &(newhead)->tqh_first; \
+ (newhead)->tqh_first = (oldhead)->tqh_first; \
+ (newhead)->tqh_last = (oldhead)->tqh_last; \
+ TAILQ_INIT(oldhead); \
+ } else \
+ TAILQ_INIT(newhead); \
+ } while (/*CONSTCOND*/ 0)
+
+
+/* @brief Prepend \p shead to \p dhead */
+#define TAILQ_PREPEND(dhead, shead, headname, field) \
+ do { \
+ if (unlikely(TAILQ_EMPTY(dhead))) { \
+ TAILQ_MOVE(dhead, shead, field); \
+ } else if (likely(!TAILQ_EMPTY(shead))) { \
+ TAILQ_LAST(shead, headname)->field.tqe_next = \
+ TAILQ_FIRST(dhead); \
+ TAILQ_FIRST(dhead)->field.tqe_prev = \
+ &TAILQ_LAST(shead, headname)->field.tqe_next; \
+ TAILQ_FIRST(shead)->field.tqe_prev = \
+ &(dhead)->tqh_first; \
+ TAILQ_FIRST(dhead) = TAILQ_FIRST(shead); \
+ TAILQ_INIT(shead); \
+ } \
+ } while (0)
+
+/* @brief Insert \p shead after element \p listelm in \p dhead */
+#define TAILQ_INSERT_LIST(dhead, listelm, shead, headname, elmtype, field) \
+ do { \
+ if (TAILQ_LAST(dhead, headname) == listelm) { \
+ TAILQ_CONCAT(dhead, shead, field); \
+ } else { \
+ elmtype _elm = TAILQ_FIRST(shead); \
+ elmtype _last = TAILQ_LAST(shead, headname); \
+ elmtype _aft = TAILQ_NEXT(listelm, field); \
+ (listelm)->field.tqe_next = _elm; \
+ _elm->field.tqe_prev = &(listelm)->field.tqe_next; \
+ _last->field.tqe_next = _aft; \
+ _aft->field.tqe_prev = &_last->field.tqe_next; \
+ TAILQ_INIT((shead)); \
+ } \
+ } while (0)
+
+/* @brief Insert \p shead before element \p listelm in \p dhead */
+#define TAILQ_INSERT_LIST_BEFORE(dhead, insert_before, shead, headname, \
+ elmtype, field) \
+ do { \
+ if (TAILQ_FIRST(dhead) == insert_before) { \
+ TAILQ_PREPEND(dhead, shead, headname, field); \
+ } else { \
+ elmtype _first = TAILQ_FIRST(shead); \
+ elmtype _last = TAILQ_LAST(shead, headname); \
+ elmtype _dprev = \
+ TAILQ_PREV(insert_before, headname, field); \
+ _last->field.tqe_next = insert_before; \
+ _dprev->field.tqe_next = _first; \
+ (insert_before)->field.tqe_prev = \
+ &_last->field.tqe_next; \
+ _first->field.tqe_prev = &(_dprev)->field.tqe_next; \
+ TAILQ_INIT((shead)); \
+ } \
+ } while (0)
+
+#ifndef SIMPLEQ_HEAD
+#define SIMPLEQ_HEAD(name, type) \
+ struct name { \
+ struct type *sqh_first; \
+ struct type **sqh_last; \
+ }
+#endif
+
+#ifndef SIMPLEQ_ENTRY
+#define SIMPLEQ_ENTRY(type) \
+ struct { \
+ struct type *sqe_next; \
+ }
+#endif
+
+#ifndef SIMPLEQ_FIRST
+#define SIMPLEQ_FIRST(head) ((head)->sqh_first)
+#endif
+
+#ifndef SIMPLEQ_REMOVE_HEAD
+#define SIMPLEQ_REMOVE_HEAD(head, field) \
+ do { \
+ if (((head)->sqh_first = (head)->sqh_first->field.sqe_next) == \
+ NULL) \
+ (head)->sqh_last = &(head)->sqh_first; \
+ } while (0)
+#endif
+
+#ifndef SIMPLEQ_INSERT_TAIL
+#define SIMPLEQ_INSERT_TAIL(head, elm, field) \
+ do { \
+ (elm)->field.sqe_next = NULL; \
+ *(head)->sqh_last = (elm); \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ } while (0)
+#endif
+
+#ifndef SIMPLEQ_INIT
+#define SIMPLEQ_INIT(head) \
+ do { \
+ (head)->sqh_first = NULL; \
+ (head)->sqh_last = &(head)->sqh_first; \
+ } while (0)
+#endif
+
+#ifndef SIMPLEQ_INSERT_HEAD
+#define SIMPLEQ_INSERT_HEAD(head, elm, field) \
+ do { \
+ if (((elm)->field.sqe_next = (head)->sqh_first) == NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (head)->sqh_first = (elm); \
+ } while (0)
+#endif
+
+#ifndef SIMPLEQ_FOREACH
+#define SIMPLEQ_FOREACH(var, head, field) \
+ for ((var) = SIMPLEQ_FIRST(head); (var) != SIMPLEQ_END(head); \
+ (var) = SIMPLEQ_NEXT(var, field))
+#endif
+
+#ifndef SIMPLEQ_INSERT_AFTER
+#define SIMPLEQ_INSERT_AFTER(head, listelm, elm, field) \
+ do { \
+ if (((elm)->field.sqe_next = (listelm)->field.sqe_next) == \
+ NULL) \
+ (head)->sqh_last = &(elm)->field.sqe_next; \
+ (listelm)->field.sqe_next = (elm); \
+ } while (0)
+#endif
+
+#ifndef SIMPLEQ_END
+#define SIMPLEQ_END(head) NULL
+#endif
+
+#ifndef SIMPLEQ_NEXT
+#define SIMPLEQ_NEXT(elm, field) ((elm)->field.sqe_next)
+#endif
+
+#ifndef SIMPLEQ_HEAD_INITIALIZER
+#define SIMPLEQ_HEAD_INITIALIZER(head) \
+ { NULL, &(head).sqh_first }
+#endif
+
+#ifndef SIMPLEQ_EMPTY
+#define SIMPLEQ_EMPTY(head) (SIMPLEQ_FIRST(head) == SIMPLEQ_END(head))
+#endif
+
+
+
+#endif /* _RDSYSQUEUE_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h
new file mode 100644
index 000000000..4a3e5d855
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdtime.h
@@ -0,0 +1,309 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDTIME_H_
+#define _RDTIME_H_
+
+
+#ifndef TIMEVAL_TO_TIMESPEC
+#define TIMEVAL_TO_TIMESPEC(tv, ts) \
+ do { \
+ (ts)->tv_sec = (tv)->tv_sec; \
+ (ts)->tv_nsec = (tv)->tv_usec * 1000; \
+ } while (0)
+
+#define TIMESPEC_TO_TIMEVAL(tv, ts) \
+ do { \
+ (tv)->tv_sec = (ts)->tv_sec; \
+ (tv)->tv_usec = (ts)->tv_nsec / 1000; \
+ } while (0)
+#endif
+
+#define TIMESPEC_TO_TS(ts) \
+ (((rd_ts_t)(ts)->tv_sec * 1000000LLU) + ((ts)->tv_nsec / 1000))
+
+#define TS_TO_TIMESPEC(ts, tsx) \
+ do { \
+ (ts)->tv_sec = (tsx) / 1000000; \
+ (ts)->tv_nsec = ((tsx) % 1000000) * 1000; \
+ if ((ts)->tv_nsec >= 1000000000LLU) { \
+ (ts)->tv_sec++; \
+ (ts)->tv_nsec -= 1000000000LLU; \
+ } \
+ } while (0)
+
+#define TIMESPEC_CLEAR(ts) ((ts)->tv_sec = (ts)->tv_nsec = 0LLU)
+
+
+#define RD_POLL_INFINITE -1
+#define RD_POLL_NOWAIT 0
+
+
+#if RD_UNITTEST_QPC_OVERRIDES
+/* Overrides for rd_clock() unittest using QPC on Windows */
+BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency);
+BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount);
+#define rd_QueryPerformanceFrequency(IFREQ) \
+ rd_ut_QueryPerformanceFrequency(IFREQ)
+#define rd_QueryPerformanceCounter(PC) rd_ut_QueryPerformanceCounter(PC)
+#else
+#define rd_QueryPerformanceFrequency(IFREQ) QueryPerformanceFrequency(IFREQ)
+#define rd_QueryPerformanceCounter(PC) QueryPerformanceCounter(PC)
+#endif
+
+/**
+ * @returns a monotonically increasing clock in microseconds.
+ * @remark There is no monotonic clock on OSX, the system time
+ * is returned instead.
+ */
+static RD_INLINE rd_ts_t rd_clock(void) RD_UNUSED;
+static RD_INLINE rd_ts_t rd_clock(void) {
+#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29)
+ /* No monotonic clock on Darwin */
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec;
+#elif defined(_WIN32)
+ LARGE_INTEGER now;
+ static RD_TLS double freq = 0.0;
+ if (!freq) {
+ LARGE_INTEGER ifreq;
+ rd_QueryPerformanceFrequency(&ifreq);
+ /* Convert frequency to double to avoid overflow in
+ * return statement */
+ freq = (double)ifreq.QuadPart / 1000000.0;
+ }
+ rd_QueryPerformanceCounter(&now);
+ return (rd_ts_t)((double)now.QuadPart / freq);
+#else
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ((rd_ts_t)ts.tv_sec * 1000000LLU) +
+ ((rd_ts_t)ts.tv_nsec / 1000LLU);
+#endif
+}
+
+
+/**
+ * @returns UTC wallclock time as number of microseconds since
+ * beginning of the epoch.
+ */
+static RD_INLINE RD_UNUSED rd_ts_t rd_uclock(void) {
+ struct timeval tv;
+ rd_gettimeofday(&tv, NULL);
+ return ((rd_ts_t)tv.tv_sec * 1000000LLU) + (rd_ts_t)tv.tv_usec;
+}
+
+
+
+/**
+ * Thread-safe version of ctime() that strips the trailing newline.
+ */
+static RD_INLINE const char *rd_ctime(const time_t *t) RD_UNUSED;
+static RD_INLINE const char *rd_ctime(const time_t *t) {
+ static RD_TLS char ret[27];
+
+#ifndef _WIN32
+ ctime_r(t, ret);
+#else
+ ctime_s(ret, sizeof(ret), t);
+#endif
+ ret[25] = '\0';
+
+ return ret;
+}
+
+
+/**
+ * @brief Convert a relative millisecond timeout to microseconds,
+ * properly handling RD_POLL_NOWAIT, et.al.
+ */
+static RD_INLINE rd_ts_t rd_timeout_us(int timeout_ms) {
+ if (timeout_ms <= 0)
+ return (rd_ts_t)timeout_ms;
+ else
+ return (rd_ts_t)timeout_ms * 1000;
+}
+
+/**
+ * @brief Convert a relative microsecond timeout to milliseconds,
+ * properly handling RD_POLL_NOWAIT, et.al.
+ */
+static RD_INLINE int rd_timeout_ms(rd_ts_t timeout_us) {
+ if (timeout_us <= 0)
+ return (int)timeout_us;
+ else
+ /* + 999: Round up to millisecond to
+ * avoid busy-looping during the last
+ * millisecond. */
+ return (int)((timeout_us + 999) / 1000);
+}
+
+
+/**
+ * @brief Initialize an absolute timeout based on the provided \p timeout_ms
+ *
+ * To be used with rd_timeout_adjust().
+ *
+ * Honours RD_POLL_INFINITE, RD_POLL_NOWAIT.
+ *
+ * @returns the absolute timeout which should later be passed
+ * to rd_timeout_adjust().
+ */
+static RD_INLINE rd_ts_t rd_timeout_init(int timeout_ms) {
+ if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT)
+ return timeout_ms;
+
+ return rd_clock() + ((rd_ts_t)timeout_ms * 1000);
+}
+
+
+/**
+ * @brief Initialize an absolute timespec timeout based on the provided
+ * relative \p timeout_us.
+ *
+ * To be used with cnd_timedwait_abs().
+ *
+ * Honours RD_POLL_INFITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec).
+ */
+static RD_INLINE void rd_timeout_init_timespec_us(struct timespec *tspec,
+ rd_ts_t timeout_us) {
+ if (timeout_us == RD_POLL_INFINITE || timeout_us == RD_POLL_NOWAIT) {
+ tspec->tv_sec = timeout_us;
+ tspec->tv_nsec = 0;
+ } else {
+#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29)
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ TIMEVAL_TO_TIMESPEC(&tv, tspec);
+#else
+ timespec_get(tspec, TIME_UTC);
+#endif
+ tspec->tv_sec += timeout_us / 1000000;
+ tspec->tv_nsec += (timeout_us % 1000000) * 1000;
+ if (tspec->tv_nsec >= 1000000000) {
+ tspec->tv_nsec -= 1000000000;
+ tspec->tv_sec++;
+ }
+ }
+}
+
+/**
+ * @brief Initialize an absolute timespec timeout based on the provided
+ * relative \p timeout_ms.
+ *
+ * To be used with cnd_timedwait_abs().
+ *
+ * Honours RD_POLL_INFITE and RD_POLL_NOWAIT (reflected in tspec.tv_sec).
+ */
+static RD_INLINE void rd_timeout_init_timespec(struct timespec *tspec,
+ int timeout_ms) {
+ if (timeout_ms == RD_POLL_INFINITE || timeout_ms == RD_POLL_NOWAIT) {
+ tspec->tv_sec = timeout_ms;
+ tspec->tv_nsec = 0;
+ } else {
+#if defined(__APPLE__) || (defined(__ANDROID__) && __ANDROID_API__ < 29)
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ TIMEVAL_TO_TIMESPEC(&tv, tspec);
+#else
+ timespec_get(tspec, TIME_UTC);
+#endif
+ tspec->tv_sec += timeout_ms / 1000;
+ tspec->tv_nsec += (timeout_ms % 1000) * 1000000;
+ if (tspec->tv_nsec >= 1000000000) {
+ tspec->tv_nsec -= 1000000000;
+ tspec->tv_sec++;
+ }
+ }
+}
+
+
+/**
+ * @brief Same as rd_timeout_remains() but with microsecond precision
+ */
+static RD_INLINE rd_ts_t rd_timeout_remains_us(rd_ts_t abs_timeout) {
+ rd_ts_t timeout_us;
+
+ if (abs_timeout == RD_POLL_INFINITE || abs_timeout == RD_POLL_NOWAIT)
+ return (rd_ts_t)abs_timeout;
+
+ timeout_us = abs_timeout - rd_clock();
+ if (timeout_us <= 0)
+ return RD_POLL_NOWAIT;
+ else
+ return timeout_us;
+}
+
+/**
+ * @returns the remaining timeout for timeout \p abs_timeout previously set
+ * up by rd_timeout_init()
+ *
+ * Honours RD_POLL_INFINITE, RD_POLL_NOWAIT.
+ *
+ * @remark Check explicitly for 0 (NOWAIT) to check if there is
+ * no remaining time to wait. Any other value, even negative (INFINITE),
+ * means there is remaining time.
+ * rd_timeout_expired() can be used to check the return value
+ * in a bool fashion.
+ */
+static RD_INLINE int rd_timeout_remains(rd_ts_t abs_timeout) {
+ return rd_timeout_ms(rd_timeout_remains_us(abs_timeout));
+}
+
+
+
+/**
+ * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms,
+ * and operates on the return value of rd_timeout_remains().
+ */
+static RD_INLINE int rd_timeout_remains_limit0(int remains_ms, int limit_ms) {
+ if (remains_ms == RD_POLL_INFINITE || remains_ms > limit_ms)
+ return limit_ms;
+ else
+ return remains_ms;
+}
+
+/**
+ * @brief Like rd_timeout_remains() but limits the maximum time to \p limit_ms
+ */
+static RD_INLINE int rd_timeout_remains_limit(rd_ts_t abs_timeout,
+ int limit_ms) {
+ return rd_timeout_remains_limit0(rd_timeout_remains(abs_timeout),
+ limit_ms);
+}
+
+/**
+ * @returns 1 if the **relative** timeout as returned by rd_timeout_remains()
+ * has timed out / expired, else 0.
+ */
+static RD_INLINE int rd_timeout_expired(int timeout_ms) {
+ return timeout_ms == RD_POLL_NOWAIT;
+}
+
+#endif /* _RDTIME_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h
new file mode 100644
index 000000000..8f3625512
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdtypes.h
@@ -0,0 +1,86 @@
+/*
+ * librd - Rapid Development C library
+ *
+ * Copyright (c) 2012, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RDTYPES_H_
+#define _RDTYPES_H_
+
+#include <inttypes.h>
+
+
+/*
+ * Fundamental types
+ */
+
+
+/* Timestamp (microseconds).
+ * Struct members with this type usually have the "ts_" prefix for
+ * the internal monotonic clock timestamp, or "wts_" for wall clock timestamp.
+ */
+typedef int64_t rd_ts_t;
+
+#define RD_TS_MAX INT64_MAX
+
+
+typedef uint8_t rd_bool_t;
+#define rd_true 1
+#define rd_false 0
+
+
+/**
+ * @enum Denotes an async or sync operation
+ */
+typedef enum {
+ RD_SYNC = 0, /**< Synchronous/blocking */
+ RD_ASYNC, /**< Asynchronous/non-blocking */
+} rd_async_t;
+
+
+/**
+ * @enum Instruct function to acquire or not to acquire a lock
+ */
+typedef enum {
+ RD_DONT_LOCK = 0, /**< Do not acquire lock */
+ RD_DO_LOCK = 1, /**< Do acquire lock */
+} rd_dolock_t;
+
+
+/*
+ * Helpers
+ */
+
+/**
+ * @brief Overflow-safe type-agnostic compare for use in cmp functions.
+ *
+ * @warning A and B may be evaluated multiple times.
+ *
+ * @returns -1, 0 or 1.
+ */
+#define RD_CMP(A, B) (int)((A) < (B) ? -1 : ((A) > (B)))
+
+
+#endif /* _RDTYPES_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c
new file mode 100644
index 000000000..aa14b6aa8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.c
@@ -0,0 +1,529 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef _WIN32
+#define RD_UNITTEST_QPC_OVERRIDES 1
+#endif
+
+#include "rd.h"
+#include "rdunittest.h"
+
+#include "rdvarint.h"
+#include "rdbuf.h"
+#include "crc32c.h"
+#include "rdmurmur2.h"
+#include "rdfnv1a.h"
+#if WITH_HDRHISTOGRAM
+#include "rdhdrhistogram.h"
+#endif
+#include "rdkafka_int.h"
+#include "rdkafka_broker.h"
+#include "rdkafka_request.h"
+
+#include "rdsysqueue.h"
+#include "rdkafka_sasl_oauthbearer.h"
+#if WITH_OAUTHBEARER_OIDC
+#include "rdkafka_sasl_oauthbearer_oidc.h"
+#endif
+#include "rdkafka_msgset.h"
+#include "rdkafka_txnmgr.h"
+
+rd_bool_t rd_unittest_assert_on_failure = rd_false;
+rd_bool_t rd_unittest_on_ci = rd_false;
+rd_bool_t rd_unittest_slow = rd_false;
+
+#if ENABLE_CODECOV
+/**
+ * @name Code coverage
+ * @{
+ */
+
+static rd_atomic64_t rd_ut_covnrs[RD_UT_COVNR_MAX + 1];
+
+void rd_ut_coverage(const char *file, const char *func, int line, int covnr) {
+ rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX);
+ rd_atomic64_add(&rd_ut_covnrs[covnr], 1);
+}
+
+
+int64_t
+rd_ut_coverage_check(const char *file, const char *func, int line, int covnr) {
+ int64_t r;
+
+ rd_assert(covnr >= 0 && covnr <= RD_UT_COVNR_MAX);
+
+ r = rd_atomic64_get(&rd_ut_covnrs[covnr]);
+
+ if (!r) {
+ fprintf(stderr,
+ "\033[31m"
+ "RDUT: FAIL: %s:%d: %s: "
+ "Code coverage nr %d: FAIL: "
+ "code path not executed: "
+ "perform `grep -RnF 'COVERAGE(%d)' src/` to find "
+ "source location"
+ "\033[0m\n",
+ file, line, func, covnr, covnr);
+ if (rd_unittest_assert_on_failure)
+ rd_assert(!*"unittest failure");
+ return 0;
+ }
+
+ fprintf(stderr,
+ "\033[34mRDUT: CCOV: %s:%d: %s: Code coverage nr %d: "
+ "PASS (%" PRId64 " code path execution(s))\033[0m\n",
+ file, line, func, covnr, r);
+
+ return r;
+}
+/**@}*/
+
+#endif /* ENABLE_CODECOV */
+
+
+/**
+ * @name Test rdsysqueue.h / queue.h
+ * @{
+ */
+
+struct ut_tq {
+ TAILQ_ENTRY(ut_tq) link;
+ int v;
+};
+
+TAILQ_HEAD(ut_tq_head, ut_tq);
+
+struct ut_tq_args {
+ const char *name; /**< Descriptive test name */
+ struct {
+ int base; /**< Base value */
+ int cnt; /**< Number of elements to add */
+ int step; /**< Value step */
+ } q[3]; /**< Queue element definition */
+ int qcnt; /**< Number of defs in .q */
+ int exp[16]; /**< Expected value order after join */
+};
+
+/**
+ * @brief Find the previous element (insert position) for
+ * value \p val in list \p head or NULL if \p val is less than
+ * the first element in \p head.
+ * @remarks \p head must be ascending sorted.
+ */
+static struct ut_tq *ut_tq_find_prev_pos(const struct ut_tq_head *head,
+ int val) {
+ struct ut_tq *e, *prev = NULL;
+
+ TAILQ_FOREACH(e, head, link) {
+ if (e->v > val)
+ return prev;
+ prev = e;
+ }
+
+ return prev;
+}
+
+static int ut_tq_test(const struct ut_tq_args *args) {
+ int totcnt = 0;
+ int fails = 0;
+ struct ut_tq_head *tqh[3] = {NULL, NULL, NULL};
+ struct ut_tq *e, *insert_after;
+ int i, qi;
+
+ RD_UT_SAY("Testing TAILQ: %s", args->name);
+
+ /*
+ * Verify TAILQ_INSERT_LIST:
+ * For each insert position test:
+ * - create two lists: tqh 0 and 1
+ * - add entries to both lists
+ * - insert list 1 into 0
+ * - verify expected order and correctness
+ */
+
+ /* Use heap allocated heads to let valgrind/asan assist
+ * in detecting corruption. */
+
+ for (qi = 0; qi < args->qcnt; qi++) {
+ tqh[qi] = rd_calloc(1, sizeof(*tqh[qi]));
+ TAILQ_INIT(tqh[qi]);
+
+ for (i = 0; i < args->q[qi].cnt; i++) {
+ e = rd_malloc(sizeof(*e));
+ e->v = args->q[qi].base + (i * args->q[qi].step);
+ TAILQ_INSERT_TAIL(tqh[qi], e, link);
+ }
+
+ totcnt += args->q[qi].cnt;
+ }
+
+ for (qi = 1; qi < args->qcnt; qi++) {
+ insert_after = ut_tq_find_prev_pos(tqh[0], args->q[qi].base);
+ if (!insert_after) {
+ /* Insert position is head of list,
+ * do two-step concat+move */
+ TAILQ_PREPEND(tqh[0], tqh[qi], ut_tq_head, link);
+ } else {
+ TAILQ_INSERT_LIST(tqh[0], insert_after, tqh[qi],
+ ut_tq_head, struct ut_tq *, link);
+ }
+
+ RD_UT_ASSERT(TAILQ_EMPTY(tqh[qi]), "expected empty tqh[%d]",
+ qi);
+ RD_UT_ASSERT(!TAILQ_EMPTY(tqh[0]), "expected non-empty tqh[0]");
+
+ memset(tqh[qi], (int)'A', sizeof(*tqh[qi]));
+ rd_free(tqh[qi]);
+ }
+
+ RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1],
+ "TAILQ_LAST val %d, expected %d",
+ TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]);
+
+ /* Add sentinel value to verify that INSERT_TAIL works
+ * after INSERT_LIST */
+ e = rd_malloc(sizeof(*e));
+ e->v = 99;
+ TAILQ_INSERT_TAIL(tqh[0], e, link);
+ totcnt++;
+
+ i = 0;
+ TAILQ_FOREACH(e, tqh[0], link) {
+ if (i >= totcnt) {
+ RD_UT_WARN(
+ "Too many elements in list tqh[0]: "
+ "idx %d > totcnt %d: element %p (value %d)",
+ i, totcnt, e, e->v);
+ fails++;
+ } else if (e->v != args->exp[i]) {
+ RD_UT_WARN(
+ "Element idx %d/%d in tqh[0] has value %d, "
+ "expected %d",
+ i, totcnt, e->v, args->exp[i]);
+ fails++;
+ } else if (i == totcnt - 1 &&
+ e != TAILQ_LAST(tqh[0], ut_tq_head)) {
+ RD_UT_WARN("TAILQ_LAST == %p, expected %p",
+ TAILQ_LAST(tqh[0], ut_tq_head), e);
+ fails++;
+ }
+ i++;
+ }
+
+ /* Then scan it in reverse */
+ i = totcnt - 1;
+ TAILQ_FOREACH_REVERSE(e, tqh[0], ut_tq_head, link) {
+ if (i < 0) {
+ RD_UT_WARN(
+ "REVERSE: Too many elements in list tqh[0]: "
+ "idx %d < 0: element %p (value %d)",
+ i, e, e->v);
+ fails++;
+ } else if (e->v != args->exp[i]) {
+ RD_UT_WARN(
+ "REVERSE: Element idx %d/%d in tqh[0] has "
+ "value %d, expected %d",
+ i, totcnt, e->v, args->exp[i]);
+ fails++;
+ } else if (i == totcnt - 1 &&
+ e != TAILQ_LAST(tqh[0], ut_tq_head)) {
+ RD_UT_WARN("REVERSE: TAILQ_LAST == %p, expected %p",
+ TAILQ_LAST(tqh[0], ut_tq_head), e);
+ fails++;
+ }
+ i--;
+ }
+
+ RD_UT_ASSERT(TAILQ_LAST(tqh[0], ut_tq_head)->v == args->exp[totcnt - 1],
+ "TAILQ_LAST val %d, expected %d",
+ TAILQ_LAST(tqh[0], ut_tq_head)->v, args->exp[totcnt - 1]);
+
+ while ((e = TAILQ_FIRST(tqh[0]))) {
+ TAILQ_REMOVE(tqh[0], e, link);
+ rd_free(e);
+ }
+
+ rd_free(tqh[0]);
+
+ return fails;
+}
+
+
+static int unittest_sysqueue(void) {
+ const struct ut_tq_args args[] = {
+ {"empty tqh[0]",
+ {{0, 0, 0}, {0, 3, 1}},
+ 2,
+ {0, 1, 2, 99 /*sentinel*/}},
+ {"prepend 1,0",
+ {{10, 3, 1}, {0, 3, 1}},
+ 2,
+ {0, 1, 2, 10, 11, 12, 99}},
+ {"prepend 2,1,0",
+ {
+ {10, 3, 1}, /* 10, 11, 12 */
+ {5, 3, 1}, /* 5, 6, 7 */
+ {0, 2, 1} /* 0, 1 */
+ },
+ 3,
+ {0, 1, 5, 6, 7, 10, 11, 12, 99}},
+ {"insert 1", {{0, 3, 2}, {1, 2, 2}}, 2, {0, 1, 3, 2, 4, 99}},
+ {"insert 1,2",
+ {
+ {0, 3, 3}, /* 0, 3, 6 */
+ {1, 2, 3}, /* 1, 4 */
+ {2, 1, 3} /* 2 */
+ },
+ 3,
+ {0, 1, 2, 4, 3, 6, 99}},
+ {"append 1",
+ {{0, 5, 1}, {5, 5, 1}},
+ 2,
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 99}},
+ {"append 1,2",
+ {
+ {0, 5, 1}, /* 0, 1, 2, 3, 4 */
+ {5, 5, 1}, /* 5, 6, 7, 8, 9 */
+ {11, 2, 1} /* 11, 12 */
+ },
+ 3,
+ {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 11, 12, 99}},
+ {
+ "insert 1,0,2",
+ {
+ {5, 3, 1}, /* 5, 6, 7 */
+ {0, 1, 1}, /* 0 */
+ {10, 2, 1} /* 10, 11 */
+ },
+ 3,
+ {0, 5, 6, 7, 10, 11, 99},
+ },
+ {
+ "insert 2,0,1",
+ {
+ {5, 3, 1}, /* 5, 6, 7 */
+ {10, 2, 1}, /* 10, 11 */
+ {0, 1, 1} /* 0 */
+ },
+ 3,
+ {0, 5, 6, 7, 10, 11, 99},
+ },
+ {NULL}};
+ int i;
+ int fails = 0;
+
+ for (i = 0; args[i].name != NULL; i++)
+ fails += ut_tq_test(&args[i]);
+
+ RD_UT_ASSERT(!fails, "See %d previous failure(s)", fails);
+
+ RD_UT_PASS();
+}
+
+/**@}*/
+
+
+/**
+ * @name rd_clock() unittests
+ * @{
+ */
+
+#if RD_UNITTEST_QPC_OVERRIDES
+
+/**
+ * These values are based off a machine with freq 14318180
+ * which would cause the original rd_clock() calculation to overflow
+ * after about 8 days.
+ * Details:
+ * https://github.com/confluentinc/confluent-kafka-dotnet/issues/603#issuecomment-417274540
+ */
+
+static const int64_t rd_ut_qpc_freq = 14318180;
+static int64_t rd_ut_qpc_now;
+
+BOOL rd_ut_QueryPerformanceFrequency(_Out_ LARGE_INTEGER *lpFrequency) {
+ lpFrequency->QuadPart = rd_ut_qpc_freq;
+ return TRUE;
+}
+
+BOOL rd_ut_QueryPerformanceCounter(_Out_ LARGE_INTEGER *lpPerformanceCount) {
+ lpPerformanceCount->QuadPart = rd_ut_qpc_now * rd_ut_qpc_freq;
+ return TRUE;
+}
+
+static int unittest_rdclock(void) {
+ rd_ts_t t1, t2;
+
+ /* First let "uptime" be fresh boot (0). */
+ rd_ut_qpc_now = 0;
+ t1 = rd_clock();
+ rd_ut_qpc_now++;
+ t2 = rd_clock();
+ RD_UT_ASSERT(t2 == t1 + (1 * 1000000),
+ "Expected t2 %" PRId64 " to be 1s more than t1 %" PRId64,
+ t2, t1);
+
+ /* Then skip forward to 8 days, which should trigger the
+ * overflow in a faulty implementation. */
+ rd_ut_qpc_now = 8 * 86400;
+ t2 = rd_clock();
+ RD_UT_ASSERT(t2 == t1 + (8LL * 86400 * 1000000),
+ "Expected t2 %" PRId64
+ " to be 8 days larger than t1 %" PRId64,
+ t2, t1);
+
+ /* And make sure we can run on a system with 38 years of uptime.. */
+ rd_ut_qpc_now = 38 * 365 * 86400;
+ t2 = rd_clock();
+ RD_UT_ASSERT(t2 == t1 + (38LL * 365 * 86400 * 1000000),
+ "Expected t2 %" PRId64
+ " to be 38 years larger than t1 %" PRId64,
+ t2, t1);
+
+ RD_UT_PASS();
+}
+#endif
+
+
+
+/**@}*/
+
+extern int unittest_string(void);
+extern int unittest_cgrp(void);
+#if WITH_SASL_SCRAM
+extern int unittest_scram(void);
+#endif
+extern int unittest_assignors(void);
+extern int unittest_map(void);
+#if WITH_CURL
+extern int unittest_http(void);
+#endif
+#if WITH_OAUTHBEARER_OIDC
+extern int unittest_sasl_oauthbearer_oidc(void);
+#endif
+
+int rd_unittest(void) {
+ int fails = 0;
+ const struct {
+ const char *name;
+ int (*call)(void);
+ } unittests[] = {
+ {"sysqueue", unittest_sysqueue},
+ {"string", unittest_string},
+ {"map", unittest_map},
+ {"rdbuf", unittest_rdbuf},
+ {"rdvarint", unittest_rdvarint},
+ {"crc32c", unittest_rd_crc32c},
+ {"msg", unittest_msg},
+ {"murmurhash", unittest_murmur2},
+ {"fnv1a", unittest_fnv1a},
+#if WITH_HDRHISTOGRAM
+ {"rdhdrhistogram", unittest_rdhdrhistogram},
+#endif
+#ifdef _WIN32
+ {"rdclock", unittest_rdclock},
+#endif
+ {"conf", unittest_conf},
+ {"broker", unittest_broker},
+ {"request", unittest_request},
+#if WITH_SASL_OAUTHBEARER
+ {"sasl_oauthbearer", unittest_sasl_oauthbearer},
+#endif
+ {"aborted_txns", unittest_aborted_txns},
+ {"cgrp", unittest_cgrp},
+#if WITH_SASL_SCRAM
+ {"scram", unittest_scram},
+#endif
+ {"assignors", unittest_assignors},
+#if WITH_CURL
+ {"http", unittest_http},
+#endif
+#if WITH_OAUTHBEARER_OIDC
+ {"sasl_oauthbearer_oidc", unittest_sasl_oauthbearer_oidc},
+#endif
+ {NULL}
+ };
+ int i;
+ const char *match = rd_getenv("RD_UT_TEST", NULL);
+ int cnt = 0;
+
+ if (rd_getenv("RD_UT_ASSERT", NULL))
+ rd_unittest_assert_on_failure = rd_true;
+ if (rd_getenv("CI", NULL)) {
+ RD_UT_SAY("Unittests running on CI");
+ rd_unittest_on_ci = rd_true;
+ }
+
+ if (rd_unittest_on_ci || (ENABLE_DEVEL + 0)) {
+ RD_UT_SAY("Unittests will not error out on slow CPUs");
+ rd_unittest_slow = rd_true;
+ }
+
+ rd_kafka_global_init();
+
+#if ENABLE_CODECOV
+ for (i = 0; i < RD_UT_COVNR_MAX + 1; i++)
+ rd_atomic64_init(&rd_ut_covnrs[i], 0);
+#endif
+
+ for (i = 0; unittests[i].name; i++) {
+ int f;
+
+ if (match && !strstr(unittests[i].name, match))
+ continue;
+
+ f = unittests[i].call();
+ RD_UT_SAY("unittest: %s: %4s\033[0m", unittests[i].name,
+ f ? "\033[31mFAIL" : "\033[32mPASS");
+ fails += f;
+ cnt++;
+ }
+
+#if ENABLE_CODECOV
+#if FIXME /* This check only works if all tests that use coverage checks \
+ * are run, which we can't really know, so disable until we \
+ * know what to do with this. */
+ if (!match) {
+ /* Verify all code paths were covered */
+ int cov_fails = 0;
+ for (i = 0; i < RD_UT_COVNR_MAX + 1; i++) {
+ if (!RD_UT_COVERAGE_CHECK(i))
+ cov_fails++;
+ }
+ if (cov_fails > 0)
+ RD_UT_SAY("%d code coverage failure(s) (ignored)\n",
+ cov_fails);
+ }
+#endif
+#endif
+
+ if (!cnt && match)
+ RD_UT_WARN("No unittests matching \"%s\"", match);
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h
new file mode 100644
index 000000000..a15488568
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdunittest.h
@@ -0,0 +1,230 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RD_UNITTEST_H
+#define _RD_UNITTEST_H
+
+#include <stdio.h>
+
+
+extern rd_bool_t rd_unittest_assert_on_failure;
+extern rd_bool_t rd_unittest_on_ci;
+extern rd_bool_t rd_unittest_slow;
+
+#define ENABLE_CODECOV ENABLE_DEVEL
+
+
+/**
+ * @brief Begin single unit-test function (optional).
+ * Currently only used for logging.
+ */
+#define RD_UT_BEGIN() \
+ fprintf(stderr, "\033[34mRDUT: INFO: %s:%d: %s: BEGIN: \033[0m\n", \
+ __FILE__, __LINE__, __FUNCTION__)
+
+
+/**
+ * @brief Fail the current unit-test function.
+ */
+#define RD_UT_FAIL(...) \
+ do { \
+ fprintf(stderr, "\033[31mRDUT: FAIL: %s:%d: %s: ", __FILE__, \
+ __LINE__, __FUNCTION__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m\n"); \
+ if (rd_unittest_assert_on_failure) \
+ rd_assert(!*"unittest failure"); \
+ return 1; \
+ } while (0)
+
+/**
+ * @brief Pass the current unit-test function
+ */
+#define RD_UT_PASS() \
+ do { \
+ fprintf(stderr, "\033[32mRDUT: PASS: %s:%d: %s\033[0m\n", \
+ __FILE__, __LINE__, __FUNCTION__); \
+ return 0; \
+ } while (0)
+
+/**
+ * @brief Skip the current unit-test function
+ */
+#define RD_UT_SKIP(...) \
+ do { \
+ fprintf(stderr, "\033[33mRDUT: SKIP: %s:%d: %s: ", __FILE__, \
+ __LINE__, __FUNCTION__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m\n"); \
+ return 0; \
+ } while (0)
+
+
+/**
+ * @brief Fail unit-test if \p expr is false
+ */
+#define RD_UT_ASSERT(expr, ...) \
+ do { \
+ if (!(expr)) { \
+ fprintf(stderr, \
+ "\033[31mRDUT: FAIL: %s:%d: %s: " \
+ "assert failed: " #expr ": ", \
+ __FILE__, __LINE__, __FUNCTION__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m\n"); \
+ if (rd_unittest_assert_on_failure) \
+ rd_assert(expr); \
+ return 1; \
+ } \
+ } while (0)
+
+
+/**
+ * @brief Check that value \p V is within inclusive range \p VMIN .. \p VMAX,
+ * else asserts.
+ *
+ * @param VFMT is the printf formatter for \p V's type
+ */
+#define RD_UT_ASSERT_RANGE(V, VMIN, VMAX, VFMT) \
+ RD_UT_ASSERT((VMIN) <= (V) && (VMAX) >= (V), \
+ VFMT " out of range " VFMT " .. " VFMT, (V), (VMIN), \
+ (VMAX))
+
+
+/**
+ * @brief Log something from a unit-test
+ */
+#define RD_UT_SAY(...) \
+ do { \
+ fprintf(stderr, "RDUT: INFO: %s:%d: %s: ", __FILE__, __LINE__, \
+ __FUNCTION__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\n"); \
+ } while (0)
+
+
+/**
+ * @brief Warn about something from a unit-test
+ */
+#define RD_UT_WARN(...) \
+ do { \
+ fprintf(stderr, "\033[33mRDUT: WARN: %s:%d: %s: ", __FILE__, \
+ __LINE__, __FUNCTION__); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m\n"); \
+ } while (0)
+
+
+
+int rd_unittest(void);
+
+
+
+/**
+ * @name Manual code coverage
+ *
+ * The RD_UT_COVERAGE*() set of macros are used to perform manual
+ * code coverage testing.
+ * This provides an alternative to object and state inspection by
+ * instead verifying that certain code paths (typically error paths)
+ * are executed, allowing functional black-box testing on the one part
+ * combined with precise knowledge of code flow on the other part.
+ *
+ * How to use:
+ *
+ * 1. First identify a code path that you want to make sure is executed, such
+ * as a corner error case, increase RD_UT_COVNR_MAX (below) and use the
+ * new max number as the coverage number (COVNR).
+ *
+ * 2. In the code path add RD_UT_COVERAGE(your_covnr).
+ *
+ * 3. Write a unittest case that is supposed to trigger the code path.
+ *
+ * 4. In the unittest, add a call to RD_UT_COVERAGE_CHECK(your_covnr) at the
+ * point where you expect the code path to have executed.
+ *
+ * 5. RD_UT_COVERAGE_CHECK(your_covnr) will fail the current test, but not
+ * return from your test function, so you need to `return 1;` if
+ * RD_UT_COVERAGE_CHECK(your_covnr) returns 0, e.g:
+ *
+ * if (!RD_UT_COVERAGE_CHECK(your_covnr))
+ * return 1; -- failure
+ *
+ * 6. Run the unit tests with `make unit` in tests/.
+ *
+ * 7. If the code path was not executed your test will fail, otherwise pass.
+ *
+ *
+ * Code coverage checks require --enable-devel.
+ *
+ * There is a script in packaging/tools/rdutcoverage.sh that checks that
+ * code coverage numbers are not reused.
+ *
+ * @{
+ */
+
+#if ENABLE_CODECOV
+
+/* @define When adding new code coverages, use the next value and increment
+ * this maximum accordingly. */
+#define RD_UT_COVNR_MAX 1
+
+/**
+ * @brief Register code as covered/executed.
+ */
+#define RD_UT_COVERAGE(COVNR) \
+ rd_ut_coverage(__FILE__, __FUNCTION__, __LINE__, COVNR)
+
+/**
+ * @returns how many times the code was executed.
+ * will fail the unit test (but not return) if code has not
+ * been executed.
+ */
+#define RD_UT_COVERAGE_CHECK(COVNR) \
+ rd_ut_coverage_check(__FILE__, __FUNCTION__, __LINE__, COVNR)
+
+
+void rd_ut_coverage(const char *file, const char *func, int line, int covnr);
+int64_t
+rd_ut_coverage_check(const char *file, const char *func, int line, int covnr);
+
+#else
+
+/* Does nothing if ENABLE_CODECOV is not set */
+#define RD_UT_COVERAGE(COVNR) \
+ do { \
+ } while (0)
+#define RD_UT_COVERAGE_CHECK(COVNR) 1
+
+#endif /* ENABLE_CODECOV */
+
+
+/**@}*/
+
+
+#endif /* _RD_UNITTEST_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c
new file mode 100644
index 000000000..fb0cbd046
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.c
@@ -0,0 +1,134 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rdvarint.h"
+#include "rdunittest.h"
+
+
+static int do_test_rd_uvarint_enc_i64(const char *file,
+ int line,
+ int64_t num,
+ const char *exp,
+ size_t exp_size) {
+ char buf[16] = {0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
+ 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff};
+ size_t sz = rd_uvarint_enc_i64(buf, sizeof(buf), num);
+ size_t r;
+ int ir;
+ rd_buf_t b;
+ rd_slice_t slice, bad_slice;
+ int64_t ret_num;
+
+ if (sz != exp_size || memcmp(buf, exp, exp_size))
+ RD_UT_FAIL("i64 encode of %" PRId64
+ ": "
+ "expected size %" PRIusz " (got %" PRIusz ")\n",
+ num, exp_size, sz);
+
+ /* Verify with standard decoder */
+ r = rd_varint_dec_i64(buf, sz, &ret_num);
+ RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r),
+ "varint decode failed: %" PRIusz, r);
+ RD_UT_ASSERT(ret_num == num,
+ "varint decode returned wrong number: "
+ "%" PRId64 " != %" PRId64,
+ ret_num, num);
+
+ /* Verify with slice decoder */
+ rd_buf_init(&b, 1, 0);
+ rd_buf_push(&b, buf, sizeof(buf), NULL); /* including trailing 0xff
+ * garbage which should be
+ * ignored by decoder */
+ rd_slice_init_full(&slice, &b);
+
+ /* Should fail for incomplete reads */
+ ir = rd_slice_narrow_copy(&slice, &bad_slice, sz - 1);
+ RD_UT_ASSERT(ir, "narrow_copy failed");
+ ret_num = -1;
+ r = rd_slice_read_varint(&bad_slice, &ret_num);
+ RD_UT_ASSERT(RD_UVARINT_DEC_FAILED(r),
+ "varint decode failed should have failed, "
+ "returned %" PRIusz,
+ r);
+ r = rd_slice_offset(&bad_slice);
+ RD_UT_ASSERT(r == 0,
+ "expected slice position to not change, but got %" PRIusz,
+ r);
+
+ /* Verify proper slice */
+ ret_num = -1;
+ r = rd_slice_read_varint(&slice, &ret_num);
+ RD_UT_ASSERT(!RD_UVARINT_DEC_FAILED(r),
+ "varint decode failed: %" PRIusz, r);
+ RD_UT_ASSERT(ret_num == num,
+ "varint decode returned wrong number: "
+ "%" PRId64 " != %" PRId64,
+ ret_num, num);
+ RD_UT_ASSERT(r == sz,
+ "expected varint decoder to read %" PRIusz
+ " bytes, "
+ "not %" PRIusz,
+ sz, r);
+ r = rd_slice_offset(&slice);
+ RD_UT_ASSERT(r == sz,
+ "expected slice position to change to %" PRIusz
+ ", but got %" PRIusz,
+ sz, r);
+
+
+ rd_buf_destroy(&b);
+
+ RD_UT_PASS();
+}
+
+
+int unittest_rdvarint(void) {
+ int fails = 0;
+
+ fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 0,
+ (const char[]) {0}, 1);
+ fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 1,
+ (const char[]) {0x2}, 1);
+ fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -1,
+ (const char[]) {0x1}, 1);
+ fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 23,
+ (const char[]) {0x2e}, 1);
+ fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, -23,
+ (const char[]) {0x2d}, 1);
+ fails += do_test_rd_uvarint_enc_i64(__FILE__, __LINE__, 253,
+ (const char[]) {0xfa, 3}, 2);
+ fails += do_test_rd_uvarint_enc_i64(
+ __FILE__, __LINE__, 1234567890101112,
+ (const char[]) {0xf0, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8);
+ fails += do_test_rd_uvarint_enc_i64(
+ __FILE__, __LINE__, -1234567890101112,
+ (const char[]) {0xef, 0x8d, 0xd3, 0xc8, 0xa7, 0xb5, 0xb1, 0x04}, 8);
+
+ return fails;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h
new file mode 100644
index 000000000..6fe112ba9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdvarint.h
@@ -0,0 +1,165 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2016 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#ifndef _RDVARINT_H
+#define _RDVARINT_H
+
+#include "rd.h"
+#include "rdbuf.h"
+
+/**
+ * @name signed varint zig-zag encoder/decoder
+ * @{
+ *
+ */
+
+/**
+ * @brief unsigned-varint encodes unsigned integer \p num into buffer
+ * at \p dst of size \p dstsize.
+ * @returns the number of bytes written to \p dst, or 0 if not enough space.
+ */
+
+static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_u64(char *dst,
+ size_t dstsize,
+ uint64_t num) {
+ size_t of = 0;
+
+ do {
+ if (unlikely(of >= dstsize))
+ return 0; /* Not enough space */
+
+ dst[of++] = (num & 0x7f) | (num > 0x7f ? 0x80 : 0);
+ num >>= 7;
+ } while (num);
+
+ return of;
+}
+
+/**
+ * @brief encodes a signed integer using zig-zag encoding.
+ * @sa rd_uvarint_enc_u64
+ */
+static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i64(char *dst,
+ size_t dstsize,
+ int64_t num) {
+ return rd_uvarint_enc_u64(dst, dstsize, (num << 1) ^ (num >> 63));
+}
+
+
+static RD_INLINE RD_UNUSED size_t rd_uvarint_enc_i32(char *dst,
+ size_t dstsize,
+ int32_t num) {
+ return rd_uvarint_enc_i64(dst, dstsize, num);
+}
+
+
+
+/**
+ * @brief Use on return value from rd_uvarint_dec() to check if
+ * decoded varint fit the size_t.
+ *
+ * @returns 1 on overflow, else 0.
+ */
+#define RD_UVARINT_OVERFLOW(DEC_RETVAL) (DEC_RETVAL > SIZE_MAX)
+
+/**
+ * @returns 1 if there were not enough bytes to decode the varint, else 0.
+ */
+#define RD_UVARINT_UNDERFLOW(DEC_RETVAL) (DEC_RETVAL == 0)
+
+
+/**
+ * @param DEC_RETVAL the return value from \c rd_uvarint_dec()
+ * @returns 1 if varint decoding failed, else 0.
+ * @warning \p DEC_RETVAL will be evaluated twice.
+ */
+#define RD_UVARINT_DEC_FAILED(DEC_RETVAL) \
+ (RD_UVARINT_UNDERFLOW(DEC_RETVAL) || RD_UVARINT_OVERFLOW(DEC_RETVAL))
+
+
+/**
+ * @brief Decodes the unsigned-varint in buffer \p src of size \p srcsize
+ * and stores the decoded unsigned integer in \p nump.
+ *
+ * @remark Use RD_UVARINT_OVERFLOW(returnvalue) to check if the varint
+ * could not fit \p nump, and RD_UVARINT_UNDERFLOW(returnvalue) to
+ * check if there were not enough bytes available in \p src to
+ * decode the full varint.
+ *
+ * @returns the number of bytes read from \p src.
+ */
+static RD_INLINE RD_UNUSED size_t rd_uvarint_dec(const char *src,
+ size_t srcsize,
+ uint64_t *nump) {
+ size_t of = 0;
+ uint64_t num = 0;
+ int shift = 0;
+
+ do {
+ if (unlikely(srcsize-- == 0))
+ return 0; /* Underflow */
+ num |= (uint64_t)(src[(int)of] & 0x7f) << shift;
+ shift += 7;
+ } while (src[(int)of++] & 0x80);
+
+ *nump = num;
+ return of;
+}
+
+static RD_INLINE RD_UNUSED size_t rd_varint_dec_i64(const char *src,
+ size_t srcsize,
+ int64_t *nump) {
+ uint64_t n;
+ size_t r;
+
+ r = rd_uvarint_dec(src, srcsize, &n);
+ if (likely(!RD_UVARINT_DEC_FAILED(r)))
+ *nump = (int64_t)(n >> 1) ^ -(int64_t)(n & 1);
+
+ return r;
+}
+
+
+/**
+ * @returns the maximum encoded size for a type
+ */
+#define RD_UVARINT_ENC_SIZEOF(TYPE) (sizeof(TYPE) + 1 + (sizeof(TYPE) / 7))
+
+/**
+ * @returns the encoding size of the value 0
+ */
+#define RD_UVARINT_ENC_SIZE_0() ((size_t)1)
+
+
+int unittest_rdvarint(void);
+
+/**@}*/
+
+
+#endif /* _RDVARINT_H */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h
new file mode 100644
index 000000000..73edd41d6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdwin32.h
@@ -0,0 +1,382 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Win32 (Visual Studio) support
+ */
+#ifndef _RDWIN32_H_
+#define _RDWIN32_H_
+
+#include <stdlib.h>
+#include <inttypes.h>
+#include <sys/types.h>
+#include <time.h>
+#include <assert.h>
+
+#define WIN32_MEAN_AND_LEAN
+#include <winsock2.h> /* for sockets + struct timeval */
+#include <io.h>
+#include <fcntl.h>
+
+
+/**
+ * Types
+ */
+#ifndef _SSIZE_T_DEFINED
+#define _SSIZE_T_DEFINED
+typedef SSIZE_T ssize_t;
+#endif
+typedef int socklen_t;
+
+struct iovec {
+ void *iov_base;
+ size_t iov_len;
+};
+
+struct msghdr {
+ struct iovec *msg_iov;
+ int msg_iovlen;
+};
+
+
+/**
+ * Annotations, attributes, optimizers
+ */
+#ifndef likely
+#define likely(x) x
+#endif
+#ifndef unlikely
+#define unlikely(x) x
+#endif
+
+#define RD_UNUSED
+#define RD_INLINE __inline
+#define RD_WARN_UNUSED_RESULT
+#define RD_NORETURN __declspec(noreturn)
+#define RD_IS_CONSTANT(p) (0)
+#ifdef _MSC_VER
+#define RD_TLS __declspec(thread)
+#elif defined(__MINGW32__)
+#define RD_TLS __thread
+#else
+#error Unknown Windows compiler, cannot set RD_TLS (thread-local-storage attribute)
+#endif
+
+
+/**
+ * Allocation
+ */
+#define rd_alloca(N) _alloca(N)
+
+
+/**
+ * Strings, formatting, printf, ..
+ */
+
+/* size_t and ssize_t format strings */
+#define PRIusz "Iu"
+#define PRIdsz "Id"
+
+#ifndef RD_FORMAT
+#define RD_FORMAT(...)
+#endif
+
+static RD_UNUSED RD_INLINE int
+rd_vsnprintf(char *str, size_t size, const char *format, va_list ap) {
+ int cnt = -1;
+
+ if (size != 0)
+ cnt = _vsnprintf_s(str, size, _TRUNCATE, format, ap);
+ if (cnt == -1)
+ cnt = _vscprintf(format, ap);
+
+ return cnt;
+}
+
+static RD_UNUSED RD_INLINE int
+rd_snprintf(char *str, size_t size, const char *format, ...) {
+ int cnt;
+ va_list ap;
+
+ va_start(ap, format);
+ cnt = rd_vsnprintf(str, size, format, ap);
+ va_end(ap);
+
+ return cnt;
+}
+
+
+#define rd_strcasecmp(A, B) _stricmp(A, B)
+#define rd_strncasecmp(A, B, N) _strnicmp(A, B, N)
+/* There is a StrStrIA() but it requires extra linking, so use our own
+ * implementation instead. */
+#define rd_strcasestr(HAYSTACK, NEEDLE) _rd_strcasestr(HAYSTACK, NEEDLE)
+
+
+
+/**
+ * Errors
+ */
+
+/* MSVC:
+ * This is the correct way to set errno on Windows,
+ * but it is still pointless due to different errnos in
+ * in different runtimes:
+ * https://social.msdn.microsoft.com/Forums/vstudio/en-US/b4500c0d-1b69-40c7-9ef5-08da1025b5bf/setting-errno-from-within-a-dll?forum=vclanguage/
+ * errno is thus highly deprecated, and buggy, on Windows
+ * when using librdkafka as a dynamically loaded DLL. */
+#define rd_set_errno(err) _set_errno((err))
+
+static RD_INLINE RD_UNUSED const char *rd_strerror(int err) {
+ static RD_TLS char ret[128];
+
+ strerror_s(ret, sizeof(ret) - 1, err);
+ return ret;
+}
+
+/**
+ * @brief strerror() for Win32 API errors as returned by GetLastError() et.al.
+ */
+static RD_UNUSED char *
+rd_strerror_w32(DWORD errcode, char *dst, size_t dstsize) {
+ char *t;
+ FormatMessageA(FORMAT_MESSAGE_FROM_SYSTEM |
+ FORMAT_MESSAGE_IGNORE_INSERTS,
+ NULL, errcode, MAKELANGID(LANG_NEUTRAL, SUBLANG_DEFAULT),
+ (LPSTR)dst, (DWORD)dstsize - 1, NULL);
+ /* Remove newlines */
+ while ((t = strchr(dst, (int)'\r')) || (t = strchr(dst, (int)'\n')))
+ *t = (char)'.';
+ return dst;
+}
+
+
+/**
+ * Atomics
+ */
+#ifndef __cplusplus
+#include "rdatomic.h"
+#endif
+
+
+/**
+ * Misc
+ */
+
+/**
+ * Microsecond sleep.
+ * 'retry': if true, retry if sleep is interrupted (because of signal)
+ */
+#define rd_usleep(usec, terminate) Sleep((usec) / 1000)
+
+
+/**
+ * @brief gettimeofday() for win32
+ */
+static RD_UNUSED int rd_gettimeofday(struct timeval *tv, struct timezone *tz) {
+ SYSTEMTIME st;
+ FILETIME ft;
+ ULARGE_INTEGER d;
+
+ GetSystemTime(&st);
+ SystemTimeToFileTime(&st, &ft);
+ d.HighPart = ft.dwHighDateTime;
+ d.LowPart = ft.dwLowDateTime;
+ tv->tv_sec = (long)((d.QuadPart - 116444736000000000llu) / 10000000L);
+ tv->tv_usec = (long)(st.wMilliseconds * 1000);
+
+ return 0;
+}
+
+
+#define rd_assert(EXPR) assert(EXPR)
+
+
+static RD_INLINE RD_UNUSED const char *rd_getenv(const char *env,
+ const char *def) {
+ static RD_TLS char tmp[512];
+ DWORD r;
+ r = GetEnvironmentVariableA(env, tmp, sizeof(tmp));
+ if (r == 0 || r > sizeof(tmp))
+ return def;
+ return tmp;
+}
+
+
+/**
+ * Empty struct initializer
+ */
+#define RD_ZERO_INIT \
+ { 0 }
+
+#ifndef __cplusplus
+/**
+ * Sockets, IO
+ */
+
+/** @brief Socket type */
+typedef SOCKET rd_socket_t;
+
+/** @brief Socket API error return value */
+#define RD_SOCKET_ERROR SOCKET_ERROR
+
+/** @brief Last socket error */
+#define rd_socket_errno WSAGetLastError()
+
+/** @brief String representation of socket error */
+static RD_UNUSED const char *rd_socket_strerror(int err) {
+ static RD_TLS char buf[256];
+ rd_strerror_w32(err, buf, sizeof(buf));
+ return buf;
+}
+
+/** @brief WSAPoll() struct type */
+typedef WSAPOLLFD rd_pollfd_t;
+
+/** @brief poll(2) */
+#define rd_socket_poll(POLLFD, FDCNT, TIMEOUT_MS) \
+ WSAPoll(POLLFD, FDCNT, TIMEOUT_MS)
+
+
+/**
+ * @brief Set socket to non-blocking
+ * @returns 0 on success or -1 on failure (see rd_kafka_rd_socket_errno)
+ */
+static RD_UNUSED int rd_fd_set_nonblocking(rd_socket_t fd) {
+ u_long on = 1;
+ if (ioctlsocket(fd, FIONBIO, &on) == SOCKET_ERROR)
+ return (int)WSAGetLastError();
+ return 0;
+}
+
+/**
+ * @brief Create non-blocking pipe
+ * @returns 0 on success or errno on failure
+ */
+static RD_UNUSED int rd_pipe_nonblocking(rd_socket_t *fds) {
+ /* On windows, the "pipe" will be a tcp connection.
+ * This is to allow WSAPoll to be used to poll pipe events */
+
+ SOCKET listen_s = INVALID_SOCKET;
+ SOCKET accept_s = INVALID_SOCKET;
+ SOCKET connect_s = INVALID_SOCKET;
+
+ struct sockaddr_in listen_addr;
+ struct sockaddr_in connect_addr;
+ socklen_t sock_len = 0;
+ int bufsz;
+
+ /* Create listen socket */
+ listen_s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (listen_s == INVALID_SOCKET)
+ goto err;
+
+ listen_addr.sin_family = AF_INET;
+ listen_addr.sin_addr.s_addr = ntohl(INADDR_LOOPBACK);
+ listen_addr.sin_port = 0;
+ if (bind(listen_s, (struct sockaddr *)&listen_addr,
+ sizeof(listen_addr)) != 0)
+ goto err;
+
+ sock_len = sizeof(connect_addr);
+ if (getsockname(listen_s, (struct sockaddr *)&connect_addr,
+ &sock_len) != 0)
+ goto err;
+
+ if (listen(listen_s, 1) != 0)
+ goto err;
+
+ /* Create connection socket */
+ connect_s = socket(AF_INET, SOCK_STREAM, IPPROTO_TCP);
+ if (connect_s == INVALID_SOCKET)
+ goto err;
+
+ if (connect(connect_s, (struct sockaddr *)&connect_addr,
+ sizeof(connect_addr)) == SOCKET_ERROR)
+ goto err;
+
+ /* Wait for incoming connection */
+ accept_s = accept(listen_s, NULL, NULL);
+ if (accept_s == SOCKET_ERROR)
+ goto err;
+
+ /* Done with listening */
+ closesocket(listen_s);
+
+ if (rd_fd_set_nonblocking(accept_s) != 0)
+ goto err;
+
+ if (rd_fd_set_nonblocking(connect_s) != 0)
+ goto err;
+
+ /* Minimize buffer sizes to avoid a large number
+ * of signaling bytes to accumulate when
+ * io-signalled queue is not being served for a while. */
+ bufsz = 100;
+ setsockopt(accept_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz,
+ sizeof(bufsz));
+ bufsz = 100;
+ setsockopt(accept_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz,
+ sizeof(bufsz));
+ bufsz = 100;
+ setsockopt(connect_s, SOL_SOCKET, SO_SNDBUF, (const char *)&bufsz,
+ sizeof(bufsz));
+ bufsz = 100;
+ setsockopt(connect_s, SOL_SOCKET, SO_RCVBUF, (const char *)&bufsz,
+ sizeof(bufsz));
+
+ /* Store resulting sockets.
+ * They are bidirectional, so it does not matter which is read or
+ * write side of pipe. */
+ fds[0] = accept_s;
+ fds[1] = connect_s;
+ return 0;
+
+err:
+ if (listen_s != INVALID_SOCKET)
+ closesocket(listen_s);
+ if (accept_s != INVALID_SOCKET)
+ closesocket(accept_s);
+ if (connect_s != INVALID_SOCKET)
+ closesocket(connect_s);
+ return -1;
+}
+
+/* Socket IO */
+#define rd_socket_read(fd, buf, sz) recv(fd, buf, sz, 0)
+#define rd_socket_write(fd, buf, sz) send(fd, buf, sz, 0)
+#define rd_socket_close(fd) closesocket(fd)
+
+/* File IO */
+#define rd_write(fd, buf, sz) _write(fd, buf, sz)
+#define rd_open(path, flags, mode) _open(path, flags, mode)
+#define rd_close(fd) _close(fd)
+
+#endif /* !__cplusplus*/
+
+#endif /* _RDWIN32_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c b/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c
new file mode 100644
index 000000000..89f7c8cf4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.c
@@ -0,0 +1,1187 @@
+/*
+ * xxHash - Fast Hash algorithm
+ * Copyright (C) 2012-2016, Yann Collet
+ *
+ * BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ * You can contact the author at :
+ * - xxHash homepage: http://www.xxhash.com
+ * - xxHash source repository : https://github.com/Cyan4973/xxHash
+ */
+
+
+/* *************************************
+ * Tuning parameters
+ ***************************************/
+/*!XXH_FORCE_MEMORY_ACCESS :
+ * By default, access to unaligned memory is controlled by `memcpy()`, which is
+ * safe and portable. Unfortunately, on some target/compiler combinations, the
+ * generated assembly is sub-optimal. The below switch allow to select different
+ * access method for improved performance. Method 0 (default) : use `memcpy()`.
+ * Safe and portable. Method 1 : `__packed` statement. It depends on compiler
+ * extension (ie, not portable). This method is safe if your compiler supports
+ * it, and *generally* as fast or faster than `memcpy`. Method 2 : direct
+ * access. This method doesn't depend on compiler but violate C standard. It can
+ * generate buggy code on targets which do not support unaligned memory
+ * accesses. But in some circumstances, it's the only known way to get the most
+ * performance (ie GCC + ARMv6) See http://stackoverflow.com/a/32095106/646947
+ * for details. Prefer these methods in priority order (0 > 1 > 2)
+ */
+#ifndef XXH_FORCE_MEMORY_ACCESS /* can be defined externally, on command line \
+ for example */
+#if defined(__GNUC__) && \
+ (defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6J__) || \
+ defined(__ARM_ARCH_6K__) || defined(__ARM_ARCH_6Z__) || \
+ defined(__ARM_ARCH_6ZK__) || defined(__ARM_ARCH_6T2__))
+#define XXH_FORCE_MEMORY_ACCESS 2
+#elif (defined(__INTEL_COMPILER) && !defined(_WIN32)) || \
+ (defined(__GNUC__) && \
+ (defined(__ARM_ARCH_7__) || defined(__ARM_ARCH_7A__) || \
+ defined(__ARM_ARCH_7R__) || defined(__ARM_ARCH_7M__) || \
+ defined(__ARM_ARCH_7S__)))
+#define XXH_FORCE_MEMORY_ACCESS 1
+#endif
+#endif
+
+/*!XXH_ACCEPT_NULL_INPUT_POINTER :
+ * If input pointer is NULL, xxHash default behavior is to dereference it,
+ * triggering a segfault. When this macro is enabled, xxHash actively checks
+ * input for null pointer. It it is, result for null input pointers is the same
+ * as a null-length input.
+ */
+#ifndef XXH_ACCEPT_NULL_INPUT_POINTER /* can be defined externally */
+#define XXH_ACCEPT_NULL_INPUT_POINTER 0
+#endif
+
+/*!XXH_FORCE_NATIVE_FORMAT :
+ * By default, xxHash library provides endian-independent Hash values, based on
+ * little-endian convention. Results are therefore identical for little-endian
+ * and big-endian CPU. This comes at a performance cost for big-endian CPU,
+ * since some swapping is required to emulate little-endian format. Should
+ * endian-independence be of no importance for your application, you may set the
+ * #define below to 1, to improve speed for Big-endian CPU. This option has no
+ * impact on Little_Endian CPU.
+ */
+#ifndef XXH_FORCE_NATIVE_FORMAT /* can be defined externally */
+#define XXH_FORCE_NATIVE_FORMAT 0
+#endif
+
+/*!XXH_FORCE_ALIGN_CHECK :
+ * This is a minor performance trick, only useful with lots of very small keys.
+ * It means : check for aligned/unaligned input.
+ * The check costs one initial branch per hash;
+ * set it to 0 when the input is guaranteed to be aligned,
+ * or when alignment doesn't matter for performance.
+ */
+#ifndef XXH_FORCE_ALIGN_CHECK /* can be defined externally */
+#if defined(__i386) || defined(_M_IX86) || defined(__x86_64__) || \
+ defined(_M_X64)
+#define XXH_FORCE_ALIGN_CHECK 0
+#else
+#define XXH_FORCE_ALIGN_CHECK 1
+#endif
+#endif
+
+
+/* *************************************
+ * Includes & Memory related functions
+ ***************************************/
+/*! Modify the local functions below should you wish to use some other memory
+ * routines for malloc(), free() */
+#include "rd.h"
+static void *XXH_malloc(size_t s) {
+ return rd_malloc(s);
+}
+static void XXH_free(void *p) {
+ rd_free(p);
+}
+/*! and for memcpy() */
+#include <string.h>
+static void *XXH_memcpy(void *dest, const void *src, size_t size) {
+ return memcpy(dest, src, size);
+}
+
+#include <assert.h> /* assert */
+
+#define XXH_STATIC_LINKING_ONLY
+#include "rdxxhash.h"
+
+
+/* *************************************
+ * Compiler Specific Options
+ ***************************************/
+#ifdef _MSC_VER /* Visual Studio */
+#pragma warning( \
+ disable : 4127) /* disable: C4127: conditional expression is constant */
+#define FORCE_INLINE static __forceinline
+#else
+#if defined(__cplusplus) || \
+ defined(__STDC_VERSION__) && __STDC_VERSION__ >= 199901L /* C99 */
+#ifdef __GNUC__
+#define FORCE_INLINE static inline __attribute__((always_inline))
+#else
+#define FORCE_INLINE static inline
+#endif
+#else
+#define FORCE_INLINE static
+#endif /* __STDC_VERSION__ */
+#endif
+
+
+/* *************************************
+ * Basic Types
+ ***************************************/
+#ifndef MEM_MODULE
+#if !defined(__VMS) && \
+ (defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
+#include <stdint.h>
+typedef uint8_t BYTE;
+typedef uint16_t U16;
+typedef uint32_t U32;
+#else
+typedef unsigned char BYTE;
+typedef unsigned short U16;
+typedef unsigned int U32;
+#endif
+#endif
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))
+
+/* Force direct memory access. Only works on CPU which support unaligned memory
+ * access in hardware */
+static U32 XXH_read32(const void *memPtr) {
+ return *(const U32 *)memPtr;
+}
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially
+ * problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union {
+ U32 u32;
+} __attribute__((packed)) unalign;
+static U32 XXH_read32(const void *ptr) {
+ return ((const unalign *)ptr)->u32;
+}
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+static U32 XXH_read32(const void *memPtr) {
+ U32 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+
+/* ****************************************
+ * Compiler-specific Functions and Macros
+ ******************************************/
+#define XXH_GCC_VERSION (__GNUC__ * 100 + __GNUC_MINOR__)
+
+/* Note : although _rotl exists for minGW (GCC under windows), performance seems
+ * poor */
+#if defined(_MSC_VER)
+#define XXH_rotl32(x, r) _rotl(x, r)
+#define XXH_rotl64(x, r) _rotl64(x, r)
+#else
+#define XXH_rotl32(x, r) ((x << r) | (x >> (32 - r)))
+#define XXH_rotl64(x, r) ((x << r) | (x >> (64 - r)))
+#endif
+
+#if defined(_MSC_VER) /* Visual Studio */
+#define XXH_swap32 _byteswap_ulong
+#elif XXH_GCC_VERSION >= 403
+#define XXH_swap32 __builtin_bswap32
+#else
+static U32 XXH_swap32(U32 x) {
+ return ((x << 24) & 0xff000000) | ((x << 8) & 0x00ff0000) |
+ ((x >> 8) & 0x0000ff00) | ((x >> 24) & 0x000000ff);
+}
+#endif
+
+
+/* *************************************
+ * Architecture Macros
+ ***************************************/
+typedef enum { XXH_bigEndian = 0, XXH_littleEndian = 1 } XXH_endianess;
+
+/* XXH_CPU_LITTLE_ENDIAN can be defined externally, for example on the compiler
+ * command line */
+#ifndef XXH_CPU_LITTLE_ENDIAN
+static int XXH_isLittleEndian(void) {
+ const union {
+ U32 u;
+ BYTE c[4];
+ } one = {1}; /* don't use static : performance detrimental */
+ return one.c[0];
+}
+#define XXH_CPU_LITTLE_ENDIAN XXH_isLittleEndian()
+#endif
+
+
+/* ***************************
+ * Memory reads
+ *****************************/
+typedef enum { XXH_aligned, XXH_unaligned } XXH_alignment;
+
+FORCE_INLINE U32 XXH_readLE32_align(const void *ptr,
+ XXH_endianess endian,
+ XXH_alignment align) {
+ if (align == XXH_unaligned)
+ return endian == XXH_littleEndian ? XXH_read32(ptr)
+ : XXH_swap32(XXH_read32(ptr));
+ else
+ return endian == XXH_littleEndian
+ ? *(const U32 *)ptr
+ : XXH_swap32(*(const U32 *)ptr);
+}
+
+FORCE_INLINE U32 XXH_readLE32(const void *ptr, XXH_endianess endian) {
+ return XXH_readLE32_align(ptr, endian, XXH_unaligned);
+}
+
+static U32 XXH_readBE32(const void *ptr) {
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap32(XXH_read32(ptr))
+ : XXH_read32(ptr);
+}
+
+
+/* *************************************
+ * Macros
+ ***************************************/
+#define XXH_STATIC_ASSERT(c) \
+ { \
+ enum { XXH_sa = 1 / (int)(!!(c)) }; \
+ } /* use after variable declarations */
+XXH_PUBLIC_API unsigned XXH_versionNumber(void) {
+ return XXH_VERSION_NUMBER;
+}
+
+
+/* *******************************************************************
+ * 32-bit hash functions
+ *********************************************************************/
+static const U32 PRIME32_1 = 2654435761U;
+static const U32 PRIME32_2 = 2246822519U;
+static const U32 PRIME32_3 = 3266489917U;
+static const U32 PRIME32_4 = 668265263U;
+static const U32 PRIME32_5 = 374761393U;
+
+static U32 XXH32_round(U32 seed, U32 input) {
+ seed += input * PRIME32_2;
+ seed = XXH_rotl32(seed, 13);
+ seed *= PRIME32_1;
+ return seed;
+}
+
+/* mix all bits */
+static U32 XXH32_avalanche(U32 h32) {
+ h32 ^= h32 >> 15;
+ h32 *= PRIME32_2;
+ h32 ^= h32 >> 13;
+ h32 *= PRIME32_3;
+ h32 ^= h32 >> 16;
+ return (h32);
+}
+
+#define XXH_get32bits(p) XXH_readLE32_align(p, endian, align)
+
+static U32 XXH32_finalize(U32 h32,
+ const void *ptr,
+ size_t len,
+ XXH_endianess endian,
+ XXH_alignment align)
+
+{
+ const BYTE *p = (const BYTE *)ptr;
+
+#define PROCESS1 \
+ h32 += (*p++) * PRIME32_5; \
+ h32 = XXH_rotl32(h32, 11) * PRIME32_1;
+
+#define PROCESS4 \
+ h32 += XXH_get32bits(p) * PRIME32_3; \
+ p += 4; \
+ h32 = XXH_rotl32(h32, 17) * PRIME32_4;
+
+ switch (len & 15) /* or switch(bEnd - p) */
+ {
+ case 12:
+ PROCESS4;
+ /* fallthrough */
+ case 8:
+ PROCESS4;
+ /* fallthrough */
+ case 4:
+ PROCESS4;
+ return XXH32_avalanche(h32);
+
+ case 13:
+ PROCESS4;
+ /* fallthrough */
+ case 9:
+ PROCESS4;
+ /* fallthrough */
+ case 5:
+ PROCESS4;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 14:
+ PROCESS4;
+ /* fallthrough */
+ case 10:
+ PROCESS4;
+ /* fallthrough */
+ case 6:
+ PROCESS4;
+ PROCESS1;
+ PROCESS1;
+ return XXH32_avalanche(h32);
+
+ case 15:
+ PROCESS4;
+ /* fallthrough */
+ case 11:
+ PROCESS4;
+ /* fallthrough */
+ case 7:
+ PROCESS4;
+ /* fallthrough */
+ case 3:
+ PROCESS1;
+ /* fallthrough */
+ case 2:
+ PROCESS1;
+ /* fallthrough */
+ case 1:
+ PROCESS1;
+ /* fallthrough */
+ case 0:
+ return XXH32_avalanche(h32);
+ }
+ assert(0);
+ return h32; /* reaching this point is deemed impossible */
+}
+
+
+FORCE_INLINE U32 XXH32_endian_align(const void *input,
+ size_t len,
+ U32 seed,
+ XXH_endianess endian,
+ XXH_alignment align) {
+ const BYTE *p = (const BYTE *)input;
+ const BYTE *bEnd = p + len;
+ U32 h32;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
+ (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+ if (p == NULL) {
+ len = 0;
+ bEnd = p = (const BYTE *)(size_t)16;
+ }
+#endif
+
+ if (len >= 16) {
+ const BYTE *const limit = bEnd - 15;
+ U32 v1 = seed + PRIME32_1 + PRIME32_2;
+ U32 v2 = seed + PRIME32_2;
+ U32 v3 = seed + 0;
+ U32 v4 = seed - PRIME32_1;
+
+ do {
+ v1 = XXH32_round(v1, XXH_get32bits(p));
+ p += 4;
+ v2 = XXH32_round(v2, XXH_get32bits(p));
+ p += 4;
+ v3 = XXH32_round(v3, XXH_get32bits(p));
+ p += 4;
+ v4 = XXH32_round(v4, XXH_get32bits(p));
+ p += 4;
+ } while (p < limit);
+
+ h32 = XXH_rotl32(v1, 1) + XXH_rotl32(v2, 7) +
+ XXH_rotl32(v3, 12) + XXH_rotl32(v4, 18);
+ } else {
+ h32 = seed + PRIME32_5;
+ }
+
+ h32 += (U32)len;
+
+ return XXH32_finalize(h32, p, len & 15, endian, align);
+}
+
+
+XXH_PUBLIC_API unsigned int
+XXH32(const void *input, size_t len, unsigned int seed) {
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH32_state_t state;
+ XXH32_reset(&state, seed);
+ XXH32_update(&state, input, len);
+ return XXH32_digest(&state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 3) ==
+ 0) { /* Input is 4-bytes aligned, leverage the speed benefit
+ */
+ if ((endian_detected == XXH_littleEndian) ||
+ XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed,
+ XXH_littleEndian,
+ XXH_aligned);
+ else
+ return XXH32_endian_align(input, len, seed,
+ XXH_bigEndian,
+ XXH_aligned);
+ }
+ }
+
+ if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_endian_align(input, len, seed, XXH_littleEndian,
+ XXH_unaligned);
+ else
+ return XXH32_endian_align(input, len, seed, XXH_bigEndian,
+ XXH_unaligned);
+#endif
+}
+
+
+
+/*====== Hash streaming ======*/
+
+XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void) {
+ return (XXH32_state_t *)XXH_malloc(sizeof(XXH32_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr) {
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dstState,
+ const XXH32_state_t *srcState) {
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr,
+ unsigned int seed) {
+ XXH32_state_t state; /* using a local state to memcpy() in order to
+ avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME32_1 + PRIME32_2;
+ state.v2 = seed + PRIME32_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME32_1;
+ /* do not write into reserved, planned to be removed in a future version
+ */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ return XXH_OK;
+}
+
+
+FORCE_INLINE XXH_errorcode XXH32_update_endian(XXH32_state_t *state,
+ const void *input,
+ size_t len,
+ XXH_endianess endian) {
+ if (input == NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
+ (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ {
+ const BYTE *p = (const BYTE *)input;
+ const BYTE *const bEnd = p + len;
+
+ state->total_len_32 += (unsigned)len;
+ state->large_len |= (len >= 16) | (state->total_len_32 >= 16);
+
+ if (state->memsize + len < 16) { /* fill in tmp buffer */
+ XXH_memcpy((BYTE *)(state->mem32) + state->memsize,
+ input, len);
+ state->memsize += (unsigned)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* some data left from previous update */
+ XXH_memcpy((BYTE *)(state->mem32) + state->memsize,
+ input, 16 - state->memsize);
+ {
+ const U32 *p32 = state->mem32;
+ state->v1 = XXH32_round(
+ state->v1, XXH_readLE32(p32, endian));
+ p32++;
+ state->v2 = XXH32_round(
+ state->v2, XXH_readLE32(p32, endian));
+ p32++;
+ state->v3 = XXH32_round(
+ state->v3, XXH_readLE32(p32, endian));
+ p32++;
+ state->v4 = XXH32_round(
+ state->v4, XXH_readLE32(p32, endian));
+ }
+ p += 16 - state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p <= bEnd - 16) {
+ const BYTE *const limit = bEnd - 16;
+ U32 v1 = state->v1;
+ U32 v2 = state->v2;
+ U32 v3 = state->v3;
+ U32 v4 = state->v4;
+
+ do {
+ v1 = XXH32_round(v1, XXH_readLE32(p, endian));
+ p += 4;
+ v2 = XXH32_round(v2, XXH_readLE32(p, endian));
+ p += 4;
+ v3 = XXH32_round(v3, XXH_readLE32(p, endian));
+ p += 4;
+ v4 = XXH32_round(v4, XXH_readLE32(p, endian));
+ p += 4;
+ } while (p <= limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem32, p, (size_t)(bEnd - p));
+ state->memsize = (unsigned)(bEnd - p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+
+XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *state_in,
+ const void *input,
+ size_t len) {
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_update_endian(state_in, input, len,
+ XXH_littleEndian);
+ else
+ return XXH32_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+
+FORCE_INLINE U32 XXH32_digest_endian(const XXH32_state_t *state,
+ XXH_endianess endian) {
+ U32 h32;
+
+ if (state->large_len) {
+ h32 = XXH_rotl32(state->v1, 1) + XXH_rotl32(state->v2, 7) +
+ XXH_rotl32(state->v3, 12) + XXH_rotl32(state->v4, 18);
+ } else {
+ h32 = state->v3 /* == seed */ + PRIME32_5;
+ }
+
+ h32 += state->total_len_32;
+
+ return XXH32_finalize(h32, state->mem32, state->memsize, endian,
+ XXH_aligned);
+}
+
+
+XXH_PUBLIC_API unsigned int XXH32_digest(const XXH32_state_t *state_in) {
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH32_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH32_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*====== Canonical representation ======*/
+
+/*! Default XXH result types are basic unsigned 32 and 64 bits.
+ * The canonical representation follows human-readable write convention, aka
+ * big-endian (large digits first). These functions allow transformation of hash
+ * result into and from its canonical format. This way, hash values can be
+ * written into a file or buffer, remaining comparable across different systems.
+ */
+
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
+ XXH32_hash_t hash) {
+ XXH_STATIC_ASSERT(sizeof(XXH32_canonical_t) == sizeof(XXH32_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN)
+ hash = XXH_swap32(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH32_hash_t
+XXH32_hashFromCanonical(const XXH32_canonical_t *src) {
+ return XXH_readBE32(src);
+}
+
+
+#ifndef XXH_NO_LONG_LONG
+
+/* *******************************************************************
+ * 64-bit hash functions
+ *********************************************************************/
+
+/*====== Memory access ======*/
+
+#ifndef MEM_MODULE
+#define MEM_MODULE
+#if !defined(__VMS) && \
+ (defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
+#include <stdint.h>
+typedef uint64_t U64;
+#else
+/* if compiler doesn't support unsigned long long, replace by another 64-bit
+ * type */
+typedef unsigned long long U64;
+#endif
+#endif
+
+
+#if (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 2))
+
+/* Force direct memory access. Only works on CPU which support unaligned memory
+ * access in hardware */
+static U64 XXH_read64(const void *memPtr) {
+ return *(const U64 *)memPtr;
+}
+
+#elif (defined(XXH_FORCE_MEMORY_ACCESS) && (XXH_FORCE_MEMORY_ACCESS == 1))
+
+/* __pack instructions are safer, but compiler specific, hence potentially
+ * problematic for some compilers */
+/* currently only defined for gcc and icc */
+typedef union {
+ U32 u32;
+ U64 u64;
+} __attribute__((packed)) unalign64;
+static U64 XXH_read64(const void *ptr) {
+ return ((const unalign64 *)ptr)->u64;
+}
+
+#else
+
+/* portable and safe solution. Generally efficient.
+ * see : http://stackoverflow.com/a/32095106/646947
+ */
+
+static U64 XXH_read64(const void *memPtr) {
+ U64 val;
+ memcpy(&val, memPtr, sizeof(val));
+ return val;
+}
+
+#endif /* XXH_FORCE_DIRECT_MEMORY_ACCESS */
+
+#if defined(_MSC_VER) /* Visual Studio */
+#define XXH_swap64 _byteswap_uint64
+#elif XXH_GCC_VERSION >= 403
+#define XXH_swap64 __builtin_bswap64
+#else
+static U64 XXH_swap64(U64 x) {
+ return ((x << 56) & 0xff00000000000000ULL) |
+ ((x << 40) & 0x00ff000000000000ULL) |
+ ((x << 24) & 0x0000ff0000000000ULL) |
+ ((x << 8) & 0x000000ff00000000ULL) |
+ ((x >> 8) & 0x00000000ff000000ULL) |
+ ((x >> 24) & 0x0000000000ff0000ULL) |
+ ((x >> 40) & 0x000000000000ff00ULL) |
+ ((x >> 56) & 0x00000000000000ffULL);
+}
+#endif
+
+FORCE_INLINE U64 XXH_readLE64_align(const void *ptr,
+ XXH_endianess endian,
+ XXH_alignment align) {
+ if (align == XXH_unaligned)
+ return endian == XXH_littleEndian ? XXH_read64(ptr)
+ : XXH_swap64(XXH_read64(ptr));
+ else
+ return endian == XXH_littleEndian
+ ? *(const U64 *)ptr
+ : XXH_swap64(*(const U64 *)ptr);
+}
+
+FORCE_INLINE U64 XXH_readLE64(const void *ptr, XXH_endianess endian) {
+ return XXH_readLE64_align(ptr, endian, XXH_unaligned);
+}
+
+static U64 XXH_readBE64(const void *ptr) {
+ return XXH_CPU_LITTLE_ENDIAN ? XXH_swap64(XXH_read64(ptr))
+ : XXH_read64(ptr);
+}
+
+
+/*====== xxh64 ======*/
+
+static const U64 PRIME64_1 = 11400714785074694791ULL;
+static const U64 PRIME64_2 = 14029467366897019727ULL;
+static const U64 PRIME64_3 = 1609587929392839161ULL;
+static const U64 PRIME64_4 = 9650029242287828579ULL;
+static const U64 PRIME64_5 = 2870177450012600261ULL;
+
+static U64 XXH64_round(U64 acc, U64 input) {
+ acc += input * PRIME64_2;
+ acc = XXH_rotl64(acc, 31);
+ acc *= PRIME64_1;
+ return acc;
+}
+
+static U64 XXH64_mergeRound(U64 acc, U64 val) {
+ val = XXH64_round(0, val);
+ acc ^= val;
+ acc = acc * PRIME64_1 + PRIME64_4;
+ return acc;
+}
+
+static U64 XXH64_avalanche(U64 h64) {
+ h64 ^= h64 >> 33;
+ h64 *= PRIME64_2;
+ h64 ^= h64 >> 29;
+ h64 *= PRIME64_3;
+ h64 ^= h64 >> 32;
+ return h64;
+}
+
+
+#define XXH_get64bits(p) XXH_readLE64_align(p, endian, align)
+
+static U64 XXH64_finalize(U64 h64,
+ const void *ptr,
+ size_t len,
+ XXH_endianess endian,
+ XXH_alignment align) {
+ const BYTE *p = (const BYTE *)ptr;
+
+#define PROCESS1_64 \
+ h64 ^= (*p++) * PRIME64_5; \
+ h64 = XXH_rotl64(h64, 11) * PRIME64_1;
+
+#define PROCESS4_64 \
+ h64 ^= (U64)(XXH_get32bits(p)) * PRIME64_1; \
+ p += 4; \
+ h64 = XXH_rotl64(h64, 23) * PRIME64_2 + PRIME64_3;
+
+#define PROCESS8_64 \
+ { \
+ U64 const k1 = XXH64_round(0, XXH_get64bits(p)); \
+ p += 8; \
+ h64 ^= k1; \
+ h64 = XXH_rotl64(h64, 27) * PRIME64_1 + PRIME64_4; \
+ }
+
+ switch (len & 31) {
+ case 24:
+ PROCESS8_64;
+ /* fallthrough */
+ case 16:
+ PROCESS8_64;
+ /* fallthrough */
+ case 8:
+ PROCESS8_64;
+ return XXH64_avalanche(h64);
+
+ case 28:
+ PROCESS8_64;
+ /* fallthrough */
+ case 20:
+ PROCESS8_64;
+ /* fallthrough */
+ case 12:
+ PROCESS8_64;
+ /* fallthrough */
+ case 4:
+ PROCESS4_64;
+ return XXH64_avalanche(h64);
+
+ case 25:
+ PROCESS8_64;
+ /* fallthrough */
+ case 17:
+ PROCESS8_64;
+ /* fallthrough */
+ case 9:
+ PROCESS8_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 29:
+ PROCESS8_64;
+ /* fallthrough */
+ case 21:
+ PROCESS8_64;
+ /* fallthrough */
+ case 13:
+ PROCESS8_64;
+ /* fallthrough */
+ case 5:
+ PROCESS4_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 26:
+ PROCESS8_64;
+ /* fallthrough */
+ case 18:
+ PROCESS8_64;
+ /* fallthrough */
+ case 10:
+ PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 30:
+ PROCESS8_64;
+ /* fallthrough */
+ case 22:
+ PROCESS8_64;
+ /* fallthrough */
+ case 14:
+ PROCESS8_64;
+ /* fallthrough */
+ case 6:
+ PROCESS4_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 27:
+ PROCESS8_64;
+ /* fallthrough */
+ case 19:
+ PROCESS8_64;
+ /* fallthrough */
+ case 11:
+ PROCESS8_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ PROCESS1_64;
+ return XXH64_avalanche(h64);
+
+ case 31:
+ PROCESS8_64;
+ /* fallthrough */
+ case 23:
+ PROCESS8_64;
+ /* fallthrough */
+ case 15:
+ PROCESS8_64;
+ /* fallthrough */
+ case 7:
+ PROCESS4_64;
+ /* fallthrough */
+ case 3:
+ PROCESS1_64;
+ /* fallthrough */
+ case 2:
+ PROCESS1_64;
+ /* fallthrough */
+ case 1:
+ PROCESS1_64;
+ /* fallthrough */
+ case 0:
+ return XXH64_avalanche(h64);
+ }
+
+ /* impossible to reach */
+ assert(0);
+ return 0; /* unreachable, but some compilers complain without it */
+}
+
+FORCE_INLINE U64 XXH64_endian_align(const void *input,
+ size_t len,
+ U64 seed,
+ XXH_endianess endian,
+ XXH_alignment align) {
+ const BYTE *p = (const BYTE *)input;
+ const BYTE *bEnd = p + len;
+ U64 h64;
+
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
+ (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+ if (p == NULL) {
+ len = 0;
+ bEnd = p = (const BYTE *)(size_t)32;
+ }
+#endif
+
+ if (len >= 32) {
+ const BYTE *const limit = bEnd - 32;
+ U64 v1 = seed + PRIME64_1 + PRIME64_2;
+ U64 v2 = seed + PRIME64_2;
+ U64 v3 = seed + 0;
+ U64 v4 = seed - PRIME64_1;
+
+ do {
+ v1 = XXH64_round(v1, XXH_get64bits(p));
+ p += 8;
+ v2 = XXH64_round(v2, XXH_get64bits(p));
+ p += 8;
+ v3 = XXH64_round(v3, XXH_get64bits(p));
+ p += 8;
+ v4 = XXH64_round(v4, XXH_get64bits(p));
+ p += 8;
+ } while (p <= limit);
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) +
+ XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+
+ } else {
+ h64 = seed + PRIME64_5;
+ }
+
+ h64 += (U64)len;
+
+ return XXH64_finalize(h64, p, len, endian, align);
+}
+
+
+XXH_PUBLIC_API unsigned long long
+XXH64(const void *input, size_t len, unsigned long long seed) {
+#if 0
+ /* Simple version, good for code maintenance, but unfortunately slow for small inputs */
+ XXH64_state_t state;
+ XXH64_reset(&state, seed);
+ XXH64_update(&state, input, len);
+ return XXH64_digest(&state);
+#else
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if (XXH_FORCE_ALIGN_CHECK) {
+ if ((((size_t)input) & 7) ==
+ 0) { /* Input is aligned, let's leverage the speed advantage
+ */
+ if ((endian_detected == XXH_littleEndian) ||
+ XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed,
+ XXH_littleEndian,
+ XXH_aligned);
+ else
+ return XXH64_endian_align(input, len, seed,
+ XXH_bigEndian,
+ XXH_aligned);
+ }
+ }
+
+ if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_endian_align(input, len, seed, XXH_littleEndian,
+ XXH_unaligned);
+ else
+ return XXH64_endian_align(input, len, seed, XXH_bigEndian,
+ XXH_unaligned);
+#endif
+}
+
+/*====== Hash Streaming ======*/
+
+XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void) {
+ return (XXH64_state_t *)XXH_malloc(sizeof(XXH64_state_t));
+}
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr) {
+ XXH_free(statePtr);
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dstState,
+ const XXH64_state_t *srcState) {
+ memcpy(dstState, srcState, sizeof(*dstState));
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr,
+ unsigned long long seed) {
+ XXH64_state_t state; /* using a local state to memcpy() in order to
+ avoid strict-aliasing warnings */
+ memset(&state, 0, sizeof(state));
+ state.v1 = seed + PRIME64_1 + PRIME64_2;
+ state.v2 = seed + PRIME64_2;
+ state.v3 = seed + 0;
+ state.v4 = seed - PRIME64_1;
+ /* do not write into reserved, planned to be removed in a future version
+ */
+ memcpy(statePtr, &state, sizeof(state) - sizeof(state.reserved));
+ return XXH_OK;
+}
+
+FORCE_INLINE XXH_errorcode XXH64_update_endian(XXH64_state_t *state,
+ const void *input,
+ size_t len,
+ XXH_endianess endian) {
+ if (input == NULL)
+#if defined(XXH_ACCEPT_NULL_INPUT_POINTER) && \
+ (XXH_ACCEPT_NULL_INPUT_POINTER >= 1)
+ return XXH_OK;
+#else
+ return XXH_ERROR;
+#endif
+
+ {
+ const BYTE *p = (const BYTE *)input;
+ const BYTE *const bEnd = p + len;
+
+ state->total_len += len;
+
+ if (state->memsize + len < 32) { /* fill in tmp buffer */
+ XXH_memcpy(((BYTE *)state->mem64) + state->memsize,
+ input, len);
+ state->memsize += (U32)len;
+ return XXH_OK;
+ }
+
+ if (state->memsize) { /* tmp buffer is full */
+ XXH_memcpy(((BYTE *)state->mem64) + state->memsize,
+ input, 32 - state->memsize);
+ state->v1 = XXH64_round(
+ state->v1, XXH_readLE64(state->mem64 + 0, endian));
+ state->v2 = XXH64_round(
+ state->v2, XXH_readLE64(state->mem64 + 1, endian));
+ state->v3 = XXH64_round(
+ state->v3, XXH_readLE64(state->mem64 + 2, endian));
+ state->v4 = XXH64_round(
+ state->v4, XXH_readLE64(state->mem64 + 3, endian));
+ p += 32 - state->memsize;
+ state->memsize = 0;
+ }
+
+ if (p + 32 <= bEnd) {
+ const BYTE *const limit = bEnd - 32;
+ U64 v1 = state->v1;
+ U64 v2 = state->v2;
+ U64 v3 = state->v3;
+ U64 v4 = state->v4;
+
+ do {
+ v1 = XXH64_round(v1, XXH_readLE64(p, endian));
+ p += 8;
+ v2 = XXH64_round(v2, XXH_readLE64(p, endian));
+ p += 8;
+ v3 = XXH64_round(v3, XXH_readLE64(p, endian));
+ p += 8;
+ v4 = XXH64_round(v4, XXH_readLE64(p, endian));
+ p += 8;
+ } while (p <= limit);
+
+ state->v1 = v1;
+ state->v2 = v2;
+ state->v3 = v3;
+ state->v4 = v4;
+ }
+
+ if (p < bEnd) {
+ XXH_memcpy(state->mem64, p, (size_t)(bEnd - p));
+ state->memsize = (unsigned)(bEnd - p);
+ }
+ }
+
+ return XXH_OK;
+}
+
+XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *state_in,
+ const void *input,
+ size_t len) {
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_update_endian(state_in, input, len,
+ XXH_littleEndian);
+ else
+ return XXH64_update_endian(state_in, input, len, XXH_bigEndian);
+}
+
+FORCE_INLINE U64 XXH64_digest_endian(const XXH64_state_t *state,
+ XXH_endianess endian) {
+ U64 h64;
+
+ if (state->total_len >= 32) {
+ U64 const v1 = state->v1;
+ U64 const v2 = state->v2;
+ U64 const v3 = state->v3;
+ U64 const v4 = state->v4;
+
+ h64 = XXH_rotl64(v1, 1) + XXH_rotl64(v2, 7) +
+ XXH_rotl64(v3, 12) + XXH_rotl64(v4, 18);
+ h64 = XXH64_mergeRound(h64, v1);
+ h64 = XXH64_mergeRound(h64, v2);
+ h64 = XXH64_mergeRound(h64, v3);
+ h64 = XXH64_mergeRound(h64, v4);
+ } else {
+ h64 = state->v3 /*seed*/ + PRIME64_5;
+ }
+
+ h64 += (U64)state->total_len;
+
+ return XXH64_finalize(h64, state->mem64, (size_t)state->total_len,
+ endian, XXH_aligned);
+}
+
+XXH_PUBLIC_API unsigned long long XXH64_digest(const XXH64_state_t *state_in) {
+ XXH_endianess endian_detected = (XXH_endianess)XXH_CPU_LITTLE_ENDIAN;
+
+ if ((endian_detected == XXH_littleEndian) || XXH_FORCE_NATIVE_FORMAT)
+ return XXH64_digest_endian(state_in, XXH_littleEndian);
+ else
+ return XXH64_digest_endian(state_in, XXH_bigEndian);
+}
+
+
+/*====== Canonical representation ======*/
+
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst,
+ XXH64_hash_t hash) {
+ XXH_STATIC_ASSERT(sizeof(XXH64_canonical_t) == sizeof(XXH64_hash_t));
+ if (XXH_CPU_LITTLE_ENDIAN)
+ hash = XXH_swap64(hash);
+ memcpy(dst, &hash, sizeof(*dst));
+}
+
+XXH_PUBLIC_API XXH64_hash_t
+XXH64_hashFromCanonical(const XXH64_canonical_t *src) {
+ return XXH_readBE64(src);
+}
+
+#endif /* XXH_NO_LONG_LONG */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h b/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h
new file mode 100644
index 000000000..1dad7a111
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/rdxxhash.h
@@ -0,0 +1,372 @@
+/*
+ xxHash - Extremely Fast Hash algorithm
+ Header File
+ Copyright (C) 2012-2016, Yann Collet.
+
+ BSD 2-Clause License (http://www.opensource.org/licenses/bsd-license.php)
+
+ Redistribution and use in source and binary forms, with or without
+ modification, are permitted provided that the following conditions are
+ met:
+
+ * Redistributions of source code must retain the above copyright
+ notice, this list of conditions and the following disclaimer.
+ * Redistributions in binary form must reproduce the above
+ copyright notice, this list of conditions and the following disclaimer
+ in the documentation and/or other materials provided with the
+ distribution.
+
+ THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+
+ You can contact the author at :
+ - xxHash source repository : https://github.com/Cyan4973/xxHash
+*/
+
+/* Notice extracted from xxHash homepage :
+
+xxHash is an extremely fast Hash algorithm, running at RAM speed limits.
+It also successfully passes all tests from the SMHasher suite.
+
+Comparison (single thread, Windows Seven 32 bits, using SMHasher on a Core 2 Duo
+@3GHz)
+
+Name Speed Q.Score Author
+xxHash 5.4 GB/s 10
+CrapWow 3.2 GB/s 2 Andrew
+MumurHash 3a 2.7 GB/s 10 Austin Appleby
+SpookyHash 2.0 GB/s 10 Bob Jenkins
+SBox 1.4 GB/s 9 Bret Mulvey
+Lookup3 1.2 GB/s 9 Bob Jenkins
+SuperFastHash 1.2 GB/s 1 Paul Hsieh
+CityHash64 1.05 GB/s 10 Pike & Alakuijala
+FNV 0.55 GB/s 5 Fowler, Noll, Vo
+CRC32 0.43 GB/s 9
+MD5-32 0.33 GB/s 10 Ronald L. Rivest
+SHA1-32 0.28 GB/s 10
+
+Q.Score is a measure of quality of the hash function.
+It depends on successfully passing SMHasher test set.
+10 is a perfect score.
+
+A 64-bit version, named XXH64, is available since r35.
+It offers much better speed, but for 64-bit applications only.
+Name Speed on 64 bits Speed on 32 bits
+XXH64 13.8 GB/s 1.9 GB/s
+XXH32 6.8 GB/s 6.0 GB/s
+*/
+
+#ifndef XXHASH_H_5627135585666179
+#define XXHASH_H_5627135585666179 1
+
+#if defined(__cplusplus)
+extern "C" {
+#endif
+
+
+/* ****************************
+ * Definitions
+ ******************************/
+#include <stddef.h> /* size_t */
+typedef enum { XXH_OK = 0, XXH_ERROR } XXH_errorcode;
+
+
+/* ****************************
+ * API modifier
+ ******************************/
+/** XXH_INLINE_ALL (and XXH_PRIVATE_API)
+ * This is useful to include xxhash functions in `static` mode
+ * in order to inline them, and remove their symbol from the public list.
+ * Inlining can offer dramatic performance improvement on small keys.
+ * Methodology :
+ * #define XXH_INLINE_ALL
+ * #include "xxhash.h"
+ * `xxhash.c` is automatically included.
+ * It's not useful to compile and link it as a separate module.
+ */
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+#ifndef XXH_STATIC_LINKING_ONLY
+#define XXH_STATIC_LINKING_ONLY
+#endif
+#if defined(__GNUC__)
+#define XXH_PUBLIC_API static __inline __attribute__((unused))
+#elif defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */)
+#define XXH_PUBLIC_API static inline
+#elif defined(_MSC_VER)
+#define XXH_PUBLIC_API static __inline
+#else
+/* this version may generate warnings for unused static functions */
+#define XXH_PUBLIC_API static
+#endif
+#else
+#define XXH_PUBLIC_API /* do nothing */
+#endif /* XXH_INLINE_ALL || XXH_PRIVATE_API */
+
+/*! XXH_NAMESPACE, aka Namespace Emulation :
+ *
+ * If you want to include _and expose_ xxHash functions from within your own
+ * library, but also want to avoid symbol collisions with other libraries which
+ * may also include xxHash,
+ *
+ * you can use XXH_NAMESPACE, to automatically prefix any public symbol from
+ * xxhash library with the value of XXH_NAMESPACE (therefore, avoid NULL and
+ * numeric values).
+ *
+ * Note that no change is required within the calling program as long as it
+ * includes `xxhash.h` : regular symbol name will be automatically translated by
+ * this header.
+ */
+#ifdef XXH_NAMESPACE
+#define XXH_CAT(A, B) A##B
+#define XXH_NAME2(A, B) XXH_CAT(A, B)
+#define XXH_versionNumber XXH_NAME2(XXH_NAMESPACE, XXH_versionNumber)
+#define XXH32 XXH_NAME2(XXH_NAMESPACE, XXH32)
+#define XXH32_createState XXH_NAME2(XXH_NAMESPACE, XXH32_createState)
+#define XXH32_freeState XXH_NAME2(XXH_NAMESPACE, XXH32_freeState)
+#define XXH32_reset XXH_NAME2(XXH_NAMESPACE, XXH32_reset)
+#define XXH32_update XXH_NAME2(XXH_NAMESPACE, XXH32_update)
+#define XXH32_digest XXH_NAME2(XXH_NAMESPACE, XXH32_digest)
+#define XXH32_copyState XXH_NAME2(XXH_NAMESPACE, XXH32_copyState)
+#define XXH32_canonicalFromHash \
+ XXH_NAME2(XXH_NAMESPACE, XXH32_canonicalFromHash)
+#define XXH32_hashFromCanonical \
+ XXH_NAME2(XXH_NAMESPACE, XXH32_hashFromCanonical)
+#define XXH64 XXH_NAME2(XXH_NAMESPACE, XXH64)
+#define XXH64_createState XXH_NAME2(XXH_NAMESPACE, XXH64_createState)
+#define XXH64_freeState XXH_NAME2(XXH_NAMESPACE, XXH64_freeState)
+#define XXH64_reset XXH_NAME2(XXH_NAMESPACE, XXH64_reset)
+#define XXH64_update XXH_NAME2(XXH_NAMESPACE, XXH64_update)
+#define XXH64_digest XXH_NAME2(XXH_NAMESPACE, XXH64_digest)
+#define XXH64_copyState XXH_NAME2(XXH_NAMESPACE, XXH64_copyState)
+#define XXH64_canonicalFromHash \
+ XXH_NAME2(XXH_NAMESPACE, XXH64_canonicalFromHash)
+#define XXH64_hashFromCanonical \
+ XXH_NAME2(XXH_NAMESPACE, XXH64_hashFromCanonical)
+#endif
+
+
+/* *************************************
+ * Version
+ ***************************************/
+#define XXH_VERSION_MAJOR 0
+#define XXH_VERSION_MINOR 6
+#define XXH_VERSION_RELEASE 5
+#define XXH_VERSION_NUMBER \
+ (XXH_VERSION_MAJOR * 100 * 100 + XXH_VERSION_MINOR * 100 + \
+ XXH_VERSION_RELEASE)
+XXH_PUBLIC_API unsigned XXH_versionNumber(void);
+
+
+/*-**********************************************************************
+ * 32-bit hash
+ ************************************************************************/
+typedef unsigned int XXH32_hash_t;
+
+/*! XXH32() :
+ Calculate the 32-bit hash of sequence "length" bytes stored at memory
+ address "input". The memory between input & input+length must be valid
+ (allocated and read-accessible). "seed" can be used to alter the result
+ predictably.
+ Speed on Core 2 Duo @ 3 GHz (single thread, SMHasher benchmark) : 5.4 GB/s
+ */
+XXH_PUBLIC_API XXH32_hash_t XXH32(const void *input,
+ size_t length,
+ unsigned int seed);
+
+/*====== Streaming ======*/
+typedef struct XXH32_state_s XXH32_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH32_state_t *XXH32_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH32_freeState(XXH32_state_t *statePtr);
+XXH_PUBLIC_API void XXH32_copyState(XXH32_state_t *dst_state,
+ const XXH32_state_t *src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH32_reset(XXH32_state_t *statePtr,
+ unsigned int seed);
+XXH_PUBLIC_API XXH_errorcode XXH32_update(XXH32_state_t *statePtr,
+ const void *input,
+ size_t length);
+XXH_PUBLIC_API XXH32_hash_t XXH32_digest(const XXH32_state_t *statePtr);
+
+/*
+ * Streaming functions generate the xxHash of an input provided in multiple
+ * segments. Note that, for small input, they are slower than single-call
+ * functions, due to state management. For small inputs, prefer `XXH32()` and
+ * `XXH64()`, which are better optimized.
+ *
+ * XXH state must first be allocated, using XXH*_createState() .
+ *
+ * Start a new hash by initializing state with a seed, using XXH*_reset().
+ *
+ * Then, feed the hash state by calling XXH*_update() as many times as
+ * necessary. The function returns an error code, with 0 meaning OK, and any
+ * other value meaning there is an error.
+ *
+ * Finally, a hash value can be produced anytime, by using XXH*_digest().
+ * This function returns the nn-bits hash as an int or long long.
+ *
+ * It's still possible to continue inserting input into the hash state after a
+ * digest, and generate some new hashes later on, by calling again
+ * XXH*_digest().
+ *
+ * When done, free XXH state space if it was allocated dynamically.
+ */
+
+/*====== Canonical representation ======*/
+
+typedef struct {
+ unsigned char digest[4];
+} XXH32_canonical_t;
+XXH_PUBLIC_API void XXH32_canonicalFromHash(XXH32_canonical_t *dst,
+ XXH32_hash_t hash);
+XXH_PUBLIC_API XXH32_hash_t
+XXH32_hashFromCanonical(const XXH32_canonical_t *src);
+
+/* Default result type for XXH functions are primitive unsigned 32 and 64 bits.
+ * The canonical representation uses human-readable write convention, aka
+ * big-endian (large digits first). These functions allow transformation of hash
+ * result into and from its canonical format. This way, hash values can be
+ * written into a file / memory, and remain comparable on different systems and
+ * programs.
+ */
+
+
+#ifndef XXH_NO_LONG_LONG
+/*-**********************************************************************
+ * 64-bit hash
+ ************************************************************************/
+typedef unsigned long long XXH64_hash_t;
+
+/*! XXH64() :
+ Calculate the 64-bit hash of sequence of length "len" stored at memory
+ address "input". "seed" can be used to alter the result predictably. This
+ function runs faster on 64-bit systems, but slower on 32-bit systems (see
+ benchmark).
+*/
+XXH_PUBLIC_API XXH64_hash_t XXH64(const void *input,
+ size_t length,
+ unsigned long long seed);
+
+/*====== Streaming ======*/
+typedef struct XXH64_state_s XXH64_state_t; /* incomplete type */
+XXH_PUBLIC_API XXH64_state_t *XXH64_createState(void);
+XXH_PUBLIC_API XXH_errorcode XXH64_freeState(XXH64_state_t *statePtr);
+XXH_PUBLIC_API void XXH64_copyState(XXH64_state_t *dst_state,
+ const XXH64_state_t *src_state);
+
+XXH_PUBLIC_API XXH_errorcode XXH64_reset(XXH64_state_t *statePtr,
+ unsigned long long seed);
+XXH_PUBLIC_API XXH_errorcode XXH64_update(XXH64_state_t *statePtr,
+ const void *input,
+ size_t length);
+XXH_PUBLIC_API XXH64_hash_t XXH64_digest(const XXH64_state_t *statePtr);
+
+/*====== Canonical representation ======*/
+typedef struct {
+ unsigned char digest[8];
+} XXH64_canonical_t;
+XXH_PUBLIC_API void XXH64_canonicalFromHash(XXH64_canonical_t *dst,
+ XXH64_hash_t hash);
+XXH_PUBLIC_API XXH64_hash_t
+XXH64_hashFromCanonical(const XXH64_canonical_t *src);
+#endif /* XXH_NO_LONG_LONG */
+
+
+
+#ifdef XXH_STATIC_LINKING_ONLY
+
+/* ================================================================================================
+ This section contains declarations which are not guaranteed to remain stable.
+ They may change in future versions, becoming incompatible with a different
+version of the library. These declarations should only be used with static
+linking. Never use them in association with dynamic linking !
+===================================================================================================
+*/
+
+/* These definitions are only present to allow
+ * static allocation of XXH state, on stack or in a struct for example.
+ * Never **ever** use members directly. */
+
+#if !defined(__VMS) && \
+ (defined(__cplusplus) || \
+ (defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 199901L) /* C99 */))
+#include <stdint.h>
+
+struct XXH32_state_s {
+ uint32_t total_len_32;
+ uint32_t large_len;
+ uint32_t v1;
+ uint32_t v2;
+ uint32_t v3;
+ uint32_t v4;
+ uint32_t mem32[4];
+ uint32_t memsize;
+ uint32_t reserved; /* never read nor write, might be removed in a future
+ version */
+}; /* typedef'd to XXH32_state_t */
+
+struct XXH64_state_s {
+ uint64_t total_len;
+ uint64_t v1;
+ uint64_t v2;
+ uint64_t v3;
+ uint64_t v4;
+ uint64_t mem64[4];
+ uint32_t memsize;
+ uint32_t reserved[2]; /* never read nor write, might be removed in a
+ future version */
+}; /* typedef'd to XXH64_state_t */
+
+#else
+
+struct XXH32_state_s {
+ unsigned total_len_32;
+ unsigned large_len;
+ unsigned v1;
+ unsigned v2;
+ unsigned v3;
+ unsigned v4;
+ unsigned mem32[4];
+ unsigned memsize;
+ unsigned reserved; /* never read nor write, might be removed in a future
+ version */
+}; /* typedef'd to XXH32_state_t */
+
+#ifndef XXH_NO_LONG_LONG /* remove 64-bit support */
+struct XXH64_state_s {
+ unsigned long long total_len;
+ unsigned long long v1;
+ unsigned long long v2;
+ unsigned long long v3;
+ unsigned long long v4;
+ unsigned long long mem64[4];
+ unsigned memsize;
+ unsigned reserved[2]; /* never read nor write, might be removed in a
+ future version */
+}; /* typedef'd to XXH64_state_t */
+#endif
+
+#endif
+
+
+#if defined(XXH_INLINE_ALL) || defined(XXH_PRIVATE_API)
+#include "rdxxhash.c" /* include xxhash function bodies as `static`, for inlining */
+#endif
+
+#endif /* XXH_STATIC_LINKING_ONLY */
+
+
+#if defined(__cplusplus)
+}
+#endif
+
+#endif /* XXHASH_H_5627135585666179 */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c b/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c
new file mode 100644
index 000000000..603546c47
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/regexp.c
@@ -0,0 +1,1347 @@
+/**
+ * Copyright: public domain
+ *
+ * From https://github.com/ccxvii/minilibs sha
+ * 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684:
+ *
+ * These libraries are in the public domain (or the equivalent where that is not
+ * possible). You can do anything you want with them. You have no legal
+ * obligation to do anything else, although I appreciate attribution.
+ */
+
+#include "rd.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <setjmp.h>
+#include <stdio.h>
+
+#include "regexp.h"
+
+#define nelem(a) (sizeof(a) / sizeof(a)[0])
+
+typedef unsigned int Rune;
+
+static int isalpharune(Rune c) {
+ /* TODO: Add unicode support */
+ return (c >= 'A' && c <= 'Z') || (c >= 'a' && c <= 'z');
+}
+
+static Rune toupperrune(Rune c) {
+ /* TODO: Add unicode support */
+ if (c >= 'a' && c <= 'z')
+ return c - 'a' + 'A';
+ return c;
+}
+
+static int chartorune(Rune *r, const char *s) {
+ /* TODO: Add UTF-8 decoding */
+ *r = *s;
+ return 1;
+}
+
+#define REPINF 255
+#define MAXTHREAD 1000
+#define MAXSUB REG_MAXSUB
+
+typedef struct Reclass Reclass;
+typedef struct Renode Renode;
+typedef struct Reinst Reinst;
+typedef struct Rethread Rethread;
+typedef struct Restate Restate;
+
+struct Reclass {
+ Rune *end;
+ Rune spans[64];
+};
+
+struct Restate {
+ Reprog *prog;
+ Renode *pstart, *pend;
+
+ const char *source;
+ unsigned int ncclass;
+ unsigned int nsub;
+ Renode *sub[MAXSUB];
+
+ int lookahead;
+ Rune yychar;
+ Reclass *yycc;
+ int yymin, yymax;
+
+ const char *error;
+ jmp_buf kaboom;
+};
+
+struct Reprog {
+ Reinst *start, *end;
+ int flags;
+ unsigned int nsub;
+ Reclass cclass[16];
+ Restate g; /**< Upstream has this as a global variable */
+};
+
+static void die(Restate *g, const char *message) {
+ g->error = message;
+ longjmp(g->kaboom, 1);
+}
+
+static Rune canon(Rune c) {
+ Rune u = toupperrune(c);
+ if (c >= 128 && u < 128)
+ return c;
+ return u;
+}
+
+/* Scan */
+
+enum { L_CHAR = 256,
+ L_CCLASS, /* character class */
+ L_NCCLASS, /* negative character class */
+ L_NC, /* "(?:" no capture */
+ L_PLA, /* "(?=" positive lookahead */
+ L_NLA, /* "(?!" negative lookahead */
+ L_WORD, /* "\b" word boundary */
+ L_NWORD, /* "\B" non-word boundary */
+ L_REF, /* "\1" back-reference */
+ L_COUNT /* {M,N} */
+};
+
+static int hex(Restate *g, int c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ if (c >= 'a' && c <= 'f')
+ return c - 'a' + 0xA;
+ if (c >= 'A' && c <= 'F')
+ return c - 'A' + 0xA;
+ die(g, "invalid escape sequence");
+ return 0;
+}
+
+static int dec(Restate *g, int c) {
+ if (c >= '0' && c <= '9')
+ return c - '0';
+ die(g, "invalid quantifier");
+ return 0;
+}
+
+#define ESCAPES "BbDdSsWw^$\\.*+?()[]{}|0123456789"
+
+static int nextrune(Restate *g) {
+ g->source += chartorune(&g->yychar, g->source);
+ if (g->yychar == '\\') {
+ g->source += chartorune(&g->yychar, g->source);
+ switch (g->yychar) {
+ case 0:
+ die(g, "unterminated escape sequence");
+ case 'f':
+ g->yychar = '\f';
+ return 0;
+ case 'n':
+ g->yychar = '\n';
+ return 0;
+ case 'r':
+ g->yychar = '\r';
+ return 0;
+ case 't':
+ g->yychar = '\t';
+ return 0;
+ case 'v':
+ g->yychar = '\v';
+ return 0;
+ case 'c':
+ g->yychar = (*g->source++) & 31;
+ return 0;
+ case 'x':
+ g->yychar = hex(g, *g->source++) << 4;
+ g->yychar += hex(g, *g->source++);
+ if (g->yychar == 0) {
+ g->yychar = '0';
+ return 1;
+ }
+ return 0;
+ case 'u':
+ g->yychar = hex(g, *g->source++) << 12;
+ g->yychar += hex(g, *g->source++) << 8;
+ g->yychar += hex(g, *g->source++) << 4;
+ g->yychar += hex(g, *g->source++);
+ if (g->yychar == 0) {
+ g->yychar = '0';
+ return 1;
+ }
+ return 0;
+ }
+ if (strchr(ESCAPES, g->yychar))
+ return 1;
+ if (isalpharune(g->yychar) ||
+ g->yychar == '_') /* check identity escape */
+ die(g, "invalid escape character");
+ return 0;
+ }
+ return 0;
+}
+
+static int lexcount(Restate *g) {
+ g->yychar = *g->source++;
+
+ g->yymin = dec(g, g->yychar);
+ g->yychar = *g->source++;
+ while (g->yychar != ',' && g->yychar != '}') {
+ g->yymin = g->yymin * 10 + dec(g, g->yychar);
+ g->yychar = *g->source++;
+ }
+ if (g->yymin >= REPINF)
+ die(g, "numeric overflow");
+
+ if (g->yychar == ',') {
+ g->yychar = *g->source++;
+ if (g->yychar == '}') {
+ g->yymax = REPINF;
+ } else {
+ g->yymax = dec(g, g->yychar);
+ g->yychar = *g->source++;
+ while (g->yychar != '}') {
+ g->yymax = g->yymax * 10 + dec(g, g->yychar);
+ g->yychar = *g->source++;
+ }
+ if (g->yymax >= REPINF)
+ die(g, "numeric overflow");
+ }
+ } else {
+ g->yymax = g->yymin;
+ }
+
+ return L_COUNT;
+}
+
+static void newcclass(Restate *g) {
+ if (g->ncclass >= nelem(g->prog->cclass))
+ die(g, "too many character classes");
+ g->yycc = g->prog->cclass + g->ncclass++;
+ g->yycc->end = g->yycc->spans;
+}
+
+static void addrange(Restate *g, Rune a, Rune b) {
+ if (a > b)
+ die(g, "invalid character class range");
+ if (g->yycc->end + 2 == g->yycc->spans + nelem(g->yycc->spans))
+ die(g, "too many character class ranges");
+ *g->yycc->end++ = a;
+ *g->yycc->end++ = b;
+}
+
+static void addranges_d(Restate *g) {
+ addrange(g, '0', '9');
+}
+
+static void addranges_D(Restate *g) {
+ addrange(g, 0, '0' - 1);
+ addrange(g, '9' + 1, 0xFFFF);
+}
+
+static void addranges_s(Restate *g) {
+ addrange(g, 0x9, 0x9);
+ addrange(g, 0xA, 0xD);
+ addrange(g, 0x20, 0x20);
+ addrange(g, 0xA0, 0xA0);
+ addrange(g, 0x2028, 0x2029);
+ addrange(g, 0xFEFF, 0xFEFF);
+}
+
+static void addranges_S(Restate *g) {
+ addrange(g, 0, 0x9 - 1);
+ addrange(g, 0x9 + 1, 0xA - 1);
+ addrange(g, 0xD + 1, 0x20 - 1);
+ addrange(g, 0x20 + 1, 0xA0 - 1);
+ addrange(g, 0xA0 + 1, 0x2028 - 1);
+ addrange(g, 0x2029 + 1, 0xFEFF - 1);
+ addrange(g, 0xFEFF + 1, 0xFFFF);
+}
+
+static void addranges_w(Restate *g) {
+ addrange(g, '0', '9');
+ addrange(g, 'A', 'Z');
+ addrange(g, '_', '_');
+ addrange(g, 'a', 'z');
+}
+
+static void addranges_W(Restate *g) {
+ addrange(g, 0, '0' - 1);
+ addrange(g, '9' + 1, 'A' - 1);
+ addrange(g, 'Z' + 1, '_' - 1);
+ addrange(g, '_' + 1, 'a' - 1);
+ addrange(g, 'z' + 1, 0xFFFF);
+}
+
+static int lexclass(Restate *g) {
+ int type = L_CCLASS;
+ int quoted, havesave, havedash;
+ Rune save = 0;
+
+ newcclass(g);
+
+ quoted = nextrune(g);
+ if (!quoted && g->yychar == '^') {
+ type = L_NCCLASS;
+ quoted = nextrune(g);
+ }
+
+ havesave = havedash = 0;
+ for (;;) {
+ if (g->yychar == 0)
+ die(g, "unterminated character class");
+ if (!quoted && g->yychar == ']')
+ break;
+
+ if (!quoted && g->yychar == '-') {
+ if (havesave) {
+ if (havedash) {
+ addrange(g, save, '-');
+ havesave = havedash = 0;
+ } else {
+ havedash = 1;
+ }
+ } else {
+ save = '-';
+ havesave = 1;
+ }
+ } else if (quoted && strchr("DSWdsw", g->yychar)) {
+ if (havesave) {
+ addrange(g, save, save);
+ if (havedash)
+ addrange(g, '-', '-');
+ }
+ switch (g->yychar) {
+ case 'd':
+ addranges_d(g);
+ break;
+ case 's':
+ addranges_s(g);
+ break;
+ case 'w':
+ addranges_w(g);
+ break;
+ case 'D':
+ addranges_D(g);
+ break;
+ case 'S':
+ addranges_S(g);
+ break;
+ case 'W':
+ addranges_W(g);
+ break;
+ }
+ havesave = havedash = 0;
+ } else {
+ if (quoted) {
+ if (g->yychar == 'b')
+ g->yychar = '\b';
+ else if (g->yychar == '0')
+ g->yychar = 0;
+ /* else identity escape */
+ }
+ if (havesave) {
+ if (havedash) {
+ addrange(g, save, g->yychar);
+ havesave = havedash = 0;
+ } else {
+ addrange(g, save, save);
+ save = g->yychar;
+ }
+ } else {
+ save = g->yychar;
+ havesave = 1;
+ }
+ }
+
+ quoted = nextrune(g);
+ }
+
+ if (havesave) {
+ addrange(g, save, save);
+ if (havedash)
+ addrange(g, '-', '-');
+ }
+
+ return type;
+}
+
+static int lex(Restate *g) {
+ int quoted = nextrune(g);
+ if (quoted) {
+ switch (g->yychar) {
+ case 'b':
+ return L_WORD;
+ case 'B':
+ return L_NWORD;
+ case 'd':
+ newcclass(g);
+ addranges_d(g);
+ return L_CCLASS;
+ case 's':
+ newcclass(g);
+ addranges_s(g);
+ return L_CCLASS;
+ case 'w':
+ newcclass(g);
+ addranges_w(g);
+ return L_CCLASS;
+ case 'D':
+ newcclass(g);
+ addranges_d(g);
+ return L_NCCLASS;
+ case 'S':
+ newcclass(g);
+ addranges_s(g);
+ return L_NCCLASS;
+ case 'W':
+ newcclass(g);
+ addranges_w(g);
+ return L_NCCLASS;
+ case '0':
+ g->yychar = 0;
+ return L_CHAR;
+ }
+ if (g->yychar >= '0' && g->yychar <= '9') {
+ g->yychar -= '0';
+ if (*g->source >= '0' && *g->source <= '9')
+ g->yychar = g->yychar * 10 + *g->source++ - '0';
+ return L_REF;
+ }
+ return L_CHAR;
+ }
+
+ switch (g->yychar) {
+ case 0:
+ case '$':
+ case ')':
+ case '*':
+ case '+':
+ case '.':
+ case '?':
+ case '^':
+ case '|':
+ return g->yychar;
+ }
+
+ if (g->yychar == '{')
+ return lexcount(g);
+ if (g->yychar == '[')
+ return lexclass(g);
+ if (g->yychar == '(') {
+ if (g->source[0] == '?') {
+ if (g->source[1] == ':') {
+ g->source += 2;
+ return L_NC;
+ }
+ if (g->source[1] == '=') {
+ g->source += 2;
+ return L_PLA;
+ }
+ if (g->source[1] == '!') {
+ g->source += 2;
+ return L_NLA;
+ }
+ }
+ return '(';
+ }
+
+ return L_CHAR;
+}
+
+/* Parse */
+
+enum { P_CAT,
+ P_ALT,
+ P_REP,
+ P_BOL,
+ P_EOL,
+ P_WORD,
+ P_NWORD,
+ P_PAR,
+ P_PLA,
+ P_NLA,
+ P_ANY,
+ P_CHAR,
+ P_CCLASS,
+ P_NCCLASS,
+ P_REF };
+
+struct Renode {
+ unsigned char type;
+ unsigned char ng, m, n;
+ Rune c;
+ Reclass *cc;
+ Renode *x;
+ Renode *y;
+};
+
+static Renode *newnode(Restate *g, int type) {
+ Renode *node = g->pend++;
+ node->type = type;
+ node->cc = NULL;
+ node->c = 0;
+ node->ng = 0;
+ node->m = 0;
+ node->n = 0;
+ node->x = node->y = NULL;
+ return node;
+}
+
+static int empty(Renode *node) {
+ if (!node)
+ return 1;
+ switch (node->type) {
+ default:
+ return 1;
+ case P_CAT:
+ return empty(node->x) && empty(node->y);
+ case P_ALT:
+ return empty(node->x) || empty(node->y);
+ case P_REP:
+ return empty(node->x) || node->m == 0;
+ case P_PAR:
+ return empty(node->x);
+ case P_REF:
+ return empty(node->x);
+ case P_ANY:
+ case P_CHAR:
+ case P_CCLASS:
+ case P_NCCLASS:
+ return 0;
+ }
+}
+
+static Renode *newrep(Restate *g, Renode *atom, int ng, int min, int max) {
+ Renode *rep = newnode(g, P_REP);
+ if (max == REPINF && empty(atom))
+ die(g, "infinite loop matching the empty string");
+ rep->ng = ng;
+ rep->m = min;
+ rep->n = max;
+ rep->x = atom;
+ return rep;
+}
+
+static void next(Restate *g) {
+ g->lookahead = lex(g);
+}
+
+static int re_accept(Restate *g, int t) {
+ if (g->lookahead == t) {
+ next(g);
+ return 1;
+ }
+ return 0;
+}
+
+static Renode *parsealt(Restate *g);
+
+static Renode *parseatom(Restate *g) {
+ Renode *atom;
+ if (g->lookahead == L_CHAR) {
+ atom = newnode(g, P_CHAR);
+ atom->c = g->yychar;
+ next(g);
+ return atom;
+ }
+ if (g->lookahead == L_CCLASS) {
+ atom = newnode(g, P_CCLASS);
+ atom->cc = g->yycc;
+ next(g);
+ return atom;
+ }
+ if (g->lookahead == L_NCCLASS) {
+ atom = newnode(g, P_NCCLASS);
+ atom->cc = g->yycc;
+ next(g);
+ return atom;
+ }
+ if (g->lookahead == L_REF) {
+ atom = newnode(g, P_REF);
+ if (g->yychar == 0 || g->yychar > g->nsub || !g->sub[g->yychar])
+ die(g, "invalid back-reference");
+ atom->n = g->yychar;
+ atom->x = g->sub[g->yychar];
+ next(g);
+ return atom;
+ }
+ if (re_accept(g, '.'))
+ return newnode(g, P_ANY);
+ if (re_accept(g, '(')) {
+ atom = newnode(g, P_PAR);
+ if (g->nsub == MAXSUB)
+ die(g, "too many captures");
+ atom->n = g->nsub++;
+ atom->x = parsealt(g);
+ g->sub[atom->n] = atom;
+ if (!re_accept(g, ')'))
+ die(g, "unmatched '('");
+ return atom;
+ }
+ if (re_accept(g, L_NC)) {
+ atom = parsealt(g);
+ if (!re_accept(g, ')'))
+ die(g, "unmatched '('");
+ return atom;
+ }
+ if (re_accept(g, L_PLA)) {
+ atom = newnode(g, P_PLA);
+ atom->x = parsealt(g);
+ if (!re_accept(g, ')'))
+ die(g, "unmatched '('");
+ return atom;
+ }
+ if (re_accept(g, L_NLA)) {
+ atom = newnode(g, P_NLA);
+ atom->x = parsealt(g);
+ if (!re_accept(g, ')'))
+ die(g, "unmatched '('");
+ return atom;
+ }
+ die(g, "syntax error");
+ return NULL;
+}
+
+static Renode *parserep(Restate *g) {
+ Renode *atom;
+
+ if (re_accept(g, '^'))
+ return newnode(g, P_BOL);
+ if (re_accept(g, '$'))
+ return newnode(g, P_EOL);
+ if (re_accept(g, L_WORD))
+ return newnode(g, P_WORD);
+ if (re_accept(g, L_NWORD))
+ return newnode(g, P_NWORD);
+
+ atom = parseatom(g);
+ if (g->lookahead == L_COUNT) {
+ int min = g->yymin, max = g->yymax;
+ next(g);
+ if (max < min)
+ die(g, "invalid quantifier");
+ return newrep(g, atom, re_accept(g, '?'), min, max);
+ }
+ if (re_accept(g, '*'))
+ return newrep(g, atom, re_accept(g, '?'), 0, REPINF);
+ if (re_accept(g, '+'))
+ return newrep(g, atom, re_accept(g, '?'), 1, REPINF);
+ if (re_accept(g, '?'))
+ return newrep(g, atom, re_accept(g, '?'), 0, 1);
+ return atom;
+}
+
+static Renode *parsecat(Restate *g) {
+ Renode *cat, *x;
+ if (g->lookahead && g->lookahead != '|' && g->lookahead != ')') {
+ cat = parserep(g);
+ while (g->lookahead && g->lookahead != '|' &&
+ g->lookahead != ')') {
+ x = cat;
+ cat = newnode(g, P_CAT);
+ cat->x = x;
+ cat->y = parserep(g);
+ }
+ return cat;
+ }
+ return NULL;
+}
+
+static Renode *parsealt(Restate *g) {
+ Renode *alt, *x;
+ alt = parsecat(g);
+ while (re_accept(g, '|')) {
+ x = alt;
+ alt = newnode(g, P_ALT);
+ alt->x = x;
+ alt->y = parsecat(g);
+ }
+ return alt;
+}
+
+/* Compile */
+
+enum { I_END,
+ I_JUMP,
+ I_SPLIT,
+ I_PLA,
+ I_NLA,
+ I_ANYNL,
+ I_ANY,
+ I_CHAR,
+ I_CCLASS,
+ I_NCCLASS,
+ I_REF,
+ I_BOL,
+ I_EOL,
+ I_WORD,
+ I_NWORD,
+ I_LPAR,
+ I_RPAR };
+
+struct Reinst {
+ unsigned char opcode;
+ unsigned char n;
+ Rune c;
+ Reclass *cc;
+ Reinst *x;
+ Reinst *y;
+};
+
+static unsigned int count(Renode *node) {
+ unsigned int min, max;
+ if (!node)
+ return 0;
+ switch (node->type) {
+ default:
+ return 1;
+ case P_CAT:
+ return count(node->x) + count(node->y);
+ case P_ALT:
+ return count(node->x) + count(node->y) + 2;
+ case P_REP:
+ min = node->m;
+ max = node->n;
+ if (min == max)
+ return count(node->x) * min;
+ if (max < REPINF)
+ return count(node->x) * max + (max - min);
+ return count(node->x) * (min + 1) + 2;
+ case P_PAR:
+ return count(node->x) + 2;
+ case P_PLA:
+ return count(node->x) + 2;
+ case P_NLA:
+ return count(node->x) + 2;
+ }
+}
+
+static Reinst *emit(Reprog *prog, int opcode) {
+ Reinst *inst = prog->end++;
+ inst->opcode = opcode;
+ inst->n = 0;
+ inst->c = 0;
+ inst->cc = NULL;
+ inst->x = inst->y = NULL;
+ return inst;
+}
+
+static void compile(Reprog *prog, Renode *node) {
+ Reinst *inst, *split, *jump;
+ unsigned int i;
+
+ if (!node)
+ return;
+
+ switch (node->type) {
+ case P_CAT:
+ compile(prog, node->x);
+ compile(prog, node->y);
+ break;
+
+ case P_ALT:
+ split = emit(prog, I_SPLIT);
+ compile(prog, node->x);
+ jump = emit(prog, I_JUMP);
+ compile(prog, node->y);
+ split->x = split + 1;
+ split->y = jump + 1;
+ jump->x = prog->end;
+ break;
+
+ case P_REP:
+ for (i = 0; i < node->m; ++i) {
+ inst = prog->end;
+ compile(prog, node->x);
+ }
+ if (node->m == node->n)
+ break;
+ if (node->n < REPINF) {
+ for (i = node->m; i < node->n; ++i) {
+ split = emit(prog, I_SPLIT);
+ compile(prog, node->x);
+ if (node->ng) {
+ split->y = split + 1;
+ split->x = prog->end;
+ } else {
+ split->x = split + 1;
+ split->y = prog->end;
+ }
+ }
+ } else if (node->m == 0) {
+ split = emit(prog, I_SPLIT);
+ compile(prog, node->x);
+ jump = emit(prog, I_JUMP);
+ if (node->ng) {
+ split->y = split + 1;
+ split->x = prog->end;
+ } else {
+ split->x = split + 1;
+ split->y = prog->end;
+ }
+ jump->x = split;
+ } else {
+ split = emit(prog, I_SPLIT);
+ if (node->ng) {
+ split->y = inst;
+ split->x = prog->end;
+ } else {
+ split->x = inst;
+ split->y = prog->end;
+ }
+ }
+ break;
+
+ case P_BOL:
+ emit(prog, I_BOL);
+ break;
+ case P_EOL:
+ emit(prog, I_EOL);
+ break;
+ case P_WORD:
+ emit(prog, I_WORD);
+ break;
+ case P_NWORD:
+ emit(prog, I_NWORD);
+ break;
+
+ case P_PAR:
+ inst = emit(prog, I_LPAR);
+ inst->n = node->n;
+ compile(prog, node->x);
+ inst = emit(prog, I_RPAR);
+ inst->n = node->n;
+ break;
+ case P_PLA:
+ split = emit(prog, I_PLA);
+ compile(prog, node->x);
+ emit(prog, I_END);
+ split->x = split + 1;
+ split->y = prog->end;
+ break;
+ case P_NLA:
+ split = emit(prog, I_NLA);
+ compile(prog, node->x);
+ emit(prog, I_END);
+ split->x = split + 1;
+ split->y = prog->end;
+ break;
+
+ case P_ANY:
+ emit(prog, I_ANY);
+ break;
+ case P_CHAR:
+ inst = emit(prog, I_CHAR);
+ inst->c = (prog->flags & REG_ICASE) ? canon(node->c) : node->c;
+ break;
+ case P_CCLASS:
+ inst = emit(prog, I_CCLASS);
+ inst->cc = node->cc;
+ break;
+ case P_NCCLASS:
+ inst = emit(prog, I_NCCLASS);
+ inst->cc = node->cc;
+ break;
+ case P_REF:
+ inst = emit(prog, I_REF);
+ inst->n = node->n;
+ break;
+ }
+}
+
+#ifdef TEST
+static void dumpnode(Renode *node) {
+ Rune *p;
+ if (!node) {
+ printf("Empty");
+ return;
+ }
+ switch (node->type) {
+ case P_CAT:
+ printf("Cat(");
+ dumpnode(node->x);
+ printf(", ");
+ dumpnode(node->y);
+ printf(")");
+ break;
+ case P_ALT:
+ printf("Alt(");
+ dumpnode(node->x);
+ printf(", ");
+ dumpnode(node->y);
+ printf(")");
+ break;
+ case P_REP:
+ printf(node->ng ? "NgRep(%d,%d," : "Rep(%d,%d,", node->m,
+ node->n);
+ dumpnode(node->x);
+ printf(")");
+ break;
+ case P_BOL:
+ printf("Bol");
+ break;
+ case P_EOL:
+ printf("Eol");
+ break;
+ case P_WORD:
+ printf("Word");
+ break;
+ case P_NWORD:
+ printf("NotWord");
+ break;
+ case P_PAR:
+ printf("Par(%d,", node->n);
+ dumpnode(node->x);
+ printf(")");
+ break;
+ case P_PLA:
+ printf("PLA(");
+ dumpnode(node->x);
+ printf(")");
+ break;
+ case P_NLA:
+ printf("NLA(");
+ dumpnode(node->x);
+ printf(")");
+ break;
+ case P_ANY:
+ printf("Any");
+ break;
+ case P_CHAR:
+ printf("Char(%c)", node->c);
+ break;
+ case P_CCLASS:
+ printf("Class(");
+ for (p = node->cc->spans; p < node->cc->end; p += 2)
+ printf("%02X-%02X,", p[0], p[1]);
+ printf(")");
+ break;
+ case P_NCCLASS:
+ printf("NotClass(");
+ for (p = node->cc->spans; p < node->cc->end; p += 2)
+ printf("%02X-%02X,", p[0], p[1]);
+ printf(")");
+ break;
+ case P_REF:
+ printf("Ref(%d)", node->n);
+ break;
+ }
+}
+
+static void dumpprog(Reprog *prog) {
+ Reinst *inst;
+ int i;
+ for (i = 0, inst = prog->start; inst < prog->end; ++i, ++inst) {
+ printf("% 5d: ", i);
+ switch (inst->opcode) {
+ case I_END:
+ puts("end");
+ break;
+ case I_JUMP:
+ printf("jump %d\n", (int)(inst->x - prog->start));
+ break;
+ case I_SPLIT:
+ printf("split %d %d\n", (int)(inst->x - prog->start),
+ (int)(inst->y - prog->start));
+ break;
+ case I_PLA:
+ printf("pla %d %d\n", (int)(inst->x - prog->start),
+ (int)(inst->y - prog->start));
+ break;
+ case I_NLA:
+ printf("nla %d %d\n", (int)(inst->x - prog->start),
+ (int)(inst->y - prog->start));
+ break;
+ case I_ANY:
+ puts("any");
+ break;
+ case I_ANYNL:
+ puts("anynl");
+ break;
+ case I_CHAR:
+ printf(inst->c >= 32 && inst->c < 127 ? "char '%c'\n"
+ : "char U+%04X\n",
+ inst->c);
+ break;
+ case I_CCLASS:
+ puts("cclass");
+ break;
+ case I_NCCLASS:
+ puts("ncclass");
+ break;
+ case I_REF:
+ printf("ref %d\n", inst->n);
+ break;
+ case I_BOL:
+ puts("bol");
+ break;
+ case I_EOL:
+ puts("eol");
+ break;
+ case I_WORD:
+ puts("word");
+ break;
+ case I_NWORD:
+ puts("nword");
+ break;
+ case I_LPAR:
+ printf("lpar %d\n", inst->n);
+ break;
+ case I_RPAR:
+ printf("rpar %d\n", inst->n);
+ break;
+ }
+ }
+}
+#endif
+
+Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp) {
+ Reprog *prog;
+ Restate *g;
+ Renode *node;
+ Reinst *split, *jump;
+ int i;
+ unsigned int ncount;
+ size_t pattern_len = strlen(pattern);
+
+ if (pattern_len > 10000) {
+ /* Avoid stack exhaustion in recursive parseatom() et.al. */
+ if (errorp)
+ *errorp = "regexp pattern too long (max 10000)";
+ return NULL;
+ }
+
+ prog = rd_calloc(1, sizeof(Reprog));
+ g = &prog->g;
+ g->prog = prog;
+ g->pstart = g->pend = rd_malloc(sizeof(Renode) * pattern_len * 2);
+
+ if (setjmp(g->kaboom)) {
+ if (errorp)
+ *errorp = g->error;
+ rd_free(g->pstart);
+ rd_free(prog);
+ return NULL;
+ }
+
+ g->source = pattern;
+ g->ncclass = 0;
+ g->nsub = 1;
+ for (i = 0; i < MAXSUB; ++i)
+ g->sub[i] = 0;
+
+ g->prog->flags = cflags;
+
+ next(g);
+ node = parsealt(g);
+ if (g->lookahead == ')')
+ die(g, "unmatched ')'");
+ if (g->lookahead != 0)
+ die(g, "syntax error");
+
+ g->prog->nsub = g->nsub;
+ ncount = count(node);
+ if (ncount > 10000)
+ die(g, "regexp graph too large");
+ g->prog->start = g->prog->end =
+ rd_malloc((ncount + 6) * sizeof(Reinst));
+
+ split = emit(g->prog, I_SPLIT);
+ split->x = split + 3;
+ split->y = split + 1;
+ emit(g->prog, I_ANYNL);
+ jump = emit(g->prog, I_JUMP);
+ jump->x = split;
+ emit(g->prog, I_LPAR);
+ compile(g->prog, node);
+ emit(g->prog, I_RPAR);
+ emit(g->prog, I_END);
+
+#ifdef TEST
+ dumpnode(node);
+ putchar('\n');
+ dumpprog(g->prog);
+#endif
+
+ rd_free(g->pstart);
+
+ if (errorp)
+ *errorp = NULL;
+ return g->prog;
+}
+
+void re_regfree(Reprog *prog) {
+ if (prog) {
+ rd_free(prog->start);
+ rd_free(prog);
+ }
+}
+
+/* Match */
+
+static int isnewline(int c) {
+ return c == 0xA || c == 0xD || c == 0x2028 || c == 0x2029;
+}
+
+static int iswordchar(int c) {
+ return c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') ||
+ (c >= '0' && c <= '9');
+}
+
+static int incclass(Reclass *cc, Rune c) {
+ Rune *p;
+ for (p = cc->spans; p < cc->end; p += 2)
+ if (p[0] <= c && c <= p[1])
+ return 1;
+ return 0;
+}
+
+static int incclasscanon(Reclass *cc, Rune c) {
+ Rune *p, r;
+ for (p = cc->spans; p < cc->end; p += 2)
+ for (r = p[0]; r <= p[1]; ++r)
+ if (c == canon(r))
+ return 1;
+ return 0;
+}
+
+static int strncmpcanon(const char *a, const char *b, unsigned int n) {
+ Rune ra, rb;
+ int c;
+ while (n--) {
+ if (!*a)
+ return -1;
+ if (!*b)
+ return 1;
+ a += chartorune(&ra, a);
+ b += chartorune(&rb, b);
+ c = canon(ra) - canon(rb);
+ if (c)
+ return c;
+ }
+ return 0;
+}
+
+struct Rethread {
+ Reinst *pc;
+ const char *sp;
+ Resub sub;
+};
+
+static void spawn(Rethread *t, Reinst *pc, const char *sp, Resub *sub) {
+ t->pc = pc;
+ t->sp = sp;
+ memcpy(&t->sub, sub, sizeof t->sub);
+}
+
+static int
+match(Reinst *pc, const char *sp, const char *bol, int flags, Resub *out) {
+ Rethread ready[MAXTHREAD];
+ Resub scratch;
+ Resub sub;
+ Rune c;
+ unsigned int nready;
+ int i;
+
+ /* queue initial thread */
+ spawn(ready + 0, pc, sp, out);
+ nready = 1;
+
+ /* run threads in stack order */
+ while (nready > 0) {
+ --nready;
+ pc = ready[nready].pc;
+ sp = ready[nready].sp;
+ memcpy(&sub, &ready[nready].sub, sizeof sub);
+ for (;;) {
+ switch (pc->opcode) {
+ case I_END:
+ for (i = 0; i < MAXSUB; ++i) {
+ out->sub[i].sp = sub.sub[i].sp;
+ out->sub[i].ep = sub.sub[i].ep;
+ }
+ return 1;
+ case I_JUMP:
+ pc = pc->x;
+ continue;
+ case I_SPLIT:
+ if (nready >= MAXTHREAD) {
+ fprintf(
+ stderr,
+ "regexec: backtrack overflow!\n");
+ return 0;
+ }
+ spawn(&ready[nready++], pc->y, sp, &sub);
+ pc = pc->x;
+ continue;
+
+ case I_PLA:
+ if (!match(pc->x, sp, bol, flags, &sub))
+ goto dead;
+ pc = pc->y;
+ continue;
+ case I_NLA:
+ memcpy(&scratch, &sub, sizeof scratch);
+ if (match(pc->x, sp, bol, flags, &scratch))
+ goto dead;
+ pc = pc->y;
+ continue;
+
+ case I_ANYNL:
+ sp += chartorune(&c, sp);
+ if (c == 0)
+ goto dead;
+ break;
+ case I_ANY:
+ sp += chartorune(&c, sp);
+ if (c == 0)
+ goto dead;
+ if (isnewline(c))
+ goto dead;
+ break;
+ case I_CHAR:
+ sp += chartorune(&c, sp);
+ if (c == 0)
+ goto dead;
+ if (flags & REG_ICASE)
+ c = canon(c);
+ if (c != pc->c)
+ goto dead;
+ break;
+ case I_CCLASS:
+ sp += chartorune(&c, sp);
+ if (c == 0)
+ goto dead;
+ if (flags & REG_ICASE) {
+ if (!incclasscanon(pc->cc, canon(c)))
+ goto dead;
+ } else {
+ if (!incclass(pc->cc, c))
+ goto dead;
+ }
+ break;
+ case I_NCCLASS:
+ sp += chartorune(&c, sp);
+ if (c == 0)
+ goto dead;
+ if (flags & REG_ICASE) {
+ if (incclasscanon(pc->cc, canon(c)))
+ goto dead;
+ } else {
+ if (incclass(pc->cc, c))
+ goto dead;
+ }
+ break;
+ case I_REF:
+ i = (int)(sub.sub[pc->n].ep -
+ sub.sub[pc->n].sp);
+ if (flags & REG_ICASE) {
+ if (strncmpcanon(sp, sub.sub[pc->n].sp,
+ i))
+ goto dead;
+ } else {
+ if (strncmp(sp, sub.sub[pc->n].sp, i))
+ goto dead;
+ }
+ if (i > 0)
+ sp += i;
+ break;
+
+ case I_BOL:
+ if (sp == bol && !(flags & REG_NOTBOL))
+ break;
+ if (flags & REG_NEWLINE)
+ if (sp > bol && isnewline(sp[-1]))
+ break;
+ goto dead;
+ case I_EOL:
+ if (*sp == 0)
+ break;
+ if (flags & REG_NEWLINE)
+ if (isnewline(*sp))
+ break;
+ goto dead;
+ case I_WORD:
+ i = sp > bol && iswordchar(sp[-1]);
+ i ^= iswordchar(sp[0]);
+ if (i)
+ break;
+ goto dead;
+ case I_NWORD:
+ i = sp > bol && iswordchar(sp[-1]);
+ i ^= iswordchar(sp[0]);
+ if (!i)
+ break;
+ goto dead;
+
+ case I_LPAR:
+ sub.sub[pc->n].sp = sp;
+ break;
+ case I_RPAR:
+ sub.sub[pc->n].ep = sp;
+ break;
+ default:
+ goto dead;
+ }
+ pc = pc + 1;
+ }
+ dead:;
+ }
+ return 0;
+}
+
+int re_regexec(Reprog *prog, const char *sp, Resub *sub, int eflags) {
+ Resub scratch;
+ int i;
+
+ if (!sub)
+ sub = &scratch;
+
+ sub->nsub = prog->nsub;
+ for (i = 0; i < MAXSUB; ++i)
+ sub->sub[i].sp = sub->sub[i].ep = NULL;
+
+ return !match(prog->start, sp, sp, prog->flags | eflags, sub);
+}
+
+#ifdef TEST
+int main(int argc, char **argv) {
+ const char *error;
+ const char *s;
+ Reprog *p;
+ Resub m;
+ unsigned int i;
+
+ if (argc > 1) {
+ p = regcomp(argv[1], 0, &error);
+ if (!p) {
+ fprintf(stderr, "regcomp: %s\n", error);
+ return 1;
+ }
+
+ if (argc > 2) {
+ s = argv[2];
+ printf("nsub = %d\n", p->nsub);
+ if (!regexec(p, s, &m, 0)) {
+ for (i = 0; i < m.nsub; ++i) {
+ int n = m.sub[i].ep - m.sub[i].sp;
+ if (n > 0)
+ printf(
+ "match %d: s=%d e=%d n=%d "
+ "'%.*s'\n",
+ i, (int)(m.sub[i].sp - s),
+ (int)(m.sub[i].ep - s), n,
+ n, m.sub[i].sp);
+ else
+ printf("match %d: n=0 ''\n", i);
+ }
+ } else {
+ printf("no match\n");
+ }
+ }
+ }
+
+ return 0;
+}
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h b/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h
new file mode 100644
index 000000000..3fd225071
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/regexp.h
@@ -0,0 +1,41 @@
+/**
+ * Copyright: public domain
+ *
+ * From https://github.com/ccxvii/minilibs sha 875c33568b5a4aa4fb3dd0c52ea98f7f0e5ca684:
+ *
+ * These libraries are in the public domain (or the equivalent where that is not possible).
+ * You can do anything you want with them. You have no legal obligation to do anything else,
+ * although I appreciate attribution.
+ */
+
+#ifndef regexp_h
+#define regexp_h
+
+typedef struct Reprog Reprog;
+typedef struct Resub Resub;
+
+Reprog *re_regcomp(const char *pattern, int cflags, const char **errorp);
+int re_regexec(Reprog *prog, const char *string, Resub *sub, int eflags);
+void re_regfree(Reprog *prog);
+
+enum {
+ /* regcomp flags */
+ REG_ICASE = 1,
+ REG_NEWLINE = 2,
+
+ /* regexec flags */
+ REG_NOTBOL = 4,
+
+ /* limits */
+ REG_MAXSUB = 16
+};
+
+struct Resub {
+ unsigned int nsub;
+ struct {
+ const char *sp;
+ const char *ep;
+ } sub[REG_MAXSUB];
+};
+
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c b/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c
new file mode 100644
index 000000000..e3988b186
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/snappy.c
@@ -0,0 +1,1866 @@
+/*
+ * C port of the snappy compressor from Google.
+ * This is a very fast compressor with comparable compression to lzo.
+ * Works best on 64bit little-endian, but should be good on others too.
+ * Ported by Andi Kleen.
+ * Uptodate with snappy 1.1.0
+ */
+
+/*
+ * Copyright 2005 Google Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifdef __GNUC__
+#pragma GCC diagnostic push
+#pragma GCC diagnostic ignored "-Wcast-align"
+#endif
+
+#ifndef SG
+#define SG /* Scatter-Gather / iovec support in Snappy */
+#endif
+
+#ifdef __KERNEL__
+#include <linux/kernel.h>
+#ifdef SG
+#include <linux/uio.h>
+#endif
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <linux/snappy.h>
+#include <linux/vmalloc.h>
+#include <asm/unaligned.h>
+#else
+#include "snappy.h"
+#include "snappy_compat.h"
+#endif
+
+#include "rd.h"
+
+#ifdef _MSC_VER
+#define inline __inline
+#endif
+
+static inline u64 get_unaligned64(const void *b)
+{
+ u64 ret;
+ memcpy(&ret, b, sizeof(u64));
+ return ret;
+}
+static inline u32 get_unaligned32(const void *b)
+{
+ u32 ret;
+ memcpy(&ret, b, sizeof(u32));
+ return ret;
+}
+#define get_unaligned_le32(x) (le32toh(get_unaligned32((u32 *)(x))))
+
+static inline void put_unaligned64(u64 v, void *b)
+{
+ memcpy(b, &v, sizeof(v));
+}
+static inline void put_unaligned32(u32 v, void *b)
+{
+ memcpy(b, &v, sizeof(v));
+}
+static inline void put_unaligned16(u16 v, void *b)
+{
+ memcpy(b, &v, sizeof(v));
+}
+#define put_unaligned_le16(v,x) (put_unaligned16(htole16(v), (u16 *)(x)))
+
+
+#define CRASH_UNLESS(x) BUG_ON(!(x))
+#define CHECK(cond) CRASH_UNLESS(cond)
+#define CHECK_LE(a, b) CRASH_UNLESS((a) <= (b))
+#define CHECK_GE(a, b) CRASH_UNLESS((a) >= (b))
+#define CHECK_EQ(a, b) CRASH_UNLESS((a) == (b))
+#define CHECK_NE(a, b) CRASH_UNLESS((a) != (b))
+#define CHECK_LT(a, b) CRASH_UNLESS((a) < (b))
+#define CHECK_GT(a, b) CRASH_UNLESS((a) > (b))
+
+#define UNALIGNED_LOAD32(_p) get_unaligned32((u32 *)(_p))
+#define UNALIGNED_LOAD64(_p) get_unaligned64((u64 *)(_p))
+
+#define UNALIGNED_STORE16(_p, _val) put_unaligned16(_val, (u16 *)(_p))
+#define UNALIGNED_STORE32(_p, _val) put_unaligned32(_val, (u32 *)(_p))
+#define UNALIGNED_STORE64(_p, _val) put_unaligned64(_val, (u64 *)(_p))
+
+/*
+ * This can be more efficient than UNALIGNED_LOAD64 + UNALIGNED_STORE64
+ * on some platforms, in particular ARM.
+ */
+static inline void unaligned_copy64(const void *src, void *dst)
+{
+ if (sizeof(void *) == 8) {
+ UNALIGNED_STORE64(dst, UNALIGNED_LOAD64(src));
+ } else {
+ const char *src_char = (const char *)(src);
+ char *dst_char = (char *)(dst);
+
+ UNALIGNED_STORE32(dst_char, UNALIGNED_LOAD32(src_char));
+ UNALIGNED_STORE32(dst_char + 4, UNALIGNED_LOAD32(src_char + 4));
+ }
+}
+
+#ifdef NDEBUG
+
+#define DCHECK(cond) do {} while(0)
+#define DCHECK_LE(a, b) do {} while(0)
+#define DCHECK_GE(a, b) do {} while(0)
+#define DCHECK_EQ(a, b) do {} while(0)
+#define DCHECK_NE(a, b) do {} while(0)
+#define DCHECK_LT(a, b) do {} while(0)
+#define DCHECK_GT(a, b) do {} while(0)
+
+#else
+
+#define DCHECK(cond) CHECK(cond)
+#define DCHECK_LE(a, b) CHECK_LE(a, b)
+#define DCHECK_GE(a, b) CHECK_GE(a, b)
+#define DCHECK_EQ(a, b) CHECK_EQ(a, b)
+#define DCHECK_NE(a, b) CHECK_NE(a, b)
+#define DCHECK_LT(a, b) CHECK_LT(a, b)
+#define DCHECK_GT(a, b) CHECK_GT(a, b)
+
+#endif
+
+static inline bool is_little_endian(void)
+{
+#ifdef __LITTLE_ENDIAN__
+ return true;
+#endif
+ return false;
+}
+
+#if defined(__xlc__) // xlc compiler on AIX
+#define rd_clz(n) __cntlz4(n)
+#define rd_ctz(n) __cnttz4(n)
+#define rd_ctz64(n) __cnttz8(n)
+
+#elif defined(__SUNPRO_C) // Solaris Studio compiler on sun
+/*
+ * Source for following definitions is Hacker’s Delight, Second Edition by Henry S. Warren
+ * http://www.hackersdelight.org/permissions.htm
+ */
+u32 rd_clz(u32 x) {
+ u32 n;
+
+ if (x == 0) return(32);
+ n = 1;
+ if ((x >> 16) == 0) {n = n +16; x = x <<16;}
+ if ((x >> 24) == 0) {n = n + 8; x = x << 8;}
+ if ((x >> 28) == 0) {n = n + 4; x = x << 4;}
+ if ((x >> 30) == 0) {n = n + 2; x = x << 2;}
+ n = n - (x >> 31);
+ return n;
+}
+
+u32 rd_ctz(u32 x) {
+ u32 y;
+ u32 n;
+
+ if (x == 0) return 32;
+ n = 31;
+ y = x <<16; if (y != 0) {n = n -16; x = y;}
+ y = x << 8; if (y != 0) {n = n - 8; x = y;}
+ y = x << 4; if (y != 0) {n = n - 4; x = y;}
+ y = x << 2; if (y != 0) {n = n - 2; x = y;}
+ y = x << 1; if (y != 0) {n = n - 1;}
+ return n;
+}
+
+u64 rd_ctz64(u64 x) {
+ u64 y;
+ u64 n;
+
+ if (x == 0) return 64;
+ n = 63;
+ y = x <<32; if (y != 0) {n = n -32; x = y;}
+ y = x <<16; if (y != 0) {n = n -16; x = y;}
+ y = x << 8; if (y != 0) {n = n - 8; x = y;}
+ y = x << 4; if (y != 0) {n = n - 4; x = y;}
+ y = x << 2; if (y != 0) {n = n - 2; x = y;}
+ y = x << 1; if (y != 0) {n = n - 1;}
+ return n;
+}
+#elif !defined(_MSC_VER)
+#define rd_clz(n) __builtin_clz(n)
+#define rd_ctz(n) __builtin_ctz(n)
+#define rd_ctz64(n) __builtin_ctzll(n)
+#else
+#include <intrin.h>
+static int inline rd_clz(u32 x) {
+ int r = 0;
+ if (_BitScanForward(&r, x))
+ return 31 - r;
+ else
+ return 32;
+}
+
+static int inline rd_ctz(u32 x) {
+ int r = 0;
+ if (_BitScanForward(&r, x))
+ return r;
+ else
+ return 32;
+}
+
+static int inline rd_ctz64(u64 x) {
+#ifdef _M_X64
+ int r = 0;
+ if (_BitScanReverse64(&r, x))
+ return r;
+ else
+ return 64;
+#else
+ int r;
+ if ((r = rd_ctz(x & 0xffffffff)) < 32)
+ return r;
+ return 32 + rd_ctz(x >> 32);
+#endif
+}
+#endif
+
+
+static inline int log2_floor(u32 n)
+{
+ return n == 0 ? -1 : 31 ^ rd_clz(n);
+}
+
+static inline RD_UNUSED int find_lsb_set_non_zero(u32 n)
+{
+ return rd_ctz(n);
+}
+
+static inline RD_UNUSED int find_lsb_set_non_zero64(u64 n)
+{
+ return rd_ctz64(n);
+}
+
+#define kmax32 5
+
+/*
+ * Attempts to parse a varint32 from a prefix of the bytes in [ptr,limit-1].
+ * Never reads a character at or beyond limit. If a valid/terminated varint32
+ * was found in the range, stores it in *OUTPUT and returns a pointer just
+ * past the last byte of the varint32. Else returns NULL. On success,
+ * "result <= limit".
+ */
+static inline const char *varint_parse32_with_limit(const char *p,
+ const char *l,
+ u32 * OUTPUT)
+{
+ const unsigned char *ptr = (const unsigned char *)(p);
+ const unsigned char *limit = (const unsigned char *)(l);
+ u32 b, result;
+
+ if (ptr >= limit)
+ return NULL;
+ b = *(ptr++);
+ result = b & 127;
+ if (b < 128)
+ goto done;
+ if (ptr >= limit)
+ return NULL;
+ b = *(ptr++);
+ result |= (b & 127) << 7;
+ if (b < 128)
+ goto done;
+ if (ptr >= limit)
+ return NULL;
+ b = *(ptr++);
+ result |= (b & 127) << 14;
+ if (b < 128)
+ goto done;
+ if (ptr >= limit)
+ return NULL;
+ b = *(ptr++);
+ result |= (b & 127) << 21;
+ if (b < 128)
+ goto done;
+ if (ptr >= limit)
+ return NULL;
+ b = *(ptr++);
+ result |= (b & 127) << 28;
+ if (b < 16)
+ goto done;
+ return NULL; /* Value is too long to be a varint32 */
+done:
+ *OUTPUT = result;
+ return (const char *)(ptr);
+}
+
+/*
+ * REQUIRES "ptr" points to a buffer of length sufficient to hold "v".
+ * EFFECTS Encodes "v" into "ptr" and returns a pointer to the
+ * byte just past the last encoded byte.
+ */
+static inline char *varint_encode32(char *sptr, u32 v)
+{
+ /* Operate on characters as unsigneds */
+ unsigned char *ptr = (unsigned char *)(sptr);
+ static const int B = 128;
+
+ if (v < (1 << 7)) {
+ *(ptr++) = v;
+ } else if (v < (1 << 14)) {
+ *(ptr++) = v | B;
+ *(ptr++) = v >> 7;
+ } else if (v < (1 << 21)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = v >> 14;
+ } else if (v < (1 << 28)) {
+ *(ptr++) = v | B;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = v >> 21;
+ } else {
+ *(ptr++) = v | B;
+ *(ptr++) = (v >> 7) | B;
+ *(ptr++) = (v >> 14) | B;
+ *(ptr++) = (v >> 21) | B;
+ *(ptr++) = v >> 28;
+ }
+ return (char *)(ptr);
+}
+
+#ifdef SG
+
+static inline void *n_bytes_after_addr(void *addr, size_t n_bytes)
+{
+ return (void *) ((char *)addr + n_bytes);
+}
+
+struct source {
+ struct iovec *iov;
+ int iovlen;
+ int curvec;
+ int curoff;
+ size_t total;
+};
+
+/* Only valid at beginning when nothing is consumed */
+static inline int available(struct source *s)
+{
+ return (int) s->total;
+}
+
+static inline const char *peek(struct source *s, size_t *len)
+{
+ if (likely(s->curvec < s->iovlen)) {
+ struct iovec *iv = &s->iov[s->curvec];
+ if ((unsigned)s->curoff < (size_t)iv->iov_len) {
+ *len = iv->iov_len - s->curoff;
+ return n_bytes_after_addr(iv->iov_base, s->curoff);
+ }
+ }
+ *len = 0;
+ return NULL;
+}
+
+static inline void skip(struct source *s, size_t n)
+{
+ struct iovec *iv = &s->iov[s->curvec];
+ s->curoff += (int) n;
+ DCHECK_LE((unsigned)s->curoff, (size_t)iv->iov_len);
+ if ((unsigned)s->curoff >= (size_t)iv->iov_len &&
+ s->curvec + 1 < s->iovlen) {
+ s->curoff = 0;
+ s->curvec++;
+ }
+}
+
+struct sink {
+ struct iovec *iov;
+ int iovlen;
+ unsigned curvec;
+ unsigned curoff;
+ unsigned written;
+};
+
+static inline void append(struct sink *s, const char *data, size_t n)
+{
+ struct iovec *iov = &s->iov[s->curvec];
+ char *dst = n_bytes_after_addr(iov->iov_base, s->curoff);
+ size_t nlen = min_t(size_t, iov->iov_len - s->curoff, n);
+ if (data != dst)
+ memcpy(dst, data, nlen);
+ s->written += (int) n;
+ s->curoff += (int) nlen;
+ while ((n -= nlen) > 0) {
+ data += nlen;
+ s->curvec++;
+ DCHECK_LT((signed)s->curvec, s->iovlen);
+ iov++;
+ nlen = min_t(size_t, (size_t)iov->iov_len, n);
+ memcpy(iov->iov_base, data, nlen);
+ s->curoff = (int) nlen;
+ }
+}
+
+static inline void *sink_peek(struct sink *s, size_t n)
+{
+ struct iovec *iov = &s->iov[s->curvec];
+ if (s->curvec < (size_t)iov->iov_len && iov->iov_len - s->curoff >= n)
+ return n_bytes_after_addr(iov->iov_base, s->curoff);
+ return NULL;
+}
+
+#else
+
+struct source {
+ const char *ptr;
+ size_t left;
+};
+
+static inline int available(struct source *s)
+{
+ return s->left;
+}
+
+static inline const char *peek(struct source *s, size_t * len)
+{
+ *len = s->left;
+ return s->ptr;
+}
+
+static inline void skip(struct source *s, size_t n)
+{
+ s->left -= n;
+ s->ptr += n;
+}
+
+struct sink {
+ char *dest;
+};
+
+static inline void append(struct sink *s, const char *data, size_t n)
+{
+ if (data != s->dest)
+ memcpy(s->dest, data, n);
+ s->dest += n;
+}
+
+#define sink_peek(s, n) sink_peek_no_sg(s)
+
+static inline void *sink_peek_no_sg(const struct sink *s)
+{
+ return s->dest;
+}
+
+#endif
+
+struct writer {
+ char *base;
+ char *op;
+ char *op_limit;
+};
+
+/* Called before decompression */
+static inline void writer_set_expected_length(struct writer *w, size_t len)
+{
+ w->op_limit = w->op + len;
+}
+
+/* Called after decompression */
+static inline bool writer_check_length(struct writer *w)
+{
+ return w->op == w->op_limit;
+}
+
+/*
+ * Copy "len" bytes from "src" to "op", one byte at a time. Used for
+ * handling COPY operations where the input and output regions may
+ * overlap. For example, suppose:
+ * src == "ab"
+ * op == src + 2
+ * len == 20
+ * After IncrementalCopy(src, op, len), the result will have
+ * eleven copies of "ab"
+ * ababababababababababab
+ * Note that this does not match the semantics of either memcpy()
+ * or memmove().
+ */
+static inline void incremental_copy(const char *src, char *op, ssize_t len)
+{
+ DCHECK_GT(len, 0);
+ do {
+ *op++ = *src++;
+ } while (--len > 0);
+}
+
+/*
+ * Equivalent to IncrementalCopy except that it can write up to ten extra
+ * bytes after the end of the copy, and that it is faster.
+ *
+ * The main part of this loop is a simple copy of eight bytes at a time until
+ * we've copied (at least) the requested amount of bytes. However, if op and
+ * src are less than eight bytes apart (indicating a repeating pattern of
+ * length < 8), we first need to expand the pattern in order to get the correct
+ * results. For instance, if the buffer looks like this, with the eight-byte
+ * <src> and <op> patterns marked as intervals:
+ *
+ * abxxxxxxxxxxxx
+ * [------] src
+ * [------] op
+ *
+ * a single eight-byte copy from <src> to <op> will repeat the pattern once,
+ * after which we can move <op> two bytes without moving <src>:
+ *
+ * ababxxxxxxxxxx
+ * [------] src
+ * [------] op
+ *
+ * and repeat the exercise until the two no longer overlap.
+ *
+ * This allows us to do very well in the special case of one single byte
+ * repeated many times, without taking a big hit for more general cases.
+ *
+ * The worst case of extra writing past the end of the match occurs when
+ * op - src == 1 and len == 1; the last copy will read from byte positions
+ * [0..7] and write to [4..11], whereas it was only supposed to write to
+ * position 1. Thus, ten excess bytes.
+ */
+
+#define kmax_increment_copy_overflow 10
+
+static inline void incremental_copy_fast_path(const char *src, char *op,
+ ssize_t len)
+{
+ while (op - src < 8) {
+ unaligned_copy64(src, op);
+ len -= op - src;
+ op += op - src;
+ }
+ while (len > 0) {
+ unaligned_copy64(src, op);
+ src += 8;
+ op += 8;
+ len -= 8;
+ }
+}
+
+static inline bool writer_append_from_self(struct writer *w, u32 offset,
+ u32 len)
+{
+ char *const op = w->op;
+ CHECK_LE(op, w->op_limit);
+ const u32 space_left = (u32) (w->op_limit - op);
+
+ if ((unsigned)(op - w->base) <= offset - 1u) /* -1u catches offset==0 */
+ return false;
+ if (len <= 16 && offset >= 8 && space_left >= 16) {
+ /* Fast path, used for the majority (70-80%) of dynamic
+ * invocations. */
+ unaligned_copy64(op - offset, op);
+ unaligned_copy64(op - offset + 8, op + 8);
+ } else {
+ if (space_left >= len + kmax_increment_copy_overflow) {
+ incremental_copy_fast_path(op - offset, op, len);
+ } else {
+ if (space_left < len) {
+ return false;
+ }
+ incremental_copy(op - offset, op, len);
+ }
+ }
+
+ w->op = op + len;
+ return true;
+}
+
+static inline bool writer_append(struct writer *w, const char *ip, u32 len)
+{
+ char *const op = w->op;
+ CHECK_LE(op, w->op_limit);
+ const u32 space_left = (u32) (w->op_limit - op);
+ if (space_left < len)
+ return false;
+ memcpy(op, ip, len);
+ w->op = op + len;
+ return true;
+}
+
+static inline bool writer_try_fast_append(struct writer *w, const char *ip,
+ u32 available_bytes, u32 len)
+{
+ char *const op = w->op;
+ const int space_left = (int) (w->op_limit - op);
+ if (len <= 16 && available_bytes >= 16 && space_left >= 16) {
+ /* Fast path, used for the majority (~95%) of invocations */
+ unaligned_copy64(ip, op);
+ unaligned_copy64(ip + 8, op + 8);
+ w->op = op + len;
+ return true;
+ }
+ return false;
+}
+
+/*
+ * Any hash function will produce a valid compressed bitstream, but a good
+ * hash function reduces the number of collisions and thus yields better
+ * compression for compressible input, and more speed for incompressible
+ * input. Of course, it doesn't hurt if the hash function is reasonably fast
+ * either, as it gets called a lot.
+ */
+static inline u32 hash_bytes(u32 bytes, int shift)
+{
+ u32 kmul = 0x1e35a7bd;
+ return (bytes * kmul) >> shift;
+}
+
+static inline u32 hash(const char *p, int shift)
+{
+ return hash_bytes(UNALIGNED_LOAD32(p), shift);
+}
+
+/*
+ * Compressed data can be defined as:
+ * compressed := item* literal*
+ * item := literal* copy
+ *
+ * The trailing literal sequence has a space blowup of at most 62/60
+ * since a literal of length 60 needs one tag byte + one extra byte
+ * for length information.
+ *
+ * Item blowup is trickier to measure. Suppose the "copy" op copies
+ * 4 bytes of data. Because of a special check in the encoding code,
+ * we produce a 4-byte copy only if the offset is < 65536. Therefore
+ * the copy op takes 3 bytes to encode, and this type of item leads
+ * to at most the 62/60 blowup for representing literals.
+ *
+ * Suppose the "copy" op copies 5 bytes of data. If the offset is big
+ * enough, it will take 5 bytes to encode the copy op. Therefore the
+ * worst case here is a one-byte literal followed by a five-byte copy.
+ * I.e., 6 bytes of input turn into 7 bytes of "compressed" data.
+ *
+ * This last factor dominates the blowup, so the final estimate is:
+ */
+size_t rd_kafka_snappy_max_compressed_length(size_t source_len)
+{
+ return 32 + source_len + source_len / 6;
+}
+EXPORT_SYMBOL(rd_kafka_snappy_max_compressed_length);
+
+enum {
+ LITERAL = 0,
+ COPY_1_BYTE_OFFSET = 1, /* 3 bit length + 3 bits of offset in opcode */
+ COPY_2_BYTE_OFFSET = 2,
+ COPY_4_BYTE_OFFSET = 3
+};
+
+static inline char *emit_literal(char *op,
+ const char *literal,
+ int len, bool allow_fast_path)
+{
+ int n = len - 1; /* Zero-length literals are disallowed */
+
+ if (n < 60) {
+ /* Fits in tag byte */
+ *op++ = LITERAL | (n << 2);
+
+/*
+ * The vast majority of copies are below 16 bytes, for which a
+ * call to memcpy is overkill. This fast path can sometimes
+ * copy up to 15 bytes too much, but that is okay in the
+ * main loop, since we have a bit to go on for both sides:
+ *
+ * - The input will always have kInputMarginBytes = 15 extra
+ * available bytes, as long as we're in the main loop, and
+ * if not, allow_fast_path = false.
+ * - The output will always have 32 spare bytes (see
+ * MaxCompressedLength).
+ */
+ if (allow_fast_path && len <= 16) {
+ unaligned_copy64(literal, op);
+ unaligned_copy64(literal + 8, op + 8);
+ return op + len;
+ }
+ } else {
+ /* Encode in upcoming bytes */
+ char *base = op;
+ int count = 0;
+ op++;
+ while (n > 0) {
+ *op++ = n & 0xff;
+ n >>= 8;
+ count++;
+ }
+ DCHECK(count >= 1);
+ DCHECK(count <= 4);
+ *base = LITERAL | ((59 + count) << 2);
+ }
+ memcpy(op, literal, len);
+ return op + len;
+}
+
+static inline char *emit_copy_less_than64(char *op, int offset, int len)
+{
+ DCHECK_LE(len, 64);
+ DCHECK_GE(len, 4);
+ DCHECK_LT(offset, 65536);
+
+ if ((len < 12) && (offset < 2048)) {
+ int len_minus_4 = len - 4;
+ DCHECK(len_minus_4 < 8); /* Must fit in 3 bits */
+ *op++ =
+ COPY_1_BYTE_OFFSET + ((len_minus_4) << 2) + ((offset >> 8)
+ << 5);
+ *op++ = offset & 0xff;
+ } else {
+ *op++ = COPY_2_BYTE_OFFSET + ((len - 1) << 2);
+ put_unaligned_le16(offset, op);
+ op += 2;
+ }
+ return op;
+}
+
+static inline char *emit_copy(char *op, int offset, int len)
+{
+ /*
+ * Emit 64 byte copies but make sure to keep at least four bytes
+ * reserved
+ */
+ while (len >= 68) {
+ op = emit_copy_less_than64(op, offset, 64);
+ len -= 64;
+ }
+
+ /*
+ * Emit an extra 60 byte copy if have too much data to fit in
+ * one copy
+ */
+ if (len > 64) {
+ op = emit_copy_less_than64(op, offset, 60);
+ len -= 60;
+ }
+
+ /* Emit remainder */
+ op = emit_copy_less_than64(op, offset, len);
+ return op;
+}
+
+/**
+ * rd_kafka_snappy_uncompressed_length - return length of uncompressed output.
+ * @start: compressed buffer
+ * @n: length of compressed buffer.
+ * @result: Write the length of the uncompressed output here.
+ *
+ * Returns true when successfull, otherwise false.
+ */
+bool rd_kafka_snappy_uncompressed_length(const char *start, size_t n, size_t * result)
+{
+ u32 v = 0;
+ const char *limit = start + n;
+ if (varint_parse32_with_limit(start, limit, &v) != NULL) {
+ *result = v;
+ return true;
+ } else {
+ return false;
+ }
+}
+EXPORT_SYMBOL(rd_kafka_snappy_uncompressed_length);
+
+/*
+ * The size of a compression block. Note that many parts of the compression
+ * code assumes that kBlockSize <= 65536; in particular, the hash table
+ * can only store 16-bit offsets, and EmitCopy() also assumes the offset
+ * is 65535 bytes or less. Note also that if you change this, it will
+ * affect the framing format
+ * Note that there might be older data around that is compressed with larger
+ * block sizes, so the decompression code should not rely on the
+ * non-existence of long backreferences.
+ */
+#define kblock_log 16
+#define kblock_size (1 << kblock_log)
+
+/*
+ * This value could be halfed or quartered to save memory
+ * at the cost of slightly worse compression.
+ */
+#define kmax_hash_table_bits 14
+#define kmax_hash_table_size (1U << kmax_hash_table_bits)
+
+/*
+ * Use smaller hash table when input.size() is smaller, since we
+ * fill the table, incurring O(hash table size) overhead for
+ * compression, and if the input is short, we won't need that
+ * many hash table entries anyway.
+ */
+static u16 *get_hash_table(struct snappy_env *env, size_t input_size,
+ int *table_size)
+{
+ unsigned htsize = 256;
+
+ DCHECK(kmax_hash_table_size >= 256);
+ while (htsize < kmax_hash_table_size && htsize < input_size)
+ htsize <<= 1;
+ CHECK_EQ(0, htsize & (htsize - 1));
+ CHECK_LE(htsize, kmax_hash_table_size);
+
+ u16 *table;
+ table = env->hash_table;
+
+ *table_size = htsize;
+ memset(table, 0, htsize * sizeof(*table));
+ return table;
+}
+
+/*
+ * Return the largest n such that
+ *
+ * s1[0,n-1] == s2[0,n-1]
+ * and n <= (s2_limit - s2).
+ *
+ * Does not read *s2_limit or beyond.
+ * Does not read *(s1 + (s2_limit - s2)) or beyond.
+ * Requires that s2_limit >= s2.
+ *
+ * Separate implementation for x86_64, for speed. Uses the fact that
+ * x86_64 is little endian.
+ */
+#if defined(__LITTLE_ENDIAN__) && BITS_PER_LONG == 64
+static inline int find_match_length(const char *s1,
+ const char *s2, const char *s2_limit)
+{
+ int matched = 0;
+
+ DCHECK_GE(s2_limit, s2);
+ /*
+ * Find out how long the match is. We loop over the data 64 bits at a
+ * time until we find a 64-bit block that doesn't match; then we find
+ * the first non-matching bit and use that to calculate the total
+ * length of the match.
+ */
+ while (likely(s2 <= s2_limit - 8)) {
+ if (unlikely
+ (UNALIGNED_LOAD64(s2) == UNALIGNED_LOAD64(s1 + matched))) {
+ s2 += 8;
+ matched += 8;
+ } else {
+ /*
+ * On current (mid-2008) Opteron models there
+ * is a 3% more efficient code sequence to
+ * find the first non-matching byte. However,
+ * what follows is ~10% better on Intel Core 2
+ * and newer, and we expect AMD's bsf
+ * instruction to improve.
+ */
+ u64 x =
+ UNALIGNED_LOAD64(s2) ^ UNALIGNED_LOAD64(s1 +
+ matched);
+ int matching_bits = find_lsb_set_non_zero64(x);
+ matched += matching_bits >> 3;
+ return matched;
+ }
+ }
+ while (likely(s2 < s2_limit)) {
+ if (likely(s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ } else {
+ return matched;
+ }
+ }
+ return matched;
+}
+#else
+static inline int find_match_length(const char *s1,
+ const char *s2, const char *s2_limit)
+{
+ /* Implementation based on the x86-64 version, above. */
+ DCHECK_GE(s2_limit, s2);
+ int matched = 0;
+
+ while (s2 <= s2_limit - 4 &&
+ UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
+ s2 += 4;
+ matched += 4;
+ }
+ if (is_little_endian() && s2 <= s2_limit - 4) {
+ u32 x =
+ UNALIGNED_LOAD32(s2) ^ UNALIGNED_LOAD32(s1 + matched);
+ int matching_bits = find_lsb_set_non_zero(x);
+ matched += matching_bits >> 3;
+ } else {
+ while ((s2 < s2_limit) && (s1[matched] == *s2)) {
+ ++s2;
+ ++matched;
+ }
+ }
+ return matched;
+}
+#endif
+
+/*
+ * For 0 <= offset <= 4, GetU32AtOffset(GetEightBytesAt(p), offset) will
+ * equal UNALIGNED_LOAD32(p + offset). Motivation: On x86-64 hardware we have
+ * empirically found that overlapping loads such as
+ * UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
+ * are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to u32.
+ *
+ * We have different versions for 64- and 32-bit; ideally we would avoid the
+ * two functions and just inline the UNALIGNED_LOAD64 call into
+ * GetUint32AtOffset, but GCC (at least not as of 4.6) is seemingly not clever
+ * enough to avoid loading the value multiple times then. For 64-bit, the load
+ * is done when GetEightBytesAt() is called, whereas for 32-bit, the load is
+ * done at GetUint32AtOffset() time.
+ */
+
+#if BITS_PER_LONG == 64
+
+typedef u64 eight_bytes_reference;
+
+static inline eight_bytes_reference get_eight_bytes_at(const char* ptr)
+{
+ return UNALIGNED_LOAD64(ptr);
+}
+
+static inline u32 get_u32_at_offset(u64 v, int offset)
+{
+ DCHECK_GE(offset, 0);
+ DCHECK_LE(offset, 4);
+ return v >> (is_little_endian()? 8 * offset : 32 - 8 * offset);
+}
+
+#else
+
+typedef const char *eight_bytes_reference;
+
+static inline eight_bytes_reference get_eight_bytes_at(const char* ptr)
+{
+ return ptr;
+}
+
+static inline u32 get_u32_at_offset(const char *v, int offset)
+{
+ DCHECK_GE(offset, 0);
+ DCHECK_LE(offset, 4);
+ return UNALIGNED_LOAD32(v + offset);
+}
+#endif
+
+/*
+ * Flat array compression that does not emit the "uncompressed length"
+ * prefix. Compresses "input" string to the "*op" buffer.
+ *
+ * REQUIRES: "input" is at most "kBlockSize" bytes long.
+ * REQUIRES: "op" points to an array of memory that is at least
+ * "MaxCompressedLength(input.size())" in size.
+ * REQUIRES: All elements in "table[0..table_size-1]" are initialized to zero.
+ * REQUIRES: "table_size" is a power of two
+ *
+ * Returns an "end" pointer into "op" buffer.
+ * "end - op" is the compressed size of "input".
+ */
+
+static char *compress_fragment(const char *const input,
+ const size_t input_size,
+ char *op, u16 * table, const unsigned table_size)
+{
+ /* "ip" is the input pointer, and "op" is the output pointer. */
+ const char *ip = input;
+ CHECK_LE(input_size, kblock_size);
+ CHECK_EQ(table_size & (table_size - 1), 0);
+ const int shift = 32 - log2_floor(table_size);
+ DCHECK_EQ(UINT_MAX >> shift, table_size - 1);
+ const char *ip_end = input + input_size;
+ const char *baseip = ip;
+ /*
+ * Bytes in [next_emit, ip) will be emitted as literal bytes. Or
+ * [next_emit, ip_end) after the main loop.
+ */
+ const char *next_emit = ip;
+
+ const unsigned kinput_margin_bytes = 15;
+
+ if (likely(input_size >= kinput_margin_bytes)) {
+ const char *const ip_limit = input + input_size -
+ kinput_margin_bytes;
+
+ u32 next_hash;
+ for (next_hash = hash(++ip, shift);;) {
+ DCHECK_LT(next_emit, ip);
+/*
+ * The body of this loop calls EmitLiteral once and then EmitCopy one or
+ * more times. (The exception is that when we're close to exhausting
+ * the input we goto emit_remainder.)
+ *
+ * In the first iteration of this loop we're just starting, so
+ * there's nothing to copy, so calling EmitLiteral once is
+ * necessary. And we only start a new iteration when the
+ * current iteration has determined that a call to EmitLiteral will
+ * precede the next call to EmitCopy (if any).
+ *
+ * Step 1: Scan forward in the input looking for a 4-byte-long match.
+ * If we get close to exhausting the input then goto emit_remainder.
+ *
+ * Heuristic match skipping: If 32 bytes are scanned with no matches
+ * found, start looking only at every other byte. If 32 more bytes are
+ * scanned, look at every third byte, etc.. When a match is found,
+ * immediately go back to looking at every byte. This is a small loss
+ * (~5% performance, ~0.1% density) for lcompressible data due to more
+ * bookkeeping, but for non-compressible data (such as JPEG) it's a huge
+ * win since the compressor quickly "realizes" the data is incompressible
+ * and doesn't bother looking for matches everywhere.
+ *
+ * The "skip" variable keeps track of how many bytes there are since the
+ * last match; dividing it by 32 (ie. right-shifting by five) gives the
+ * number of bytes to move ahead for each iteration.
+ */
+ u32 skip_bytes = 32;
+
+ const char *next_ip = ip;
+ const char *candidate;
+ do {
+ ip = next_ip;
+ u32 hval = next_hash;
+ DCHECK_EQ(hval, hash(ip, shift));
+ u32 bytes_between_hash_lookups = skip_bytes++ >> 5;
+ next_ip = ip + bytes_between_hash_lookups;
+ if (unlikely(next_ip > ip_limit)) {
+ goto emit_remainder;
+ }
+ next_hash = hash(next_ip, shift);
+ candidate = baseip + table[hval];
+ DCHECK_GE(candidate, baseip);
+ DCHECK_LT(candidate, ip);
+
+ table[hval] = (u16) (ip - baseip);
+ } while (likely(UNALIGNED_LOAD32(ip) !=
+ UNALIGNED_LOAD32(candidate)));
+
+/*
+ * Step 2: A 4-byte match has been found. We'll later see if more
+ * than 4 bytes match. But, prior to the match, input
+ * bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
+ */
+ DCHECK_LE(next_emit + 16, ip_end);
+ op = emit_literal(op, next_emit, (int) (ip - next_emit), true);
+
+/*
+ * Step 3: Call EmitCopy, and then see if another EmitCopy could
+ * be our next move. Repeat until we find no match for the
+ * input immediately after what was consumed by the last EmitCopy call.
+ *
+ * If we exit this loop normally then we need to call EmitLiteral next,
+ * though we don't yet know how big the literal will be. We handle that
+ * by proceeding to the next iteration of the main loop. We also can exit
+ * this loop via goto if we get close to exhausting the input.
+ */
+ eight_bytes_reference input_bytes;
+ u32 candidate_bytes = 0;
+
+ do {
+/*
+ * We have a 4-byte match at ip, and no need to emit any
+ * "literal bytes" prior to ip.
+ */
+ const char *base = ip;
+ int matched = 4 +
+ find_match_length(candidate + 4, ip + 4,
+ ip_end);
+ ip += matched;
+ int offset = (int) (base - candidate);
+ DCHECK_EQ(0, memcmp(base, candidate, matched));
+ op = emit_copy(op, offset, matched);
+/*
+ * We could immediately start working at ip now, but to improve
+ * compression we first update table[Hash(ip - 1, ...)].
+ */
+ const char *insert_tail = ip - 1;
+ next_emit = ip;
+ if (unlikely(ip >= ip_limit)) {
+ goto emit_remainder;
+ }
+ input_bytes = get_eight_bytes_at(insert_tail);
+ u32 prev_hash =
+ hash_bytes(get_u32_at_offset
+ (input_bytes, 0), shift);
+ table[prev_hash] = (u16) (ip - baseip - 1);
+ u32 cur_hash =
+ hash_bytes(get_u32_at_offset
+ (input_bytes, 1), shift);
+ candidate = baseip + table[cur_hash];
+ candidate_bytes = UNALIGNED_LOAD32(candidate);
+ table[cur_hash] = (u16) (ip - baseip);
+ } while (get_u32_at_offset(input_bytes, 1) ==
+ candidate_bytes);
+
+ next_hash =
+ hash_bytes(get_u32_at_offset(input_bytes, 2),
+ shift);
+ ++ip;
+ }
+ }
+
+emit_remainder:
+ /* Emit the remaining bytes as a literal */
+ if (next_emit < ip_end)
+ op = emit_literal(op, next_emit, (int) (ip_end - next_emit), false);
+
+ return op;
+}
+
+/*
+ * -----------------------------------------------------------------------
+ * Lookup table for decompression code. Generated by ComputeTable() below.
+ * -----------------------------------------------------------------------
+ */
+
+/* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */
+static const u32 wordmask[] = {
+ 0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
+};
+
+/*
+ * Data stored per entry in lookup table:
+ * Range Bits-used Description
+ * ------------------------------------
+ * 1..64 0..7 Literal/copy length encoded in opcode byte
+ * 0..7 8..10 Copy offset encoded in opcode byte / 256
+ * 0..4 11..13 Extra bytes after opcode
+ *
+ * We use eight bits for the length even though 7 would have sufficed
+ * because of efficiency reasons:
+ * (1) Extracting a byte is faster than a bit-field
+ * (2) It properly aligns copy offset so we do not need a <<8
+ */
+static const u16 char_table[256] = {
+ 0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
+ 0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
+ 0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
+ 0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
+ 0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
+ 0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
+ 0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
+ 0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
+ 0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
+ 0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
+ 0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
+ 0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
+ 0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
+ 0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
+ 0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
+ 0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
+ 0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
+ 0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
+ 0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
+ 0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
+ 0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
+ 0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
+ 0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
+ 0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
+ 0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
+ 0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
+ 0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
+ 0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
+ 0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
+ 0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
+ 0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
+ 0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
+};
+
+struct snappy_decompressor {
+ struct source *reader; /* Underlying source of bytes to decompress */
+ const char *ip; /* Points to next buffered byte */
+ const char *ip_limit; /* Points just past buffered bytes */
+ u32 peeked; /* Bytes peeked from reader (need to skip) */
+ bool eof; /* Hit end of input without an error? */
+ char scratch[5]; /* Temporary buffer for peekfast boundaries */
+};
+
+static void
+init_snappy_decompressor(struct snappy_decompressor *d, struct source *reader)
+{
+ d->reader = reader;
+ d->ip = NULL;
+ d->ip_limit = NULL;
+ d->peeked = 0;
+ d->eof = false;
+}
+
+static void exit_snappy_decompressor(struct snappy_decompressor *d)
+{
+ skip(d->reader, d->peeked);
+}
+
+/*
+ * Read the uncompressed length stored at the start of the compressed data.
+ * On succcess, stores the length in *result and returns true.
+ * On failure, returns false.
+ */
+static bool read_uncompressed_length(struct snappy_decompressor *d,
+ u32 * result)
+{
+ DCHECK(d->ip == NULL); /*
+ * Must not have read anything yet
+ * Length is encoded in 1..5 bytes
+ */
+ *result = 0;
+ u32 shift = 0;
+ while (true) {
+ if (shift >= 32)
+ return false;
+ size_t n;
+ const char *ip = peek(d->reader, &n);
+ if (n == 0)
+ return false;
+ const unsigned char c = *(const unsigned char *)(ip);
+ skip(d->reader, 1);
+ *result |= (u32) (c & 0x7f) << shift;
+ if (c < 128) {
+ break;
+ }
+ shift += 7;
+ }
+ return true;
+}
+
+static bool refill_tag(struct snappy_decompressor *d);
+
+/*
+ * Process the next item found in the input.
+ * Returns true if successful, false on error or end of input.
+ */
+static void decompress_all_tags(struct snappy_decompressor *d,
+ struct writer *writer)
+{
+ const char *ip = d->ip;
+
+ /*
+ * We could have put this refill fragment only at the beginning of the loop.
+ * However, duplicating it at the end of each branch gives the compiler more
+ * scope to optimize the <ip_limit_ - ip> expression based on the local
+ * context, which overall increases speed.
+ */
+#define MAYBE_REFILL() \
+ if (d->ip_limit - ip < 5) { \
+ d->ip = ip; \
+ if (!refill_tag(d)) return; \
+ ip = d->ip; \
+ }
+
+
+ MAYBE_REFILL();
+ for (;;) {
+ if (d->ip_limit - ip < 5) {
+ d->ip = ip;
+ if (!refill_tag(d))
+ return;
+ ip = d->ip;
+ }
+
+ const unsigned char c = *(const unsigned char *)(ip++);
+
+ if ((c & 0x3) == LITERAL) {
+ u32 literal_length = (c >> 2) + 1;
+ if (writer_try_fast_append(writer, ip, (u32) (d->ip_limit - ip),
+ literal_length)) {
+ DCHECK_LT(literal_length, 61);
+ ip += literal_length;
+ MAYBE_REFILL();
+ continue;
+ }
+ if (unlikely(literal_length >= 61)) {
+ /* Long literal */
+ const u32 literal_ll = literal_length - 60;
+ literal_length = (get_unaligned_le32(ip) &
+ wordmask[literal_ll]) + 1;
+ ip += literal_ll;
+ }
+
+ u32 avail = (u32) (d->ip_limit - ip);
+ while (avail < literal_length) {
+ if (!writer_append(writer, ip, avail))
+ return;
+ literal_length -= avail;
+ skip(d->reader, d->peeked);
+ size_t n;
+ ip = peek(d->reader, &n);
+ avail = (u32) n;
+ d->peeked = avail;
+ if (avail == 0)
+ return; /* Premature end of input */
+ d->ip_limit = ip + avail;
+ }
+ if (!writer_append(writer, ip, literal_length))
+ return;
+ ip += literal_length;
+ MAYBE_REFILL();
+ } else {
+ const u32 entry = char_table[c];
+ const u32 trailer = get_unaligned_le32(ip) &
+ wordmask[entry >> 11];
+ const u32 length = entry & 0xff;
+ ip += entry >> 11;
+
+ /*
+ * copy_offset/256 is encoded in bits 8..10.
+ * By just fetching those bits, we get
+ * copy_offset (since the bit-field starts at
+ * bit 8).
+ */
+ const u32 copy_offset = entry & 0x700;
+ if (!writer_append_from_self(writer,
+ copy_offset + trailer,
+ length))
+ return;
+ MAYBE_REFILL();
+ }
+ }
+}
+
+#undef MAYBE_REFILL
+
+static bool refill_tag(struct snappy_decompressor *d)
+{
+ const char *ip = d->ip;
+
+ if (ip == d->ip_limit) {
+ size_t n;
+ /* Fetch a new fragment from the reader */
+ skip(d->reader, d->peeked); /* All peeked bytes are used up */
+ ip = peek(d->reader, &n);
+ d->peeked = (u32) n;
+ if (n == 0) {
+ d->eof = true;
+ return false;
+ }
+ d->ip_limit = ip + n;
+ }
+
+ /* Read the tag character */
+ DCHECK_LT(ip, d->ip_limit);
+ const unsigned char c = *(const unsigned char *)(ip);
+ const u32 entry = char_table[c];
+ const u32 needed = (entry >> 11) + 1; /* +1 byte for 'c' */
+ DCHECK_LE(needed, sizeof(d->scratch));
+
+ /* Read more bytes from reader if needed */
+ u32 nbuf = (u32) (d->ip_limit - ip);
+
+ if (nbuf < needed) {
+ /*
+ * Stitch together bytes from ip and reader to form the word
+ * contents. We store the needed bytes in "scratch". They
+ * will be consumed immediately by the caller since we do not
+ * read more than we need.
+ */
+ memmove(d->scratch, ip, nbuf);
+ skip(d->reader, d->peeked); /* All peeked bytes are used up */
+ d->peeked = 0;
+ while (nbuf < needed) {
+ size_t length;
+ const char *src = peek(d->reader, &length);
+ if (length == 0)
+ return false;
+ u32 to_add = min_t(u32, needed - nbuf, (u32) length);
+ memcpy(d->scratch + nbuf, src, to_add);
+ nbuf += to_add;
+ skip(d->reader, to_add);
+ }
+ DCHECK_EQ(nbuf, needed);
+ d->ip = d->scratch;
+ d->ip_limit = d->scratch + needed;
+ } else if (nbuf < 5) {
+ /*
+ * Have enough bytes, but move into scratch so that we do not
+ * read past end of input
+ */
+ memmove(d->scratch, ip, nbuf);
+ skip(d->reader, d->peeked); /* All peeked bytes are used up */
+ d->peeked = 0;
+ d->ip = d->scratch;
+ d->ip_limit = d->scratch + nbuf;
+ } else {
+ /* Pass pointer to buffer returned by reader. */
+ d->ip = ip;
+ }
+ return true;
+}
+
+static int internal_uncompress(struct source *r,
+ struct writer *writer, u32 max_len)
+{
+ struct snappy_decompressor decompressor;
+ u32 uncompressed_len = 0;
+
+ init_snappy_decompressor(&decompressor, r);
+
+ if (!read_uncompressed_length(&decompressor, &uncompressed_len))
+ return -EIO;
+ /* Protect against possible DoS attack */
+ if ((u64) (uncompressed_len) > max_len)
+ return -EIO;
+
+ writer_set_expected_length(writer, uncompressed_len);
+
+ /* Process the entire input */
+ decompress_all_tags(&decompressor, writer);
+
+ exit_snappy_decompressor(&decompressor);
+ if (decompressor.eof && writer_check_length(writer))
+ return 0;
+ return -EIO;
+}
+
+static inline int sn_compress(struct snappy_env *env, struct source *reader,
+ struct sink *writer)
+{
+ int err;
+ size_t written = 0;
+ int N = available(reader);
+ char ulength[kmax32];
+ char *p = varint_encode32(ulength, N);
+
+ append(writer, ulength, p - ulength);
+ written += (p - ulength);
+
+ while (N > 0) {
+ /* Get next block to compress (without copying if possible) */
+ size_t fragment_size;
+ const char *fragment = peek(reader, &fragment_size);
+ if (fragment_size == 0) {
+ err = -EIO;
+ goto out;
+ }
+ const unsigned num_to_read = min_t(int, N, kblock_size);
+ size_t bytes_read = fragment_size;
+
+ int pending_advance = 0;
+ if (bytes_read >= num_to_read) {
+ /* Buffer returned by reader is large enough */
+ pending_advance = num_to_read;
+ fragment_size = num_to_read;
+ }
+ else {
+ memcpy(env->scratch, fragment, bytes_read);
+ skip(reader, bytes_read);
+
+ while (bytes_read < num_to_read) {
+ fragment = peek(reader, &fragment_size);
+ size_t n =
+ min_t(size_t, fragment_size,
+ num_to_read - bytes_read);
+ memcpy((char *)(env->scratch) + bytes_read, fragment, n);
+ bytes_read += n;
+ skip(reader, n);
+ }
+ DCHECK_EQ(bytes_read, num_to_read);
+ fragment = env->scratch;
+ fragment_size = num_to_read;
+ }
+ if (fragment_size < num_to_read)
+ return -EIO;
+
+ /* Get encoding table for compression */
+ int table_size;
+ u16 *table = get_hash_table(env, num_to_read, &table_size);
+
+ /* Compress input_fragment and append to dest */
+ char *dest;
+ dest = sink_peek(writer, rd_kafka_snappy_max_compressed_length(num_to_read));
+ if (!dest) {
+ /*
+ * Need a scratch buffer for the output,
+ * because the byte sink doesn't have enough
+ * in one piece.
+ */
+ dest = env->scratch_output;
+ }
+ char *end = compress_fragment(fragment, fragment_size,
+ dest, table, table_size);
+ append(writer, dest, end - dest);
+ written += (end - dest);
+
+ N -= num_to_read;
+ skip(reader, pending_advance);
+ }
+
+ err = 0;
+out:
+ return err;
+}
+
+#ifdef SG
+
+int rd_kafka_snappy_compress_iov(struct snappy_env *env,
+ const struct iovec *iov_in, size_t iov_in_cnt,
+ size_t input_length,
+ struct iovec *iov_out) {
+ struct source reader = {
+ .iov = (struct iovec *)iov_in,
+ .iovlen = (int)iov_in_cnt,
+ .total = input_length
+ };
+ struct sink writer = {
+ .iov = iov_out,
+ .iovlen = 1
+ };
+ int err = sn_compress(env, &reader, &writer);
+
+ iov_out->iov_len = writer.written;
+
+ return err;
+}
+EXPORT_SYMBOL(rd_kafka_snappy_compress_iov);
+
+/**
+ * rd_kafka_snappy_compress - Compress a buffer using the snappy compressor.
+ * @env: Preallocated environment
+ * @input: Input buffer
+ * @input_length: Length of input_buffer
+ * @compressed: Output buffer for compressed data
+ * @compressed_length: The real length of the output written here.
+ *
+ * Return 0 on success, otherwise an negative error code.
+ *
+ * The output buffer must be at least
+ * rd_kafka_snappy_max_compressed_length(input_length) bytes long.
+ *
+ * Requires a preallocated environment from rd_kafka_snappy_init_env.
+ * The environment does not keep state over individual calls
+ * of this function, just preallocates the memory.
+ */
+int rd_kafka_snappy_compress(struct snappy_env *env,
+ const char *input,
+ size_t input_length,
+ char *compressed, size_t *compressed_length)
+{
+ struct iovec iov_in = {
+ .iov_base = (char *)input,
+ .iov_len = input_length,
+ };
+ struct iovec iov_out = {
+ .iov_base = compressed,
+ .iov_len = 0xffffffff,
+ };
+ return rd_kafka_snappy_compress_iov(env,
+ &iov_in, 1, input_length,
+ &iov_out);
+}
+EXPORT_SYMBOL(rd_kafka_snappy_compress);
+
+int rd_kafka_snappy_uncompress_iov(struct iovec *iov_in, int iov_in_len,
+ size_t input_len, char *uncompressed)
+{
+ struct source reader = {
+ .iov = iov_in,
+ .iovlen = iov_in_len,
+ .total = input_len
+ };
+ struct writer output = {
+ .base = uncompressed,
+ .op = uncompressed
+ };
+ return internal_uncompress(&reader, &output, 0xffffffff);
+}
+EXPORT_SYMBOL(rd_kafka_snappy_uncompress_iov);
+
+/**
+ * rd_kafka_snappy_uncompress - Uncompress a snappy compressed buffer
+ * @compressed: Input buffer with compressed data
+ * @n: length of compressed buffer
+ * @uncompressed: buffer for uncompressed data
+ *
+ * The uncompressed data buffer must be at least
+ * rd_kafka_snappy_uncompressed_length(compressed) bytes long.
+ *
+ * Return 0 on success, otherwise an negative error code.
+ */
+int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed)
+{
+ struct iovec iov = {
+ .iov_base = (char *)compressed,
+ .iov_len = n
+ };
+ return rd_kafka_snappy_uncompress_iov(&iov, 1, n, uncompressed);
+}
+EXPORT_SYMBOL(rd_kafka_snappy_uncompress);
+
+
+/**
+ * @brief Decompress Snappy message with Snappy-java framing.
+ *
+ * @returns a malloced buffer with the uncompressed data, or NULL on failure.
+ */
+char *rd_kafka_snappy_java_uncompress (const char *inbuf, size_t inlen,
+ size_t *outlenp,
+ char *errstr, size_t errstr_size) {
+ int pass;
+ char *outbuf = NULL;
+
+ /**
+ * Traverse all chunks in two passes:
+ * pass 1: calculate total uncompressed length
+ * pass 2: uncompress
+ *
+ * Each chunk is prefixed with 4: length */
+
+ for (pass = 1 ; pass <= 2 ; pass++) {
+ ssize_t of = 0; /* inbuf offset */
+ ssize_t uof = 0; /* outbuf offset */
+
+ while (of + 4 <= (ssize_t)inlen) {
+ uint32_t clen; /* compressed length */
+ size_t ulen; /* uncompressed length */
+ int r;
+
+ memcpy(&clen, inbuf+of, 4);
+ clen = be32toh(clen);
+ of += 4;
+
+ if (unlikely(clen > inlen - of)) {
+ rd_snprintf(errstr, errstr_size,
+ "Invalid snappy-java chunk length "
+ "%"PRId32" > %"PRIdsz
+ " available bytes",
+ clen, (ssize_t)inlen - of);
+ return NULL;
+ }
+
+ /* Acquire uncompressed length */
+ if (unlikely(!rd_kafka_snappy_uncompressed_length(
+ inbuf+of, clen, &ulen))) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to get length of "
+ "(snappy-java framed) Snappy "
+ "compressed payload "
+ "(clen %"PRId32")",
+ clen);
+ return NULL;
+ }
+
+ if (pass == 1) {
+ /* pass 1: calculate total length */
+ of += clen;
+ uof += ulen;
+ continue;
+ }
+
+ /* pass 2: Uncompress to outbuf */
+ if (unlikely((r = rd_kafka_snappy_uncompress(
+ inbuf+of, clen, outbuf+uof)))) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to decompress Snappy-java "
+ "framed payload of size %"PRId32
+ ": %s",
+ clen,
+ rd_strerror(-r/*negative errno*/));
+ rd_free(outbuf);
+ return NULL;
+ }
+
+ of += clen;
+ uof += ulen;
+ }
+
+ if (unlikely(of != (ssize_t)inlen)) {
+ rd_snprintf(errstr, errstr_size,
+ "%"PRIusz" trailing bytes in Snappy-java "
+ "framed compressed data",
+ inlen - of);
+ if (outbuf)
+ rd_free(outbuf);
+ return NULL;
+ }
+
+ if (pass == 1) {
+ if (uof <= 0) {
+ rd_snprintf(errstr, errstr_size,
+ "Empty Snappy-java framed data");
+ return NULL;
+ }
+
+ /* Allocate memory for uncompressed data */
+ outbuf = rd_malloc(uof);
+ if (unlikely(!outbuf)) {
+ rd_snprintf(errstr, errstr_size,
+ "Failed to allocate memory "
+ "(%"PRIdsz") for "
+ "uncompressed Snappy data: %s",
+ uof, rd_strerror(errno));
+ return NULL;
+ }
+
+ } else {
+ /* pass 2 */
+ *outlenp = uof;
+ }
+ }
+
+ return outbuf;
+}
+
+
+
+#else
+/**
+ * rd_kafka_snappy_compress - Compress a buffer using the snappy compressor.
+ * @env: Preallocated environment
+ * @input: Input buffer
+ * @input_length: Length of input_buffer
+ * @compressed: Output buffer for compressed data
+ * @compressed_length: The real length of the output written here.
+ *
+ * Return 0 on success, otherwise an negative error code.
+ *
+ * The output buffer must be at least
+ * rd_kafka_snappy_max_compressed_length(input_length) bytes long.
+ *
+ * Requires a preallocated environment from rd_kafka_snappy_init_env.
+ * The environment does not keep state over individual calls
+ * of this function, just preallocates the memory.
+ */
+int rd_kafka_snappy_compress(struct snappy_env *env,
+ const char *input,
+ size_t input_length,
+ char *compressed, size_t *compressed_length)
+{
+ struct source reader = {
+ .ptr = input,
+ .left = input_length
+ };
+ struct sink writer = {
+ .dest = compressed,
+ };
+ int err = sn_compress(env, &reader, &writer);
+
+ /* Compute how many bytes were added */
+ *compressed_length = (writer.dest - compressed);
+ return err;
+}
+EXPORT_SYMBOL(rd_kafka_snappy_compress);
+
+/**
+ * rd_kafka_snappy_uncompress - Uncompress a snappy compressed buffer
+ * @compressed: Input buffer with compressed data
+ * @n: length of compressed buffer
+ * @uncompressed: buffer for uncompressed data
+ *
+ * The uncompressed data buffer must be at least
+ * rd_kafka_snappy_uncompressed_length(compressed) bytes long.
+ *
+ * Return 0 on success, otherwise an negative error code.
+ */
+int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed)
+{
+ struct source reader = {
+ .ptr = compressed,
+ .left = n
+ };
+ struct writer output = {
+ .base = uncompressed,
+ .op = uncompressed
+ };
+ return internal_uncompress(&reader, &output, 0xffffffff);
+}
+EXPORT_SYMBOL(rd_kafka_snappy_uncompress);
+#endif
+
+static inline void clear_env(struct snappy_env *env)
+{
+ memset(env, 0, sizeof(*env));
+}
+
+#ifdef SG
+/**
+ * rd_kafka_snappy_init_env_sg - Allocate snappy compression environment
+ * @env: Environment to preallocate
+ * @sg: Input environment ever does scather gather
+ *
+ * If false is passed to sg then multiple entries in an iovec
+ * are not legal.
+ * Returns 0 on success, otherwise negative errno.
+ * Must run in process context.
+ */
+int rd_kafka_snappy_init_env_sg(struct snappy_env *env, bool sg)
+{
+ if (rd_kafka_snappy_init_env(env) < 0)
+ goto error;
+
+ if (sg) {
+ env->scratch = vmalloc(kblock_size);
+ if (!env->scratch)
+ goto error;
+ env->scratch_output =
+ vmalloc(rd_kafka_snappy_max_compressed_length(kblock_size));
+ if (!env->scratch_output)
+ goto error;
+ }
+ return 0;
+error:
+ rd_kafka_snappy_free_env(env);
+ return -ENOMEM;
+}
+EXPORT_SYMBOL(rd_kafka_snappy_init_env_sg);
+#endif
+
+/**
+ * rd_kafka_snappy_init_env - Allocate snappy compression environment
+ * @env: Environment to preallocate
+ *
+ * Passing multiple entries in an iovec is not allowed
+ * on the environment allocated here.
+ * Returns 0 on success, otherwise negative errno.
+ * Must run in process context.
+ */
+int rd_kafka_snappy_init_env(struct snappy_env *env)
+{
+ clear_env(env);
+ env->hash_table = vmalloc(sizeof(u16) * kmax_hash_table_size);
+ if (!env->hash_table)
+ return -ENOMEM;
+ return 0;
+}
+EXPORT_SYMBOL(rd_kafka_snappy_init_env);
+
+/**
+ * rd_kafka_snappy_free_env - Free an snappy compression environment
+ * @env: Environment to free.
+ *
+ * Must run in process context.
+ */
+void rd_kafka_snappy_free_env(struct snappy_env *env)
+{
+ vfree(env->hash_table);
+#ifdef SG
+ vfree(env->scratch);
+ vfree(env->scratch_output);
+#endif
+ clear_env(env);
+}
+EXPORT_SYMBOL(rd_kafka_snappy_free_env);
+
+#ifdef __GNUC__
+#pragma GCC diagnostic pop /* -Wcast-align ignore */
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h b/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h
new file mode 100644
index 000000000..b3742f1ac
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/snappy.h
@@ -0,0 +1,62 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _LINUX_SNAPPY_H
+#define _LINUX_SNAPPY_H 1
+
+#include <stdbool.h>
+#include <stddef.h>
+
+/* Only needed for compression. This preallocates the worst case */
+struct snappy_env {
+ unsigned short *hash_table;
+ void *scratch;
+ void *scratch_output;
+};
+
+struct iovec;
+int rd_kafka_snappy_init_env(struct snappy_env *env);
+int rd_kafka_snappy_init_env_sg(struct snappy_env *env, bool sg);
+void rd_kafka_snappy_free_env(struct snappy_env *env);
+int rd_kafka_snappy_uncompress_iov(struct iovec *iov_in, int iov_in_len,
+ size_t input_len, char *uncompressed);
+int rd_kafka_snappy_uncompress(const char *compressed, size_t n, char *uncompressed);
+char *rd_kafka_snappy_java_uncompress (const char *inbuf, size_t inlen,
+ size_t *outlenp,
+ char *errstr, size_t errstr_size);
+int rd_kafka_snappy_compress_iov(struct snappy_env *env,
+ const struct iovec *iov_in, size_t iov_in_cnt,
+ size_t input_length,
+ struct iovec *iov_out);
+bool rd_kafka_snappy_uncompressed_length(const char *buf, size_t len, size_t *result);
+size_t rd_kafka_snappy_max_compressed_length(size_t source_len);
+
+
+
+
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h b/fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h
new file mode 100644
index 000000000..3286f63de
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/snappy_compat.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright 2005 Google Inc. All Rights Reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "rdkafka_int.h"
+#include "rdendian.h"
+
+
+
+#ifdef __FreeBSD__
+# include <sys/endian.h>
+#elif defined(__APPLE_CC_) || (defined(__MACH__) && defined(__APPLE__)) /* MacOS/X support */
+# include <machine/endian.h>
+
+#if __DARWIN_BYTE_ORDER == __DARWIN_LITTLE_ENDIAN
+# define htole16(x) (x)
+# define le32toh(x) (x)
+#elif __DARWIN_BYTE_ORDER == __DARWIN_BIG_ENDIAN
+# define htole16(x) __DARWIN_OSSwapInt16(x)
+# define le32toh(x) __DARWIN_OSSwapInt32(x)
+#else
+# error "Endianness is undefined"
+#endif
+
+
+#elif !defined(__WIN32__) && !defined(_MSC_VER) && !defined(__sun) && !defined(_AIX)
+# include <endian.h>
+#endif
+
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <errno.h>
+#include <stdbool.h>
+#include <limits.h>
+#if !defined(__WIN32__) && !defined(_MSC_VER)
+#include <sys/uio.h>
+#endif
+
+#ifdef __ANDROID__
+#define le32toh letoh32
+#endif
+
+#if !defined(__MINGW32__) && defined(__WIN32__) && defined(SG)
+struct iovec {
+ void *iov_base; /* Pointer to data. */
+ size_t iov_len; /* Length of data. */
+};
+#endif
+
+typedef unsigned char u8;
+typedef unsigned short u16;
+typedef unsigned u32;
+typedef unsigned long long u64;
+
+#ifdef _MSC_VER
+#define BUG_ON(x) do { if (unlikely((x))) abort(); } while (0)
+#else
+#define BUG_ON(x) assert(!(x))
+#endif
+
+
+#define vmalloc(x) rd_malloc(x)
+#define vfree(x) rd_free(x)
+
+#define EXPORT_SYMBOL(x)
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof(*(x)))
+
+#ifndef likely
+#define likely(x) __builtin_expect((x), 1)
+#define unlikely(x) __builtin_expect((x), 0)
+#endif
+
+#define min_t(t,x,y) ((x) < (y) ? (x) : (y))
+#define max_t(t,x,y) ((x) > (y) ? (x) : (y))
+
+#if __BYTE_ORDER == __LITTLE_ENDIAN
+#define __LITTLE_ENDIAN__ 1
+#endif
+
+#if __LITTLE_ENDIAN__ == 1 || defined(__WIN32__)
+#ifndef htole16
+#define htole16(x) (x)
+#endif
+#ifndef le32toh
+#define le32toh(x) (x)
+#endif
+#endif
+
+
+#if defined(_MSC_VER)
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define htole16(x) (x)
+#define le32toh(x) (x)
+
+#elif BYTE_ORDER == BIG_ENDIAN
+#define htole16(x) __builtin_bswap16(x)
+#define le32toh(x) __builtin_bswap32(x)
+#endif
+#endif
+
+#if defined(__sun)
+#ifndef htole16
+#define htole16(x) LE_16(x)
+#endif
+#ifndef le32toh
+#define le32toh(x) LE_32(x)
+#endif
+#endif
+
+#define BITS_PER_LONG (__SIZEOF_LONG__ * 8)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json b/fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json
new file mode 100644
index 000000000..d0dbedda7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/statistics_schema.json
@@ -0,0 +1,444 @@
+{ "$schema": "http://json-schema.org/schema#",
+ "id": "https://github.com/edenhill/librdkafka/src/statistics_schema.json",
+ "title": "librdkafka statistics schema - INCOMPLETE - WORK IN PROGRESS",
+ "definitions": {
+ "window": {
+ "type": "object",
+ "title": "Rolling window statistics",
+ "description": "The values are in microseconds unless otherwise stated.",
+ "properties": {
+ "type": "object",
+ "properties": {
+ "min": {
+ "type": "integer"
+ },
+ "max": {
+ "type": "integer"
+ },
+ "avg": {
+ "type": "integer"
+ },
+ "sum": {
+ "type": "integer"
+ },
+ "stddev": {
+ "type": "integer"
+ },
+ "p50": {
+ "type": "integer"
+ },
+ "p75": {
+ "type": "integer"
+ },
+ "p90": {
+ "type": "integer"
+ },
+ "p95": {
+ "type": "integer"
+ },
+ "p99": {
+ "type": "integer"
+ },
+ "p99_99": {
+ "type": "integer"
+ },
+ "outofrange": {
+ "type": "integer"
+ },
+ "hdrsize": {
+ "type": "integer"
+ },
+ "cnt": {
+ "type": "integer"
+ }
+ }
+ }
+ }
+ },
+ "type": "object",
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "client_id": {
+ "type": "string"
+ },
+ "type": {
+ "type": "string"
+ },
+ "ts": {
+ "type": "integer"
+ },
+ "time": {
+ "type": "integer"
+ },
+ "replyq": {
+ "type": "integer"
+ },
+ "msg_cnt": {
+ "type": "integer"
+ },
+ "msg_size": {
+ "type": "integer"
+ },
+ "msg_max": {
+ "type": "integer"
+ },
+ "msg_size_max": {
+ "type": "integer"
+ },
+ "simple_cnt": {
+ "type": "integer"
+ },
+ "metadata_cache_cnt": {
+ "type": "integer"
+ },
+ "brokers": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "object",
+ "title": "Broker object keyed by the broker \"name:port/id\"",
+ "properties": {
+ "name": {
+ "type": "string"
+ },
+ "nodeid": {
+ "type": "integer"
+ },
+ "state": {
+ "type": "string"
+ },
+ "stateage": {
+ "type": "integer"
+ },
+ "outbuf_cnt": {
+ "type": "integer"
+ },
+ "outbuf_msg_cnt": {
+ "type": "integer"
+ },
+ "waitresp_cnt": {
+ "type": "integer"
+ },
+ "waitresp_msg_cnt": {
+ "type": "integer"
+ },
+ "tx": {
+ "type": "integer"
+ },
+ "txbytes": {
+ "type": "integer"
+ },
+ "txerrs": {
+ "type": "integer"
+ },
+ "txretries": {
+ "type": "integer"
+ },
+ "txidle": {
+ "type": "integer"
+ },
+ "req_timeouts": {
+ "type": "integer"
+ },
+ "rx": {
+ "type": "integer"
+ },
+ "rxbytes": {
+ "type": "integer"
+ },
+ "rxerrs": {
+ "type": "integer"
+ },
+ "rxcorriderrs": {
+ "type": "integer"
+ },
+ "rxpartial": {
+ "type": "integer"
+ },
+ "rxidle": {
+ "type": "integer"
+ },
+ "zbuf_grow": {
+ "type": "integer"
+ },
+ "buf_grow": {
+ "type": "integer"
+ },
+ "wakeups": {
+ "type": "integer"
+ },
+ "int_latency": {
+ "$ref": "#/definitions/window"
+ },
+ "outbuf_latency": {
+ "$ref": "#/definitions/window"
+ },
+ "rtt": {
+ "$ref": "#/definitions/window"
+ },
+ "throttle": {
+ "$ref": "#/definitions/window"
+ },
+ "toppars": {
+ "type": "object",
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "topic": {
+ "type": "string"
+ },
+ "partition": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "topic",
+ "partition"
+ ]
+ }
+ }
+ },
+ "required": [
+ "name",
+ "nodeid",
+ "state",
+ "stateage",
+ "outbuf_cnt",
+ "outbuf_msg_cnt",
+ "waitresp_cnt",
+ "waitresp_msg_cnt",
+ "tx",
+ "txbytes",
+ "txerrs",
+ "txretries",
+ "req_timeouts",
+ "rx",
+ "rxbytes",
+ "rxerrs",
+ "rxcorriderrs",
+ "rxpartial",
+ "zbuf_grow",
+ "buf_grow",
+ "wakeups",
+ "int_latency",
+ "rtt",
+ "throttle",
+ "toppars"
+ ]
+ }
+ },
+ "topics": {
+ "type": "object",
+ "properties": {
+ "additionalProperties": {
+ "type": "object",
+ "properties": {
+ "topic": {
+ "type": "string"
+ },
+ "metadata_age": {
+ "type": "integer"
+ },
+ "batchsize": {
+ "$ref": "#/definitions/window"
+ },
+ "batchcnt": {
+ "$ref": "#/definitions/window"
+ },
+ "partitions": {
+ "type": "object",
+ "properties": {
+ "^-?[0-9]+$": {
+ "type": "object",
+ "properties": {
+ "partition": {
+ "type": "integer"
+ },
+ "leader": {
+ "type": "integer"
+ },
+ "desired": {
+ "type": "boolean"
+ },
+ "unknown": {
+ "type": "boolean"
+ },
+ "msgq_cnt": {
+ "type": "integer"
+ },
+ "msgq_bytes": {
+ "type": "integer"
+ },
+ "xmit_msgq_cnt": {
+ "type": "integer"
+ },
+ "xmit_msgq_bytes": {
+ "type": "integer"
+ },
+ "fetchq_cnt": {
+ "type": "integer"
+ },
+ "fetchq_size": {
+ "type": "integer"
+ },
+ "fetch_state": {
+ "type": "string"
+ },
+ "query_offset": {
+ "type": "integer"
+ },
+ "next_offset": {
+ "type": "integer"
+ },
+ "app_offset": {
+ "type": "integer"
+ },
+ "stored_offset": {
+ "type": "integer"
+ },
+ "stored_leader_epoch": {
+ "type": "integer"
+ },
+ "commited_offset": {
+ "type": "integer"
+ },
+ "committed_offset": {
+ "type": "integer"
+ },
+ "committed_leader_epoch": {
+ "type": "integer"
+ },
+
+ "eof_offset": {
+ "type": "integer"
+ },
+ "lo_offset": {
+ "type": "integer"
+ },
+ "hi_offset": {
+ "type": "integer"
+ },
+ "consumer_lag": {
+ "type": "integer"
+ },
+ "consumer_lag_stored": {
+ "type": "integer"
+ },
+ "leader_epoch": {
+ "type": "integer"
+ },
+ "txmsgs": {
+ "type": "integer"
+ },
+ "txbytes": {
+ "type": "integer"
+ },
+ "rxmsgs": {
+ "type": "integer"
+ },
+ "rxbytes": {
+ "type": "integer"
+ },
+ "msgs": {
+ "type": "integer"
+ },
+ "rx_ver_drops": {
+ "type": "integer"
+ },
+ "msgs_inflight": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "partition",
+ "leader",
+ "desired",
+ "unknown",
+ "msgq_cnt",
+ "msgq_bytes",
+ "xmit_msgq_cnt",
+ "xmit_msgq_bytes",
+ "fetchq_cnt",
+ "fetchq_size",
+ "fetch_state",
+ "query_offset",
+ "next_offset",
+ "app_offset",
+ "stored_offset",
+ "commited_offset",
+ "committed_offset",
+ "eof_offset",
+ "lo_offset",
+ "hi_offset",
+ "consumer_lag",
+ "txmsgs",
+ "txbytes",
+ "rxmsgs",
+ "rxbytes",
+ "msgs",
+ "rx_ver_drops"
+ ]
+ }
+ }
+ }
+ },
+ "required": [
+ "topic",
+ "metadata_age",
+ "batchsize",
+ "partitions"
+ ]
+ }
+ }
+ },
+ "tx": {
+ "type": "integer"
+ },
+ "tx_bytes": {
+ "type": "integer"
+ },
+ "rx": {
+ "type": "integer"
+ },
+ "rx_bytes": {
+ "type": "integer"
+ },
+ "txmsgs": {
+ "type": "integer"
+ },
+ "txmsg_bytes": {
+ "type": "integer"
+ },
+ "rxmsgs": {
+ "type": "integer"
+ },
+ "rxmsg_bytes": {
+ "type": "integer"
+ }
+ },
+ "required": [
+ "name",
+ "client_id",
+ "type",
+ "ts",
+ "time",
+ "replyq",
+ "msg_cnt",
+ "msg_size",
+ "msg_max",
+ "msg_size_max",
+ "simple_cnt",
+ "metadata_cache_cnt",
+ "brokers",
+ "topics",
+ "tx",
+ "tx_bytes",
+ "rx",
+ "rx_bytes",
+ "txmsgs",
+ "txmsg_bytes",
+ "rxmsgs",
+ "rxmsg_bytes"
+ ]
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c
new file mode 100644
index 000000000..b0ec8e956
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.c
@@ -0,0 +1,932 @@
+/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
+Copyright (c) 2012 Marcus Geelnard
+Copyright (c) 2013-2014 Evan Nemerson
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
+*/
+
+#include "rd.h"
+#include <stdlib.h>
+
+#if !WITH_C11THREADS
+
+/* Platform specific includes */
+#if defined(_TTHREAD_POSIX_)
+ #include <signal.h>
+ #include <sched.h>
+ #include <unistd.h>
+ #include <sys/time.h>
+ #include <errno.h>
+#elif defined(_TTHREAD_WIN32_)
+ #include <process.h>
+ #include <sys/timeb.h>
+#endif
+
+
+/* Standard, good-to-have defines */
+#ifndef NULL
+ #define NULL (void*)0
+#endif
+#ifndef TRUE
+ #define TRUE 1
+#endif
+#ifndef FALSE
+ #define FALSE 0
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static RD_TLS int thrd_is_detached;
+
+
+int mtx_init(mtx_t *mtx, int type)
+{
+#if defined(_TTHREAD_WIN32_)
+ mtx->mAlreadyLocked = FALSE;
+ mtx->mRecursive = type & mtx_recursive;
+ mtx->mTimed = type & mtx_timed;
+ if (!mtx->mTimed)
+ {
+ InitializeCriticalSection(&(mtx->mHandle.cs));
+ }
+ else
+ {
+ mtx->mHandle.mut = CreateMutex(NULL, FALSE, NULL);
+ if (mtx->mHandle.mut == NULL)
+ {
+ return thrd_error;
+ }
+ }
+ return thrd_success;
+#else
+ int ret;
+ pthread_mutexattr_t attr;
+ pthread_mutexattr_init(&attr);
+ if (type & mtx_recursive)
+ {
+ pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_RECURSIVE);
+ }
+ ret = pthread_mutex_init(mtx, &attr);
+ pthread_mutexattr_destroy(&attr);
+ return ret == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+void mtx_destroy(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ if (!mtx->mTimed)
+ {
+ DeleteCriticalSection(&(mtx->mHandle.cs));
+ }
+ else
+ {
+ CloseHandle(mtx->mHandle.mut);
+ }
+#else
+ pthread_mutex_destroy(mtx);
+#endif
+}
+
+int mtx_lock(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ if (!mtx->mTimed)
+ {
+ EnterCriticalSection(&(mtx->mHandle.cs));
+ }
+ else
+ {
+ switch (WaitForSingleObject(mtx->mHandle.mut, INFINITE))
+ {
+ case WAIT_OBJECT_0:
+ break;
+ case WAIT_ABANDONED:
+ default:
+ return thrd_error;
+ }
+ }
+
+ if (!mtx->mRecursive)
+ {
+ rd_assert(!mtx->mAlreadyLocked); /* Would deadlock */
+ mtx->mAlreadyLocked = TRUE;
+ }
+ return thrd_success;
+#else
+ return pthread_mutex_lock(mtx) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+int mtx_timedlock(mtx_t *mtx, const struct timespec *ts)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct timespec current_ts;
+ DWORD timeoutMs;
+
+ if (!mtx->mTimed)
+ {
+ return thrd_error;
+ }
+
+ timespec_get(&current_ts, TIME_UTC);
+
+ if ((current_ts.tv_sec > ts->tv_sec) || ((current_ts.tv_sec == ts->tv_sec) && (current_ts.tv_nsec >= ts->tv_nsec)))
+ {
+ timeoutMs = 0;
+ }
+ else
+ {
+ timeoutMs = (DWORD)(ts->tv_sec - current_ts.tv_sec) * 1000;
+ timeoutMs += (ts->tv_nsec - current_ts.tv_nsec) / 1000000;
+ timeoutMs += 1;
+ }
+
+ /* TODO: the timeout for WaitForSingleObject doesn't include time
+ while the computer is asleep. */
+ switch (WaitForSingleObject(mtx->mHandle.mut, timeoutMs))
+ {
+ case WAIT_OBJECT_0:
+ break;
+ case WAIT_TIMEOUT:
+ return thrd_timedout;
+ case WAIT_ABANDONED:
+ default:
+ return thrd_error;
+ }
+
+ if (!mtx->mRecursive)
+ {
+ rd_assert(!mtx->mAlreadyLocked); /* Would deadlock */
+ mtx->mAlreadyLocked = TRUE;
+ }
+
+ return thrd_success;
+#elif defined(_POSIX_TIMEOUTS) && (_POSIX_TIMEOUTS >= 200112L) && defined(_POSIX_THREADS) && (_POSIX_THREADS >= 200112L)
+ switch (pthread_mutex_timedlock(mtx, ts)) {
+ case 0:
+ return thrd_success;
+ case ETIMEDOUT:
+ return thrd_timedout;
+ default:
+ return thrd_error;
+ }
+#else
+ int rc;
+ struct timespec cur, dur;
+
+ /* Try to acquire the lock and, if we fail, sleep for 5ms. */
+ while ((rc = pthread_mutex_trylock (mtx)) == EBUSY) {
+ timespec_get(&cur, TIME_UTC);
+
+ if ((cur.tv_sec > ts->tv_sec) || ((cur.tv_sec == ts->tv_sec) && (cur.tv_nsec >= ts->tv_nsec)))
+ {
+ break;
+ }
+
+ dur.tv_sec = ts->tv_sec - cur.tv_sec;
+ dur.tv_nsec = ts->tv_nsec - cur.tv_nsec;
+ if (dur.tv_nsec < 0)
+ {
+ dur.tv_sec--;
+ dur.tv_nsec += 1000000000;
+ }
+
+ if ((dur.tv_sec != 0) || (dur.tv_nsec > 5000000))
+ {
+ dur.tv_sec = 0;
+ dur.tv_nsec = 5000000;
+ }
+
+ nanosleep(&dur, NULL);
+ }
+
+ switch (rc) {
+ case 0:
+ return thrd_success;
+ case ETIMEDOUT:
+ case EBUSY:
+ return thrd_timedout;
+ default:
+ return thrd_error;
+ }
+#endif
+}
+
+int mtx_trylock(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ int ret;
+
+ if (!mtx->mTimed)
+ {
+ ret = TryEnterCriticalSection(&(mtx->mHandle.cs)) ? thrd_success : thrd_busy;
+ }
+ else
+ {
+ ret = (WaitForSingleObject(mtx->mHandle.mut, 0) == WAIT_OBJECT_0) ? thrd_success : thrd_busy;
+ }
+
+ if ((!mtx->mRecursive) && (ret == thrd_success))
+ {
+ if (mtx->mAlreadyLocked)
+ {
+ LeaveCriticalSection(&(mtx->mHandle.cs));
+ ret = thrd_busy;
+ }
+ else
+ {
+ mtx->mAlreadyLocked = TRUE;
+ }
+ }
+ return ret;
+#else
+ return (pthread_mutex_trylock(mtx) == 0) ? thrd_success : thrd_busy;
+#endif
+}
+
+int mtx_unlock(mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ mtx->mAlreadyLocked = FALSE;
+ if (!mtx->mTimed)
+ {
+ LeaveCriticalSection(&(mtx->mHandle.cs));
+ }
+ else
+ {
+ if (!ReleaseMutex(mtx->mHandle.mut))
+ {
+ return thrd_error;
+ }
+ }
+ return thrd_success;
+#else
+ return pthread_mutex_unlock(mtx) == 0 ? thrd_success : thrd_error;;
+#endif
+}
+
+#if defined(_TTHREAD_WIN32_)
+#define _CONDITION_EVENT_ONE 0
+#define _CONDITION_EVENT_ALL 1
+#endif
+
+int cnd_init(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ cond->mWaitersCount = 0;
+
+ /* Init critical section */
+ InitializeCriticalSection(&cond->mWaitersCountLock);
+
+ /* Init events */
+ cond->mEvents[_CONDITION_EVENT_ONE] = CreateEvent(NULL, FALSE, FALSE, NULL);
+ if (cond->mEvents[_CONDITION_EVENT_ONE] == NULL)
+ {
+ cond->mEvents[_CONDITION_EVENT_ALL] = NULL;
+ return thrd_error;
+ }
+ cond->mEvents[_CONDITION_EVENT_ALL] = CreateEvent(NULL, TRUE, FALSE, NULL);
+ if (cond->mEvents[_CONDITION_EVENT_ALL] == NULL)
+ {
+ CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
+ cond->mEvents[_CONDITION_EVENT_ONE] = NULL;
+ return thrd_error;
+ }
+
+ return thrd_success;
+#else
+ return pthread_cond_init(cond, NULL) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+void cnd_destroy(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ if (cond->mEvents[_CONDITION_EVENT_ONE] != NULL)
+ {
+ CloseHandle(cond->mEvents[_CONDITION_EVENT_ONE]);
+ }
+ if (cond->mEvents[_CONDITION_EVENT_ALL] != NULL)
+ {
+ CloseHandle(cond->mEvents[_CONDITION_EVENT_ALL]);
+ }
+ DeleteCriticalSection(&cond->mWaitersCountLock);
+#else
+ pthread_cond_destroy(cond);
+#endif
+}
+
+int cnd_signal(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ int haveWaiters;
+
+ /* Are there any waiters? */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ haveWaiters = (cond->mWaitersCount > 0);
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* If we have any waiting threads, send them a signal */
+ if(haveWaiters)
+ {
+ if (SetEvent(cond->mEvents[_CONDITION_EVENT_ONE]) == 0)
+ {
+ return thrd_error;
+ }
+ }
+
+ return thrd_success;
+#else
+ return pthread_cond_signal(cond) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+int cnd_broadcast(cnd_t *cond)
+{
+#if defined(_TTHREAD_WIN32_)
+ int haveWaiters;
+
+ /* Are there any waiters? */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ haveWaiters = (cond->mWaitersCount > 0);
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* If we have any waiting threads, send them a signal */
+ if(haveWaiters)
+ {
+ if (SetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
+ {
+ return thrd_error;
+ }
+ }
+
+ return thrd_success;
+#else
+ return pthread_cond_broadcast(cond) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+#if defined(_TTHREAD_WIN32_)
+int _cnd_timedwait_win32(cnd_t *cond, mtx_t *mtx, DWORD timeout)
+{
+ int result, lastWaiter;
+
+ /* Increment number of waiters */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ ++ cond->mWaitersCount;
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* Release the mutex while waiting for the condition (will decrease
+ the number of waiters when done)... */
+ mtx_unlock(mtx);
+
+ /* Wait for either event to become signaled due to cnd_signal() or
+ cnd_broadcast() being called */
+ result = WaitForMultipleObjects(2, cond->mEvents, FALSE, timeout);
+
+ /* Check if we are the last waiter */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ -- cond->mWaitersCount;
+ lastWaiter = (result == (WAIT_OBJECT_0 + _CONDITION_EVENT_ALL)) &&
+ (cond->mWaitersCount == 0);
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+
+ /* If we are the last waiter to be notified to stop waiting, reset the event */
+ if (lastWaiter)
+ {
+ if (ResetEvent(cond->mEvents[_CONDITION_EVENT_ALL]) == 0)
+ {
+ /* The mutex is locked again before the function returns, even if an error occurred */
+ mtx_lock(mtx);
+ return thrd_error;
+ }
+ }
+
+ /* The mutex is locked again before the function returns, even if an error occurred */
+ mtx_lock(mtx);
+
+ if (result == WAIT_TIMEOUT)
+ return thrd_timedout;
+ else if (result == (int)WAIT_FAILED)
+ return thrd_error;
+
+ return thrd_success;
+}
+#endif
+
+int cnd_wait(cnd_t *cond, mtx_t *mtx)
+{
+#if defined(_TTHREAD_WIN32_)
+ return _cnd_timedwait_win32(cond, mtx, INFINITE);
+#else
+ return pthread_cond_wait(cond, mtx) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct timespec now;
+ if (timespec_get(&now, TIME_UTC) == TIME_UTC)
+ {
+ unsigned long long nowInMilliseconds = now.tv_sec * 1000 + now.tv_nsec / 1000000;
+ unsigned long long tsInMilliseconds = ts->tv_sec * 1000 + ts->tv_nsec / 1000000;
+ DWORD delta = (tsInMilliseconds > nowInMilliseconds) ?
+ (DWORD)(tsInMilliseconds - nowInMilliseconds) : 0;
+ return _cnd_timedwait_win32(cond, mtx, delta);
+ }
+ else
+ return thrd_error;
+#else
+ int ret;
+ ret = pthread_cond_timedwait(cond, mtx, ts);
+ if (ret == ETIMEDOUT)
+ {
+ return thrd_timedout;
+ }
+ return ret == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+
+
+#if defined(_TTHREAD_WIN32_)
+struct TinyCThreadTSSData {
+ void* value;
+ tss_t key;
+ struct TinyCThreadTSSData* next;
+};
+
+static tss_dtor_t _tinycthread_tss_dtors[1088] = { NULL, };
+
+static _Thread_local struct TinyCThreadTSSData* _tinycthread_tss_head = NULL;
+static _Thread_local struct TinyCThreadTSSData* _tinycthread_tss_tail = NULL;
+
+static void _tinycthread_tss_cleanup (void);
+
+static void _tinycthread_tss_cleanup (void) {
+ struct TinyCThreadTSSData* data;
+ int iteration;
+ unsigned int again = 1;
+ void* value;
+
+ for (iteration = 0 ; iteration < TSS_DTOR_ITERATIONS && again > 0 ; iteration++)
+ {
+ again = 0;
+ for (data = _tinycthread_tss_head ; data != NULL ; data = data->next)
+ {
+ if (data->value != NULL)
+ {
+ value = data->value;
+ data->value = NULL;
+
+ if (_tinycthread_tss_dtors[data->key] != NULL)
+ {
+ again = 1;
+ _tinycthread_tss_dtors[data->key](value);
+ }
+ }
+ }
+ }
+
+ while (_tinycthread_tss_head != NULL) {
+ data = _tinycthread_tss_head->next;
+ rd_free (_tinycthread_tss_head);
+ _tinycthread_tss_head = data;
+ }
+ _tinycthread_tss_head = NULL;
+ _tinycthread_tss_tail = NULL;
+}
+
+static void NTAPI _tinycthread_tss_callback(PVOID h, DWORD dwReason, PVOID pv)
+{
+ (void)h;
+ (void)pv;
+
+ if (_tinycthread_tss_head != NULL && (dwReason == DLL_THREAD_DETACH || dwReason == DLL_PROCESS_DETACH))
+ {
+ _tinycthread_tss_cleanup();
+ }
+}
+
+#ifdef _WIN32
+ #ifdef _M_X64
+ #pragma const_seg(".CRT$XLB")
+ #else
+ #pragma data_seg(".CRT$XLB")
+ #endif
+ PIMAGE_TLS_CALLBACK p_thread_callback = _tinycthread_tss_callback;
+ #ifdef _M_X64
+ #pragma const_seg()
+ #else
+ #pragma data_seg()
+ #endif
+#else
+ PIMAGE_TLS_CALLBACK p_thread_callback __attribute__((section(".CRT$XLB"))) = _tinycthread_tss_callback;
+#endif
+
+#endif /* defined(_TTHREAD_WIN32_) */
+
+/** Information to pass to the new thread (what to run). */
+typedef struct {
+ thrd_start_t mFunction; /**< Pointer to the function to be executed. */
+ void * mArg; /**< Function argument for the thread function. */
+} _thread_start_info;
+
+/* Thread wrapper function. */
+#if defined(_TTHREAD_WIN32_)
+static DWORD WINAPI _thrd_wrapper_function(LPVOID aArg)
+#elif defined(_TTHREAD_POSIX_)
+static void * _thrd_wrapper_function(void * aArg)
+#endif
+{
+ thrd_start_t fun;
+ void *arg;
+ int res;
+
+ /* Get thread startup information */
+ _thread_start_info *ti = (_thread_start_info *) aArg;
+ fun = ti->mFunction;
+ arg = ti->mArg;
+
+ /* The thread is responsible for freeing the startup information */
+ rd_free((void *)ti);
+
+ /* Call the actual client thread function */
+ res = fun(arg);
+
+#if defined(_TTHREAD_WIN32_)
+ if (_tinycthread_tss_head != NULL)
+ {
+ _tinycthread_tss_cleanup();
+ }
+
+ return (DWORD)res;
+#else
+ return (void*)(intptr_t)res;
+#endif
+}
+
+int thrd_create(thrd_t *thr, thrd_start_t func, void *arg)
+{
+ /* Fill out the thread startup information (passed to the thread wrapper,
+ which will eventually free it) */
+ _thread_start_info* ti = (_thread_start_info*)rd_malloc(sizeof(_thread_start_info));
+ if (ti == NULL)
+ {
+ return thrd_nomem;
+ }
+ ti->mFunction = func;
+ ti->mArg = arg;
+
+ /* Create the thread */
+#if defined(_TTHREAD_WIN32_)
+ *thr = CreateThread(NULL, 0, _thrd_wrapper_function, (LPVOID) ti, 0, NULL);
+#elif defined(_TTHREAD_POSIX_)
+ {
+ int err;
+ if((err = pthread_create(thr, NULL, _thrd_wrapper_function,
+ (void *)ti)) != 0) {
+ errno = err;
+ *thr = 0;
+ }
+ }
+#endif
+
+ /* Did we fail to create the thread? */
+ if(!*thr)
+ {
+ rd_free(ti);
+ return thrd_error;
+ }
+
+ return thrd_success;
+}
+
+thrd_t thrd_current(void)
+{
+#if defined(_TTHREAD_WIN32_)
+ return GetCurrentThread();
+#else
+ return pthread_self();
+#endif
+}
+
+int thrd_detach(thrd_t thr)
+{
+ thrd_is_detached = 1;
+#if defined(_TTHREAD_WIN32_)
+ /* https://stackoverflow.com/questions/12744324/how-to-detach-a-thread-on-windows-c#answer-12746081 */
+ return CloseHandle(thr) != 0 ? thrd_success : thrd_error;
+#else
+ return pthread_detach(thr) == 0 ? thrd_success : thrd_error;
+#endif
+}
+
+int thrd_equal(thrd_t thr0, thrd_t thr1)
+{
+#if defined(_TTHREAD_WIN32_)
+ return thr0 == thr1;
+#else
+ return pthread_equal(thr0, thr1);
+#endif
+}
+
+void thrd_exit(int res)
+{
+#if defined(_TTHREAD_WIN32_)
+ if (_tinycthread_tss_head != NULL)
+ {
+ _tinycthread_tss_cleanup();
+ }
+
+ ExitThread(res);
+#else
+ pthread_exit((void*)(intptr_t)res);
+#endif
+}
+
+int thrd_join(thrd_t thr, int *res)
+{
+#if defined(_TTHREAD_WIN32_)
+ DWORD dwRes;
+
+ if (WaitForSingleObject(thr, INFINITE) == WAIT_FAILED)
+ {
+ return thrd_error;
+ }
+ if (res != NULL)
+ {
+ if (GetExitCodeThread(thr, &dwRes) != 0)
+ {
+ *res = dwRes;
+ }
+ else
+ {
+ return thrd_error;
+ }
+ }
+ CloseHandle(thr);
+#elif defined(_TTHREAD_POSIX_)
+ void *pres;
+ if (pthread_join(thr, &pres) != 0)
+ {
+ return thrd_error;
+ }
+ if (res != NULL)
+ {
+ *res = (int)(intptr_t)pres;
+ }
+#endif
+ return thrd_success;
+}
+
+int thrd_sleep(const struct timespec *duration, struct timespec *remaining)
+{
+#if !defined(_TTHREAD_WIN32_)
+ return nanosleep(duration, remaining);
+#else
+ struct timespec start;
+ DWORD t;
+
+ timespec_get(&start, TIME_UTC);
+
+ t = SleepEx((DWORD)(duration->tv_sec * 1000 +
+ duration->tv_nsec / 1000000 +
+ (((duration->tv_nsec % 1000000) == 0) ? 0 : 1)),
+ TRUE);
+
+ if (t == 0) {
+ return 0;
+ } else if (remaining != NULL) {
+ timespec_get(remaining, TIME_UTC);
+ remaining->tv_sec -= start.tv_sec;
+ remaining->tv_nsec -= start.tv_nsec;
+ if (remaining->tv_nsec < 0)
+ {
+ remaining->tv_nsec += 1000000000;
+ remaining->tv_sec -= 1;
+ }
+ } else {
+ return -1;
+ }
+
+ return 0;
+#endif
+}
+
+void thrd_yield(void)
+{
+#if defined(_TTHREAD_WIN32_)
+ Sleep(0);
+#else
+ sched_yield();
+#endif
+}
+
+int tss_create(tss_t *key, tss_dtor_t dtor)
+{
+#if defined(_TTHREAD_WIN32_)
+ *key = TlsAlloc();
+ if (*key == TLS_OUT_OF_INDEXES)
+ {
+ return thrd_error;
+ }
+ _tinycthread_tss_dtors[*key] = dtor;
+#else
+ if (pthread_key_create(key, dtor) != 0)
+ {
+ return thrd_error;
+ }
+#endif
+ return thrd_success;
+}
+
+void tss_delete(tss_t key)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*) TlsGetValue (key);
+ struct TinyCThreadTSSData* prev = NULL;
+ if (data != NULL)
+ {
+ if (data == _tinycthread_tss_head)
+ {
+ _tinycthread_tss_head = data->next;
+ }
+ else
+ {
+ prev = _tinycthread_tss_head;
+ if (prev != NULL)
+ {
+ while (prev->next != data)
+ {
+ prev = prev->next;
+ }
+ }
+ }
+
+ if (data == _tinycthread_tss_tail)
+ {
+ _tinycthread_tss_tail = prev;
+ }
+
+ rd_free (data);
+ }
+ _tinycthread_tss_dtors[key] = NULL;
+ TlsFree(key);
+#else
+ pthread_key_delete(key);
+#endif
+}
+
+void *tss_get(tss_t key)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*)TlsGetValue(key);
+ if (data == NULL)
+ {
+ return NULL;
+ }
+ return data->value;
+#else
+ return pthread_getspecific(key);
+#endif
+}
+
+int tss_set(tss_t key, void *val)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct TinyCThreadTSSData* data = (struct TinyCThreadTSSData*)TlsGetValue(key);
+ if (data == NULL)
+ {
+ data = (struct TinyCThreadTSSData*)rd_malloc(sizeof(struct TinyCThreadTSSData));
+ if (data == NULL)
+ {
+ return thrd_error;
+ }
+
+ data->value = NULL;
+ data->key = key;
+ data->next = NULL;
+
+ if (_tinycthread_tss_tail != NULL)
+ {
+ _tinycthread_tss_tail->next = data;
+ }
+ else
+ {
+ _tinycthread_tss_tail = data;
+ }
+
+ if (_tinycthread_tss_head == NULL)
+ {
+ _tinycthread_tss_head = data;
+ }
+
+ if (!TlsSetValue(key, data))
+ {
+ rd_free (data);
+ return thrd_error;
+ }
+ }
+ data->value = val;
+#else
+ if (pthread_setspecific(key, val) != 0)
+ {
+ return thrd_error;
+ }
+#endif
+ return thrd_success;
+}
+
+#if defined(_TTHREAD_EMULATE_TIMESPEC_GET_)
+int _tthread_timespec_get(struct timespec *ts, int base)
+{
+#if defined(_TTHREAD_WIN32_)
+ struct _timeb tb;
+#elif !defined(CLOCK_REALTIME)
+ struct timeval tv;
+#endif
+
+ if (base != TIME_UTC)
+ {
+ return 0;
+ }
+
+#if defined(_TTHREAD_WIN32_)
+ _ftime_s(&tb);
+ ts->tv_sec = (time_t)tb.time;
+ ts->tv_nsec = 1000000L * (long)tb.millitm;
+#elif defined(CLOCK_REALTIME)
+ base = (clock_gettime(CLOCK_REALTIME, ts) == 0) ? base : 0;
+#else
+ gettimeofday(&tv, NULL);
+ ts->tv_sec = (time_t)tv.tv_sec;
+ ts->tv_nsec = 1000L * (long)tv.tv_usec;
+#endif
+
+ return base;
+}
+#endif /* _TTHREAD_EMULATE_TIMESPEC_GET_ */
+
+#if defined(_TTHREAD_WIN32_)
+void call_once(once_flag *flag, void (*func)(void))
+{
+ /* The idea here is that we use a spin lock (via the
+ InterlockedCompareExchange function) to restrict access to the
+ critical section until we have initialized it, then we use the
+ critical section to block until the callback has completed
+ execution. */
+ while (flag->status < 3)
+ {
+ switch (flag->status)
+ {
+ case 0:
+ if (InterlockedCompareExchange (&(flag->status), 1, 0) == 0) {
+ InitializeCriticalSection(&(flag->lock));
+ EnterCriticalSection(&(flag->lock));
+ flag->status = 2;
+ func();
+ flag->status = 3;
+ LeaveCriticalSection(&(flag->lock));
+ return;
+ }
+ break;
+ case 1:
+ break;
+ case 2:
+ EnterCriticalSection(&(flag->lock));
+ LeaveCriticalSection(&(flag->lock));
+ break;
+ }
+ }
+}
+#endif /* defined(_TTHREAD_WIN32_) */
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !WITH_C11THREADS */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h
new file mode 100644
index 000000000..6bc39fe09
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread.h
@@ -0,0 +1,503 @@
+/* -*- mode: c; tab-width: 2; indent-tabs-mode: nil; -*-
+Copyright (c) 2012 Marcus Geelnard
+Copyright (c) 2013-2014 Evan Nemerson
+
+This software is provided 'as-is', without any express or implied
+warranty. In no event will the authors be held liable for any damages
+arising from the use of this software.
+
+Permission is granted to anyone to use this software for any purpose,
+including commercial applications, and to alter it and redistribute it
+freely, subject to the following restrictions:
+
+ 1. The origin of this software must not be misrepresented; you must not
+ claim that you wrote the original software. If you use this software
+ in a product, an acknowledgment in the product documentation would be
+ appreciated but is not required.
+
+ 2. Altered source versions must be plainly marked as such, and must not be
+ misrepresented as being the original software.
+
+ 3. This notice may not be removed or altered from any source
+ distribution.
+*/
+
+#ifndef _TINYCTHREAD_H_
+#define _TINYCTHREAD_H_
+
+/* Include config to know if C11 threads are available */
+#ifdef _WIN32
+#include "win32_config.h"
+#else
+#include "../config.h"
+#endif
+
+#if WITH_C11THREADS
+#include <threads.h>
+#else
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+* @file
+* @mainpage TinyCThread API Reference
+*
+* @section intro_sec Introduction
+* TinyCThread is a minimal, portable implementation of basic threading
+* classes for C.
+*
+* They closely mimic the functionality and naming of the C11 standard, and
+* should be easily replaceable with the corresponding standard variants.
+*
+* @section port_sec Portability
+* The Win32 variant uses the native Win32 API for implementing the thread
+* classes, while for other systems, the POSIX threads API (pthread) is used.
+*
+* @section misc_sec Miscellaneous
+* The following special keywords are available: #_Thread_local.
+*
+* For more detailed information, browse the different sections of this
+* documentation. A good place to start is:
+* tinycthread.h.
+*/
+
+/* Which platform are we on? */
+#if !defined(_TTHREAD_PLATFORM_DEFINED_)
+ #if defined(_WIN32) || defined(__WIN32__) || defined(__WINDOWS__)
+ #define _TTHREAD_WIN32_
+ #else
+ #define _TTHREAD_POSIX_
+ #endif
+ #define _TTHREAD_PLATFORM_DEFINED_
+#endif
+
+/* Activate some POSIX functionality (e.g. clock_gettime and recursive mutexes) */
+#if defined(_TTHREAD_POSIX_)
+ #undef _FEATURES_H
+ #if !defined(_GNU_SOURCE)
+ #define _GNU_SOURCE
+ #endif
+ #if !defined(_POSIX_C_SOURCE) || ((_POSIX_C_SOURCE - 0) < 199309L)
+ #undef _POSIX_C_SOURCE
+ #define _POSIX_C_SOURCE 199309L
+ #endif
+ #if !defined(_XOPEN_SOURCE) || ((_XOPEN_SOURCE - 0) < 500)
+ #undef _XOPEN_SOURCE
+ #define _XOPEN_SOURCE 500
+ #endif
+#endif
+
+/* Generic includes */
+#include <time.h>
+
+/* Platform specific includes */
+#if defined(_TTHREAD_POSIX_)
+ #ifndef _GNU_SOURCE
+ #define _GNU_SOURCE /* for pthread_setname_np() */
+ #endif
+ #include <pthread.h>
+#elif defined(_TTHREAD_WIN32_)
+ #ifndef WIN32_LEAN_AND_MEAN
+ #define WIN32_LEAN_AND_MEAN
+ #define __UNDEF_LEAN_AND_MEAN
+ #endif
+ #include <windows.h>
+ #ifdef __UNDEF_LEAN_AND_MEAN
+ #undef WIN32_LEAN_AND_MEAN
+ #undef __UNDEF_LEAN_AND_MEAN
+ #endif
+#endif
+
+/* Compiler-specific information */
+#if defined(__STDC_VERSION__) && __STDC_VERSION__ >= 201112L
+ #define TTHREAD_NORETURN _Noreturn
+#elif defined(__GNUC__)
+ #define TTHREAD_NORETURN __attribute__((__noreturn__))
+#else
+ #define TTHREAD_NORETURN
+#endif
+
+/* If TIME_UTC is missing, provide it and provide a wrapper for
+ timespec_get. */
+#ifndef TIME_UTC
+#define TIME_UTC 1
+#define _TTHREAD_EMULATE_TIMESPEC_GET_
+
+#if defined(_TTHREAD_WIN32_)
+struct _tthread_timespec {
+ time_t tv_sec;
+ long tv_nsec;
+};
+#define timespec _tthread_timespec
+#endif
+
+int _tthread_timespec_get(struct timespec *ts, int base);
+#define timespec_get _tthread_timespec_get
+#endif
+
+/** TinyCThread version (major number). */
+#define TINYCTHREAD_VERSION_MAJOR 1
+/** TinyCThread version (minor number). */
+#define TINYCTHREAD_VERSION_MINOR 2
+/** TinyCThread version (full version). */
+#define TINYCTHREAD_VERSION (TINYCTHREAD_VERSION_MAJOR * 100 + TINYCTHREAD_VERSION_MINOR)
+
+/**
+* @def _Thread_local
+* Thread local storage keyword.
+* A variable that is declared with the @c _Thread_local keyword makes the
+* value of the variable local to each thread (known as thread-local storage,
+* or TLS). Example usage:
+* @code
+* // This variable is local to each thread.
+* _Thread_local int variable;
+* @endcode
+* @note The @c _Thread_local keyword is a macro that maps to the corresponding
+* compiler directive (e.g. @c __declspec(thread)).
+* @note This directive is currently not supported on Mac OS X (it will give
+* a compiler error), since compile-time TLS is not supported in the Mac OS X
+* executable format. Also, some older versions of MinGW (before GCC 4.x) do
+* not support this directive, nor does the Tiny C Compiler.
+* @hideinitializer
+*/
+
+#if !(defined(__STDC_VERSION__) && (__STDC_VERSION__ >= 201102L)) && !defined(_Thread_local)
+ #if defined(__GNUC__) || defined(__INTEL_COMPILER) || defined(__SUNPRO_CC) || defined(__IBMCPP__)
+ #define _Thread_local __thread
+ #else
+ #define _Thread_local __declspec(thread)
+ #endif
+#elif defined(__GNUC__) && defined(__GNUC_MINOR__) && (((__GNUC__ << 8) | __GNUC_MINOR__) < ((4 << 8) | 9))
+ #define _Thread_local __thread
+#endif
+
+/* Macros */
+#if defined(_TTHREAD_WIN32_)
+#define TSS_DTOR_ITERATIONS (4)
+#else
+#define TSS_DTOR_ITERATIONS PTHREAD_DESTRUCTOR_ITERATIONS
+#endif
+
+/* Function return values */
+/* Note: The values are unspecified by C11 but match glibc and musl to make
+ * sure they're compatible for the case where librdkafka was built with
+ * tinycthreads but the runtime libc also provides C11 threads.
+ * The *BSD values are notably different. */
+#define thrd_success 0 /**< The requested operation succeeded */
+#define thrd_busy 1 /**< The requested operation failed because a tesource requested by a test and return function is already in use */
+#define thrd_error 2 /**< The requested operation failed */
+#define thrd_nomem 3 /**< The requested operation failed because it was unable to allocate memory */
+#define thrd_timedout 4 /**< The time specified in the call was reached without acquiring the requested resource */
+
+/* Mutex types */
+#define mtx_plain 0
+#define mtx_recursive 1
+#define mtx_timed 2
+
+/* Mutex */
+#if defined(_TTHREAD_WIN32_)
+typedef struct {
+ union {
+ CRITICAL_SECTION cs; /* Critical section handle (used for non-timed mutexes) */
+ HANDLE mut; /* Mutex handle (used for timed mutex) */
+ } mHandle; /* Mutex handle */
+ int mAlreadyLocked; /* TRUE if the mutex is already locked */
+ int mRecursive; /* TRUE if the mutex is recursive */
+ int mTimed; /* TRUE if the mutex is timed */
+} mtx_t;
+#else
+typedef pthread_mutex_t mtx_t;
+#endif
+
+/** Create a mutex object.
+* @param mtx A mutex object.
+* @param type Bit-mask that must have one of the following six values:
+* @li @c mtx_plain for a simple non-recursive mutex
+* @li @c mtx_timed for a non-recursive mutex that supports timeout
+* @li @c mtx_plain | @c mtx_recursive (same as @c mtx_plain, but recursive)
+* @li @c mtx_timed | @c mtx_recursive (same as @c mtx_timed, but recursive)
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int mtx_init(mtx_t *mtx, int type);
+
+/** Release any resources used by the given mutex.
+* @param mtx A mutex object.
+*/
+void mtx_destroy(mtx_t *mtx);
+
+/** Lock the given mutex.
+* Blocks until the given mutex can be locked. If the mutex is non-recursive, and
+* the calling thread already has a lock on the mutex, this call will block
+* forever.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int mtx_lock(mtx_t *mtx);
+
+/** NOT YET IMPLEMENTED.
+*/
+int mtx_timedlock(mtx_t *mtx, const struct timespec *ts);
+
+/** Try to lock the given mutex.
+* The specified mutex shall support either test and return or timeout. If the
+* mutex is already locked, the function returns without blocking.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_busy if the resource
+* requested is already in use, or @ref thrd_error if the request could not be
+* honored.
+*/
+int mtx_trylock(mtx_t *mtx);
+
+/** Unlock the given mutex.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int mtx_unlock(mtx_t *mtx);
+
+/* Condition variable */
+#if defined(_TTHREAD_WIN32_)
+typedef struct {
+ HANDLE mEvents[2]; /* Signal and broadcast event HANDLEs. */
+ unsigned int mWaitersCount; /* Count of the number of waiters. */
+ CRITICAL_SECTION mWaitersCountLock; /* Serialize access to mWaitersCount. */
+} cnd_t;
+#else
+typedef pthread_cond_t cnd_t;
+#endif
+
+/** Create a condition variable object.
+* @param cond A condition variable object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_init(cnd_t *cond);
+
+/** Release any resources used by the given condition variable.
+* @param cond A condition variable object.
+*/
+void cnd_destroy(cnd_t *cond);
+
+/** Signal a condition variable.
+* Unblocks one of the threads that are blocked on the given condition variable
+* at the time of the call. If no threads are blocked on the condition variable
+* at the time of the call, the function does nothing and return success.
+* @param cond A condition variable object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_signal(cnd_t *cond);
+
+/** Broadcast a condition variable.
+* Unblocks all of the threads that are blocked on the given condition variable
+* at the time of the call. If no threads are blocked on the condition variable
+* at the time of the call, the function does nothing and return success.
+* @param cond A condition variable object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_broadcast(cnd_t *cond);
+
+/** Wait for a condition variable to become signaled.
+* The function atomically unlocks the given mutex and endeavors to block until
+* the given condition variable is signaled by a call to cnd_signal or to
+* cnd_broadcast. When the calling thread becomes unblocked it locks the mutex
+* before it returns.
+* @param cond A condition variable object.
+* @param mtx A mutex object.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int cnd_wait(cnd_t *cond, mtx_t *mtx);
+
+/** Wait for a condition variable to become signaled.
+* The function atomically unlocks the given mutex and endeavors to block until
+* the given condition variable is signaled by a call to cnd_signal or to
+* cnd_broadcast, or until after the specified time. When the calling thread
+* becomes unblocked it locks the mutex before it returns.
+* @param cond A condition variable object.
+* @param mtx A mutex object.
+* @param xt A point in time at which the request will time out (absolute time).
+* @return @ref thrd_success upon success, or @ref thrd_timeout if the time
+* specified in the call was reached without acquiring the requested resource, or
+* @ref thrd_error if the request could not be honored.
+*/
+int cnd_timedwait(cnd_t *cond, mtx_t *mtx, const struct timespec *ts);
+
+#if defined(_TTHREAD_WIN32_)
+int _cnd_timedwait_win32(cnd_t *cond, mtx_t *mtx, DWORD timeout);
+#endif
+
+/* Thread */
+#if defined(_TTHREAD_WIN32_)
+typedef HANDLE thrd_t;
+#else
+typedef pthread_t thrd_t;
+#endif
+
+/** Thread start function.
+* Any thread that is started with the @ref thrd_create() function must be
+* started through a function of this type.
+* @param arg The thread argument (the @c arg argument of the corresponding
+* @ref thrd_create() call).
+* @return The thread return value, which can be obtained by another thread
+* by using the @ref thrd_join() function.
+*/
+typedef int (*thrd_start_t)(void *arg);
+
+/** Create a new thread.
+* @param thr Identifier of the newly created thread.
+* @param func A function pointer to the function that will be executed in
+* the new thread.
+* @param arg An argument to the thread function.
+* @return @ref thrd_success on success, or @ref thrd_nomem if no memory could
+* be allocated for the thread requested, or @ref thrd_error if the request
+* could not be honored.
+* @note A thread’s identifier may be reused for a different thread once the
+* original thread has exited and either been detached or joined to another
+* thread.
+*/
+int thrd_create(thrd_t *thr, thrd_start_t func, void *arg);
+
+/** Identify the calling thread.
+* @return The identifier of the calling thread.
+*/
+thrd_t thrd_current(void);
+
+
+/** Dispose of any resources allocated to the thread when that thread exits.
+ * @return thrd_success, or thrd_error on error
+*/
+int thrd_detach(thrd_t thr);
+
+/** Compare two thread identifiers.
+* The function determines if two thread identifiers refer to the same thread.
+* @return Zero if the two thread identifiers refer to different threads.
+* Otherwise a nonzero value is returned.
+*/
+int thrd_equal(thrd_t thr0, thrd_t thr1);
+
+/** Terminate execution of the calling thread.
+* @param res Result code of the calling thread.
+*/
+TTHREAD_NORETURN void thrd_exit(int res);
+
+/** Wait for a thread to terminate.
+* The function joins the given thread with the current thread by blocking
+* until the other thread has terminated.
+* @param thr The thread to join with.
+* @param res If this pointer is not NULL, the function will store the result
+* code of the given thread in the integer pointed to by @c res.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int thrd_join(thrd_t thr, int *res);
+
+/** Put the calling thread to sleep.
+* Suspend execution of the calling thread.
+* @param duration Interval to sleep for
+* @param remaining If non-NULL, this parameter will hold the remaining
+* time until time_point upon return. This will
+* typically be zero, but if the thread was woken up
+* by a signal that is not ignored before duration was
+* reached @c remaining will hold a positive time.
+* @return 0 (zero) on successful sleep, -1 if an interrupt occurred,
+* or a negative value if the operation fails.
+*/
+int thrd_sleep(const struct timespec *duration, struct timespec *remaining);
+
+/** Yield execution to another thread.
+* Permit other threads to run, even if the current thread would ordinarily
+* continue to run.
+*/
+void thrd_yield(void);
+
+/* Thread local storage */
+#if defined(_TTHREAD_WIN32_)
+typedef DWORD tss_t;
+#else
+typedef pthread_key_t tss_t;
+#endif
+
+/** Destructor function for a thread-specific storage.
+* @param val The value of the destructed thread-specific storage.
+*/
+typedef void (*tss_dtor_t)(void *val);
+
+/** Create a thread-specific storage.
+* @param key The unique key identifier that will be set if the function is
+* successful.
+* @param dtor Destructor function. This can be NULL.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+* @note On Windows, the @c dtor will definitely be called when
+* appropriate for threads created with @ref thrd_create. It will be
+* called for other threads in most cases, the possible exception being
+* for DLLs loaded with LoadLibraryEx. In order to be certain, you
+* should use @ref thrd_create whenever possible.
+*/
+int tss_create(tss_t *key, tss_dtor_t dtor);
+
+/** Delete a thread-specific storage.
+* The function releases any resources used by the given thread-specific
+* storage.
+* @param key The key that shall be deleted.
+*/
+void tss_delete(tss_t key);
+
+/** Get the value for a thread-specific storage.
+* @param key The thread-specific storage identifier.
+* @return The value for the current thread held in the given thread-specific
+* storage.
+*/
+void *tss_get(tss_t key);
+
+/** Set the value for a thread-specific storage.
+* @param key The thread-specific storage identifier.
+* @param val The value of the thread-specific storage to set for the current
+* thread.
+* @return @ref thrd_success on success, or @ref thrd_error if the request could
+* not be honored.
+*/
+int tss_set(tss_t key, void *val);
+
+#if defined(_TTHREAD_WIN32_)
+ typedef struct {
+ LONG volatile status;
+ CRITICAL_SECTION lock;
+ } once_flag;
+ #define ONCE_FLAG_INIT {0,}
+#else
+ #define once_flag pthread_once_t
+ #define ONCE_FLAG_INIT PTHREAD_ONCE_INIT
+#endif
+
+/** Invoke a callback exactly once
+ * @param flag Flag used to ensure the callback is invoked exactly
+ * once.
+ * @param func Callback to invoke.
+ */
+#if defined(_TTHREAD_WIN32_)
+ void call_once(once_flag *flag, void (*func)(void));
+#else
+ #define call_once(flag,func) pthread_once(flag,func)
+#endif
+
+
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !WITH_C11THREADS */
+
+/**
+ * @brief librdkafka extensions to c11threads
+ */
+#include "tinycthread_extra.h"
+
+#endif /* _TINYTHREAD_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c
new file mode 100644
index 000000000..58049448c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.c
@@ -0,0 +1,175 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @brief Extra methods added to tinycthread/c11threads
+ */
+
+#include "rd.h"
+#include "rdtime.h"
+#include "tinycthread.h"
+
+
+int thrd_setname(const char *name) {
+#if HAVE_PTHREAD_SETNAME_GNU
+ if (!pthread_setname_np(pthread_self(), name))
+ return thrd_success;
+#elif HAVE_PTHREAD_SETNAME_DARWIN
+ pthread_setname_np(name);
+ return thrd_success;
+#elif HAVE_PTHREAD_SETNAME_FREEBSD
+ pthread_set_name_np(pthread_self(), name);
+ return thrd_success;
+#endif
+ return thrd_error;
+}
+
+int thrd_is_current(thrd_t thr) {
+#if defined(_TTHREAD_WIN32_)
+ return GetThreadId(thr) == GetCurrentThreadId();
+#else
+ return (pthread_self() == thr);
+#endif
+}
+
+
+#ifdef _WIN32
+void cnd_wait_enter(cnd_t *cond) {
+ /* Increment number of waiters */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ ++cond->mWaitersCount;
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+}
+
+void cnd_wait_exit(cnd_t *cond) {
+ /* Increment number of waiters */
+ EnterCriticalSection(&cond->mWaitersCountLock);
+ --cond->mWaitersCount;
+ LeaveCriticalSection(&cond->mWaitersCountLock);
+}
+#endif
+
+
+
+int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms) {
+ if (timeout_ms == -1 /* INFINITE*/)
+ return cnd_wait(cnd, mtx);
+#if defined(_TTHREAD_WIN32_)
+ return _cnd_timedwait_win32(cnd, mtx, (DWORD)timeout_ms);
+#else
+ struct timeval tv;
+ struct timespec ts;
+
+ gettimeofday(&tv, NULL);
+ ts.tv_sec = tv.tv_sec;
+ ts.tv_nsec = tv.tv_usec * 1000;
+
+ ts.tv_sec += timeout_ms / 1000;
+ ts.tv_nsec += (timeout_ms % 1000) * 1000000;
+
+ if (ts.tv_nsec >= 1000000000) {
+ ts.tv_sec++;
+ ts.tv_nsec -= 1000000000;
+ }
+
+ return cnd_timedwait(cnd, mtx, &ts);
+#endif
+}
+
+int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp) {
+ rd_ts_t pre = rd_clock();
+ int r;
+ r = cnd_timedwait_ms(cnd, mtx, *timeout_msp);
+ if (r != thrd_timedout) {
+ /* Subtract spent time */
+ (*timeout_msp) -= (int)(rd_clock() - pre) / 1000;
+ }
+ return r;
+}
+
+int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec) {
+ if (tspec->tv_sec == RD_POLL_INFINITE)
+ return cnd_wait(cnd, mtx);
+ else if (tspec->tv_sec == RD_POLL_NOWAIT)
+ return thrd_timedout;
+
+ return cnd_timedwait(cnd, mtx, tspec);
+}
+
+
+/**
+ * @name Read-write locks
+ * @{
+ */
+#ifndef _WIN32
+int rwlock_init(rwlock_t *rwl) {
+ int r = pthread_rwlock_init(rwl, NULL);
+ if (r) {
+ errno = r;
+ return thrd_error;
+ }
+ return thrd_success;
+}
+
+int rwlock_destroy(rwlock_t *rwl) {
+ int r = pthread_rwlock_destroy(rwl);
+ if (r) {
+ errno = r;
+ return thrd_error;
+ }
+ return thrd_success;
+}
+
+int rwlock_rdlock(rwlock_t *rwl) {
+ int r = pthread_rwlock_rdlock(rwl);
+ assert(r == 0);
+ return thrd_success;
+}
+
+int rwlock_wrlock(rwlock_t *rwl) {
+ int r = pthread_rwlock_wrlock(rwl);
+ assert(r == 0);
+ return thrd_success;
+}
+
+int rwlock_rdunlock(rwlock_t *rwl) {
+ int r = pthread_rwlock_unlock(rwl);
+ assert(r == 0);
+ return thrd_success;
+}
+
+int rwlock_wrunlock(rwlock_t *rwl) {
+ int r = pthread_rwlock_unlock(rwl);
+ assert(r == 0);
+ return thrd_success;
+}
+/**@}*/
+
+
+#endif /* !_MSC_VER */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h
new file mode 100644
index 000000000..e5f673173
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/tinycthread_extra.h
@@ -0,0 +1,208 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @brief Extra methods added to tinychtread/c11threads
+ */
+
+
+#ifndef _TINYCTHREAD_EXTRA_H_
+#define _TINYCTHREAD_EXTRA_H_
+
+
+#ifndef _WIN32
+#include <pthread.h> /* needed for rwlock_t */
+#endif
+
+
+/**
+ * @brief Set thread system name if platform supports it (pthreads)
+ * @return thrd_success or thrd_error
+ */
+int thrd_setname(const char *name);
+
+/**
+ * @brief Checks if passed thread is the current thread.
+ * @return non-zero if same thread, else 0.
+ */
+int thrd_is_current(thrd_t thr);
+
+
+#ifdef _WIN32
+/**
+ * @brief Mark the current thread as waiting on cnd.
+ *
+ * @remark This is to be used when the thread uses its own
+ * WaitForMultipleEvents() call rather than cnd_timedwait().
+ *
+ * @sa cnd_wait_exit()
+ */
+void cnd_wait_enter(cnd_t *cond);
+
+/**
+ * @brief Mark the current thread as no longer waiting on cnd.
+ */
+void cnd_wait_exit(cnd_t *cond);
+#endif
+
+
+/**
+ * @brief Same as cnd_timedwait() but takes a relative timeout in milliseconds.
+ */
+int cnd_timedwait_ms(cnd_t *cnd, mtx_t *mtx, int timeout_ms);
+
+/**
+ * @brief Same as cnd_timedwait_ms() but updates the remaining time.
+ */
+int cnd_timedwait_msp(cnd_t *cnd, mtx_t *mtx, int *timeout_msp);
+
+/**
+ * @brief Same as cnd_timedwait() but honours
+ * RD_POLL_INFINITE (uses cnd_wait()),
+ * and RD_POLL_NOWAIT (return thrd_timedout immediately).
+ *
+ * @remark Set up \p tspec with rd_timeout_init_timespec().
+ */
+int cnd_timedwait_abs(cnd_t *cnd, mtx_t *mtx, const struct timespec *tspec);
+
+
+
+/**
+ * @brief Read-write locks
+ */
+
+#if defined(_TTHREAD_WIN32_)
+typedef struct rwlock_t {
+ SRWLOCK lock;
+ LONG rcnt;
+ LONG wcnt;
+} rwlock_t;
+#define rwlock_init(rwl) \
+ do { \
+ (rwl)->rcnt = (rwl)->wcnt = 0; \
+ InitializeSRWLock(&(rwl)->lock); \
+ } while (0)
+#define rwlock_destroy(rwl)
+#define rwlock_rdlock(rwl) \
+ do { \
+ if (0) \
+ printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
+ AcquireSRWLockShared(&(rwl)->lock); \
+ InterlockedIncrement(&(rwl)->rcnt); \
+ } while (0)
+#define rwlock_wrlock(rwl) \
+ do { \
+ if (0) \
+ printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
+ AcquireSRWLockExclusive(&(rwl)->lock); \
+ InterlockedIncrement(&(rwl)->wcnt); \
+ } while (0)
+#define rwlock_rdunlock(rwl) \
+ do { \
+ if (0) \
+ printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \
+ ReleaseSRWLockShared(&(rwl)->lock); \
+ InterlockedDecrement(&(rwl)->rcnt); \
+ } while (0)
+#define rwlock_wrunlock(rwl) \
+ do { \
+ if (0) \
+ printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \
+ ReleaseSRWLockExclusive(&(rwl)->lock); \
+ InterlockedDecrement(&(rwl)->wcnt); \
+ } while (0)
+
+#define rwlock_rdlock_d(rwl) \
+ do { \
+ if (1) \
+ printf("Thr %i: at %i: RDLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
+ AcquireSRWLockShared(&(rwl)->lock); \
+ InterlockedIncrement(&(rwl)->rcnt); \
+ } while (0)
+#define rwlock_wrlock_d(rwl) \
+ do { \
+ if (1) \
+ printf("Thr %i: at %i: WRLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt >= 0 && (rwl)->wcnt >= 0); \
+ AcquireSRWLockExclusive(&(rwl)->lock); \
+ InterlockedIncrement(&(rwl)->wcnt); \
+ } while (0)
+#define rwlock_rdunlock_d(rwl) \
+ do { \
+ if (1) \
+ printf("Thr %i: at %i: RDUNLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt > 0 && (rwl)->wcnt >= 0); \
+ ReleaseSRWLockShared(&(rwl)->lock); \
+ InterlockedDecrement(&(rwl)->rcnt); \
+ } while (0)
+#define rwlock_wrunlock_d(rwl) \
+ do { \
+ if (1) \
+ printf("Thr %i: at %i: RWUNLOCK %p %s (%i, %i)\n", \
+ GetCurrentThreadId(), __LINE__, rwl, \
+ __FUNCTION__, (rwl)->rcnt, (rwl)->wcnt); \
+ assert((rwl)->rcnt >= 0 && (rwl)->wcnt > 0); \
+ ReleaseSRWLockExclusive(&(rwl)->lock); \
+ InterlockedDecrement(&(rwl)->wcnt); \
+ } while (0)
+
+
+#else
+typedef pthread_rwlock_t rwlock_t;
+
+int rwlock_init(rwlock_t *rwl);
+int rwlock_destroy(rwlock_t *rwl);
+int rwlock_rdlock(rwlock_t *rwl);
+int rwlock_wrlock(rwlock_t *rwl);
+int rwlock_rdunlock(rwlock_t *rwl);
+int rwlock_wrunlock(rwlock_t *rwl);
+
+#endif
+
+
+#endif /* _TINYCTHREAD_EXTRA_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h b/fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h
new file mode 100644
index 000000000..dd61b2c92
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/src/win32_config.h
@@ -0,0 +1,58 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Hand-crafted config header file for Win32 builds.
+ */
+#ifndef _RD_WIN32_CONFIG_H_
+#define _RD_WIN32_CONFIG_H_
+
+#ifndef WITHOUT_WIN32_CONFIG
+#define WITH_SSL 1
+#define WITH_ZLIB 1
+#define WITH_SNAPPY 1
+#define WITH_ZSTD 1
+#define WITH_CURL 1
+#define WITH_OAUTHBEARER_OIDC 1
+/* zstd is linked dynamically on Windows, but the dynamic library provides
+ * the experimental/advanced API, just as the static builds on *nix */
+#define WITH_ZSTD_STATIC 1
+#define WITH_SASL_SCRAM 1
+#define WITH_SASL_OAUTHBEARER 1
+#define ENABLE_DEVEL 0
+#define WITH_PLUGINS 1
+#define WITH_HDRHISTOGRAM 1
+#endif
+#define SOLIB_EXT ".dll"
+
+/* Notice: Keep up to date */
+#define BUILT_WITH \
+ "SSL ZLIB SNAPPY ZSTD CURL SASL_SCRAM SASL_OAUTHBEARER PLUGINS " \
+ "HDRHISTOGRAM"
+
+#endif /* _RD_WIN32_CONFIG_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore
new file mode 100644
index 000000000..6d6f9ff96
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/.gitignore
@@ -0,0 +1,15 @@
+*.test
+test.conf
+test-runner
+core
+vgcore.*
+core.*
+stats_*.json
+test_report_*.json
+test_suite_*.json
+.\#*
+*.pyc
+# sqlite3 db:
+rdktests
+*.log
+*.png
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c
new file mode 100644
index 000000000..e0a02fb62
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0000-unittests.c
@@ -0,0 +1,72 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+
+/**
+ * @brief Initialize a client with debugging to have it print its
+ * build options, OpenSSL version, etc.
+ * Useful for manually verifying build options in CI logs.
+ */
+static void show_build_opts(void) {
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_t *rk;
+ char errstr[512];
+
+ TEST_SAY("builtin.features = %s\n",
+ test_conf_get(conf, "builtin.features"));
+
+ test_conf_set(conf, "debug", "generic,security");
+
+ /* Try with SSL first, which may or may not be a build option. */
+ if (rd_kafka_conf_set(conf, "security.protocol", "SSL", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_SAY("Failed to security.protocol=SSL: %s\n", errstr);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(rk, "Failed to create producer: %s", errstr);
+
+ rd_kafka_destroy(rk);
+}
+
+
+/**
+ * @brief Call librdkafka built-in unit-tests
+ */
+int main_0000_unittests(int argc, char **argv) {
+ int fails = 0;
+
+ show_build_opts();
+
+ fails += rd_kafka_unittest();
+ if (fails)
+ TEST_FAIL("%d unit-test(s) failed", fails);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c
new file mode 100644
index 000000000..c2a4eb57a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0001-multiobj.c
@@ -0,0 +1,98 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests multiple rd_kafka_t object creations and destructions.
+ * Issue #20
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+int main_0001_multiobj(int argc, char **argv) {
+ int partition = RD_KAFKA_PARTITION_UA; /* random */
+ int i;
+ int NUM_ITER = test_quick ? 2 : 5;
+ const char *topic = NULL;
+
+ TEST_SAY("Creating and destroying %i kafka instances\n", NUM_ITER);
+
+ /* Create, use and destroy NUM_ITER kafka instances. */
+ for (i = 0; i < NUM_ITER; i++) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128];
+ test_timing_t t_full, t_destroy;
+
+ test_conf_init(&conf, &topic_conf, 30);
+
+ if (!topic)
+ topic = test_mk_topic_name("0001", 0);
+
+ TIMING_START(&t_full, "full create-produce-destroy cycle");
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL(
+ "Failed to create topic for "
+ "rdkafka instance #%i: %s\n",
+ i, rd_kafka_err2str(rd_kafka_last_error()));
+
+ rd_snprintf(msg, sizeof(msg),
+ "%s test message for iteration #%i", argv[0], i);
+
+ /* Produce a message */
+ rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
+ strlen(msg), NULL, 0, NULL);
+
+ /* Wait for it to be sent (and possibly acked) */
+ rd_kafka_flush(rk, -1);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TIMING_START(&t_destroy, "rd_kafka_destroy()");
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_destroy);
+
+ TIMING_STOP(&t_full);
+
+ /* Topic is created on the first iteration. */
+ if (i > 0)
+ TIMING_ASSERT(&t_full, 0, 999);
+ }
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c
new file mode 100644
index 000000000..087e37ae6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0002-unkpart.c
@@ -0,0 +1,244 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests that producing to unknown partitions fails.
+ * Issue #39
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgs_wait = 0; /* bitmask */
+
+/**
+ * Delivery report callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+ int msgid = *(int *)msg_opaque;
+
+ free(msg_opaque);
+
+ if (!(msgs_wait & (1 << msgid)))
+ TEST_FAIL(
+ "Unwanted delivery report for message #%i "
+ "(waiting for 0x%x)\n",
+ msgid, msgs_wait);
+
+ TEST_SAY("Delivery report for message #%i: %s\n", msgid,
+ rd_kafka_err2str(err));
+
+ msgs_wait &= ~(1 << msgid);
+
+ if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ TEST_FAIL("Message #%i failed with unexpected error %s\n",
+ msgid, rd_kafka_err2str(err));
+}
+
+
+static void do_test_unkpart(void) {
+ int partition = 99; /* non-existent */
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128];
+ int msgcnt = 10;
+ int i;
+ int fails = 0;
+ const struct rd_kafka_metadata *metadata;
+
+ TEST_SAY(_C_BLU "%s\n" _C_CLR, __FUNCTION__);
+
+ test_conf_init(&conf, &topic_conf, 10);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0002", 0), topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ /* Request metadata so that we know the cluster is up before producing
+ * messages, otherwise erroneous partitions will not fail immediately.*/
+ if ((r = rd_kafka_metadata(rk, 0, rkt, &metadata,
+ tmout_multip(15000))) !=
+ RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Failed to acquire metadata: %s\n",
+ rd_kafka_err2str(r));
+
+ rd_kafka_metadata_destroy(metadata);
+
+ /* Produce a message */
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s test message #%i",
+ __FUNCTION__, i);
+ r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
+ strlen(msg), NULL, 0, msgidp);
+ if (r == -1) {
+ if (rd_kafka_last_error() ==
+ RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ TEST_SAY(
+ "Failed to produce message #%i: "
+ "unknown partition: good!\n",
+ i);
+ else
+ TEST_FAIL(
+ "Failed to produce message #%i: %s\n", i,
+ rd_kafka_err2str(rd_kafka_last_error()));
+ free(msgidp);
+ } else {
+ if (i > 5) {
+ fails++;
+ TEST_SAY(
+ "Message #%i produced: "
+ "should've failed\n",
+ i);
+ }
+ msgs_wait |= (1 << i);
+ }
+
+ /* After half the messages: forcibly refresh metadata
+ * to update the actual partition count:
+ * this will make subsequent produce() calls fail immediately.
+ */
+ if (i == 5) {
+ r = test_get_partition_count(
+ rk, rd_kafka_topic_name(rkt), 15000);
+ TEST_ASSERT(r != -1, "failed to get partition count");
+ }
+ }
+
+ /* Wait for messages to time out */
+ rd_kafka_flush(rk, -1);
+
+ if (msgs_wait != 0)
+ TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
+
+
+ if (fails > 0)
+ TEST_FAIL("See previous error(s)\n");
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ TEST_SAY(_C_GRN "%s PASSED\n" _C_CLR, __FUNCTION__);
+}
+
+
+/**
+ * @brief Test message timeouts for messages produced to unknown partitions
+ * when there is no broker connection, which makes the messages end
+ * up in the UA partition.
+ * This verifies the UA partitions are properly scanned for timeouts.
+ *
+ * This test is a copy of confluent-kafka-python's
+ * test_Producer.test_basic_api() test that surfaced this issue.
+ */
+static void do_test_unkpart_timeout_nobroker(void) {
+ const char *topic = test_mk_topic_name("0002_unkpart_tmout", 0);
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_resp_err_t err;
+ int remains = 0;
+
+ TEST_SAY(_C_BLU "%s\n" _C_CLR, __FUNCTION__);
+
+ test_conf_init(NULL, NULL, 10);
+
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "debug", "topic");
+ test_conf_set(conf, "message.timeout.ms", "10");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
+ NULL, 0, NULL, 0, &remains);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+ remains++;
+
+ err = rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, RD_KAFKA_MSG_F_COPY,
+ "hi", 2, "hello", 5, &remains);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+ remains++;
+
+ err = rd_kafka_produce(rkt, 9 /* explicit, but unknown, partition */,
+ RD_KAFKA_MSG_F_COPY, "three", 5, NULL, 0,
+ &remains);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+ remains++;
+
+ rd_kafka_poll(rk, 1);
+ rd_kafka_poll(rk, 2);
+ TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));
+ rd_kafka_flush(rk, -1);
+
+ TEST_ASSERT(rd_kafka_outq_len(rk) == 0,
+ "expected no more messages in queue, got %d",
+ rd_kafka_outq_len(rk));
+
+ TEST_ASSERT(remains == 0, "expected no messages remaining, got %d",
+ remains);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY(_C_GRN "%s PASSED\n" _C_CLR, __FUNCTION__);
+}
+
+
+int main_0002_unkpart(int argc, char **argv) {
+ do_test_unkpart();
+ do_test_unkpart_timeout_nobroker();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c
new file mode 100644
index 000000000..97b511125
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0003-msgmaxsize.c
@@ -0,0 +1,173 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests "message.bytes.max"
+ * Issue #24
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgs_wait = 0; /* bitmask */
+
+/**
+ * Delivery report callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+ int msgid = *(int *)msg_opaque;
+
+ free(msg_opaque);
+
+ if (err)
+ TEST_FAIL("Unexpected delivery error for message #%i: %s\n",
+ msgid, rd_kafka_err2str(err));
+
+ if (!(msgs_wait & (1 << msgid)))
+ TEST_FAIL(
+ "Unwanted delivery report for message #%i "
+ "(waiting for 0x%x)\n",
+ msgid, msgs_wait);
+
+ TEST_SAY("Delivery report for message #%i: %s\n", msgid,
+ rd_kafka_err2str(err));
+
+ msgs_wait &= ~(1 << msgid);
+}
+
+
+int main_0003_msgmaxsize(int argc, char **argv) {
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+
+ static const struct {
+ ssize_t keylen;
+ ssize_t len;
+ rd_kafka_resp_err_t exp_err;
+ } sizes[] = {/* message.max.bytes is including framing */
+ {-1, 5000, RD_KAFKA_RESP_ERR_NO_ERROR},
+ {0, 99900, RD_KAFKA_RESP_ERR_NO_ERROR},
+ {0, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
+ {100000, 0, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
+ {1000, 100000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
+ {0, 101000, RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE},
+ {99000, -1, RD_KAFKA_RESP_ERR_NO_ERROR},
+ {-1, -1, RD_KAFKA_RESP_ERR__END}};
+ int i;
+
+ test_conf_init(&conf, &topic_conf, 10);
+
+ /* Set a small maximum message size. */
+ if (rd_kafka_conf_set(conf, "message.max.bytes", "100000", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s\n", errstr);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0003", 0), topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ for (i = 0; sizes[i].exp_err != RD_KAFKA_RESP_ERR__END; i++) {
+ void *value =
+ sizes[i].len != -1 ? calloc(1, sizes[i].len) : NULL;
+ size_t len = sizes[i].len != -1 ? sizes[i].len : 0;
+ void *key =
+ sizes[i].keylen != -1 ? calloc(1, sizes[i].keylen) : NULL;
+ size_t keylen = sizes[i].keylen != -1 ? sizes[i].keylen : 0;
+ int *msgidp = malloc(sizeof(*msgidp));
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ *msgidp = i;
+
+ r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, value,
+ len, key, keylen, msgidp);
+ if (r == -1)
+ err = rd_kafka_last_error();
+
+ if (err != sizes[i].exp_err) {
+ TEST_FAIL("Msg #%d produce(len=%" PRIdsz
+ ", keylen=%" PRIdsz "): got %s, expected %s",
+ i, sizes[i].len, sizes[i].keylen,
+ rd_kafka_err2name(err),
+ rd_kafka_err2name(sizes[i].exp_err));
+ } else {
+ TEST_SAY(
+ "Msg #%d produce() returned expected %s "
+ "for value size %" PRIdsz " and key size %" PRIdsz
+ "\n",
+ i, rd_kafka_err2name(err), sizes[i].len,
+ sizes[i].keylen);
+
+ if (!sizes[i].exp_err)
+ msgs_wait |= (1 << i);
+ else
+ free(msgidp);
+ }
+
+ if (value)
+ free(value);
+ if (key)
+ free(key);
+ }
+
+ /* Wait for messages to be delivered. */
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 50);
+
+ if (msgs_wait != 0)
+ TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c
new file mode 100644
index 000000000..51401e17d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0004-conf.c
@@ -0,0 +1,865 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests various config related things
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+}
+
+static void
+error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+}
+
+
+static int32_t partitioner(const rd_kafka_topic_t *rkt,
+ const void *keydata,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ return 0;
+}
+
+
+static void
+conf_verify(int line, const char **arr, size_t cnt, const char **confs) {
+ int i, j;
+
+
+ for (i = 0; confs[i]; i += 2) {
+ for (j = 0; j < (int)cnt; j += 2) {
+ if (!strcmp(confs[i], arr[j])) {
+ if (strcmp(confs[i + 1], arr[j + 1]))
+ TEST_FAIL(
+ "%i: Property %s mismatch: "
+ "expected %s != retrieved %s",
+ line, confs[i], confs[i + 1],
+ arr[j + 1]);
+ }
+ if (j == (int)cnt)
+ TEST_FAIL(
+ "%i: "
+ "Property %s not found in config\n",
+ line, confs[i]);
+ }
+ }
+}
+
+
+static void conf_cmp(const char *desc,
+ const char **a,
+ size_t acnt,
+ const char **b,
+ size_t bcnt) {
+ int i;
+
+ if (acnt != bcnt)
+ TEST_FAIL("%s config compare: count %" PRIusz " != %" PRIusz
+ " mismatch",
+ desc, acnt, bcnt);
+
+ for (i = 0; i < (int)acnt; i += 2) {
+ if (strcmp(a[i], b[i]))
+ TEST_FAIL("%s conf mismatch: %s != %s", desc, a[i],
+ b[i]);
+ else if (strcmp(a[i + 1], b[i + 1])) {
+ /* The default_topic_conf will be auto-created
+ * when global->topic fallthru is used, so its
+ * value will not match here. */
+ if (!strcmp(a[i], "default_topic_conf"))
+ continue;
+ TEST_FAIL("%s conf value mismatch for %s: %s != %s",
+ desc, a[i], a[i + 1], b[i + 1]);
+ }
+ }
+}
+
+
+/**
+ * @brief Not called, just used for config
+ */
+static int on_new_call_cnt;
+static rd_kafka_resp_err_t my_on_new(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ TEST_SAY("%s: on_new() called\n", rd_kafka_name(rk));
+ on_new_call_cnt++;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief When rd_kafka_new() succeeds it takes ownership of the config object,
+ * but when it fails the config object remains in application custody.
+ * These tests makes sure that's the case (preferably run with valgrind)
+ */
+static void do_test_kafka_new_failures(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ char errstr[512];
+
+ conf = rd_kafka_conf_new();
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(rk, "kafka_new() failed: %s", errstr);
+ rd_kafka_destroy(rk);
+
+ /* Set an erroneous configuration value that is not checked
+ * by conf_set() but by rd_kafka_new() */
+ conf = rd_kafka_conf_new();
+ if (rd_kafka_conf_set(conf, "partition.assignment.strategy",
+ "range,thiswillfail", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s", errstr);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(!rk, "kafka_new() should have failed");
+
+ /* config object should still belong to us,
+ * correct the erroneous config and try again. */
+ if (rd_kafka_conf_set(conf, "partition.assignment.strategy", NULL,
+ errstr, sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s", errstr);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(rk, "kafka_new() failed: %s", errstr);
+ rd_kafka_destroy(rk);
+
+ /* set conflicting properties */
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "acks", "1");
+ test_conf_set(conf, "enable.idempotence", "true");
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(!rk, "kafka_new() should have failed");
+ rd_kafka_conf_destroy(conf);
+ TEST_SAY(_C_GRN "Ok: %s\n", errstr);
+}
+
+
+/**
+ * @brief Verify that INVALID properties (such as for Java SSL properties)
+ * work, as well as INTERNAL properties.
+ */
+static void do_test_special_invalid_conf(void) {
+ rd_kafka_conf_t *conf;
+ char errstr[512];
+ rd_kafka_conf_res_t res;
+
+ conf = rd_kafka_conf_new();
+
+ res = rd_kafka_conf_set(conf, "ssl.truststore.location", "abc", errstr,
+ sizeof(errstr));
+ /* Existing apps might not print the error string when conf_set
+ * returns UNKNOWN, only on INVALID, so make sure that is
+ * what is being returned. */
+ TEST_ASSERT(res == RD_KAFKA_CONF_INVALID,
+ "expected ssl.truststore.location to fail with INVALID, "
+ "not %d",
+ res);
+ /* Make sure there is a link to documentation */
+ TEST_ASSERT(strstr(errstr, "http"),
+ "expected ssl.truststore.location to provide link to "
+ "documentation, not \"%s\"",
+ errstr);
+ TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
+
+
+ res = rd_kafka_conf_set(conf, "sasl.jaas.config", "abc", errstr,
+ sizeof(errstr));
+ /* Existing apps might not print the error string when conf_set
+ * returns UNKNOWN, only on INVALID, so make sure that is
+ * what is being returned. */
+ TEST_ASSERT(res == RD_KAFKA_CONF_INVALID,
+ "expected sasl.jaas.config to fail with INVALID, "
+ "not %d",
+ res);
+ /* Make sure there is a link to documentation */
+ TEST_ASSERT(strstr(errstr, "http"),
+ "expected sasl.jaas.config to provide link to "
+ "documentation, not \"%s\"",
+ errstr);
+ TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
+
+
+ res = rd_kafka_conf_set(conf, "interceptors", "1", errstr,
+ sizeof(errstr));
+ TEST_ASSERT(res == RD_KAFKA_CONF_INVALID,
+ "expected interceptors to fail with INVALID, "
+ "not %d",
+ res);
+ TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
+
+ rd_kafka_conf_destroy(conf);
+}
+
+
+/**
+ * @brief Verify idempotence configuration constraints
+ */
+static void do_test_idempotence_conf(void) {
+ static const struct {
+ const char *prop;
+ const char *val;
+ rd_bool_t topic_conf;
+ rd_bool_t exp_rk_fail;
+ rd_bool_t exp_rkt_fail;
+ } check[] = {{"acks", "1", rd_true, rd_false, rd_true},
+ {"acks", "all", rd_true, rd_false, rd_false},
+ {"queuing.strategy", "lifo", rd_true, rd_false, rd_true},
+ {NULL}};
+ int i;
+
+ for (i = 0; check[i].prop; i++) {
+ int j;
+
+ for (j = 0; j < 1 + (check[i].topic_conf ? 1 : 0); j++) {
+ /* j = 0: set on global config
+ * j = 1: set on topic config */
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf = NULL;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ char errstr[512];
+
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "enable.idempotence", "true");
+
+ if (j == 0)
+ test_conf_set(conf, check[i].prop,
+ check[i].val);
+
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr));
+
+ if (!rk) {
+ /* default topic config (j=0) will fail. */
+ TEST_ASSERT(check[i].exp_rk_fail ||
+ (j == 0 &&
+ check[i].exp_rkt_fail &&
+ check[i].topic_conf),
+ "Did not expect config #%d.%d "
+ "to fail: %s",
+ i, j, errstr);
+
+ rd_kafka_conf_destroy(conf);
+ continue;
+
+ } else {
+ TEST_ASSERT(!check[i].exp_rk_fail,
+ "Expect config #%d.%d to fail", i,
+ j);
+ }
+
+ if (j == 1) {
+ tconf = rd_kafka_topic_conf_new();
+ test_topic_conf_set(tconf, check[i].prop,
+ check[i].val);
+ }
+
+ rkt = rd_kafka_topic_new(rk, "mytopic", tconf);
+ if (!rkt) {
+ TEST_ASSERT(
+ check[i].exp_rkt_fail,
+ "Did not expect topic config "
+ "#%d.%d to fail: %s",
+ i, j,
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+
+ } else {
+ TEST_ASSERT(!check[i].exp_rkt_fail,
+ "Expect topic config "
+ "#%d.%d to fail",
+ i, j);
+ rd_kafka_topic_destroy(rkt);
+ }
+
+ rd_kafka_destroy(rk);
+ }
+ }
+}
+
+
+/**
+ * @brief Verify that configuration properties can be extract
+ * from the instance config object.
+ */
+static void do_test_instance_conf(void) {
+ rd_kafka_conf_t *conf;
+ const rd_kafka_conf_t *iconf;
+ rd_kafka_t *rk;
+ rd_kafka_conf_res_t res;
+ static const char *props[] = {
+ "linger.ms", "123", "group.id", "test1",
+ "enable.auto.commit", "false", NULL,
+ };
+ const char **p;
+
+ conf = rd_kafka_conf_new();
+
+ for (p = props; *p; p += 2) {
+ res = rd_kafka_conf_set(conf, *p, *(p + 1), NULL, 0);
+ TEST_ASSERT(res == RD_KAFKA_CONF_OK, "failed to set %s", *p);
+ }
+
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, NULL, 0);
+ TEST_ASSERT(rk, "failed to create consumer");
+
+ iconf = rd_kafka_conf(rk);
+ TEST_ASSERT(conf, "failed to get instance config");
+
+ for (p = props; *p; p += 2) {
+ char dest[512];
+ size_t destsz = sizeof(dest);
+
+ res = rd_kafka_conf_get(iconf, *p, dest, &destsz);
+ TEST_ASSERT(res == RD_KAFKA_CONF_OK,
+ "failed to get %s: result %d", *p, res);
+
+ TEST_SAY("Instance config %s=%s\n", *p, dest);
+ TEST_ASSERT(!strcmp(*(p + 1), dest), "Expected %s=%s, not %s",
+ *p, *(p + 1), dest);
+ }
+
+ rd_kafka_destroy(rk);
+}
+
+
+/**
+ * @brief Verify that setting and retrieving the default topic config works.
+ */
+static void do_test_default_topic_conf(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ const char *val, *exp_val;
+
+ SUB_TEST_QUICK();
+
+ conf = rd_kafka_conf_new();
+
+ /* Set topic-level property, this will create the default topic config*/
+ exp_val = "1234";
+ test_conf_set(conf, "message.timeout.ms", exp_val);
+
+ /* Get the default topic config */
+ tconf = rd_kafka_conf_get_default_topic_conf(conf);
+ TEST_ASSERT(tconf != NULL, "");
+
+ /* Get value from global config by fall-thru */
+ val = test_conf_get(conf, "message.timeout.ms");
+ TEST_ASSERT(val && !strcmp(val, exp_val),
+ "Expected (conf) message.timeout.ms=%s, not %s", exp_val,
+ val ? val : "(NULL)");
+
+ /* Get value from default topic config */
+ val = test_topic_conf_get(tconf, "message.timeout.ms");
+ TEST_ASSERT(val && !strcmp(val, exp_val),
+ "Expected (topic conf) message.timeout.ms=%s, not %s",
+ exp_val, val ? val : "(NULL)");
+
+ /* Now change the value, should be reflected in both. */
+ exp_val = "4444";
+ test_topic_conf_set(tconf, "message.timeout.ms", exp_val);
+
+ /* Get value from global config by fall-thru */
+ val = test_conf_get(conf, "message.timeout.ms");
+ TEST_ASSERT(val && !strcmp(val, exp_val),
+ "Expected (conf) message.timeout.ms=%s, not %s", exp_val,
+ val ? val : "(NULL)");
+
+ /* Get value from default topic config */
+ val = test_topic_conf_get(tconf, "message.timeout.ms");
+ TEST_ASSERT(val && !strcmp(val, exp_val),
+ "Expected (topic conf) message.timeout.ms=%s, not %s",
+ exp_val, val ? val : "(NULL)");
+
+
+ rd_kafka_conf_destroy(conf);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify behaviour of checking that message.timeout.ms fits within
+ * configured linger.ms. By larry-cdn77.
+ */
+static void do_message_timeout_linger_checks(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_t *rk;
+ char errstr[512];
+ int i;
+ const char values[7][3][40] = {
+ {"-", "-", "default and L and M"},
+ {"100", "-", "set L such that L<M"},
+ {"-", "300000", "set M such that L<M"},
+ {"100", "300000", "set L and M such that L<M"},
+ {"500000", "-", "!set L such that L>=M"},
+ {"-", "10", "set M such that L>=M"},
+ {"500000", "10", "!set L and M such that L>=M"}};
+
+ SUB_TEST_QUICK();
+
+ for (i = 0; i < 7; i++) {
+ const char *linger = values[i][0];
+ const char *msgtimeout = values[i][1];
+ const char *desc = values[i][2];
+ rd_bool_t expect_fail = *desc == '!';
+
+ if (expect_fail)
+ desc++; /* Push past the '!' */
+
+ conf = rd_kafka_conf_new();
+ tconf = rd_kafka_topic_conf_new();
+
+ if (*linger != '-')
+ test_conf_set(conf, "linger.ms", linger);
+
+ if (*msgtimeout != '-')
+ test_topic_conf_set(tconf, "message.timeout.ms",
+ msgtimeout);
+
+ rd_kafka_conf_set_default_topic_conf(conf, tconf);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr));
+
+ if (!rk)
+ TEST_SAY("#%d \"%s\": rd_kafka_new() failed: %s\n", i,
+ desc, errstr);
+ else
+ TEST_SAY("#%d \"%s\": rd_kafka_new() succeeded\n", i,
+ desc);
+
+ if (!expect_fail) {
+ TEST_ASSERT(rk != NULL,
+ "Expected success: "
+ "message timeout linger: %s: %s",
+ desc, errstr);
+
+ rd_kafka_destroy(rk);
+
+ } else {
+ TEST_ASSERT(rk == NULL,
+ "Expected failure: "
+ "message timeout linger: %s",
+ desc);
+
+ rd_kafka_conf_destroy(conf);
+ }
+ }
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0004_conf(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *ignore_conf, *conf, *conf2;
+ rd_kafka_topic_conf_t *ignore_topic_conf, *tconf, *tconf2;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ const char **arr_orig, **arr_dup;
+ size_t cnt_orig, cnt_dup;
+ int i;
+ const char *topic;
+ static const char *gconfs[] = {
+ "message.max.bytes",
+ "12345", /* int property */
+ "client.id",
+ "my id", /* string property */
+ "debug",
+ "topic,metadata,interceptor", /* S2F property */
+ "topic.blacklist",
+ "__.*", /* #778 */
+ "auto.offset.reset",
+ "earliest", /* Global->Topic fallthru */
+#if WITH_ZLIB
+ "compression.codec",
+ "gzip", /* S2I property */
+#endif
+#if defined(_WIN32)
+ "ssl.ca.certificate.stores",
+ "Intermediate ,, Root ,",
+#endif
+ NULL
+ };
+ static const char *tconfs[] = {"request.required.acks",
+ "-1", /* int */
+ "auto.commit.enable",
+ "false", /* bool */
+ "auto.offset.reset",
+ "error", /* S2I */
+ "offset.store.path",
+ "my/path", /* string */
+ NULL};
+
+ test_conf_init(&ignore_conf, &ignore_topic_conf, 10);
+ rd_kafka_conf_destroy(ignore_conf);
+ rd_kafka_topic_conf_destroy(ignore_topic_conf);
+
+ topic = test_mk_topic_name("0004", 0);
+
+ /* Set up a global config object */
+ conf = rd_kafka_conf_new();
+
+ for (i = 0; gconfs[i]; i += 2) {
+ if (rd_kafka_conf_set(conf, gconfs[i], gconfs[i + 1], errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s\n", errstr);
+ }
+
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+ rd_kafka_conf_set_error_cb(conf, error_cb);
+ /* interceptor configs are not exposed as strings or in dumps
+ * so the dump verification step will not cover them, but valgrind
+ * will help track down memory leaks/use-after-free etc. */
+ err = rd_kafka_conf_interceptor_add_on_new(conf, "testic", my_on_new,
+ NULL);
+ TEST_ASSERT(!err, "add_on_new() failed: %s", rd_kafka_err2str(err));
+
+ /* Set up a topic config object */
+ tconf = rd_kafka_topic_conf_new();
+
+ rd_kafka_topic_conf_set_partitioner_cb(tconf, partitioner);
+ rd_kafka_topic_conf_set_opaque(tconf, (void *)0xbeef);
+
+ for (i = 0; tconfs[i]; i += 2) {
+ if (rd_kafka_topic_conf_set(tconf, tconfs[i], tconfs[i + 1],
+ errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s\n", errstr);
+ }
+
+
+ /* Verify global config */
+ arr_orig = rd_kafka_conf_dump(conf, &cnt_orig);
+ conf_verify(__LINE__, arr_orig, cnt_orig, gconfs);
+
+ /* Verify copied global config */
+ conf2 = rd_kafka_conf_dup(conf);
+ arr_dup = rd_kafka_conf_dump(conf2, &cnt_dup);
+ conf_verify(__LINE__, arr_dup, cnt_dup, gconfs);
+ conf_cmp("global", arr_orig, cnt_orig, arr_dup, cnt_dup);
+ rd_kafka_conf_dump_free(arr_orig, cnt_orig);
+ rd_kafka_conf_dump_free(arr_dup, cnt_dup);
+
+ /* Verify topic config */
+ arr_orig = rd_kafka_topic_conf_dump(tconf, &cnt_orig);
+ conf_verify(__LINE__, arr_orig, cnt_orig, tconfs);
+
+ /* Verify copied topic config */
+ tconf2 = rd_kafka_topic_conf_dup(tconf);
+ arr_dup = rd_kafka_topic_conf_dump(tconf2, &cnt_dup);
+ conf_verify(__LINE__, arr_dup, cnt_dup, tconfs);
+ conf_cmp("topic", arr_orig, cnt_orig, arr_dup, cnt_dup);
+ rd_kafka_conf_dump_free(arr_orig, cnt_orig);
+ rd_kafka_conf_dump_free(arr_dup, cnt_dup);
+
+
+ /*
+ * Create kafka instances using original and copied confs
+ */
+
+ /* original */
+ TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d",
+ on_new_call_cnt);
+ on_new_call_cnt = 0;
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ TEST_ASSERT(on_new_call_cnt == 1, "expected 1 on_new call, not %d",
+ on_new_call_cnt);
+
+ rkt = rd_kafka_topic_new(rk, topic, tconf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ /* copied */
+ on_new_call_cnt = 0; /* interceptors are not copied. */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf2);
+ TEST_ASSERT(on_new_call_cnt == 0, "expected 0 on_new call, not %d",
+ on_new_call_cnt);
+
+ rkt = rd_kafka_topic_new(rk, topic, tconf2);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+
+ /* Incremental S2F property.
+ * NOTE: The order of fields returned in get() is hardcoded here. */
+ {
+ static const char *s2fs[] = {"generic,broker,queue,cgrp",
+ "generic,broker,queue,cgrp",
+
+ "-broker,+queue,topic",
+ "generic,topic,queue,cgrp",
+
+ "-all,security,-fetch,+metadata",
+ "metadata,security",
+
+ NULL};
+
+ TEST_SAY("Incremental S2F tests\n");
+ conf = rd_kafka_conf_new();
+
+ for (i = 0; s2fs[i]; i += 2) {
+ const char *val;
+
+ TEST_SAY(" Set: %s\n", s2fs[i]);
+ test_conf_set(conf, "debug", s2fs[i]);
+ val = test_conf_get(conf, "debug");
+ TEST_SAY(" Now: %s\n", val);
+
+ if (strcmp(val, s2fs[i + 1]))
+ TEST_FAIL_LATER(
+ "\n"
+ "Expected: %s\n"
+ " Got: %s",
+ s2fs[i + 1], val);
+ }
+ rd_kafka_conf_destroy(conf);
+ }
+
+ {
+ rd_kafka_conf_res_t res;
+
+ TEST_SAY("Error reporting for S2F properties\n");
+ conf = rd_kafka_conf_new();
+
+ res =
+ rd_kafka_conf_set(conf, "debug", "cgrp,invalid-value,topic",
+ errstr, sizeof(errstr));
+
+ TEST_ASSERT(
+ res == RD_KAFKA_CONF_INVALID,
+ "expected 'debug=invalid-value' to fail with INVALID, "
+ "not %d",
+ res);
+ TEST_ASSERT(strstr(errstr, "invalid-value"),
+ "expected invalid value to be mentioned in error, "
+ "not \"%s\"",
+ errstr);
+ TEST_ASSERT(!strstr(errstr, "cgrp") && !strstr(errstr, "topic"),
+ "expected only invalid value to be mentioned, "
+ "not \"%s\"",
+ errstr);
+ TEST_SAY(_C_GRN "Ok: %s\n" _C_CLR, errstr);
+
+ rd_kafka_conf_destroy(conf);
+ }
+
+#if WITH_SSL
+ {
+ TEST_SAY(
+ "Verifying that ssl.ca.location is not "
+ "overwritten (#3566)\n");
+
+ conf = rd_kafka_conf_new();
+
+ test_conf_set(conf, "security.protocol", "SSL");
+ test_conf_set(conf, "ssl.ca.location", "/?/does/!/not/exist!");
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr));
+ TEST_ASSERT(!rk,
+ "Expected rd_kafka_new() to fail with "
+ "invalid ssl.ca.location");
+ TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr);
+ rd_kafka_conf_destroy(conf);
+ }
+
+#ifdef _WIN32
+ {
+ FILE *fp;
+ TEST_SAY(
+ "Verifying that OpenSSL_AppLink "
+ "is not needed (#3554)\n");
+
+ /* Create dummy file so the file open works,
+ * but parsing fails. */
+ fp = fopen("_tmp_0004", "w");
+ TEST_ASSERT(fp != NULL, "Failed to create dummy file: %s",
+ rd_strerror(errno));
+ if (fwrite("?", 1, 1, fp) != 1)
+ TEST_FAIL("Failed to write to dummy file _tmp_0004: %s",
+ rd_strerror(errno));
+ fclose(fp);
+
+ conf = rd_kafka_conf_new();
+
+ test_conf_set(conf, "security.protocol", "SSL");
+ test_conf_set(conf, "ssl.keystore.location", "_tmp_0004");
+ test_conf_set(conf, "ssl.keystore.password", "x");
+
+ /* Prior to the fix OpenSSL will assert with a message like
+ * this: "OPENSSL_Uplink(00007FF9C0229D30,08): no
+ * OPENSSL_Applink"
+ * and the program will exit with error code 1. */
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr,
+ sizeof(errstr));
+ _unlink("tmp_0004");
+
+ TEST_ASSERT(!rk,
+ "Expected rd_kafka_new() to fail due to "
+ "dummy ssl.keystore.location");
+ TEST_ASSERT(strstr(errstr, "ssl.keystore.location") != NULL,
+ "Expected rd_kafka_new() to fail with "
+ "dummy ssl.keystore.location, not: %s",
+ errstr);
+
+ TEST_SAY("rd_kafka_new() failed as expected: %s\n", errstr);
+ }
+#endif /* _WIN32 */
+
+#endif /* WITH_SSL */
+
+ /* Canonical int values, aliases, s2i-verified strings, doubles */
+ {
+ static const struct {
+ const char *prop;
+ const char *val;
+ const char *exp;
+ int is_global;
+ } props[] = {
+ {"request.required.acks", "0", "0"},
+ {"request.required.acks", "-1", "-1"},
+ {"request.required.acks", "1", "1"},
+ {"acks", "3", "3"}, /* alias test */
+ {"request.required.acks", "393", "393"},
+ {"request.required.acks", "bad", NULL},
+ {"request.required.acks", "all", "-1"},
+ {"request.required.acks", "all", "-1", 1 /*fallthru*/},
+ {"acks", "0", "0"}, /* alias test */
+#if WITH_SASL
+ {"sasl.mechanisms", "GSSAPI", "GSSAPI", 1},
+ {"sasl.mechanisms", "PLAIN", "PLAIN", 1},
+ {"sasl.mechanisms", "GSSAPI,PLAIN", NULL, 1},
+ {"sasl.mechanisms", "", NULL, 1},
+#endif
+ {"linger.ms", "12555.3", "12555.3", 1},
+ {"linger.ms", "1500.000", "1500", 1},
+ {"linger.ms", "0.0001", "0.0001", 1},
+ {NULL}
+ };
+
+ TEST_SAY("Canonical tests\n");
+ tconf = rd_kafka_topic_conf_new();
+ conf = rd_kafka_conf_new();
+
+ for (i = 0; props[i].prop; i++) {
+ char dest[64];
+ size_t destsz;
+ rd_kafka_conf_res_t res;
+
+ TEST_SAY(" Set: %s=%s expect %s (%s)\n", props[i].prop,
+ props[i].val, props[i].exp,
+ props[i].is_global ? "global" : "topic");
+
+
+ /* Set value */
+ if (props[i].is_global)
+ res = rd_kafka_conf_set(conf, props[i].prop,
+ props[i].val, errstr,
+ sizeof(errstr));
+ else
+ res = rd_kafka_topic_conf_set(
+ tconf, props[i].prop, props[i].val, errstr,
+ sizeof(errstr));
+ if ((res == RD_KAFKA_CONF_OK ? 1 : 0) !=
+ (props[i].exp ? 1 : 0))
+ TEST_FAIL("Expected %s, got %s",
+ props[i].exp ? "success" : "failure",
+ (res == RD_KAFKA_CONF_OK
+ ? "OK"
+ : (res == RD_KAFKA_CONF_INVALID
+ ? "INVALID"
+ : "UNKNOWN")));
+
+ if (!props[i].exp)
+ continue;
+
+ /* Get value and compare to expected result */
+ destsz = sizeof(dest);
+ if (props[i].is_global)
+ res = rd_kafka_conf_get(conf, props[i].prop,
+ dest, &destsz);
+ else
+ res = rd_kafka_topic_conf_get(
+ tconf, props[i].prop, dest, &destsz);
+ TEST_ASSERT(res == RD_KAFKA_CONF_OK,
+ ".._conf_get(%s) returned %d",
+ props[i].prop, res);
+
+ TEST_ASSERT(!strcmp(props[i].exp, dest),
+ "Expected \"%s\", got \"%s\"", props[i].exp,
+ dest);
+ }
+ rd_kafka_topic_conf_destroy(tconf);
+ rd_kafka_conf_destroy(conf);
+ }
+
+ do_test_kafka_new_failures();
+
+ do_test_special_invalid_conf();
+
+ do_test_idempotence_conf();
+
+ do_test_instance_conf();
+
+ do_test_default_topic_conf();
+
+ do_message_timeout_linger_checks();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c
new file mode 100644
index 000000000..722cef3b0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0005-order.c
@@ -0,0 +1,133 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests messages are produced in order.
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgid_next = 0;
+static int fails = 0;
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+ int msgid = *(int *)msg_opaque;
+
+ free(msg_opaque);
+
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(err));
+
+ if (msgid != msgid_next) {
+ fails++;
+ TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next);
+ return;
+ }
+
+ msgid_next = msgid + 1;
+}
+
+
+int main_0005_order(int argc, char **argv) {
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128];
+ int msgcnt = test_quick ? 500 : 50000;
+ int i;
+ test_timing_t t_produce, t_delivery;
+
+ test_conf_init(&conf, &topic_conf, 10);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Produce messages */
+ TIMING_START(&t_produce, "PRODUCE");
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
+ i);
+ r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
+ strlen(msg), NULL, 0, msgidp);
+ if (r == -1)
+ TEST_FAIL("Failed to produce message #%i: %s\n", i,
+ rd_strerror(errno));
+ }
+ TIMING_STOP(&t_produce);
+ TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt);
+
+ /* Wait for messages to be delivered */
+ TIMING_START(&t_delivery, "DELIVERY");
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 50);
+ TIMING_STOP(&t_delivery);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (msgid_next != msgcnt)
+ TEST_FAIL("Still waiting for messages: next %i != end %i\n",
+ msgid_next, msgcnt);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c
new file mode 100644
index 000000000..8a25f6a1d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0006-symbols.c
@@ -0,0 +1,163 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Makes sure all symbols in the public API actually resolves during linking.
+ * This test needs to be updated manually when new symbols are added.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+int main_0006_symbols(int argc, char **argv) {
+
+ if (argc < 0 /* always false */) {
+ rd_kafka_version();
+ rd_kafka_version_str();
+ rd_kafka_get_debug_contexts();
+ rd_kafka_get_err_descs(NULL, NULL);
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR);
+ rd_kafka_err2name(RD_KAFKA_RESP_ERR_NO_ERROR);
+ rd_kafka_last_error();
+ rd_kafka_conf_new();
+ rd_kafka_conf_destroy(NULL);
+ rd_kafka_conf_dup(NULL);
+ rd_kafka_conf_set(NULL, NULL, NULL, NULL, 0);
+ rd_kafka_conf_set_dr_cb(NULL, NULL);
+ rd_kafka_conf_set_dr_msg_cb(NULL, NULL);
+ rd_kafka_conf_set_error_cb(NULL, NULL);
+ rd_kafka_conf_set_stats_cb(NULL, NULL);
+ rd_kafka_conf_set_log_cb(NULL, NULL);
+ rd_kafka_conf_set_socket_cb(NULL, NULL);
+ rd_kafka_conf_set_rebalance_cb(NULL, NULL);
+ rd_kafka_conf_set_offset_commit_cb(NULL, NULL);
+ rd_kafka_conf_set_throttle_cb(NULL, NULL);
+ rd_kafka_conf_set_default_topic_conf(NULL, NULL);
+ rd_kafka_conf_get(NULL, NULL, NULL, NULL);
+#ifndef _WIN32
+ rd_kafka_conf_set_open_cb(NULL, NULL);
+#endif
+ rd_kafka_conf_set_opaque(NULL, NULL);
+ rd_kafka_opaque(NULL);
+ rd_kafka_conf_dump(NULL, NULL);
+ rd_kafka_topic_conf_dump(NULL, NULL);
+ rd_kafka_conf_dump_free(NULL, 0);
+ rd_kafka_conf_properties_show(NULL);
+ rd_kafka_topic_conf_new();
+ rd_kafka_topic_conf_dup(NULL);
+ rd_kafka_topic_conf_destroy(NULL);
+ rd_kafka_topic_conf_set(NULL, NULL, NULL, NULL, 0);
+ rd_kafka_topic_conf_set_opaque(NULL, NULL);
+ rd_kafka_topic_conf_get(NULL, NULL, NULL, NULL);
+ rd_kafka_topic_conf_set_partitioner_cb(NULL, NULL);
+ rd_kafka_topic_partition_available(NULL, 0);
+ rd_kafka_topic_opaque(NULL);
+ rd_kafka_msg_partitioner_random(NULL, NULL, 0, 0, NULL, NULL);
+ rd_kafka_msg_partitioner_consistent(NULL, NULL, 0, 0, NULL,
+ NULL);
+ rd_kafka_msg_partitioner_consistent_random(NULL, NULL, 0, 0,
+ NULL, NULL);
+ rd_kafka_new(0, NULL, NULL, 0);
+ rd_kafka_destroy(NULL);
+ rd_kafka_flush(NULL, 0);
+ rd_kafka_name(NULL);
+ rd_kafka_memberid(NULL);
+ rd_kafka_topic_new(NULL, NULL, NULL);
+ rd_kafka_topic_destroy(NULL);
+ rd_kafka_topic_name(NULL);
+ rd_kafka_message_destroy(NULL);
+ rd_kafka_message_errstr(NULL);
+ rd_kafka_message_timestamp(NULL, NULL);
+ rd_kafka_consume_start(NULL, 0, 0);
+ rd_kafka_consume_stop(NULL, 0);
+ rd_kafka_consume(NULL, 0, 0);
+ rd_kafka_consume_batch(NULL, 0, 0, NULL, 0);
+ rd_kafka_consume_callback(NULL, 0, 0, NULL, NULL);
+ rd_kafka_offset_store(NULL, 0, 0);
+ rd_kafka_produce(NULL, 0, 0, NULL, 0, NULL, 0, NULL);
+ rd_kafka_produce_batch(NULL, 0, 0, NULL, 0);
+ rd_kafka_poll(NULL, 0);
+ rd_kafka_brokers_add(NULL, NULL);
+ /* DEPRECATED: rd_kafka_set_logger(NULL, NULL); */
+ rd_kafka_set_log_level(NULL, 0);
+ rd_kafka_log_print(NULL, 0, NULL, NULL);
+#ifndef _WIN32
+ rd_kafka_log_syslog(NULL, 0, NULL, NULL);
+#endif
+ rd_kafka_outq_len(NULL);
+ rd_kafka_dump(NULL, NULL);
+ rd_kafka_thread_cnt();
+ rd_kafka_wait_destroyed(0);
+ rd_kafka_metadata(NULL, 0, NULL, NULL, 0);
+ rd_kafka_metadata_destroy(NULL);
+ rd_kafka_queue_get_partition(NULL, NULL, 0);
+ rd_kafka_queue_destroy(NULL);
+ rd_kafka_consume_start_queue(NULL, 0, 0, NULL);
+ rd_kafka_consume_queue(NULL, 0);
+ rd_kafka_consume_batch_queue(NULL, 0, NULL, 0);
+ rd_kafka_consume_callback_queue(NULL, 0, NULL, NULL);
+ rd_kafka_seek(NULL, 0, 0, 0);
+ rd_kafka_yield(NULL);
+ rd_kafka_mem_free(NULL, NULL);
+ rd_kafka_list_groups(NULL, NULL, NULL, 0);
+ rd_kafka_group_list_destroy(NULL);
+
+ /* KafkaConsumer API */
+ rd_kafka_subscribe(NULL, NULL);
+ rd_kafka_unsubscribe(NULL);
+ rd_kafka_subscription(NULL, NULL);
+ rd_kafka_consumer_poll(NULL, 0);
+ rd_kafka_consumer_close(NULL);
+ rd_kafka_assign(NULL, NULL);
+ rd_kafka_assignment(NULL, NULL);
+ rd_kafka_commit(NULL, NULL, 0);
+ rd_kafka_commit_message(NULL, NULL, 0);
+ rd_kafka_committed(NULL, NULL, 0);
+ rd_kafka_position(NULL, NULL);
+
+ /* TopicPartition */
+ rd_kafka_topic_partition_list_new(0);
+ rd_kafka_topic_partition_list_destroy(NULL);
+ rd_kafka_topic_partition_list_add(NULL, NULL, 0);
+ rd_kafka_topic_partition_list_add_range(NULL, NULL, 0, 0);
+ rd_kafka_topic_partition_list_del(NULL, NULL, 0);
+ rd_kafka_topic_partition_list_del_by_idx(NULL, 0);
+ rd_kafka_topic_partition_list_copy(NULL);
+ rd_kafka_topic_partition_list_set_offset(NULL, NULL, 0, 0);
+ rd_kafka_topic_partition_list_find(NULL, NULL, 0);
+ rd_kafka_query_watermark_offsets(NULL, NULL, 0, NULL, NULL, 0);
+ rd_kafka_get_watermark_offsets(NULL, NULL, 0, NULL, NULL);
+ }
+
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c
new file mode 100644
index 000000000..cf196d60c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0007-autotopic.c
@@ -0,0 +1,136 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Auto create topics
+ *
+ * NOTE! This test requires auto.create.topics.enable=true to be
+ * configured on the broker!
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgs_wait = 0; /* bitmask */
+
+/**
+ * Delivery report callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+ int msgid = *(int *)msg_opaque;
+
+ free(msg_opaque);
+
+ if (!(msgs_wait & (1 << msgid)))
+ TEST_FAIL(
+ "Unwanted delivery report for message #%i "
+ "(waiting for 0x%x)\n",
+ msgid, msgs_wait);
+
+ TEST_SAY("Delivery report for message #%i: %s\n", msgid,
+ rd_kafka_err2str(err));
+
+ msgs_wait &= ~(1 << msgid);
+
+ if (err)
+ TEST_FAIL("Message #%i failed with unexpected error %s\n",
+ msgid, rd_kafka_err2str(err));
+}
+
+
+int main_0007_autotopic(int argc, char **argv) {
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128];
+ int msgcnt = 10;
+ int i;
+
+ /* Generate unique topic name */
+ test_conf_init(&conf, &topic_conf, 10);
+
+ TEST_SAY(
+ "\033[33mNOTE! This test requires "
+ "auto.create.topics.enable=true to be configured on "
+ "the broker!\033[0m\n");
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0007_autotopic", 1),
+ topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Produce a message */
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
+ i);
+ r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
+ strlen(msg), NULL, 0, msgidp);
+ if (r == -1)
+ TEST_FAIL("Failed to produce message #%i: %s\n", i,
+ rd_strerror(errno));
+ msgs_wait |= (1 << i);
+ }
+
+ /* Wait for messages to time out */
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 50);
+
+ if (msgs_wait != 0)
+ TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c
new file mode 100644
index 000000000..d52081b75
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0008-reqacks.c
@@ -0,0 +1,179 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests request.required.acks (issue #75)
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgid_next = 0;
+static int fails = 0;
+static rd_kafka_msg_status_t exp_status;
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ int msgid = *(int *)rkmessage->_private;
+ rd_kafka_msg_status_t status = rd_kafka_message_status(rkmessage);
+
+ free(rkmessage->_private);
+
+ if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s (status %d)\n",
+ rd_kafka_err2str(rkmessage->err), status);
+
+ if (msgid != msgid_next) {
+ fails++;
+ TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next);
+ return;
+ }
+
+ TEST_ASSERT(status == exp_status,
+ "For msgid #%d: expected status %d, got %d", msgid,
+ exp_status, status);
+
+ msgid_next = msgid + 1;
+}
+
+
+int main_0008_reqacks(int argc, char **argv) {
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ char msg[128];
+ int msgcnt = test_quick ? 20 : 100;
+ int i;
+ int reqacks;
+ int idbase = 0;
+ const char *topic = NULL;
+
+ TEST_SAY(
+ "\033[33mNOTE! This test requires at "
+ "least 3 brokers!\033[0m\n");
+
+ TEST_SAY(
+ "\033[33mNOTE! This test requires "
+ "default.replication.factor=3 to be configured on "
+ "all brokers!\033[0m\n");
+
+ /* Try different request.required.acks settings (issue #75) */
+ for (reqacks = -1; reqacks <= 1; reqacks++) {
+ char tmp[10];
+
+ test_conf_init(&conf, &topic_conf, 10);
+
+ if (reqacks != -1)
+ test_conf_set(conf, "enable.idempotence", "false");
+
+ if (!topic)
+ topic = test_mk_topic_name("0008", 0);
+
+ rd_snprintf(tmp, sizeof(tmp), "%i", reqacks);
+
+ if (rd_kafka_topic_conf_set(topic_conf, "request.required.acks",
+ tmp, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s", errstr);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ if (reqacks == 0)
+ exp_status = RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+ else
+ exp_status = RD_KAFKA_MSG_STATUS_PERSISTED;
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY(
+ "Created kafka instance %s with required acks %d, "
+ "expecting status %d\n",
+ rd_kafka_name(rk), reqacks, exp_status);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_strerror(errno));
+
+ /* Produce messages */
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = idbase + i;
+ rd_snprintf(msg, sizeof(msg),
+ "%s test message #%i (acks=%i)", argv[0],
+ *msgidp, reqacks);
+ r = rd_kafka_produce(rkt, partition,
+ RD_KAFKA_MSG_F_COPY, msg,
+ strlen(msg), NULL, 0, msgidp);
+ if (r == -1)
+ TEST_FAIL("Failed to produce message #%i: %s\n",
+ *msgidp, rd_strerror(errno));
+ }
+
+ TEST_SAY("Produced %i messages, waiting for deliveries\n",
+ msgcnt);
+
+ /* Wait for messages to time out */
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 50);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (msgid_next != idbase + msgcnt)
+ TEST_FAIL(
+ "Still waiting for messages: "
+ "next %i != end %i\n",
+ msgid_next, msgcnt);
+ idbase += i;
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+ }
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c
new file mode 100644
index 000000000..32590820e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0009-mock_cluster.c
@@ -0,0 +1,99 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * @name Verify that the builtin mock cluster works by producing to a topic
+ * and then consuming from it.
+ */
+
+
+
+int main_0009_mock_cluster(int argc, char **argv) {
+ const char *topic = test_mk_topic_name("0009_mock_cluster", 1);
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_t *p, *c;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ const int msgcnt = 100;
+ const char *bootstraps;
+ rd_kafka_topic_partition_list_t *parts;
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+
+ /* Producer */
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ p = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
+
+ /* Consumer */
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ c = test_create_consumer(topic, NULL, conf, NULL);
+
+ rkt = test_create_producer_topic(p, topic, NULL);
+
+ /* Produce */
+ test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, NULL, 0);
+
+ /* Produce tiny messages */
+ test_produce_msgs(p, rkt, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, "hello",
+ 5);
+
+ rd_kafka_topic_destroy(rkt);
+
+ /* Assign */
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, 0);
+ rd_kafka_topic_partition_list_add(parts, topic, 1);
+ rd_kafka_topic_partition_list_add(parts, topic, 2);
+ rd_kafka_topic_partition_list_add(parts, topic, 3);
+ test_consumer_assign("CONSUME", c, parts);
+ rd_kafka_topic_partition_list_destroy(parts);
+
+
+ /* Consume */
+ test_consumer_poll("CONSUME", c, 0, -1, 0, msgcnt, NULL);
+
+ rd_kafka_destroy(c);
+ rd_kafka_destroy(p);
+
+ test_mock_cluster_destroy(mcluster);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c
new file mode 100644
index 000000000..584d37bc6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0011-produce_batch.c
@@ -0,0 +1,576 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests messages are produced in order.
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgid_next = 0;
+static int fails = 0;
+static int msgcounter = 0;
+static int *dr_partition_count = NULL;
+static const int topic_num_partitions = 4;
+static int msg_partition_wo_flag = 2;
+static int msg_partition_wo_flag_success = 0;
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_single_partition_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+ int msgid = *(int *)msg_opaque;
+
+ free(msg_opaque);
+
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(err));
+
+ if (msgid != msgid_next) {
+ fails++;
+ TEST_FAIL("Delivered msg %i, expected %i\n", msgid, msgid_next);
+ return;
+ }
+
+ msgid_next = msgid + 1;
+ msgcounter--;
+}
+
+/* Produce a batch of messages to a single partition. */
+static void test_single_partition(void) {
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128];
+ int msgcnt = test_quick ? 100 : 100000;
+ int failcnt = 0;
+ int i;
+ rd_kafka_message_t *rkmessages;
+
+ msgid_next = 0;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_cb(conf, dr_single_partition_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("test_single_partition: Created kafka instance %s\n",
+ rd_kafka_name(rk));
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Create messages */
+ rkmessages = calloc(sizeof(*rkmessages), msgcnt);
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
+ __FILE__, __FUNCTION__, i);
+
+ rkmessages[i].payload = rd_strdup(msg);
+ rkmessages[i].len = strlen(msg);
+ rkmessages[i]._private = msgidp;
+ rkmessages[i].partition = 2; /* Will be ignored since
+ * RD_KAFKA_MSG_F_PARTITION
+ * is not supplied. */
+ }
+
+ r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
+ rkmessages, msgcnt);
+
+ /* Scan through messages to check for errors. */
+ for (i = 0; i < msgcnt; i++) {
+ if (rkmessages[i].err) {
+ failcnt++;
+ if (failcnt < 100)
+ TEST_SAY("Message #%i failed: %s\n", i,
+ rd_kafka_err2str(rkmessages[i].err));
+ }
+ }
+
+ /* All messages should've been produced. */
+ if (r < msgcnt) {
+ TEST_SAY(
+ "Not all messages were accepted "
+ "by produce_batch(): %i < %i\n",
+ r, msgcnt);
+ if (msgcnt - r != failcnt)
+ TEST_SAY(
+ "Discrepency between failed messages (%i) "
+ "and return value %i (%i - %i)\n",
+ failcnt, msgcnt - r, msgcnt, r);
+ TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
+ }
+
+ free(rkmessages);
+ TEST_SAY(
+ "Single partition: "
+ "Produced %i messages, waiting for deliveries\n",
+ r);
+
+ msgcounter = msgcnt;
+
+ /* Wait for messages to be delivered */
+ test_wait_delivery(rk, &msgcounter);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (msgid_next != msgcnt)
+ TEST_FAIL("Still waiting for messages: next %i != end %i\n",
+ msgid_next, msgcnt);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return;
+}
+
+
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_partitioner_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+ int msgid = *(int *)msg_opaque;
+
+ free(msg_opaque);
+
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(err));
+
+ if (msgcounter <= 0)
+ TEST_FAIL(
+ "Too many message dr_cb callback calls "
+ "(at msgid #%i)\n",
+ msgid);
+ msgcounter--;
+}
+
+/* Produce a batch of messages using random (default) partitioner */
+static void test_partitioner(void) {
+ int partition = RD_KAFKA_PARTITION_UA;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128];
+ int msgcnt = test_quick ? 100 : 100000;
+ int failcnt = 0;
+ int i;
+ rd_kafka_message_t *rkmessages;
+
+ test_conf_init(&conf, &topic_conf, 30);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_cb(conf, dr_partitioner_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("test_partitioner: Created kafka instance %s\n",
+ rd_kafka_name(rk));
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Create messages */
+ rkmessages = calloc(sizeof(*rkmessages), msgcnt);
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
+ __FILE__, __FUNCTION__, i);
+
+ rkmessages[i].payload = rd_strdup(msg);
+ rkmessages[i].len = strlen(msg);
+ rkmessages[i]._private = msgidp;
+ }
+
+ r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
+ rkmessages, msgcnt);
+
+ /* Scan through messages to check for errors. */
+ for (i = 0; i < msgcnt; i++) {
+ if (rkmessages[i].err) {
+ failcnt++;
+ if (failcnt < 100)
+ TEST_SAY("Message #%i failed: %s\n", i,
+ rd_kafka_err2str(rkmessages[i].err));
+ }
+ }
+
+ /* All messages should've been produced. */
+ if (r < msgcnt) {
+ TEST_SAY(
+ "Not all messages were accepted "
+ "by produce_batch(): %i < %i\n",
+ r, msgcnt);
+ if (msgcnt - r != failcnt)
+ TEST_SAY(
+ "Discrepency between failed messages (%i) "
+ "and return value %i (%i - %i)\n",
+ failcnt, msgcnt - r, msgcnt, r);
+ TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
+ }
+
+ free(rkmessages);
+ TEST_SAY(
+ "Partitioner: "
+ "Produced %i messages, waiting for deliveries\n",
+ r);
+
+ msgcounter = msgcnt;
+ /* Wait for messages to be delivered */
+ test_wait_delivery(rk, &msgcounter);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (msgcounter != 0)
+ TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter,
+ msgcnt);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return;
+}
+
+static void dr_per_message_partition_cb(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+
+ free(rkmessage->_private);
+
+ if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+
+ if (msgcounter <= 0)
+ TEST_FAIL(
+ "Too many message dr_cb callback calls "
+ "(at msg offset #%" PRId64 ")\n",
+ rkmessage->offset);
+
+ TEST_ASSERT(rkmessage->partition < topic_num_partitions);
+ msgcounter--;
+
+ dr_partition_count[rkmessage->partition]++;
+}
+
+/* Produce a batch of messages using with per message partition flag */
+static void test_per_message_partition_flag(void) {
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)];
+ int msgcnt = test_quick ? 100 : 1000;
+ int failcnt = 0;
+ int i;
+ int *rkpartition_counts;
+ rd_kafka_message_t *rkmessages;
+ const char *topic_name;
+
+ test_conf_init(&conf, &topic_conf, 30);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_per_message_partition_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("test_per_message_partition_flag: Created kafka instance %s\n",
+ rd_kafka_name(rk));
+ topic_name = test_mk_topic_name("0011_per_message_flag", 1);
+ test_create_topic(rk, topic_name, topic_num_partitions, 1);
+
+ rkt = rd_kafka_topic_new(rk, topic_name, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Create messages */
+ rkpartition_counts = calloc(sizeof(int), topic_num_partitions);
+ dr_partition_count = calloc(sizeof(int), topic_num_partitions);
+ rkmessages = calloc(sizeof(*rkmessages), msgcnt);
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
+ __FILE__, __FUNCTION__, i);
+
+ rkmessages[i].payload = rd_strdup(msg);
+ rkmessages[i].len = strlen(msg);
+ rkmessages[i]._private = msgidp;
+ rkmessages[i].partition = jitter(0, topic_num_partitions - 1);
+ rkpartition_counts[rkmessages[i].partition]++;
+ }
+
+ r = rd_kafka_produce_batch(
+ rkt, partition, RD_KAFKA_MSG_F_PARTITION | RD_KAFKA_MSG_F_FREE,
+ rkmessages, msgcnt);
+
+ /* Scan through messages to check for errors. */
+ for (i = 0; i < msgcnt; i++) {
+ if (rkmessages[i].err) {
+ failcnt++;
+ if (failcnt < 100)
+ TEST_SAY("Message #%i failed: %s\n", i,
+ rd_kafka_err2str(rkmessages[i].err));
+ }
+ }
+
+ /* All messages should've been produced. */
+ if (r < msgcnt) {
+ TEST_SAY(
+ "Not all messages were accepted "
+ "by produce_batch(): %i < %i\n",
+ r, msgcnt);
+ if (msgcnt - r != failcnt)
+ TEST_SAY(
+ "Discrepency between failed messages (%i) "
+ "and return value %i (%i - %i)\n",
+ failcnt, msgcnt - r, msgcnt, r);
+ TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
+ }
+
+ free(rkmessages);
+ TEST_SAY(
+ "Per-message partition: "
+ "Produced %i messages, waiting for deliveries\n",
+ r);
+
+ msgcounter = msgcnt;
+ /* Wait for messages to be delivered */
+ test_wait_delivery(rk, &msgcounter);
+
+ if (msgcounter != 0)
+ TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter,
+ msgcnt);
+
+ for (i = 0; i < topic_num_partitions; i++) {
+ if (dr_partition_count[i] != rkpartition_counts[i]) {
+ TEST_FAIL(
+ "messages were not sent to designated "
+ "partitions expected messages %i in "
+ "partition %i, but only "
+ "%i messages were sent",
+ rkpartition_counts[i], i, dr_partition_count[i]);
+ }
+ }
+
+ free(rkpartition_counts);
+ free(dr_partition_count);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return;
+}
+
+static void
+dr_partitioner_wo_per_message_flag_cb(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ free(rkmessage->_private);
+
+ if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+ if (msgcounter <= 0)
+ TEST_FAIL(
+ "Too many message dr_cb callback calls "
+ "(at msg offset #%" PRId64 ")\n",
+ rkmessage->offset);
+ if (rkmessage->partition != msg_partition_wo_flag)
+ msg_partition_wo_flag_success = 1;
+ msgcounter--;
+}
+
+/**
+ * @brief Produce a batch of messages using partitioner
+ * without per message partition flag
+ */
+static void test_message_partitioner_wo_per_message_flag(void) {
+ int partition = RD_KAFKA_PARTITION_UA;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128 + sizeof(__FILE__) + sizeof(__FUNCTION__)];
+ int msgcnt = test_quick ? 100 : 1000;
+ int failcnt = 0;
+ int i;
+ rd_kafka_message_t *rkmessages;
+
+ test_conf_init(&conf, &topic_conf, 30);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_msg_cb(conf,
+ dr_partitioner_wo_per_message_flag_cb);
+ test_conf_set(conf, "sticky.partitioning.linger.ms", "0");
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("test_partitioner: Created kafka instance %s\n",
+ rd_kafka_name(rk));
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0011", 0), topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Create messages */
+ rkmessages = calloc(sizeof(*rkmessages), msgcnt);
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s:%s test message #%i",
+ __FILE__, __FUNCTION__, i);
+
+ rkmessages[i].payload = rd_strdup(msg);
+ rkmessages[i].len = strlen(msg);
+ rkmessages[i]._private = msgidp;
+ rkmessages[i].partition = msg_partition_wo_flag;
+ }
+
+ r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
+ rkmessages, msgcnt);
+
+ /* Scan through messages to check for errors. */
+ for (i = 0; i < msgcnt; i++) {
+ if (rkmessages[i].err) {
+ failcnt++;
+ if (failcnt < 100)
+ TEST_SAY("Message #%i failed: %s\n", i,
+ rd_kafka_err2str(rkmessages[i].err));
+ }
+ }
+
+ /* All messages should've been produced. */
+ if (r < msgcnt) {
+ TEST_SAY(
+ "Not all messages were accepted "
+ "by produce_batch(): %i < %i\n",
+ r, msgcnt);
+ if (msgcnt - r != failcnt)
+ TEST_SAY(
+ "Discrepency between failed messages (%i) "
+ "and return value %i (%i - %i)\n",
+ failcnt, msgcnt - r, msgcnt, r);
+ TEST_FAIL("%i/%i messages failed\n", msgcnt - r, msgcnt);
+ }
+
+ free(rkmessages);
+ TEST_SAY(
+ "Partitioner: "
+ "Produced %i messages, waiting for deliveries\n",
+ r);
+
+ msgcounter = msgcnt;
+ /* Wait for messages to be delivered */
+ test_wait_delivery(rk, &msgcounter);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (msgcounter != 0)
+ TEST_FAIL("Still waiting for %i/%i messages\n", msgcounter,
+ msgcnt);
+ if (msg_partition_wo_flag_success == 0) {
+ TEST_FAIL(
+ "partitioner was not used, all messages were sent to "
+ "message specified partition %i",
+ i);
+ }
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return;
+}
+
+
+int main_0011_produce_batch(int argc, char **argv) {
+ test_message_partitioner_wo_per_message_flag();
+ test_single_partition();
+ test_partitioner();
+ if (test_can_create_topics(1))
+ test_per_message_partition_flag();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c
new file mode 100644
index 000000000..30ff392c4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0012-produce_consume.c
@@ -0,0 +1,537 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Produce messages, then consume them.
+ * Consume both through the standard interface and through the queue interface.
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int prod_msg_remains = 0;
+static int fails = 0;
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(err));
+
+ if (prod_msg_remains == 0)
+ TEST_FAIL("Too many messages delivered (prod_msg_remains %i)",
+ prod_msg_remains);
+
+ prod_msg_remains--;
+}
+
+
+/**
+ * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
+ */
+static void produce_messages(uint64_t testid,
+ const char *topic,
+ int partition_cnt,
+ int msgcnt) {
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ char msg[128];
+ int failcnt = 0;
+ int i;
+ rd_kafka_message_t *rkmessages;
+ int32_t partition;
+ int msgid = 0;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Make sure all replicas are in-sync after producing
+ * so that consume test wont fail. */
+ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
+ errstr, sizeof(errstr));
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Create messages. */
+ prod_msg_remains = msgcnt;
+ rkmessages = calloc(sizeof(*rkmessages), msgcnt / partition_cnt);
+ for (partition = 0; partition < partition_cnt; partition++) {
+ int batch_cnt = msgcnt / partition_cnt;
+
+ for (i = 0; i < batch_cnt; i++) {
+ rd_snprintf(msg, sizeof(msg),
+ "testid=%" PRIu64 ", partition=%i, msg=%i",
+ testid, (int)partition, msgid);
+ rkmessages[i].payload = rd_strdup(msg);
+ rkmessages[i].len = strlen(msg);
+ msgid++;
+ }
+
+ TEST_SAY("Start produce to partition %i: msgs #%d..%d\n",
+ (int)partition, msgid - batch_cnt, msgid);
+ /* Produce batch for this partition */
+ r = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_FREE,
+ rkmessages, batch_cnt);
+ if (r == -1)
+ TEST_FAIL(
+ "Failed to produce "
+ "batch for partition %i: %s",
+ (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ /* Scan through messages to check for errors. */
+ for (i = 0; i < batch_cnt; i++) {
+ if (rkmessages[i].err) {
+ failcnt++;
+ if (failcnt < 100)
+ TEST_SAY("Message #%i failed: %s\n", i,
+ rd_kafka_err2str(
+ rkmessages[i].err));
+ }
+ }
+
+ /* All messages should've been produced. */
+ if (r < batch_cnt) {
+ TEST_SAY(
+ "Not all messages were accepted "
+ "by produce_batch(): %i < %i\n",
+ r, batch_cnt);
+
+ if (batch_cnt - r != failcnt)
+ TEST_SAY(
+ "Discrepency between failed "
+ "messages (%i) "
+ "and return value %i (%i - %i)\n",
+ failcnt, batch_cnt - r, batch_cnt, r);
+ TEST_FAIL("%i/%i messages failed\n", batch_cnt - r,
+ batch_cnt);
+ }
+
+ TEST_SAY(
+ "Produced %i messages to partition %i, "
+ "waiting for deliveries\n",
+ r, partition);
+ }
+
+
+ free(rkmessages);
+
+ /* Wait for messages to be delivered */
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 100);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (prod_msg_remains != 0)
+ TEST_FAIL("Still waiting for %i messages to be produced",
+ prod_msg_remains);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+
+static int *cons_msgs;
+static int cons_msgs_size;
+static int cons_msgs_cnt;
+
+static void verify_consumed_msg_reset(int msgcnt) {
+ TEST_SAY("Resetting consumed_msgs (msgcnt %d)\n", msgcnt);
+ if (cons_msgs) {
+ free(cons_msgs);
+ cons_msgs = NULL;
+ }
+
+ if (msgcnt) {
+ int i;
+
+ cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt);
+ for (i = 0; i < msgcnt; i++)
+ cons_msgs[i] = -1;
+ }
+
+ cons_msgs_size = msgcnt;
+ cons_msgs_cnt = 0;
+}
+
+
+static int int_cmp(const void *_a, const void *_b) {
+ int a = *(int *)_a;
+ int b = *(int *)_b;
+ return RD_CMP(a, b);
+}
+
+static void verify_consumed_msg_check0(const char *func, int line) {
+ int i;
+ int fails = 0;
+
+ if (cons_msgs_cnt < cons_msgs_size) {
+ TEST_SAY("Missing %i messages in consumer\n",
+ cons_msgs_size - cons_msgs_cnt);
+ fails++;
+ }
+
+ qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);
+
+ for (i = 0; i < cons_msgs_size; i++) {
+ if (cons_msgs[i] != i) {
+ TEST_SAY(
+ "Consumed message #%i is wrong, "
+ "expected #%i\n",
+ cons_msgs[i], i);
+ fails++;
+ }
+ }
+
+ if (fails)
+ TEST_FAIL("See above error(s)");
+
+ verify_consumed_msg_reset(0);
+}
+
+
+#define verify_consumed_msg_check() \
+ verify_consumed_msg_check0(__FUNCTION__, __LINE__)
+
+
+
+static void verify_consumed_msg0(const char *func,
+ int line,
+ uint64_t testid,
+ int32_t partition,
+ int msgnum,
+ rd_kafka_message_t *rkmessage) {
+ uint64_t in_testid;
+ int in_part;
+ int in_msgnum;
+ char buf[1024];
+
+ if (rkmessage->len + 1 >= sizeof(buf))
+ TEST_FAIL(
+ "Incoming message too large (%i): "
+ "not sourced by this test",
+ (int)rkmessage->len);
+
+ rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len,
+ (char *)rkmessage->payload);
+
+ if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid,
+ &in_part, &in_msgnum) != 3)
+ TEST_FAIL("Incorrect message format: %s", buf);
+
+ if (test_level > 2) {
+ TEST_SAY("%s:%i: Our testid %" PRIu64
+ ", part %i =? %i, "
+ "msg %i =? %i "
+ ", message's: \"%s\"\n",
+ func, line, testid, (int)partition,
+ (int)rkmessage->partition, msgnum, in_msgnum, buf);
+ }
+
+ if (testid != in_testid || (partition != -1 && partition != in_part) ||
+ (msgnum != -1 && msgnum != in_msgnum) ||
+ (in_msgnum < 0 || in_msgnum > cons_msgs_size))
+ goto fail_match;
+
+ if (cons_msgs_cnt == cons_msgs_size) {
+ TEST_SAY(
+ "Too many messages in cons_msgs (%i) while reading "
+ "message \"%s\"\n",
+ cons_msgs_cnt, buf);
+ verify_consumed_msg_check();
+ TEST_FAIL("See above error(s)");
+ }
+
+ cons_msgs[cons_msgs_cnt++] = in_msgnum;
+
+ return;
+
+fail_match:
+ TEST_FAIL("%s:%i: Our testid %" PRIu64
+ ", part %i, msg %i/%i did "
+ "not match message's: \"%s\"\n",
+ func, line, testid, (int)partition, msgnum, cons_msgs_size,
+ buf);
+}
+
+#define verify_consumed_msg(testid, part, msgnum, rkmessage) \
+ verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \
+ rkmessage)
+
+
+static void consume_messages(uint64_t testid,
+ const char *topic,
+ int32_t partition,
+ int msg_base,
+ int batch_cnt,
+ int msgcnt) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ int i;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt,
+ partition);
+
+ /* Consume messages */
+ if (rd_kafka_consume_start(rkt, partition,
+ RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
+ TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition,
+ batch_cnt, rd_kafka_err2str(rd_kafka_last_error()));
+
+ for (i = 0; i < batch_cnt;) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage =
+ rd_kafka_consume(rkt, partition, tmout_multip(5000));
+ if (!rkmessage)
+ TEST_FAIL(
+ "Failed to consume message %i/%i from "
+ "partition %i: %s",
+ i, batch_cnt, (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+ if (rkmessage->err) {
+ if (rkmessage->err ==
+ RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ rd_kafka_message_destroy(rkmessage);
+ continue;
+ }
+ TEST_FAIL(
+ "Consume message %i/%i from partition %i "
+ "has error: %s: %s",
+ i, batch_cnt, (int)partition,
+ rd_kafka_err2str(rkmessage->err),
+ rd_kafka_message_errstr(rkmessage));
+ }
+
+ verify_consumed_msg(testid, partition, msg_base + i, rkmessage);
+
+ rd_kafka_message_destroy(rkmessage);
+ i++;
+ }
+
+ rd_kafka_consume_stop(rkt, partition);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+static void consume_messages_with_queues(uint64_t testid,
+ const char *topic,
+ int partition_cnt,
+ int msgcnt) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ rd_kafka_queue_t *rkqu;
+ int i;
+ int32_t partition;
+ int batch_cnt = msgcnt / partition_cnt;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ test_conf_set(conf, "enable.partition.eof", "true");
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ /* Create queue */
+ rkqu = rd_kafka_queue_new(rk);
+
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ TEST_SAY("Consuming %i messages from one queue serving %i partitions\n",
+ msgcnt, partition_cnt);
+
+ /* Start consuming each partition */
+ for (partition = 0; partition < partition_cnt; partition++) {
+ /* Consume messages */
+ TEST_SAY("Start consuming partition %i at offset -%i\n",
+ partition, batch_cnt);
+ if (rd_kafka_consume_start_queue(
+ rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt),
+ rkqu) == -1)
+ TEST_FAIL("consume_start_queue(%i) failed: %s",
+ (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+ }
+
+
+ /* Consume messages from queue */
+ for (i = 0; i < msgcnt;) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000));
+ if (!rkmessage)
+ TEST_FAIL(
+ "Failed to consume message %i/%i from "
+ "queue: %s",
+ i, msgcnt, rd_kafka_err2str(rd_kafka_last_error()));
+ if (rkmessage->err) {
+ if (rkmessage->err ==
+ RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ TEST_SAY("Topic %s [%" PRId32
+ "] reached "
+ "EOF at offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition,
+ rkmessage->offset);
+ rd_kafka_message_destroy(rkmessage);
+ continue;
+ }
+ TEST_FAIL(
+ "Consume message %i/%i from queue "
+ "has error (offset %" PRId64 ", partition %" PRId32
+ "): %s",
+ i, msgcnt, rkmessage->offset, rkmessage->partition,
+ rd_kafka_err2str(rkmessage->err));
+ }
+
+ verify_consumed_msg(testid, -1, -1, rkmessage);
+
+ rd_kafka_message_destroy(rkmessage);
+ i++;
+ }
+
+ /* Stop consuming each partition */
+ for (partition = 0; partition < partition_cnt; partition++)
+ rd_kafka_consume_stop(rkt, partition);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(rkqu);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+/**
+ * Produce to two partitions.
+ * Consume with standard interface from both, one after the other.
+ * Consume with queue interface from both, simultanously.
+ */
+static void test_produce_consume(void) {
+ int msgcnt = test_quick ? 100 : 1000;
+ int partition_cnt = 2;
+ int i;
+ uint64_t testid;
+ int msg_base = 0;
+ const char *topic;
+
+ /* Generate a testid so we can differentiate messages
+ * from other tests */
+ testid = test_id_generate();
+
+ /* Read test.conf to configure topic name */
+ test_conf_init(NULL, NULL, 20);
+ topic = test_mk_topic_name("0012", 1);
+
+ TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid);
+
+ /* Produce messages */
+ produce_messages(testid, topic, partition_cnt, msgcnt);
+
+
+ /* Consume messages with standard interface */
+ verify_consumed_msg_reset(msgcnt);
+ for (i = 0; i < partition_cnt; i++) {
+ consume_messages(testid, topic, i, msg_base,
+ msgcnt / partition_cnt, msgcnt);
+ msg_base += msgcnt / partition_cnt;
+ }
+ verify_consumed_msg_check();
+
+ /* Consume messages with queue interface */
+ verify_consumed_msg_reset(msgcnt);
+ consume_messages_with_queues(testid, topic, partition_cnt, msgcnt);
+ verify_consumed_msg_check();
+
+ return;
+}
+
+
+
+int main_0012_produce_consume(int argc, char **argv) {
+ test_produce_consume();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c
new file mode 100644
index 000000000..26a7ac070
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0013-null-msgs.c
@@ -0,0 +1,473 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Produce NULL payload messages, then consume them.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int prod_msg_remains = 0;
+static int fails = 0;
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(err));
+
+ if (prod_msg_remains == 0)
+ TEST_FAIL("Too many messages delivered (prod_msg_remains %i)",
+ prod_msg_remains);
+
+ prod_msg_remains--;
+}
+
+
+/**
+ * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
+ */
+static void produce_null_messages(uint64_t testid,
+ const char *topic,
+ int partition_cnt,
+ int msgcnt) {
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ int i;
+ int32_t partition;
+ int msgid = 0;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Make sure all replicas are in-sync after producing
+ * so that consume test wont fail. */
+ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
+ errstr, sizeof(errstr));
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ /* Produce messages */
+ prod_msg_remains = msgcnt;
+ for (partition = 0; partition < partition_cnt; partition++) {
+ int batch_cnt = msgcnt / partition_cnt;
+
+ for (i = 0; i < batch_cnt; i++) {
+ char key[128];
+ rd_snprintf(key, sizeof(key),
+ "testid=%" PRIu64 ", partition=%i, msg=%i",
+ testid, (int)partition, msgid);
+ r = rd_kafka_produce(rkt, partition, 0, NULL, 0, key,
+ strlen(key), NULL);
+ if (r == -1)
+ TEST_FAIL(
+ "Failed to produce message %i "
+ "to partition %i: %s",
+ msgid, (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+ msgid++;
+ }
+ }
+
+
+ TEST_SAY(
+ "Produced %d messages to %d partition(s), "
+ "waiting for deliveries\n",
+ msgcnt, partition_cnt);
+ /* Wait for messages to be delivered */
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 100);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (prod_msg_remains != 0)
+ TEST_FAIL("Still waiting for %i messages to be produced",
+ prod_msg_remains);
+ else
+ TEST_SAY("All messages delivered\n");
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+
+static int *cons_msgs;
+static int cons_msgs_size;
+static int cons_msgs_cnt;
+
+static void verify_consumed_msg_reset(int msgcnt) {
+ if (cons_msgs) {
+ free(cons_msgs);
+ cons_msgs = NULL;
+ }
+
+ if (msgcnt) {
+ int i;
+
+ cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt);
+ for (i = 0; i < msgcnt; i++)
+ cons_msgs[i] = -1;
+ }
+
+ cons_msgs_size = msgcnt;
+ cons_msgs_cnt = 0;
+}
+
+
+static int int_cmp(const void *_a, const void *_b) {
+ int a = *(int *)_a;
+ int b = *(int *)_b;
+ return RD_CMP(a, b);
+}
+
+static void verify_consumed_msg_check0(const char *func, int line) {
+ int i;
+ int fails = 0;
+
+ if (cons_msgs_cnt < cons_msgs_size) {
+ TEST_SAY("Missing %i messages in consumer\n",
+ cons_msgs_size - cons_msgs_cnt);
+ fails++;
+ }
+
+ qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);
+
+ for (i = 0; i < cons_msgs_size; i++) {
+ if (cons_msgs[i] != i) {
+ TEST_SAY(
+ "Consumed message #%i is wrong, "
+ "expected #%i\n",
+ cons_msgs[i], i);
+ fails++;
+ }
+ }
+
+ if (fails)
+ TEST_FAIL("See above error(s)");
+
+ verify_consumed_msg_reset(0);
+}
+
+
+#define verify_consumed_msg_check() \
+ verify_consumed_msg_check0(__FUNCTION__, __LINE__)
+
+
+
+static void verify_consumed_msg0(const char *func,
+ int line,
+ uint64_t testid,
+ int32_t partition,
+ int msgnum,
+ rd_kafka_message_t *rkmessage) {
+ uint64_t in_testid;
+ int in_part;
+ int in_msgnum;
+ char buf[128];
+
+ if (rkmessage->len != 0)
+ TEST_FAIL("Incoming message not NULL: %i bytes",
+ (int)rkmessage->len);
+
+ if (rkmessage->key_len + 1 >= sizeof(buf))
+ TEST_FAIL(
+ "Incoming message key too large (%i): "
+ "not sourced by this test",
+ (int)rkmessage->key_len);
+
+ rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len,
+ (char *)rkmessage->key);
+
+ if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid,
+ &in_part, &in_msgnum) != 3)
+ TEST_FAIL("Incorrect key format: %s", buf);
+
+ if (testid != in_testid || (partition != -1 && partition != in_part) ||
+ (msgnum != -1 && msgnum != in_msgnum) ||
+ (in_msgnum < 0 || in_msgnum > cons_msgs_size))
+ goto fail_match;
+
+ if (test_level > 2) {
+ TEST_SAY("%s:%i: Our testid %" PRIu64
+ ", part %i (%i), "
+ "msg %i/%i did "
+ ", key's: \"%s\"\n",
+ func, line, testid, (int)partition,
+ (int)rkmessage->partition, msgnum, cons_msgs_size,
+ buf);
+ }
+
+ if (cons_msgs_cnt == cons_msgs_size) {
+ TEST_SAY(
+ "Too many messages in cons_msgs (%i) while reading "
+ "message key \"%s\"\n",
+ cons_msgs_cnt, buf);
+ verify_consumed_msg_check();
+ TEST_FAIL("See above error(s)");
+ }
+
+ cons_msgs[cons_msgs_cnt++] = in_msgnum;
+
+ return;
+
+fail_match:
+ TEST_FAIL("%s:%i: Our testid %" PRIu64
+ ", part %i, msg %i/%i did "
+ "not match message's key: \"%s\"\n",
+ func, line, testid, (int)partition, msgnum, cons_msgs_size,
+ buf);
+}
+
+#define verify_consumed_msg(testid, part, msgnum, rkmessage) \
+ verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \
+ rkmessage)
+
+
+static void consume_messages(uint64_t testid,
+ const char *topic,
+ int32_t partition,
+ int msg_base,
+ int batch_cnt,
+ int msgcnt) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ int i;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ TEST_SAY("Consuming %i messages from partition %i\n", batch_cnt,
+ partition);
+
+ /* Consume messages */
+ if (rd_kafka_consume_start(rkt, partition,
+ RD_KAFKA_OFFSET_TAIL(batch_cnt)) == -1)
+ TEST_FAIL("consume_start(%i, -%i) failed: %s", (int)partition,
+ batch_cnt, rd_kafka_err2str(rd_kafka_last_error()));
+
+ for (i = 0; i < batch_cnt; i++) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage =
+ rd_kafka_consume(rkt, partition, tmout_multip(5000));
+ if (!rkmessage)
+ TEST_FAIL(
+ "Failed to consume message %i/%i from "
+ "partition %i: %s",
+ i, batch_cnt, (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+ if (rkmessage->err)
+ TEST_FAIL(
+ "Consume message %i/%i from partition %i "
+ "has error: %s",
+ i, batch_cnt, (int)partition,
+ rd_kafka_err2str(rkmessage->err));
+
+ verify_consumed_msg(testid, partition, msg_base + i, rkmessage);
+
+ rd_kafka_message_destroy(rkmessage);
+ }
+
+ rd_kafka_consume_stop(rkt, partition);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+static void consume_messages_with_queues(uint64_t testid,
+ const char *topic,
+ int partition_cnt,
+ int msgcnt) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ rd_kafka_queue_t *rkqu;
+ int i;
+ int32_t partition;
+ int batch_cnt = msgcnt / partition_cnt;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ /* Create queue */
+ rkqu = rd_kafka_queue_new(rk);
+
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ TEST_SAY("Consuming %i messages from one queue serving %i partitions\n",
+ msgcnt, partition_cnt);
+
+ /* Start consuming each partition */
+ for (partition = 0; partition < partition_cnt; partition++) {
+ /* Consume messages */
+ TEST_SAY("Start consuming partition %i at tail offset -%i\n",
+ partition, batch_cnt);
+ if (rd_kafka_consume_start_queue(
+ rkt, partition, RD_KAFKA_OFFSET_TAIL(batch_cnt),
+ rkqu) == -1)
+ TEST_FAIL("consume_start_queue(%i) failed: %s",
+ (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+ }
+
+
+ /* Consume messages from queue */
+ for (i = 0; i < msgcnt; i++) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consume_queue(rkqu, tmout_multip(5000));
+ if (!rkmessage)
+ TEST_FAIL(
+ "Failed to consume message %i/%i from "
+ "queue: %s",
+ i, msgcnt, rd_kafka_err2str(rd_kafka_last_error()));
+ if (rkmessage->err)
+ TEST_FAIL(
+ "Consume message %i/%i from queue "
+ "has error (partition %" PRId32 "): %s",
+ i, msgcnt, rkmessage->partition,
+ rd_kafka_err2str(rkmessage->err));
+
+ verify_consumed_msg(testid, -1, -1, rkmessage);
+
+ rd_kafka_message_destroy(rkmessage);
+ }
+
+ /* Stop consuming each partition */
+ for (partition = 0; partition < partition_cnt; partition++)
+ rd_kafka_consume_stop(rkt, partition);
+
+ /* Destroy queue */
+ rd_kafka_queue_destroy(rkqu);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+static void test_produce_consume(void) {
+ int msgcnt = test_quick ? 100 : 1000;
+ int partition_cnt = 1;
+ int i;
+ uint64_t testid;
+ int msg_base = 0;
+ const char *topic;
+
+ /* Generate a testid so we can differentiate messages
+ * from other tests */
+ testid = test_id_generate();
+
+ /* Read test.conf to configure topic name */
+ test_conf_init(NULL, NULL, 20);
+ topic = test_mk_topic_name("0013", 0);
+
+ TEST_SAY("Topic %s, testid %" PRIu64 "\n", topic, testid);
+
+ /* Produce messages */
+ produce_null_messages(testid, topic, partition_cnt, msgcnt);
+
+
+ /* Consume messages with standard interface */
+ verify_consumed_msg_reset(msgcnt);
+ for (i = 0; i < partition_cnt; i++) {
+ consume_messages(testid, topic, i, msg_base,
+ msgcnt / partition_cnt, msgcnt);
+ msg_base += msgcnt / partition_cnt;
+ }
+ verify_consumed_msg_check();
+
+ /* Consume messages with queue interface */
+ verify_consumed_msg_reset(msgcnt);
+ consume_messages_with_queues(testid, topic, partition_cnt, msgcnt);
+ verify_consumed_msg_check();
+
+ return;
+}
+
+
+
+int main_0013_null_msgs(int argc, char **argv) {
+ test_produce_consume();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c
new file mode 100644
index 000000000..edae85f5c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0014-reconsume-191.c
@@ -0,0 +1,512 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+static int prod_msg_remains = 0;
+static int fails = 0;
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+
+ if (err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(err));
+
+ if (prod_msg_remains == 0)
+ TEST_FAIL("Too many messages delivered (prod_msg_remains %i)",
+ prod_msg_remains);
+
+ prod_msg_remains--;
+}
+
+
+/**
+ * Produces 'msgcnt' messages split over 'partition_cnt' partitions.
+ */
+static void produce_messages(uint64_t testid,
+ const char *topic,
+ int partition_cnt,
+ int msg_base,
+ int msgcnt) {
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ int i;
+ int32_t partition;
+ int msgid = msg_base;
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Make sure all replicas are in-sync after producing
+ * so that consume test wont fail. */
+ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
+ errstr, sizeof(errstr));
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ /* Produce messages */
+ prod_msg_remains = msgcnt;
+ for (partition = 0; partition < partition_cnt; partition++) {
+ int batch_cnt = msgcnt / partition_cnt;
+
+ for (i = 0; i < batch_cnt; i++) {
+ char key[128];
+ char buf[128];
+ rd_snprintf(key, sizeof(key),
+ "testid=%" PRIu64 ", partition=%i, msg=%i",
+ testid, (int)partition, msgid);
+ rd_snprintf(buf, sizeof(buf),
+ "data: testid=%" PRIu64
+ ", partition=%i, msg=%i",
+ testid, (int)partition, msgid);
+
+ r = rd_kafka_produce(
+ rkt, partition, RD_KAFKA_MSG_F_COPY, buf,
+ strlen(buf), key, strlen(key), NULL);
+ if (r == -1)
+ TEST_FAIL(
+ "Failed to produce message %i "
+ "to partition %i: %s",
+ msgid, (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+ msgid++;
+ }
+ }
+
+
+ /* Wait for messages to be delivered */
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 100);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (prod_msg_remains != 0)
+ TEST_FAIL("Still waiting for %i messages to be produced",
+ prod_msg_remains);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+
+static int *cons_msgs;
+static int cons_msgs_size;
+static int cons_msgs_cnt;
+static int cons_msg_next;
+static int cons_msg_stop = -1;
+static int64_t cons_last_offset = -1; /* last offset received */
+
+static void verify_consumed_msg_reset(int msgcnt) {
+ if (cons_msgs) {
+ free(cons_msgs);
+ cons_msgs = NULL;
+ }
+
+ if (msgcnt) {
+ int i;
+
+ cons_msgs = malloc(sizeof(*cons_msgs) * msgcnt);
+ for (i = 0; i < msgcnt; i++)
+ cons_msgs[i] = -1;
+ }
+
+ cons_msgs_size = msgcnt;
+ cons_msgs_cnt = 0;
+ cons_msg_next = 0;
+ cons_msg_stop = -1;
+ cons_last_offset = -1;
+
+ TEST_SAY("Reset consumed_msg stats, making room for %d new messages\n",
+ msgcnt);
+}
+
+
+static int int_cmp(const void *_a, const void *_b) {
+ int a = *(int *)_a;
+ int b = *(int *)_b;
+ /* Sort -1 (non-received msgs) at the end */
+ return (a == -1 ? 100000000 : a) - (b == -1 ? 10000000 : b);
+}
+
+static void verify_consumed_msg_check0(const char *func,
+ int line,
+ const char *desc,
+ int expected_cnt) {
+ int i;
+ int fails = 0;
+ int not_recvd = 0;
+
+ TEST_SAY("%s: received %d/%d/%d messages\n", desc, cons_msgs_cnt,
+ expected_cnt, cons_msgs_size);
+ if (expected_cnt > cons_msgs_size)
+ TEST_FAIL("expected_cnt %d > cons_msgs_size %d\n", expected_cnt,
+ cons_msgs_size);
+
+ if (cons_msgs_cnt < expected_cnt) {
+ TEST_SAY("%s: Missing %i messages in consumer\n", desc,
+ expected_cnt - cons_msgs_cnt);
+ fails++;
+ }
+
+ qsort(cons_msgs, cons_msgs_size, sizeof(*cons_msgs), int_cmp);
+
+ for (i = 0; i < expected_cnt; i++) {
+ if (cons_msgs[i] != i) {
+ if (cons_msgs[i] == -1) {
+ not_recvd++;
+ TEST_SAY("%s: msg %d/%d not received\n", desc,
+ i, expected_cnt);
+ } else
+ TEST_SAY(
+ "%s: Consumed message #%i is wrong, "
+ "expected #%i\n",
+ desc, cons_msgs[i], i);
+ fails++;
+ }
+ }
+
+ if (not_recvd)
+ TEST_SAY("%s: %d messages not received at all\n", desc,
+ not_recvd);
+
+ if (fails)
+ TEST_FAIL("%s: See above error(s)", desc);
+ else
+ TEST_SAY(
+ "%s: message range check: %d/%d messages consumed: "
+ "succeeded\n",
+ desc, cons_msgs_cnt, expected_cnt);
+}
+
+
+#define verify_consumed_msg_check(desc, expected_cnt) \
+ verify_consumed_msg_check0(__FUNCTION__, __LINE__, desc, expected_cnt)
+
+
+
+static void verify_consumed_msg0(const char *func,
+ int line,
+ uint64_t testid,
+ int32_t partition,
+ int msgnum,
+ rd_kafka_message_t *rkmessage) {
+ uint64_t in_testid;
+ int in_part;
+ int in_msgnum;
+ char buf[128];
+
+ if (rkmessage->key_len + 1 >= sizeof(buf))
+ TEST_FAIL(
+ "Incoming message key too large (%i): "
+ "not sourced by this test",
+ (int)rkmessage->key_len);
+
+ rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->key_len,
+ (char *)rkmessage->key);
+
+ if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i", &in_testid,
+ &in_part, &in_msgnum) != 3)
+ TEST_FAIL("Incorrect key format: %s", buf);
+
+ if (test_level > 2) {
+ TEST_SAY("%s:%i: Our testid %" PRIu64
+ ", part %i (%i), "
+ "msg %i/%i, key's: \"%s\"\n",
+ func, line, testid, (int)partition,
+ (int)rkmessage->partition, msgnum, cons_msgs_size,
+ buf);
+ }
+
+ if (testid != in_testid || (partition != -1 && partition != in_part) ||
+ (msgnum != -1 && msgnum != in_msgnum) ||
+ (in_msgnum < 0 || in_msgnum > cons_msgs_size))
+ goto fail_match;
+
+ if (cons_msgs_cnt == cons_msgs_size) {
+ TEST_SAY(
+ "Too many messages in cons_msgs (%i) while reading "
+ "message key \"%s\"\n",
+ cons_msgs_cnt, buf);
+ verify_consumed_msg_check("?", cons_msgs_size);
+ TEST_FAIL("See above error(s)");
+ }
+
+ cons_msgs[cons_msgs_cnt++] = in_msgnum;
+ cons_last_offset = rkmessage->offset;
+
+ return;
+
+fail_match:
+ TEST_FAIL("%s:%i: Our testid %" PRIu64
+ ", part %i, msg %i/%i did "
+ "not match message's key: \"%s\"\n",
+ func, line, testid, (int)partition, msgnum, cons_msgs_size,
+ buf);
+}
+
+#define verify_consumed_msg(testid, part, msgnum, rkmessage) \
+ verify_consumed_msg0(__FUNCTION__, __LINE__, testid, part, msgnum, \
+ rkmessage)
+
+
+static void consume_cb(rd_kafka_message_t *rkmessage, void *opaque) {
+ int64_t testid = *(int64_t *)opaque;
+
+ if (test_level > 2)
+ TEST_SAY("Consumed message #%d? at offset %" PRId64 ": %s\n",
+ cons_msg_next, rkmessage->offset,
+ rd_kafka_err2str(rkmessage->err));
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ TEST_SAY("EOF at offset %" PRId64 "\n", rkmessage->offset);
+ return;
+ }
+
+ if (rkmessage->err)
+ TEST_FAIL(
+ "Consume message from partition %i "
+ "has error: %s",
+ (int)rkmessage->partition,
+ rd_kafka_err2str(rkmessage->err));
+
+ verify_consumed_msg(testid, rkmessage->partition, cons_msg_next,
+ rkmessage);
+
+ if (cons_msg_next == cons_msg_stop) {
+ rd_kafka_yield(NULL /*FIXME*/);
+ }
+
+ cons_msg_next++;
+}
+
+static void consume_messages_callback_multi(const char *desc,
+ uint64_t testid,
+ const char *topic,
+ int32_t partition,
+ const char *offset_store_method,
+ int msg_base,
+ int msg_cnt,
+ int64_t initial_offset,
+ int iterations) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ int i;
+
+ TEST_SAY("%s: Consume messages %d+%d from %s [%" PRId32
+ "] "
+ "from offset %" PRId64 " in %d iterations\n",
+ desc, msg_base, msg_cnt, topic, partition, initial_offset,
+ iterations);
+
+ test_conf_init(&conf, &topic_conf, 20);
+
+ test_topic_conf_set(topic_conf, "offset.store.method",
+ offset_store_method);
+
+ if (!strcmp(offset_store_method, "broker")) {
+ /* Broker based offset storage requires a group.id */
+ test_conf_set(conf, "group.id", topic);
+ }
+
+ test_conf_set(conf, "enable.partition.eof", "true");
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ rd_kafka_topic_conf_set(topic_conf, "auto.offset.reset", "smallest",
+ NULL, 0);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("%s: Failed to create topic: %s\n", desc,
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ cons_msg_stop = cons_msg_next + msg_cnt - 1;
+
+ /* Consume the same batch of messages multiple times to
+ * make sure back-to-back start&stops work. */
+ for (i = 0; i < iterations; i++) {
+ int cnta;
+ test_timing_t t_stop;
+
+ TEST_SAY(
+ "%s: Iteration #%i: Consuming from "
+ "partition %i at offset %" PRId64
+ ", "
+ "msgs range %d..%d\n",
+ desc, i, partition, initial_offset, cons_msg_next,
+ cons_msg_stop);
+
+ /* Consume messages */
+ if (rd_kafka_consume_start(rkt, partition, initial_offset) ==
+ -1)
+ TEST_FAIL("%s: consume_start(%i) failed: %s", desc,
+ (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+
+ /* Stop consuming messages when this number of messages
+ * is reached. */
+ cnta = cons_msg_next;
+ do {
+ rd_kafka_consume_callback(rkt, partition, 1000,
+ consume_cb, &testid);
+ } while (cons_msg_next < cons_msg_stop);
+
+ TEST_SAY("%s: Iteration #%i: consumed %i messages\n", desc, i,
+ cons_msg_next - cnta);
+
+ TIMING_START(&t_stop, "rd_kafka_consume_stop()");
+ rd_kafka_consume_stop(rkt, partition);
+ TIMING_STOP(&t_stop);
+
+ /* Advance next offset so we dont reconsume
+ * messages on the next run. */
+ if (initial_offset != RD_KAFKA_OFFSET_STORED) {
+ initial_offset = cons_last_offset + 1;
+ cons_msg_stop = cons_msg_next + msg_cnt - 1;
+ }
+ }
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("%s: Destroying kafka instance %s\n", desc, rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+}
+
+
+
+static void test_produce_consume(const char *offset_store_method) {
+ int msgcnt = 100;
+ int partition_cnt = 1;
+ int i;
+ uint64_t testid;
+ int msg_base = 0;
+ const char *topic;
+
+ /* Generate a testid so we can differentiate messages
+ * from other tests */
+ testid = test_id_generate();
+
+ /* Read test.conf to configure topic name */
+ test_conf_init(NULL, NULL, 20);
+ topic = test_mk_topic_name("0014", 1 /*random*/);
+
+ TEST_SAY("Topic %s, testid %" PRIu64 ", offset.store.method=%s\n",
+ topic, testid, offset_store_method);
+
+ /* Produce messages */
+ produce_messages(testid, topic, partition_cnt, msg_base, msgcnt);
+
+ /* 100% of messages */
+ verify_consumed_msg_reset(msgcnt);
+
+ /* Consume 50% of messages with callbacks: stored offsets with no prior
+ * offset stored. */
+ for (i = 0; i < partition_cnt; i++)
+ consume_messages_callback_multi("STORED.1/2", testid, topic, i,
+ offset_store_method, msg_base,
+ (msgcnt / partition_cnt) / 2,
+ RD_KAFKA_OFFSET_STORED, 1);
+ verify_consumed_msg_check("STORED.1/2", msgcnt / 2);
+
+ /* Consume the rest using the now stored offset */
+ for (i = 0; i < partition_cnt; i++)
+ consume_messages_callback_multi("STORED.2/2", testid, topic, i,
+ offset_store_method, msg_base,
+ (msgcnt / partition_cnt) / 2,
+ RD_KAFKA_OFFSET_STORED, 1);
+ verify_consumed_msg_check("STORED.2/2", msgcnt);
+
+
+ /* Consume messages with callbacks: logical offsets */
+ verify_consumed_msg_reset(msgcnt);
+ for (i = 0; i < partition_cnt; i++) {
+ int p_msg_cnt = msgcnt / partition_cnt;
+ int64_t initial_offset = RD_KAFKA_OFFSET_TAIL(p_msg_cnt);
+ const int iterations = 4;
+ consume_messages_callback_multi("TAIL+", testid, topic, i,
+ offset_store_method,
+ /* start here (msgid) */
+ msg_base,
+ /* consume this many messages
+ * per iteration. */
+ p_msg_cnt / iterations,
+ /* start here (offset) */
+ initial_offset, iterations);
+ }
+
+ verify_consumed_msg_check("TAIL+", msgcnt);
+
+ verify_consumed_msg_reset(0);
+
+ return;
+}
+
+
+
+int main_0014_reconsume_191(int argc, char **argv) {
+ if (test_broker_version >= TEST_BRKVER(0, 8, 2, 0))
+ test_produce_consume("broker");
+ test_produce_consume("file");
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c
new file mode 100644
index 000000000..a551a0b53
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0015-offset_seeks.c
@@ -0,0 +1,172 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+
+static void do_legacy_seek(const char *topic, uint64_t testid, int msg_cnt) {
+ rd_kafka_t *rk_c;
+ rd_kafka_topic_t *rkt_c;
+ int32_t partition = 0;
+ int i;
+ int64_t offset_last, offset_base;
+ int dance_iterations = 10;
+ int msgs_per_dance = 10;
+ const int msg_base = 0;
+
+ SUB_TEST_QUICK();
+
+ rk_c = test_create_consumer(NULL, NULL, NULL, NULL);
+ rkt_c = test_create_consumer_topic(rk_c, topic);
+
+ /* Start consumer tests */
+ test_consumer_start("verify.all", rkt_c, partition,
+ RD_KAFKA_OFFSET_BEGINNING);
+ /* Make sure all messages are available */
+ offset_last = test_consume_msgs("verify.all", rkt_c, testid, partition,
+ TEST_NO_SEEK, msg_base, msg_cnt,
+ 1 /* parse format*/);
+
+ /* Rewind offset back to its base. */
+ offset_base = offset_last - msg_cnt + 1;
+
+ TEST_SAY("%s [%" PRId32
+ "]: Do random seek&consume for msgs #%d+%d with "
+ "offsets %" PRId64 "..%" PRId64 "\n",
+ rd_kafka_topic_name(rkt_c), partition, msg_base, msg_cnt,
+ offset_base, offset_last);
+
+ /* Now go dancing over the entire range with offset seeks. */
+ for (i = 0; i < dance_iterations; i++) {
+ int64_t offset =
+ jitter((int)offset_base, (int)offset_base + msg_cnt);
+
+ test_consume_msgs(
+ "dance", rkt_c, testid, partition, offset,
+ msg_base + (int)(offset - offset_base),
+ RD_MIN(msgs_per_dance, (int)(offset_last - offset)),
+ 1 /* parse format */);
+ }
+
+ test_consumer_stop("1", rkt_c, partition);
+
+ rd_kafka_topic_destroy(rkt_c);
+ rd_kafka_destroy(rk_c);
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_seek(const char *topic,
+ uint64_t testid,
+ int msg_cnt,
+ rd_bool_t with_timeout) {
+ rd_kafka_t *c;
+ rd_kafka_topic_partition_list_t *partitions;
+ char errstr[512];
+ int i;
+
+ SUB_TEST_QUICK("%s timeout", with_timeout ? "with" : "without");
+
+ c = test_create_consumer(topic, NULL, NULL, NULL);
+
+ partitions = rd_kafka_topic_partition_list_new(3);
+ for (i = 0; i < 3; i++)
+ rd_kafka_topic_partition_list_add(partitions, topic, i)
+ ->offset = RD_KAFKA_OFFSET_END;
+
+ TEST_CALL__(rd_kafka_assign(c, partitions));
+
+ /* Should see no messages */
+ test_consumer_poll_no_msgs("NO.MSGS", c, testid, 3000);
+
+ /* Seek to beginning */
+ for (i = 0; i < 3; i++) {
+ /* Sentinel to verify that this field is reset by
+ * seek_partitions() */
+ partitions->elems[i].err = RD_KAFKA_RESP_ERR__BAD_MSG;
+ partitions->elems[i].offset =
+ i == 0 ?
+ /* Logical and absolute offsets for the same thing */
+ RD_KAFKA_OFFSET_BEGINNING
+ : 0;
+ }
+
+ TEST_SAY("Seeking\n");
+ TEST_CALL_ERROR__(
+ rd_kafka_seek_partitions(c, partitions, with_timeout ? 7000 : -1));
+
+ /* Verify that there are no per-partition errors */
+ for (i = 0; i < 3; i++)
+ TEST_ASSERT_LATER(!partitions->elems[i].err,
+ "Partition #%d has unexpected error: %s", i,
+ rd_kafka_err2name(partitions->elems[i].err));
+ TEST_LATER_CHECK();
+
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ /* Should now see all messages */
+ test_consumer_poll("MSGS", c, testid, -1, 0, msg_cnt, NULL);
+
+ /* Some close/destroy variation */
+ if (with_timeout)
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0015_offsets_seek(int argc, char **argv) {
+ const char *topic = test_mk_topic_name("0015", 1);
+ int msg_cnt_per_part = test_quick ? 100 : 1000;
+ int msg_cnt = 3 * msg_cnt_per_part;
+ uint64_t testid;
+
+ testid = test_id_generate();
+
+ test_produce_msgs_easy_multi(
+ testid, topic, 0, 0 * msg_cnt_per_part, msg_cnt_per_part, topic, 1,
+ 1 * msg_cnt_per_part, msg_cnt_per_part, topic, 2,
+ 2 * msg_cnt_per_part, msg_cnt_per_part, NULL);
+
+ /* legacy seek: only reads partition 0 */
+ do_legacy_seek(topic, testid, msg_cnt_per_part);
+
+ do_seek(topic, testid, msg_cnt, rd_true /*with timeout*/);
+
+ do_seek(topic, testid, msg_cnt, rd_true /*without timeout*/);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c
new file mode 100644
index 000000000..2d0605b88
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0016-client_swname.c
@@ -0,0 +1,166 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+/**
+ * @name Verify KIP-511, client.software.name and client.software.version
+ *
+ */
+static char jmx_cmd[512];
+
+/**
+ * @brief Verify that the expected software name and version is reported
+ * in JMX metrics.
+ */
+static void jmx_verify(const char *exp_swname, const char *exp_swversion) {
+#if _WIN32
+ return;
+#else
+ int r;
+ char cmd[512 + 256];
+
+ if (!*jmx_cmd)
+ return;
+
+ rd_snprintf(cmd, sizeof(cmd),
+ "%s | "
+ "grep -F 'clientSoftwareName=%s,clientSoftwareVersion=%s'",
+ jmx_cmd, exp_swname, exp_swversion ? exp_swversion : "");
+ r = system(cmd);
+ if (WEXITSTATUS(r) == 1)
+ TEST_FAIL(
+ "Expected software name and version not found in "
+ "JMX metrics with command \"%s\"",
+ cmd);
+ else if (r == -1 || WIFSIGNALED(r) || WEXITSTATUS(r))
+ TEST_FAIL(
+ "Failed to execute JmxTool command \"%s\": "
+ "exit code %d",
+ cmd, r);
+
+ TEST_SAY(
+ "Expected software name \"%s\" and version \"%s\" "
+ "found in JMX metrics\n",
+ exp_swname, exp_swversion);
+#endif /* !_WIN32 */
+}
+
+
+static void do_test_swname(const char *broker,
+ const char *swname,
+ const char *swversion,
+ const char *exp_swname,
+ const char *exp_swversion) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ const rd_kafka_metadata_t *md;
+ rd_kafka_resp_err_t err;
+
+ TEST_SAY(_C_MAG
+ "[ Test client.software.name=%s, "
+ "client.software.version=%s ]\n",
+ swname ? swname : "NULL", swversion ? swversion : "NULL");
+
+ test_conf_init(&conf, NULL, 30 /* jmxtool is severely slow */);
+ if (broker)
+ test_conf_set(conf, "bootstrap.servers", broker);
+ if (swname)
+ test_conf_set(conf, "client.software.name", swname);
+ if (swversion)
+ test_conf_set(conf, "client.software.version", swversion);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ /* Trigger a metadata request so we know we're connected. */
+ err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000));
+ TEST_ASSERT(!err, "metadata() failed: %s", rd_kafka_err2str(err));
+ rd_kafka_metadata_destroy(md);
+
+ /* Verify JMX metrics, if possible */
+ jmx_verify(exp_swname, exp_swversion);
+
+ rd_kafka_destroy(rk);
+
+ TEST_SAY(_C_GRN
+ "[ Test client.software.name=%s, "
+ "client.software.version=%s: PASS ]\n",
+ swname ? swname : "NULL", swversion ? swversion : "NULL");
+}
+
+int main_0016_client_swname(int argc, char **argv) {
+ const char *broker;
+ const char *kafka_path;
+ const char *jmx_port;
+ const char *reason = NULL;
+
+ /* If available, use the Kafka JmxTool to query software name
+ * in broker JMX metrics */
+ if (!(broker = test_getenv("BROKER_ADDRESS_2", NULL)))
+ reason =
+ "Env var BROKER_ADDRESS_2 missing "
+ "(not running in trivup or trivup too old?)";
+ else if (test_broker_version < TEST_BRKVER(2, 5, 0, 0))
+ reason =
+ "Client software JMX metrics not exposed prior to "
+ "Apache Kafka 2.5.0.0";
+ else if (!(kafka_path = test_getenv("KAFKA_PATH", NULL)))
+ reason = "Env var KAFKA_PATH missing (not running in trivup?)";
+ else if (!(jmx_port = test_getenv("BROKER_JMX_PORT_2", NULL)))
+ reason =
+ "Env var BROKER_JMX_PORT_2 missing "
+ "(not running in trivup or trivup too old?)";
+ else
+ rd_snprintf(jmx_cmd, sizeof(jmx_cmd),
+ "%s/bin/kafka-run-class.sh kafka.tools.JmxTool "
+ "--jmx-url "
+ "service:jmx:rmi:///jndi/rmi://:%s/jmxrmi "
+ "--attributes connections --one-time true | "
+ "grep clientSoftware",
+ kafka_path, jmx_port);
+
+ if (reason)
+ TEST_WARN("Will not be able to verify JMX metrics: %s\n",
+ reason);
+
+ /* Default values, the version is not checked since the
+ * built librdkafka may not use the same string, and additionally we
+ * don't want to perform the string mangling here to make the string
+ * protocol safe. */
+ do_test_swname(broker, NULL, NULL, "librdkafka", NULL);
+ /* Properly formatted */
+ do_test_swname(broker, "my-little-version", "1.2.3.4",
+ "my-little-version", "1.2.3.4");
+ /* Containing invalid characters, verify that safing the strings works
+ */
+ do_test_swname(broker, "?1?this needs! ESCAPING?", "--v99.11 ~b~",
+ "1-this-needs--ESCAPING", "v99.11--b");
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c
new file mode 100644
index 000000000..f28f63f24
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0017-compression.c
@@ -0,0 +1,142 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Basic compression tests, with rather lacking verification.
+ */
+
+
+int main_0017_compression(int argc, char **argv) {
+ rd_kafka_t *rk_p, *rk_c;
+ const int msg_cnt = 1000;
+ int msg_base = 0;
+ uint64_t testid;
+#define CODEC_CNT 5
+ const char *codecs[CODEC_CNT + 1] = {
+ "none",
+#if WITH_ZLIB
+ "gzip",
+#endif
+#if WITH_SNAPPY
+ "snappy",
+#endif
+#if WITH_ZSTD
+ "zstd",
+#endif
+ "lz4",
+ NULL
+ };
+ char *topics[CODEC_CNT];
+ const int32_t partition = 0;
+ int i;
+ int crc;
+
+ testid = test_id_generate();
+
+ /* Produce messages */
+ rk_p = test_create_producer();
+ for (i = 0; codecs[i] != NULL; i++) {
+ rd_kafka_topic_t *rkt_p;
+
+ topics[i] = rd_strdup(test_mk_topic_name(codecs[i], 1));
+ TEST_SAY(
+ "Produce %d messages with %s compression to "
+ "topic %s\n",
+ msg_cnt, codecs[i], topics[i]);
+ rkt_p = test_create_producer_topic(
+ rk_p, topics[i], "compression.codec", codecs[i], NULL);
+
+ /* Produce small message that will not decrease with
+ * compression (issue #781) */
+ test_produce_msgs(rk_p, rkt_p, testid, partition,
+ msg_base + (partition * msg_cnt), 1, NULL, 5);
+
+ /* Produce standard sized messages */
+ test_produce_msgs(rk_p, rkt_p, testid, partition,
+ msg_base + (partition * msg_cnt) + 1,
+ msg_cnt - 1, NULL, 512);
+ rd_kafka_topic_destroy(rkt_p);
+ }
+
+ rd_kafka_destroy(rk_p);
+
+
+ /* restart timeout (mainly for helgrind use since it is very slow) */
+ test_timeout_set(30);
+
+ /* Consume messages: Without and with CRC checking */
+ for (crc = 0; crc < 2; crc++) {
+ const char *crc_tof = crc ? "true" : "false";
+ rd_kafka_conf_t *conf;
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "check.crcs", crc_tof);
+
+ rk_c = test_create_consumer(NULL, NULL, conf, NULL);
+
+ for (i = 0; codecs[i] != NULL; i++) {
+ rd_kafka_topic_t *rkt_c =
+ rd_kafka_topic_new(rk_c, topics[i], NULL);
+
+ TEST_SAY("Consume %d messages from topic %s (crc=%s)\n",
+ msg_cnt, topics[i], crc_tof);
+ /* Start consuming */
+ test_consumer_start(codecs[i], rkt_c, partition,
+ RD_KAFKA_OFFSET_BEGINNING);
+
+ /* Consume messages */
+ test_consume_msgs(
+ codecs[i], rkt_c, testid, partition,
+ /* Use offset 0 here, which is wrong, should
+ * be TEST_NO_SEEK, but it exposed a bug
+ * where the Offset query was postponed
+ * till after the seek, causing messages
+ * to be replayed. */
+ 0, msg_base, msg_cnt, 1 /* parse format */);
+
+ test_consumer_stop(codecs[i], rkt_c, partition);
+
+ rd_kafka_topic_destroy(rkt_c);
+ }
+
+ rd_kafka_destroy(rk_c);
+ }
+
+ for (i = 0; codecs[i] != NULL; i++)
+ rd_free(topics[i]);
+
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c
new file mode 100644
index 000000000..6b22339d7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0018-cgrp_term.c
@@ -0,0 +1,332 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdstring.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * KafkaConsumer balanced group testing: termination
+ *
+ * Runs two consumers subscribing to the same topics, waits for both to
+ * get an assignment and then closes one of them.
+ */
+
+
+static int assign_cnt = 0;
+static int consumed_msg_cnt = 0;
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ char *memberid = rd_kafka_memberid(rk);
+
+ TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
+ rd_kafka_name(rk), memberid, rd_kafka_err2str(err));
+
+ if (memberid)
+ free(memberid);
+
+ test_print_partition_list(partitions);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ assign_cnt++;
+ rd_kafka_assign(rk, partitions);
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ if (assign_cnt == 0)
+ TEST_FAIL("asymetric rebalance_cb\n");
+ assign_cnt--;
+ rd_kafka_assign(rk, NULL);
+ break;
+
+ default:
+ TEST_FAIL("rebalance failed: %s\n", rd_kafka_err2str(err));
+ break;
+ }
+}
+
+
+static void consume_all(rd_kafka_t **rk_c,
+ int rk_cnt,
+ int exp_msg_cnt,
+ int max_time /*ms*/) {
+ int64_t ts_start = test_clock();
+ int i;
+
+ max_time *= 1000;
+ while (ts_start + max_time > test_clock()) {
+ for (i = 0; i < rk_cnt; i++) {
+ rd_kafka_message_t *rkmsg;
+
+ if (!rk_c[i])
+ continue;
+
+ rkmsg = rd_kafka_consumer_poll(rk_c[i], 500);
+
+ if (!rkmsg)
+ continue;
+ else if (rkmsg->err)
+ TEST_SAY(
+ "Message error "
+ "(at offset %" PRId64
+ " after "
+ "%d/%d messages and %dms): %s\n",
+ rkmsg->offset, consumed_msg_cnt,
+ exp_msg_cnt,
+ (int)(test_clock() - ts_start) / 1000,
+ rd_kafka_message_errstr(rkmsg));
+ else
+ consumed_msg_cnt++;
+
+ rd_kafka_message_destroy(rkmsg);
+
+ if (consumed_msg_cnt >= exp_msg_cnt) {
+ static int once = 0;
+ if (!once++)
+ TEST_SAY("All messages consumed\n");
+ return;
+ }
+ }
+ }
+}
+
+struct args {
+ rd_kafka_t *c;
+ rd_kafka_queue_t *queue;
+};
+
+static int poller_thread_main(void *p) {
+ struct args *args = (struct args *)p;
+
+ while (!rd_kafka_consumer_closed(args->c)) {
+ rd_kafka_message_t *rkm;
+
+ /* Using a long timeout (1 minute) to verify that the
+ * queue is woken when close is done. */
+ rkm = rd_kafka_consume_queue(args->queue, 60 * 1000);
+ if (rkm)
+ rd_kafka_message_destroy(rkm);
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Close consumer using async queue.
+ */
+static void consumer_close_queue(rd_kafka_t *c) {
+ /* Use the standard consumer queue rather than a temporary queue,
+ * the latter is covered by test 0116. */
+ rd_kafka_queue_t *queue = rd_kafka_queue_get_consumer(c);
+ struct args args = {c, queue};
+ thrd_t thrd;
+ int ret;
+
+ /* Spin up poller thread */
+ if (thrd_create(&thrd, poller_thread_main, (void *)&args) !=
+ thrd_success)
+ TEST_FAIL("Failed to create thread");
+
+ TEST_SAY("Closing consumer %s using queue\n", rd_kafka_name(c));
+ TEST_CALL_ERROR__(rd_kafka_consumer_close_queue(c, queue));
+
+ if (thrd_join(thrd, &ret) != thrd_success)
+ TEST_FAIL("thrd_join failed");
+
+ rd_kafka_queue_destroy(queue);
+}
+
+
+static void do_test(rd_bool_t with_queue) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+#define _CONS_CNT 2
+ rd_kafka_t *rk_p, *rk_c[_CONS_CNT];
+ rd_kafka_topic_t *rkt_p;
+ int msg_cnt = test_quick ? 100 : 1000;
+ int msg_base = 0;
+ int partition_cnt = 2;
+ int partition;
+ uint64_t testid;
+ rd_kafka_topic_conf_t *default_topic_conf;
+ rd_kafka_topic_partition_list_t *topics;
+ rd_kafka_resp_err_t err;
+ test_timing_t t_assign, t_consume;
+ char errstr[512];
+ int i;
+
+ SUB_TEST("with_queue=%s", RD_STR_ToF(with_queue));
+
+ testid = test_id_generate();
+
+ /* Produce messages */
+ rk_p = test_create_producer();
+ rkt_p = test_create_producer_topic(rk_p, topic, NULL);
+
+ for (partition = 0; partition < partition_cnt; partition++) {
+ test_produce_msgs(rk_p, rkt_p, testid, partition,
+ msg_base + (partition * msg_cnt), msg_cnt,
+ NULL, 0);
+ }
+
+ rd_kafka_topic_destroy(rkt_p);
+ rd_kafka_destroy(rk_p);
+
+
+ test_conf_init(NULL, &default_topic_conf,
+ 5 + ((test_session_timeout_ms * 3 * 2) / 1000));
+ if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset",
+ "smallest", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s\n", errstr);
+
+ /* Fill in topic subscription set */
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, topic, -1);
+
+ /* Create consumers and start subscription */
+ for (i = 0; i < _CONS_CNT; i++) {
+ rk_c[i] = test_create_consumer(
+ topic /*group_id*/, rebalance_cb, NULL,
+ rd_kafka_topic_conf_dup(default_topic_conf));
+
+ err = rd_kafka_poll_set_consumer(rk_c[i]);
+ if (err)
+ TEST_FAIL("poll_set_consumer: %s\n",
+ rd_kafka_err2str(err));
+
+ err = rd_kafka_subscribe(rk_c[i], topics);
+ if (err)
+ TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
+ }
+
+ rd_kafka_topic_conf_destroy(default_topic_conf);
+
+ rd_kafka_topic_partition_list_destroy(topics);
+
+
+ /* Wait for both consumers to get an assignment */
+ TEST_SAY("Awaiting assignments for %d consumer(s)\n", _CONS_CNT);
+ TIMING_START(&t_assign, "WAIT.ASSIGN");
+ while (assign_cnt < _CONS_CNT)
+ consume_all(rk_c, _CONS_CNT, msg_cnt,
+ test_session_timeout_ms + 3000);
+ TIMING_STOP(&t_assign);
+
+ /* Now close one of the consumers, this will cause a rebalance. */
+ TEST_SAY("Closing down 1/%d consumer(s): %s\n", _CONS_CNT,
+ rd_kafka_name(rk_c[0]));
+ if (with_queue)
+ consumer_close_queue(rk_c[0]);
+ else
+ TEST_CALL_ERR__(rd_kafka_consumer_close(rk_c[0]));
+
+ rd_kafka_destroy(rk_c[0]);
+ rk_c[0] = NULL;
+
+ /* Let remaining consumers run for a while to take over the now
+ * lost partitions. */
+
+ if (assign_cnt != _CONS_CNT - 1)
+ TEST_FAIL("assign_cnt %d, should be %d\n", assign_cnt,
+ _CONS_CNT - 1);
+
+ TIMING_START(&t_consume, "CONSUME.WAIT");
+ consume_all(rk_c, _CONS_CNT, msg_cnt, test_session_timeout_ms + 3000);
+ TIMING_STOP(&t_consume);
+
+ TEST_SAY("Closing remaining consumers\n");
+ for (i = 0; i < _CONS_CNT; i++) {
+ test_timing_t t_close;
+ rd_kafka_topic_partition_list_t *sub;
+ int j;
+
+ if (!rk_c[i])
+ continue;
+
+ /* Query subscription */
+ err = rd_kafka_subscription(rk_c[i], &sub);
+ if (err)
+ TEST_FAIL("%s: subscription() failed: %s\n",
+ rd_kafka_name(rk_c[i]),
+ rd_kafka_err2str(err));
+ TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c[i]),
+ sub->cnt);
+ for (j = 0; j < sub->cnt; j++)
+ TEST_SAY(" %s\n", sub->elems[j].topic);
+ rd_kafka_topic_partition_list_destroy(sub);
+
+ /* Run an explicit unsubscribe() (async) prior to close()
+ * to trigger race condition issues on termination. */
+ TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c[i]));
+ err = rd_kafka_unsubscribe(rk_c[i]);
+ if (err)
+ TEST_FAIL("%s: unsubscribe failed: %s\n",
+ rd_kafka_name(rk_c[i]),
+ rd_kafka_err2str(err));
+
+ TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
+ TIMING_START(&t_close, "CONSUMER.CLOSE");
+ if (with_queue)
+ consumer_close_queue(rk_c[i]);
+ else
+ TEST_CALL_ERR__(rd_kafka_consumer_close(rk_c[i]));
+ TIMING_STOP(&t_close);
+
+ rd_kafka_destroy(rk_c[i]);
+ rk_c[i] = NULL;
+ }
+
+ TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, msg_cnt);
+ if (consumed_msg_cnt < msg_cnt)
+ TEST_FAIL("Only %d/%d messages were consumed\n",
+ consumed_msg_cnt, msg_cnt);
+ else if (consumed_msg_cnt > msg_cnt)
+ TEST_SAY(
+ "At least %d/%d messages were consumed "
+ "multiple times\n",
+ consumed_msg_cnt - msg_cnt, msg_cnt);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0018_cgrp_term(int argc, char **argv) {
+ do_test(rd_false /* rd_kafka_consumer_close() */);
+ do_test(rd_true /* rd_kafka_consumer_close_queue() */);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c
new file mode 100644
index 000000000..02729c339
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0019-list_groups.c
@@ -0,0 +1,289 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * List consumer groups
+ *
+ * Runs two consumers in two different groups and lists them.
+ */
+
+
+
+/**
+ * Verify that all groups in 'groups' are seen, if so returns group_cnt,
+ * else returns -1.
+ */
+static int verify_groups(const struct rd_kafka_group_list *grplist,
+ char **groups,
+ int group_cnt) {
+ int i;
+ int seen = 0;
+
+ for (i = 0; i < grplist->group_cnt; i++) {
+ const struct rd_kafka_group_info *gi = &grplist->groups[i];
+ int j;
+
+ for (j = 0; j < group_cnt; j++) {
+ if (strcmp(gi->group, groups[j]))
+ continue;
+
+ if (gi->err)
+ TEST_SAY(
+ "Group %s has broker-reported "
+ "error: %s\n",
+ gi->group, rd_kafka_err2str(gi->err));
+
+ seen++;
+ }
+ }
+
+ TEST_SAY("Found %d/%d desired groups in list of %d groups\n", seen,
+ group_cnt, grplist->group_cnt);
+
+ if (seen != group_cnt)
+ return -1;
+ else
+ return seen;
+}
+
+
+/**
+ * List groups by:
+ * - List all groups, check that the groups in 'groups' are seen.
+ * - List each group in 'groups', one by one.
+ *
+ * Returns 'group_cnt' if all groups in 'groups' were seen by both
+ * methods, else 0, or -1 on error.
+ */
+static int
+list_groups(rd_kafka_t *rk, char **groups, int group_cnt, const char *desc) {
+ rd_kafka_resp_err_t err = 0;
+ const struct rd_kafka_group_list *grplist;
+ int i, r;
+ int fails = 0;
+ int seen = 0;
+ int seen_all = 0;
+ int retries = 5;
+
+ TEST_SAY("List groups (expect %d): %s\n", group_cnt, desc);
+
+ /* FIXME: Wait for broker to come up. This should really be abstracted
+ * by librdkafka. */
+ do {
+ if (err) {
+ TEST_SAY("Retrying group list in 1s because of: %s\n",
+ rd_kafka_err2str(err));
+ rd_sleep(1);
+ }
+ err = rd_kafka_list_groups(rk, NULL, &grplist,
+ tmout_multip(5000));
+ } while ((err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR_GROUP_LOAD_IN_PROGRESS) &&
+ retries-- > 0);
+
+ if (err) {
+ TEST_SAY("Failed to list all groups: %s\n",
+ rd_kafka_err2str(err));
+ return -1;
+ }
+
+ seen_all = verify_groups(grplist, groups, group_cnt);
+ rd_kafka_group_list_destroy(grplist);
+
+ for (i = 0; i < group_cnt; i++) {
+ err = rd_kafka_list_groups(rk, groups[i], &grplist, 5000);
+ if (err) {
+ TEST_SAY("Failed to list group %s: %s\n", groups[i],
+ rd_kafka_err2str(err));
+ fails++;
+ continue;
+ }
+
+ r = verify_groups(grplist, &groups[i], 1);
+ if (r == 1)
+ seen++;
+ rd_kafka_group_list_destroy(grplist);
+ }
+
+
+ if (seen_all != seen)
+ return 0;
+
+ return seen;
+}
+
+
+
+static void do_test_list_groups(void) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+#define _CONS_CNT 2
+ char *groups[_CONS_CNT];
+ rd_kafka_t *rk, *rk_c[_CONS_CNT];
+ rd_kafka_topic_partition_list_t *topics;
+ rd_kafka_resp_err_t err;
+ test_timing_t t_grps;
+ int i;
+ int groups_seen;
+ rd_kafka_topic_t *rkt;
+ const struct rd_kafka_group_list *grplist;
+
+ SUB_TEST();
+
+ /* Handle for group listings */
+ rk = test_create_producer();
+
+ /* Produce messages so that topic is auto created */
+ rkt = test_create_topic_object(rk, topic, NULL);
+ test_produce_msgs(rk, rkt, 0, 0, 0, 10, NULL, 64);
+ rd_kafka_topic_destroy(rkt);
+
+ /* Query groups before creation, should not list our groups. */
+ groups_seen = list_groups(rk, NULL, 0, "should be none");
+ if (groups_seen != 0)
+ TEST_FAIL(
+ "Saw %d groups when there wasn't "
+ "supposed to be any\n",
+ groups_seen);
+
+ /* Fill in topic subscription set */
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, topic, -1);
+
+ /* Create consumers and start subscription */
+ for (i = 0; i < _CONS_CNT; i++) {
+ groups[i] = malloc(32);
+ test_str_id_generate(groups[i], 32);
+ rk_c[i] = test_create_consumer(groups[i], NULL, NULL, NULL);
+
+ err = rd_kafka_poll_set_consumer(rk_c[i]);
+ if (err)
+ TEST_FAIL("poll_set_consumer: %s\n",
+ rd_kafka_err2str(err));
+
+ err = rd_kafka_subscribe(rk_c[i], topics);
+ if (err)
+ TEST_FAIL("subscribe: %s\n", rd_kafka_err2str(err));
+ }
+
+ rd_kafka_topic_partition_list_destroy(topics);
+
+
+ TIMING_START(&t_grps, "WAIT.GROUPS");
+ /* Query groups again until both groups are seen. */
+ while (1) {
+ groups_seen = list_groups(rk, (char **)groups, _CONS_CNT,
+ "should see my groups");
+ if (groups_seen == _CONS_CNT)
+ break;
+ rd_sleep(1);
+ }
+ TIMING_STOP(&t_grps);
+
+ /* Try a list_groups with a low enough timeout to fail. */
+ grplist = NULL;
+ TIMING_START(&t_grps, "WAIT.GROUPS.TIMEOUT0");
+ err = rd_kafka_list_groups(rk, NULL, &grplist, 0);
+ TIMING_STOP(&t_grps);
+ TEST_SAY("list_groups(timeout=0) returned %d groups and status: %s\n",
+ grplist ? grplist->group_cnt : -1, rd_kafka_err2str(err));
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected list_groups(timeout=0) to fail "
+ "with timeout, got %s",
+ rd_kafka_err2str(err));
+
+
+ TEST_SAY("Closing remaining consumers\n");
+ for (i = 0; i < _CONS_CNT; i++) {
+ test_timing_t t_close;
+ if (!rk_c[i])
+ continue;
+
+ TEST_SAY("Closing %s\n", rd_kafka_name(rk_c[i]));
+ TIMING_START(&t_close, "CONSUMER.CLOSE");
+ err = rd_kafka_consumer_close(rk_c[i]);
+ TIMING_STOP(&t_close);
+ if (err)
+ TEST_FAIL("consumer_close failed: %s\n",
+ rd_kafka_err2str(err));
+
+ rd_kafka_destroy(rk_c[i]);
+ rk_c[i] = NULL;
+
+ free(groups[i]);
+ }
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief #3705: Verify that list_groups() doesn't hang if unable to
+ * connect to the cluster.
+ */
+static void do_test_list_groups_hang(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ const struct rd_kafka_group_list *grplist;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+
+ SUB_TEST();
+ test_conf_init(&conf, NULL, 20);
+
+ /* An unavailable broker */
+ test_conf_set(conf, "bootstrap.servers", "127.0.0.1:65531");
+
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ TIMING_START(&timing, "list_groups");
+ err = rd_kafka_list_groups(rk, NULL, &grplist, 5 * 1000);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected ERR__TIMED_OUT, not %s", rd_kafka_err2name(err));
+ TIMING_ASSERT(&timing, 5 * 1000, 7 * 1000);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0019_list_groups(int argc, char **argv) {
+ do_test_list_groups();
+ do_test_list_groups_hang();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c
new file mode 100644
index 000000000..a8a6552fa
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0020-destroy_hang.c
@@ -0,0 +1,162 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Various regression tests for hangs on destroy.
+ */
+
+
+
+/**
+ * Request offset for nonexisting partition.
+ * Will cause rd_kafka_destroy() to hang.
+ */
+
+static int nonexist_part(void) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ rd_kafka_t *rk;
+ rd_kafka_topic_partition_list_t *parts;
+ rd_kafka_resp_err_t err;
+ test_timing_t t_pos;
+ const int msgcnt = 100;
+ uint64_t testid;
+ int i;
+ int it, iterations = 5;
+
+ /* Produce messages */
+ testid =
+ test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt);
+
+ for (it = 0; it < iterations; it++) {
+ char group_id[32];
+
+ test_conf_init(NULL, NULL, 15);
+
+ test_str_id_generate(group_id, sizeof(group_id));
+
+ TEST_SAY("Iteration %d/%d, using group.id %s\n", it, iterations,
+ group_id);
+
+ /* Consume messages */
+ test_consume_msgs_easy(group_id, topic, testid, -1, msgcnt,
+ NULL);
+
+ /*
+ * Now start a new consumer and query stored offsets (positions)
+ */
+
+ rk = test_create_consumer(group_id, NULL, NULL, NULL);
+
+ /* Fill in partition set */
+ parts = rd_kafka_topic_partition_list_new(2);
+ /* existing */
+ rd_kafka_topic_partition_list_add(parts, topic, 0);
+ /* non-existing */
+ rd_kafka_topic_partition_list_add(parts, topic, 123);
+
+
+ TIMING_START(&t_pos, "COMMITTED");
+ err = rd_kafka_committed(rk, parts, tmout_multip(5000));
+ TIMING_STOP(&t_pos);
+ if (err)
+ TEST_FAIL("Failed to acquire committed offsets: %s\n",
+ rd_kafka_err2str(err));
+
+ for (i = 0; i < parts->cnt; i++) {
+ TEST_SAY("%s [%" PRId32 "] returned offset %" PRId64
+ ": %s\n",
+ parts->elems[i].topic,
+ parts->elems[i].partition,
+ parts->elems[i].offset,
+ rd_kafka_err2str(parts->elems[i].err));
+ if (parts->elems[i].partition == 0 &&
+ parts->elems[i].offset <= 0)
+ TEST_FAIL("Partition %" PRId32
+ " should have a "
+ "proper offset, not %" PRId64 "\n",
+ parts->elems[i].partition,
+ parts->elems[i].offset);
+ else if (parts->elems[i].partition == 123 &&
+ parts->elems[i].offset !=
+ RD_KAFKA_OFFSET_INVALID)
+ TEST_FAIL("Partition %" PRId32
+ " should have failed\n",
+ parts->elems[i].partition);
+ }
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ test_consumer_close(rk);
+
+ /* Hangs if bug isn't fixed */
+ rd_kafka_destroy(rk);
+ }
+
+ return 0;
+}
+
+
+/**
+ * Issue #691: Producer hangs on destroy if group.id is configured.
+ */
+static int producer_groupid(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+
+ TEST_SAY("producer_groupid hang test\n");
+ test_conf_init(&conf, NULL, 10);
+
+ test_conf_set(conf, "group.id", "dummy");
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("Destroying producer\n");
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
+
+int main_0020_destroy_hang(int argc, char **argv) {
+ int fails = 0;
+
+ test_conf_init(NULL, NULL, 30);
+
+ fails += nonexist_part();
+ fails += producer_groupid();
+ if (fails > 0)
+ TEST_FAIL("See %d previous error(s)\n", fails);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c
new file mode 100644
index 000000000..76b4dd16b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0021-rkt_destroy.c
@@ -0,0 +1,71 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Issue #502
+ * Crash if rd_kafka_topic_destroy() is called before all messages
+ * have been produced.
+ * This only happens when using a partitioner (producing to PARTITION_UA)
+ */
+
+
+
+int main_0021_rkt_destroy(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 0);
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ const int msgcnt = 1000;
+ uint64_t testid;
+ int remains = 0;
+
+ test_conf_init(NULL, NULL, 10);
+
+
+ testid = test_id_generate();
+ rk = test_create_producer();
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+
+ test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt, NULL, 0, 0, &remains);
+
+ rd_kafka_topic_destroy(rkt);
+
+ test_wait_delivery(rk, &remains);
+
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c
new file mode 100644
index 000000000..64e826d03
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0022-consume_batch.c
@@ -0,0 +1,212 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Consume with batch + queue interface
+ *
+ */
+
+
+static void do_test_consume_batch(void) {
+#define topic_cnt 2
+ char *topics[topic_cnt];
+ const int partition_cnt = 2;
+ rd_kafka_t *rk;
+ rd_kafka_queue_t *rkq;
+ rd_kafka_topic_t *rkts[topic_cnt];
+ rd_kafka_resp_err_t err;
+ const int msgcnt = test_quick ? 1000 : 10000;
+ uint64_t testid;
+ int i, p;
+ int batch_cnt = 0;
+ int remains;
+
+ SUB_TEST();
+
+ testid = test_id_generate();
+
+ /* Produce messages */
+ for (i = 0; i < topic_cnt; i++) {
+ topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ for (p = 0; p < partition_cnt; p++)
+ test_produce_msgs_easy(topics[i], testid, p,
+ msgcnt / topic_cnt /
+ partition_cnt);
+ }
+
+
+ /* Create simple consumer */
+ rk = test_create_consumer(NULL, NULL, NULL, NULL);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_new(rk);
+
+ for (i = 0; i < topic_cnt; i++) {
+ /* Create topic object */
+ rkts[i] = test_create_topic_object(
+ rk, topics[i], "auto.offset.reset", "smallest", NULL);
+
+ /* Start consuming each partition and redirect
+ * messages to queue */
+
+ TEST_SAY("Start consuming topic %s partitions 0..%d\n",
+ rd_kafka_topic_name(rkts[i]), partition_cnt);
+
+ for (p = 0; p < partition_cnt; p++) {
+ err = rd_kafka_consume_start_queue(
+ rkts[i], p, RD_KAFKA_OFFSET_BEGINNING, rkq);
+ if (err)
+ TEST_FAIL("Failed to start consuming: %s\n",
+ rd_kafka_err2str(err));
+ }
+ }
+
+ remains = msgcnt;
+
+ /* Consume messages from common queue using batch interface. */
+ TEST_SAY("Consume %d messages from queue\n", remains);
+ while (remains > 0) {
+ rd_kafka_message_t *rkmessage[1000];
+ ssize_t r;
+ test_timing_t t_batch;
+
+ TIMING_START(&t_batch, "CONSUME.BATCH");
+ r = rd_kafka_consume_batch_queue(rkq, 1000, rkmessage, 1000);
+ TIMING_STOP(&t_batch);
+
+ TEST_SAY("Batch consume iteration #%d: Consumed %" PRIdsz
+ "/1000 messages\n",
+ batch_cnt, r);
+
+ if (r == -1)
+ TEST_FAIL("Failed to consume messages: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ remains -= (int)r;
+
+ for (i = 0; i < r; i++)
+ rd_kafka_message_destroy(rkmessage[i]);
+
+ batch_cnt++;
+ }
+
+
+ TEST_SAY("Stopping consumer\n");
+ for (i = 0; i < topic_cnt; i++) {
+ for (p = 0; p < partition_cnt; p++) {
+ err = rd_kafka_consume_stop(rkts[i], p);
+ if (err)
+ TEST_FAIL("Failed to stop consuming: %s\n",
+ rd_kafka_err2str(err));
+ }
+
+ rd_kafka_topic_destroy(rkts[i]);
+ rd_free(topics[i]);
+ }
+
+ rd_kafka_queue_destroy(rkq);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+#if WITH_SASL_OAUTHBEARER
+/**
+ * @brief Verify that the oauthbearer_refresh_cb() is triggered
+ * when using consume_batch_queue() (as opposed to consumer_poll()).
+ */
+
+static rd_bool_t refresh_called = rd_false;
+
+static void
+refresh_cb(rd_kafka_t *rk, const char *oauthbearer_config, void *opaque) {
+ TEST_SAY("Refresh callback called\n");
+ TEST_ASSERT(!refresh_called);
+ refresh_called = rd_true;
+ rd_kafka_oauthbearer_set_token_failure(rk, "Refresh called");
+}
+
+static void do_test_consume_batch_oauthbearer_cb(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *rkq;
+ rd_kafka_message_t *rkms[1];
+ ssize_t r;
+
+ SUB_TEST_QUICK();
+
+ refresh_called = rd_false;
+
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "security.protocol", "sasl_plaintext");
+ test_conf_set(conf, "sasl.mechanism", "OAUTHBEARER");
+ rd_kafka_conf_set_oauthbearer_token_refresh_cb(conf, refresh_cb);
+
+ /* Create simple consumer */
+ rk = test_create_consumer(NULL, NULL, conf, NULL);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_get_main(rk);
+
+ r = rd_kafka_consume_batch_queue(rkq, 1000, rkms, 1);
+ TEST_ASSERT(r == 0, "Expected return value 0, not %d", (int)r);
+
+ TEST_SAY("refresh_called = %d\n", refresh_called);
+ TEST_ASSERT(refresh_called,
+ "Expected refresh callback to have been called");
+
+ rd_kafka_queue_destroy(rkq);
+
+ rd_kafka_destroy(rk);
+}
+#endif
+
+
+int main_0022_consume_batch(int argc, char **argv) {
+ do_test_consume_batch();
+ return 0;
+}
+
+
+int main_0022_consume_batch_local(int argc, char **argv) {
+#if WITH_SASL_OAUTHBEARER
+ do_test_consume_batch_oauthbearer_cb();
+#else
+ TEST_SKIP("No OAUTHBEARER support\n");
+#endif
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c
new file mode 100644
index 000000000..318fc0a1b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0025-timers.c
@@ -0,0 +1,147 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+
+/**
+ * Tests that rdkafka's internal timers behave.
+ */
+
+
+
+struct state {
+ int calls;
+ int64_t ts_last;
+ int interval;
+ int fails;
+};
+
+struct state state;
+
+
+static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
+ const int64_t now = test_clock();
+ /* Fake the first elapsed time since we dont really know how
+ * long rd_kafka_new() takes and at what time the timer is started. */
+ const int64_t elapsed =
+ state.ts_last ? now - state.ts_last : state.interval;
+ const int64_t overshoot = elapsed - state.interval;
+ const int wiggleroom_up =
+ (int)((double)state.interval *
+ (!strcmp(test_mode, "bare") ? 0.2 : 1.0));
+ const int wiggleroom_down = (int)((double)state.interval * 0.1);
+
+ TEST_SAY("Call #%d: after %" PRId64
+ "ms, %.0f%% outside "
+ "interval %" PRId64 " >-%d <+%d\n",
+ state.calls, elapsed / 1000,
+ ((double)overshoot / state.interval) * 100.0,
+ (int64_t)state.interval / 1000, wiggleroom_down / 1000,
+ wiggleroom_up / 1000);
+
+ if (overshoot < -wiggleroom_down || overshoot > wiggleroom_up) {
+ TEST_WARN("^ outside range\n");
+ state.fails++;
+ }
+
+ state.ts_last = now;
+ state.calls++;
+
+ return 0;
+}
+
+
+/**
+ * Enable statistics with a set interval, make sure the stats callbacks are
+ * called within reasonable intervals.
+ */
+static void do_test_stats_timer(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ const int exp_calls = 10;
+ test_timing_t t_new;
+
+ memset(&state, 0, sizeof(state));
+
+ state.interval = 600 * 1000;
+
+ test_conf_init(&conf, NULL, 200);
+
+ test_conf_set(conf, "statistics.interval.ms", "600");
+ test_conf_set(conf, "bootstrap.servers", NULL); /*no need for brokers*/
+ rd_kafka_conf_set_stats_cb(conf, stats_cb);
+
+ TIMING_START(&t_new, "rd_kafka_new()");
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+ TIMING_STOP(&t_new);
+
+ TEST_SAY(
+ "Starting wait loop for %d expected stats_cb calls "
+ "with an interval of %dms\n",
+ exp_calls, state.interval / 1000);
+
+
+ while (state.calls < exp_calls) {
+ test_timing_t t_poll;
+ TIMING_START(&t_poll, "rd_kafka_poll()");
+ rd_kafka_poll(rk, 100);
+ TIMING_STOP(&t_poll);
+
+ if (TIMING_DURATION(&t_poll) > 150 * 1000)
+ TEST_WARN(
+ "rd_kafka_poll(rk,100) "
+ "took more than 50%% extra\n");
+ }
+
+ rd_kafka_destroy(rk);
+
+ if (state.calls > exp_calls)
+ TEST_SAY("Got more calls than expected: %d > %d\n", state.calls,
+ exp_calls);
+
+ if (state.fails) {
+ /* We can't rely on CIs giving our test job enough CPU to finish
+ * in time, so don't error out even if the time is outside
+ * the window */
+ if (test_on_ci)
+ TEST_WARN("%d/%d intervals failed\n", state.fails,
+ state.calls);
+ else
+ TEST_FAIL("%d/%d intervals failed\n", state.fails,
+ state.calls);
+ } else
+ TEST_SAY("All %d intervals okay\n", state.calls);
+}
+
+
+int main_0025_timers(int argc, char **argv) {
+ do_test_stats_timer();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c
new file mode 100644
index 000000000..c8adc3885
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0026-consume_pause.c
@@ -0,0 +1,541 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Consumer: pause and resume.
+ * Make sure no messages are lost or duplicated.
+ */
+
+
+
+static void consume_pause(void) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int partition_cnt = 3;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_topic_partition_list_t *topics;
+ rd_kafka_resp_err_t err;
+ const int msgcnt = 1000;
+ uint64_t testid;
+ int it, iterations = 3;
+ int msg_base = 0;
+ int fails = 0;
+ char group_id[32];
+
+ SUB_TEST();
+
+ test_conf_init(&conf, &tconf,
+ 60 + (test_session_timeout_ms * 3 / 1000));
+ test_conf_set(conf, "enable.partition.eof", "true");
+ test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
+
+ test_create_topic(NULL, topic, partition_cnt, 1);
+
+ /* Produce messages */
+ testid =
+ test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt);
+
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, topic, -1);
+
+ for (it = 0; it < iterations; it++) {
+ const int pause_cnt = 5;
+ int per_pause_msg_cnt = msgcnt / pause_cnt;
+ const int pause_time = 1200 /* 1.2s */;
+ int eof_cnt = -1;
+ int pause;
+ rd_kafka_topic_partition_list_t *parts;
+ test_msgver_t mv_all;
+ int j;
+
+ test_msgver_init(&mv_all, testid); /* All messages */
+
+ /* On the last iteration reuse the previous group.id
+ * to make consumer start at committed offsets which should
+ * also be EOF. This to trigger #1307. */
+ if (it < iterations - 1)
+ test_str_id_generate(group_id, sizeof(group_id));
+ else {
+ TEST_SAY("Reusing previous group.id %s\n", group_id);
+ per_pause_msg_cnt = 0;
+ eof_cnt = partition_cnt;
+ }
+
+ TEST_SAY(
+ "Iteration %d/%d, using group.id %s, "
+ "expecting %d messages/pause and %d EOFs\n",
+ it, iterations - 1, group_id, per_pause_msg_cnt, eof_cnt);
+
+ rk = test_create_consumer(group_id, NULL,
+ rd_kafka_conf_dup(conf),
+ rd_kafka_topic_conf_dup(tconf));
+
+
+ TEST_SAY("Subscribing to %d topic(s): %s\n", topics->cnt,
+ topics->elems[0].topic);
+ if ((err = rd_kafka_subscribe(rk, topics)))
+ TEST_FAIL("Failed to subscribe: %s\n",
+ rd_kafka_err2str(err));
+
+
+ for (pause = 0; pause < pause_cnt; pause++) {
+ int rcnt;
+ test_timing_t t_assignment;
+ test_msgver_t mv;
+
+ test_msgver_init(&mv, testid);
+ mv.fwd = &mv_all;
+
+ /* Consume sub-part of the messages. */
+ TEST_SAY(
+ "Pause-Iteration #%d: Consume %d messages at "
+ "msg_base %d\n",
+ pause, per_pause_msg_cnt, msg_base);
+ rcnt = test_consumer_poll(
+ "consume.part", rk, testid, eof_cnt, msg_base,
+ per_pause_msg_cnt == 0 ? -1 : per_pause_msg_cnt,
+ &mv);
+
+ TEST_ASSERT(rcnt == per_pause_msg_cnt,
+ "expected %d messages, got %d",
+ per_pause_msg_cnt, rcnt);
+
+ test_msgver_verify("pause.iteration", &mv,
+ TEST_MSGVER_PER_PART, msg_base,
+ per_pause_msg_cnt);
+ test_msgver_clear(&mv);
+
+ msg_base += per_pause_msg_cnt;
+
+ TIMING_START(&t_assignment, "rd_kafka_assignment()");
+ if ((err = rd_kafka_assignment(rk, &parts)))
+ TEST_FAIL("failed to get assignment: %s\n",
+ rd_kafka_err2str(err));
+ TIMING_STOP(&t_assignment);
+
+ TEST_ASSERT(parts->cnt > 0,
+ "parts->cnt %d, expected > 0", parts->cnt);
+
+ TEST_SAY("Now pausing %d partition(s) for %dms\n",
+ parts->cnt, pause_time);
+ if ((err = rd_kafka_pause_partitions(rk, parts)))
+ TEST_FAIL("Failed to pause: %s\n",
+ rd_kafka_err2str(err));
+
+ /* Check per-partition errors */
+ for (j = 0; j < parts->cnt; j++) {
+ if (parts->elems[j].err) {
+ TEST_WARN(
+ "pause failure for "
+ "%s %" PRId32 "]: %s\n",
+ parts->elems[j].topic,
+ parts->elems[j].partition,
+ rd_kafka_err2str(
+ parts->elems[j].err));
+ fails++;
+ }
+ }
+ TEST_ASSERT(fails == 0, "See previous warnings\n");
+
+ TEST_SAY(
+ "Waiting for %dms, should not receive any "
+ "messages during this time\n",
+ pause_time);
+
+ test_consumer_poll_no_msgs("silence.while.paused", rk,
+ testid, pause_time);
+
+ TEST_SAY("Resuming %d partitions\n", parts->cnt);
+ if ((err = rd_kafka_resume_partitions(rk, parts)))
+ TEST_FAIL("Failed to resume: %s\n",
+ rd_kafka_err2str(err));
+
+ /* Check per-partition errors */
+ for (j = 0; j < parts->cnt; j++) {
+ if (parts->elems[j].err) {
+ TEST_WARN(
+ "resume failure for "
+ "%s %" PRId32 "]: %s\n",
+ parts->elems[j].topic,
+ parts->elems[j].partition,
+ rd_kafka_err2str(
+ parts->elems[j].err));
+ fails++;
+ }
+ }
+ TEST_ASSERT(fails == 0, "See previous warnings\n");
+
+ rd_kafka_topic_partition_list_destroy(parts);
+ }
+
+ if (per_pause_msg_cnt > 0)
+ test_msgver_verify("all.msgs", &mv_all,
+ TEST_MSGVER_ALL_PART, 0, msgcnt);
+ else
+ test_msgver_verify("all.msgs", &mv_all,
+ TEST_MSGVER_ALL_PART, 0, 0);
+ test_msgver_clear(&mv_all);
+
+ /* Should now not see any more messages. */
+ test_consumer_poll_no_msgs("end.exp.no.msgs", rk, testid, 3000);
+
+ test_consumer_close(rk);
+
+ /* Hangs if bug isn't fixed */
+ rd_kafka_destroy(rk);
+ }
+
+ rd_kafka_topic_partition_list_destroy(topics);
+ rd_kafka_conf_destroy(conf);
+ rd_kafka_topic_conf_destroy(tconf);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Verify that the paused partition state is not used after
+ * the partition has been re-assigned.
+ *
+ * 1. Produce N messages
+ * 2. Consume N/4 messages
+ * 3. Pause partitions
+ * 4. Manually commit offset N/2
+ * 5. Unassign partitions
+ * 6. Assign partitions again
+ * 7. Verify that consumption starts at N/2 and not N/4
+ */
+static void consume_pause_resume_after_reassign(void) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int32_t partition = 0;
+ const int msgcnt = 4000;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_partition_list_t *partitions, *pos;
+ rd_kafka_resp_err_t err;
+ int exp_msg_cnt;
+ uint64_t testid;
+ int r;
+ int msg_base = 0;
+ test_msgver_t mv;
+ rd_kafka_topic_partition_t *toppar;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+
+ test_create_topic(NULL, topic, (int)partition + 1, 1);
+
+ /* Produce messages */
+ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
+
+ /* Set start offset to beginning */
+ partitions = rd_kafka_topic_partition_list_new(1);
+ toppar =
+ rd_kafka_topic_partition_list_add(partitions, topic, partition);
+ toppar->offset = RD_KAFKA_OFFSET_BEGINNING;
+
+
+ /**
+ * Create consumer.
+ */
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "enable.partition.eof", "true");
+ rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_assign("assign", rk, partitions);
+
+
+ exp_msg_cnt = msgcnt / 4;
+ TEST_SAY("Consuming first quarter (%d) of messages\n", exp_msg_cnt);
+ test_msgver_init(&mv, testid);
+ r = test_consumer_poll("consume.first.quarter", rk, testid, 0, msg_base,
+ exp_msg_cnt, &mv);
+ TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d",
+ exp_msg_cnt, r);
+
+
+ TEST_SAY("Pausing partitions\n");
+ if ((err = rd_kafka_pause_partitions(rk, partitions)))
+ TEST_FAIL("Failed to pause: %s", rd_kafka_err2str(err));
+
+ TEST_SAY("Verifying pause, should see no new messages...\n");
+ test_consumer_poll_no_msgs("silence.while.paused", rk, testid, 3000);
+
+ test_msgver_verify("first.quarter", &mv, TEST_MSGVER_ALL_PART, msg_base,
+ exp_msg_cnt);
+ test_msgver_clear(&mv);
+
+
+ /* Check position */
+ pos = rd_kafka_topic_partition_list_copy(partitions);
+ if ((err = rd_kafka_position(rk, pos)))
+ TEST_FAIL("position() failed: %s", rd_kafka_err2str(err));
+
+ TEST_ASSERT(!pos->elems[0].err,
+ "position() returned error for our partition: %s",
+ rd_kafka_err2str(pos->elems[0].err));
+ TEST_SAY("Current application consume position is %" PRId64 "\n",
+ pos->elems[0].offset);
+ TEST_ASSERT(pos->elems[0].offset == (int64_t)exp_msg_cnt,
+ "expected position %" PRId64 ", not %" PRId64,
+ (int64_t)exp_msg_cnt, pos->elems[0].offset);
+ rd_kafka_topic_partition_list_destroy(pos);
+
+
+ toppar->offset = (int64_t)(msgcnt / 2);
+ TEST_SAY("Committing (yet unread) offset %" PRId64 "\n",
+ toppar->offset);
+ if ((err = rd_kafka_commit(rk, partitions, 0 /*sync*/)))
+ TEST_FAIL("Commit failed: %s", rd_kafka_err2str(err));
+
+
+ TEST_SAY("Unassigning\n");
+ test_consumer_unassign("Unassign", rk);
+
+ /* Set start offset to INVALID so that the standard start offset
+ * logic kicks in. */
+ toppar->offset = RD_KAFKA_OFFSET_INVALID;
+
+ TEST_SAY("Reassigning\n");
+ test_consumer_assign("Reassign", rk, partitions);
+
+
+ TEST_SAY("Resuming partitions\n");
+ if ((err = rd_kafka_resume_partitions(rk, partitions)))
+ TEST_FAIL("Failed to resume: %s", rd_kafka_err2str(err));
+
+ msg_base = msgcnt / 2;
+ exp_msg_cnt = msgcnt / 2;
+ TEST_SAY("Consuming second half (%d) of messages at msg_base %d\n",
+ exp_msg_cnt, msg_base);
+ test_msgver_init(&mv, testid);
+ r = test_consumer_poll("consume.second.half", rk, testid, 1 /*exp eof*/,
+ msg_base, exp_msg_cnt, &mv);
+ TEST_ASSERT(r == exp_msg_cnt, "expected %d messages, got %d",
+ exp_msg_cnt, r);
+
+ test_msgver_verify("second.half", &mv, TEST_MSGVER_ALL_PART, msg_base,
+ exp_msg_cnt);
+ test_msgver_clear(&mv);
+
+
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ rd_kafka_resp_err_t err2;
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ /* Set start offset to beginning,
+ * while auto.offset.reset is default at `latest`. */
+
+ parts->elems[0].offset = RD_KAFKA_OFFSET_BEGINNING;
+ test_consumer_assign("rebalance", rk, parts);
+ TEST_SAY("Pausing partitions\n");
+ if ((err2 = rd_kafka_pause_partitions(rk, parts)))
+ TEST_FAIL("Failed to pause: %s",
+ rd_kafka_err2str(err2));
+ TEST_SAY("Resuming partitions\n");
+ if ((err2 = rd_kafka_resume_partitions(rk, parts)))
+ TEST_FAIL("Failed to pause: %s",
+ rd_kafka_err2str(err2));
+ break;
+ default:
+ test_consumer_unassign("rebalance", rk);
+ break;
+ }
+}
+
+
+/**
+ * @brief Verify that the assigned offset is used after pause+resume
+ * if no messages were consumed prior to pause. #2105
+ *
+ * We do this by setting the start offset to BEGINNING in the rebalance_cb
+ * and relying on auto.offset.reset=latest (default) to catch the failure case
+ * where the assigned offset was not honoured.
+ */
+static void consume_subscribe_assign_pause_resume(void) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int32_t partition = 0;
+ const int msgcnt = 1;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ uint64_t testid;
+ int r;
+ test_msgver_t mv;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 20);
+
+ test_create_topic(NULL, topic, (int)partition + 1, 1);
+
+ /* Produce messages */
+ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
+
+ /**
+ * Create consumer.
+ */
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "enable.partition.eof", "true");
+ rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(rk, topic);
+
+ test_msgver_init(&mv, testid);
+ r = test_consumer_poll("consume", rk, testid, 1 /*exp eof*/, 0, msgcnt,
+ &mv);
+ TEST_ASSERT(r == msgcnt, "expected %d messages, got %d", msgcnt, r);
+
+ test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 0, msgcnt);
+ test_msgver_clear(&mv);
+
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief seek() prior to pause() may overwrite the seek()ed offset
+ * when later resume()ing. #3471
+ */
+static void consume_seek_pause_resume(void) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int32_t partition = 0;
+ const int msgcnt = 1000;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ uint64_t testid;
+ int r;
+ test_msgver_t mv;
+ rd_kafka_topic_partition_list_t *parts;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 20);
+
+ test_create_topic(NULL, topic, (int)partition + 1, 1);
+
+ /* Produce messages */
+ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
+
+ /**
+ * Create consumer.
+ */
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "enable.partition.eof", "true");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, partition);
+
+ TEST_SAY("Assigning partition\n");
+ TEST_CALL_ERR__(rd_kafka_assign(rk, parts));
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+
+ TEST_SAY("Consuming messages 0..100\n");
+ test_msgver_init(&mv, testid);
+ r = test_consumer_poll("consume", rk, testid, 0, 0, 100, &mv);
+ TEST_ASSERT(r == 100, "expected %d messages, got %d", 100, r);
+
+ test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 0, 100);
+ test_msgver_clear(&mv);
+
+ parts = rd_kafka_topic_partition_list_new(1);
+ TEST_SAY("Seeking to offset 500\n");
+ rd_kafka_topic_partition_list_add(parts, topic, partition)->offset =
+ 500;
+ TEST_CALL_ERROR__(rd_kafka_seek_partitions(rk, parts, -1));
+
+ TEST_SAY("Pausing\n");
+ TEST_CALL_ERR__(rd_kafka_pause_partitions(rk, parts));
+
+ TEST_SAY("Waiting a short while for things to settle\n");
+ rd_sleep(2);
+
+ TEST_SAY("Resuming\n");
+ TEST_CALL_ERR__(rd_kafka_resume_partitions(rk, parts));
+
+ TEST_SAY("Consuming remaining messages from offset 500.. hopefully\n");
+ r = test_consumer_poll("consume", rk, testid, 1 /*exp eof*/,
+ 500 /* base msgid */,
+ -1 /* remaining messages */, &mv);
+ TEST_ASSERT_LATER(r == 500, "expected %d messages, got %d", 500, r);
+
+ test_msgver_verify("consumed", &mv, TEST_MSGVER_ALL_PART, 500, 500);
+ test_msgver_clear(&mv);
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0026_consume_pause(int argc, char **argv) {
+
+ consume_pause();
+ consume_pause_resume_after_reassign();
+ consume_subscribe_assign_pause_resume();
+ consume_seek_pause_resume();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c
new file mode 100644
index 000000000..999d8f135
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0028-long_topicnames.c
@@ -0,0 +1,79 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Test long topic names (>=255 characters), issue #529.
+ * This broker-side issue only seems to occur when explicitly creating
+ * topics with kafka-topics.sh --create, not with auto-created topics.
+ */
+
+
+int main_0028_long_topicnames(int argc, char **argv) {
+ const int msgcnt = 1000;
+ uint64_t testid;
+ char topic[256];
+ rd_kafka_t *rk_c;
+
+ if (!test_can_create_topics(1))
+ return 0;
+
+ memset(topic, 'a', sizeof(topic) - 1);
+ topic[sizeof(topic) - 1] = '\0';
+
+ strncpy(topic, test_mk_topic_name(topic, 1), sizeof(topic) - 1);
+
+ TEST_SAY("Using topic name of %d bytes: %s\n", (int)strlen(topic),
+ topic);
+
+ /* First try a non-verifying consumer. The consumer has been known
+ * to crash when the broker bug kicks in. */
+ rk_c = test_create_consumer(topic, NULL, NULL, NULL);
+
+ /* Create topic */
+ test_create_topic(rk_c, topic, 1, 1);
+
+ test_consumer_subscribe(rk_c, topic);
+ test_consumer_poll_no_msgs("consume.nomsgs", rk_c, 0, 5000);
+ test_consumer_close(rk_c);
+
+ /* Produce messages */
+ testid =
+ test_produce_msgs_easy(topic, 0, RD_KAFKA_PARTITION_UA, msgcnt);
+
+ /* Consume messages */
+ test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c
new file mode 100644
index 000000000..5b3595baf
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0029-assign_offset.c
@@ -0,0 +1,198 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Consumer: make sure specifying offsets in assign() works.
+ */
+
+
+static const int msgcnt = 100; /* per-partition msgcnt */
+static const int partitions = 4;
+
+/* method 1: lower half of partitions use fixed offset
+ * upper half uses END */
+#define REB_METHOD_1 1
+/* method 2: first two partitions: fixed offset,
+ * rest: INVALID (== stored == END)
+ * issue #583 */
+#define REB_METHOD_2 2
+static int reb_method;
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ int i;
+
+ TEST_SAY("rebalance_cb: %s:\n", rd_kafka_err2str(err));
+ test_print_partition_list(parts);
+
+ if (parts->cnt < partitions)
+ TEST_FAIL("rebalance_cb: Expected %d partitions, not %d",
+ partitions, parts->cnt);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ for (i = 0; i < parts->cnt; i++) {
+ if (i >= partitions) {
+ /* Dont assign() partitions we dont want. */
+ rd_kafka_topic_partition_list_del_by_idx(parts,
+ i);
+ continue;
+ }
+
+ if (reb_method == REB_METHOD_1) {
+ if (i < partitions)
+ parts->elems[i].offset = msgcnt / 2;
+ else
+ parts->elems[i].offset =
+ RD_KAFKA_OFFSET_END;
+ } else if (reb_method == REB_METHOD_2) {
+ if (i < 2)
+ parts->elems[i].offset = msgcnt / 2;
+ else
+ parts->elems[i].offset =
+ RD_KAFKA_OFFSET_INVALID;
+ }
+ }
+ TEST_SAY("Use these offsets:\n");
+ test_print_partition_list(parts);
+ test_consumer_assign("HL.REBALANCE", rk, parts);
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ test_consumer_unassign("HL.REBALANCE", rk);
+ break;
+
+ default:
+ TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
+ }
+}
+
+int main_0029_assign_offset(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_topic_partition_list_t *parts;
+ uint64_t testid;
+ int i;
+ test_timing_t t_simple, t_hl;
+ test_msgver_t mv;
+
+ test_conf_init(NULL, NULL, 20 + (test_session_timeout_ms * 3 / 1000));
+
+ /* Produce X messages to Y partitions so we get a
+ * nice seekable 0..X offset one each partition. */
+ /* Produce messages */
+ testid = test_id_generate();
+ rk = test_create_producer();
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ parts = rd_kafka_topic_partition_list_new(partitions);
+
+ for (i = 0; i < partitions; i++) {
+ test_produce_msgs(rk, rkt, testid, i, 0, msgcnt, NULL, 0);
+ /* Set start offset */
+ rd_kafka_topic_partition_list_add(parts, topic, i)->offset =
+ msgcnt / 2;
+ }
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+
+ /* Simple consumer */
+ TIMING_START(&t_simple, "SIMPLE.CONSUMER");
+ rk = test_create_consumer(topic, NULL, NULL, NULL);
+ test_msgver_init(&mv, testid);
+ test_consumer_assign("SIMPLE.ASSIGN", rk, parts);
+ test_consumer_poll("SIMPLE.CONSUME", rk, testid, -1, 0,
+ partitions * (msgcnt / 2), &mv);
+ for (i = 0; i < partitions; i++)
+ test_msgver_verify_part("HL.MSGS", &mv, TEST_MSGVER_ALL_PART,
+ topic, i, msgcnt / 2, msgcnt / 2);
+ test_msgver_clear(&mv);
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_simple);
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+
+ /* High-level consumer: method 1
+ * Offsets are set in rebalance callback. */
+ if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) {
+ reb_method = REB_METHOD_1;
+ TIMING_START(&t_hl, "HL.CONSUMER");
+ test_msgver_init(&mv, testid);
+ rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
+ test_consumer_subscribe(rk, topic);
+ test_consumer_poll("HL.CONSUME", rk, testid, -1, 0,
+ partitions * (msgcnt / 2), &mv);
+ for (i = 0; i < partitions; i++)
+ test_msgver_verify_part("HL.MSGS", &mv,
+ TEST_MSGVER_ALL_PART, topic, i,
+ msgcnt / 2, msgcnt / 2);
+ test_msgver_clear(&mv);
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_hl);
+
+
+ /* High-level consumer: method 2:
+ * first two partitions are with fixed absolute offset, rest are
+ * auto offset (stored, which is now at end).
+ * Offsets are set in rebalance callback. */
+ reb_method = REB_METHOD_2;
+ TIMING_START(&t_hl, "HL.CONSUMER2");
+ test_msgver_init(&mv, testid);
+ rk = test_create_consumer(topic, rebalance_cb, NULL, NULL);
+ test_consumer_subscribe(rk, topic);
+ test_consumer_poll("HL.CONSUME2", rk, testid, partitions, 0,
+ 2 * (msgcnt / 2), &mv);
+ for (i = 0; i < partitions; i++) {
+ if (i < 2)
+ test_msgver_verify_part(
+ "HL.MSGS2.A", &mv, TEST_MSGVER_ALL_PART,
+ topic, i, msgcnt / 2, msgcnt / 2);
+ }
+ test_msgver_clear(&mv);
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_hl);
+ }
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c
new file mode 100644
index 000000000..9b05cb420
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0030-offset_commit.c
@@ -0,0 +1,589 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Consumer: various offset commit constellations, matrix:
+ * enable.auto.commit, enable.auto.offset.store, async
+ */
+
+static char *topic;
+static const int msgcnt = 100;
+static const int partition = 0;
+static uint64_t testid;
+
+static int64_t expected_offset = 0;
+static int64_t committed_offset = -1;
+
+
+static void offset_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ rd_kafka_topic_partition_t *rktpar;
+
+ TEST_SAYL(3, "Offset committed: %s:\n", rd_kafka_err2str(err));
+ if (err == RD_KAFKA_RESP_ERR__NO_OFFSET)
+ return;
+
+ test_print_partition_list(offsets);
+ if (err)
+ TEST_FAIL("Offset commit failed: %s", rd_kafka_err2str(err));
+ if (offsets->cnt == 0)
+ TEST_FAIL(
+ "Expected at least one partition in offset_commit_cb");
+
+ /* Find correct partition */
+ if (!(rktpar = rd_kafka_topic_partition_list_find(offsets, topic,
+ partition)))
+ return;
+
+ if (rktpar->err)
+ TEST_FAIL("Offset commit failed for partitioń : %s",
+ rd_kafka_err2str(rktpar->err));
+
+ if (rktpar->offset > expected_offset)
+ TEST_FAIL("Offset committed %" PRId64
+ " > expected offset %" PRId64,
+ rktpar->offset, expected_offset);
+
+ if (rktpar->offset < committed_offset)
+ TEST_FAIL("Old offset %" PRId64
+ " (re)committed: "
+ "should be above committed_offset %" PRId64,
+ rktpar->offset, committed_offset);
+ else if (rktpar->offset == committed_offset)
+ TEST_SAYL(1, "Current offset re-committed: %" PRId64 "\n",
+ rktpar->offset);
+ else
+ committed_offset = rktpar->offset;
+
+ if (rktpar->offset < expected_offset) {
+ TEST_SAYL(3,
+ "Offset committed %" PRId64
+ " < expected offset %" PRId64 "\n",
+ rktpar->offset, expected_offset);
+ return;
+ }
+
+ TEST_SAYL(3, "Expected offset committed: %" PRId64 "\n",
+ rktpar->offset);
+}
+
+
+static void do_offset_test(const char *what,
+ int auto_commit,
+ int auto_store,
+ int async,
+ int subscribe) {
+ test_timing_t t_all;
+ char groupid[64];
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ int cnt = 0;
+ const int extra_cnt = 5;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *parts;
+ rd_kafka_topic_partition_t *rktpar;
+ int64_t next_offset = -1;
+
+ SUB_TEST_QUICK("%s", what);
+
+ test_conf_init(&conf, &tconf, subscribe ? 30 : 10);
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "enable.auto.commit",
+ auto_commit ? "true" : "false");
+ test_conf_set(conf, "enable.auto.offset.store",
+ auto_store ? "true" : "false");
+ test_conf_set(conf, "auto.commit.interval.ms", "500");
+ rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
+ test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
+ test_str_id_generate(groupid, sizeof(groupid));
+ test_conf_set(conf, "group.id", groupid);
+ rd_kafka_conf_set_default_topic_conf(conf, tconf);
+
+ TIMING_START(&t_all, "%s", what);
+
+ expected_offset = 0;
+ committed_offset = -1;
+
+ /* MO:
+ * - Create consumer.
+ * - Start consuming from beginning
+ * - Perform store & commits according to settings
+ * - Stop storing&committing when half of the messages are consumed,
+ * - but consume 5 more to check against.
+ * - Query position.
+ * - Destroy consumer.
+ * - Create new consumer with same group.id using stored offsets
+ * - Should consume the expected message.
+ */
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, rd_kafka_conf_dup(conf));
+
+ rd_kafka_poll_set_consumer(rk);
+
+ if (subscribe) {
+ test_consumer_subscribe(rk, topic);
+ } else {
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, partition);
+ test_consumer_assign("ASSIGN", rk, parts);
+ rd_kafka_topic_partition_list_destroy(parts);
+ }
+
+ while (cnt - extra_cnt < msgcnt / 2) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(rk, 10 * 1000);
+ if (!rkm)
+ continue;
+
+ if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ TEST_FAIL("%s: Timed out waiting for message %d", what,
+ cnt);
+ else if (rkm->err)
+ TEST_FAIL("%s: Consumer error: %s", what,
+ rd_kafka_message_errstr(rkm));
+
+ /* Offset of next message. */
+ next_offset = rkm->offset + 1;
+
+ if (cnt < msgcnt / 2) {
+ if (!auto_store) {
+ err = rd_kafka_offset_store(
+ rkm->rkt, rkm->partition, rkm->offset);
+ if (err)
+ TEST_FAIL(
+ "%s: offset_store failed: %s\n",
+ what, rd_kafka_err2str(err));
+ }
+ expected_offset = rkm->offset + 1;
+ if (!auto_commit) {
+ test_timing_t t_commit;
+ TIMING_START(&t_commit, "%s @ %" PRId64,
+ async ? "commit.async"
+ : "commit.sync",
+ rkm->offset + 1);
+ err = rd_kafka_commit_message(rk, rkm, async);
+ TIMING_STOP(&t_commit);
+ if (err)
+ TEST_FAIL("%s: commit failed: %s\n",
+ what, rd_kafka_err2str(err));
+ }
+
+ } else if (auto_store && auto_commit)
+ expected_offset = rkm->offset + 1;
+
+ rd_kafka_message_destroy(rkm);
+ cnt++;
+ }
+
+ TEST_SAY("%s: done consuming after %d messages, at offset %" PRId64
+ ", next_offset %" PRId64 "\n",
+ what, cnt, expected_offset, next_offset);
+
+ if ((err = rd_kafka_assignment(rk, &parts)))
+ TEST_FAIL("%s: failed to get assignment(): %s\n", what,
+ rd_kafka_err2str(err));
+
+ /* Verify position */
+ if ((err = rd_kafka_position(rk, parts)))
+ TEST_FAIL("%s: failed to get position(): %s\n", what,
+ rd_kafka_err2str(err));
+ if (!(rktpar =
+ rd_kafka_topic_partition_list_find(parts, topic, partition)))
+ TEST_FAIL("%s: position(): topic lost\n", what);
+ if (rktpar->offset != next_offset)
+ TEST_FAIL("%s: Expected position() offset %" PRId64
+ ", got %" PRId64,
+ what, next_offset, rktpar->offset);
+ TEST_SAY("%s: Position is at %" PRId64 ", good!\n", what,
+ rktpar->offset);
+
+ /* Pause messages while waiting so we can serve callbacks
+ * without having more messages received. */
+ if ((err = rd_kafka_pause_partitions(rk, parts)))
+ TEST_FAIL("%s: failed to pause partitions: %s\n", what,
+ rd_kafka_err2str(err));
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ /* Fire off any enqueued offset_commit_cb */
+ test_consumer_poll_no_msgs(what, rk, testid, 0);
+
+ TEST_SAY("%s: committed_offset %" PRId64 ", expected_offset %" PRId64
+ "\n",
+ what, committed_offset, expected_offset);
+
+ if (!auto_commit && !async) {
+ /* Sync commits should be up to date at this point. */
+ if (committed_offset != expected_offset)
+ TEST_FAIL("%s: Sync commit: committed offset %" PRId64
+ " should be same as expected offset "
+ "%" PRId64,
+ what, committed_offset, expected_offset);
+ } else {
+
+ /* Wait for offset commits to catch up */
+ while (committed_offset < expected_offset) {
+ TEST_SAYL(2,
+ "%s: Wait for committed offset %" PRId64
+ " to reach expected offset %" PRId64 "\n",
+ what, committed_offset, expected_offset);
+ test_consumer_poll_no_msgs(what, rk, testid, 1000);
+ }
+ }
+
+ TEST_SAY(
+ "%s: phase 1 complete, %d messages consumed, "
+ "next expected offset is %" PRId64 "\n",
+ what, cnt, expected_offset);
+
+ /* Issue #827: cause committed() to return prematurely by specifying
+ * low timeout. The bug (use after free) will only
+ * be catched by valgrind.
+ *
+ * rusage: this triggers a bunch of protocol requests which
+ * increase .ucpu, .scpu, .ctxsw.
+ */
+ do {
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, partition);
+ err = rd_kafka_committed(rk, parts, 1);
+ rd_kafka_topic_partition_list_destroy(parts);
+ if (err)
+ TEST_SAY("Issue #827: committed() returned %s\n",
+ rd_kafka_err2str(err));
+ } while (err != RD_KAFKA_RESP_ERR__TIMED_OUT);
+
+ /* Query position */
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, partition);
+
+ err = rd_kafka_committed(rk, parts, tmout_multip(5 * 1000));
+ if (err)
+ TEST_FAIL("%s: committed() failed: %s", what,
+ rd_kafka_err2str(err));
+ if (!(rktpar =
+ rd_kafka_topic_partition_list_find(parts, topic, partition)))
+ TEST_FAIL("%s: committed(): topic lost\n", what);
+ if (rktpar->offset != expected_offset)
+ TEST_FAIL("%s: Expected committed() offset %" PRId64
+ ", got %" PRId64,
+ what, expected_offset, rktpar->offset);
+ TEST_SAY("%s: Committed offset is at %" PRId64 ", good!\n", what,
+ rktpar->offset);
+
+ rd_kafka_topic_partition_list_destroy(parts);
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+
+
+ /* Fire up a new consumer and continue from where we left off. */
+ TEST_SAY("%s: phase 2: starting new consumer to resume consumption\n",
+ what);
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+ rd_kafka_poll_set_consumer(rk);
+
+ if (subscribe) {
+ test_consumer_subscribe(rk, topic);
+ } else {
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, partition);
+ test_consumer_assign("ASSIGN", rk, parts);
+ rd_kafka_topic_partition_list_destroy(parts);
+ }
+
+ while (cnt < msgcnt) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(rk, 10 * 1000);
+ if (!rkm)
+ continue;
+
+ if (rkm->err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ TEST_FAIL("%s: Timed out waiting for message %d", what,
+ cnt);
+ else if (rkm->err)
+ TEST_FAIL("%s: Consumer error: %s", what,
+ rd_kafka_message_errstr(rkm));
+
+ if (rkm->offset != expected_offset)
+ TEST_FAIL("%s: Received message offset %" PRId64
+ ", expected %" PRId64 " at msgcnt %d/%d\n",
+ what, rkm->offset, expected_offset, cnt,
+ msgcnt);
+
+ rd_kafka_message_destroy(rkm);
+ expected_offset++;
+ cnt++;
+ }
+
+
+ TEST_SAY("%s: phase 2: complete\n", what);
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ TIMING_STOP(&t_all);
+
+ SUB_TEST_PASS();
+}
+
+
+static void empty_offset_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ rd_kafka_resp_err_t expected = *(rd_kafka_resp_err_t *)opaque;
+ int valid_offsets = 0;
+ int i;
+
+ TEST_SAY(
+ "Offset commit callback for %d partitions: %s (expecting %s)\n",
+ offsets ? offsets->cnt : 0, rd_kafka_err2str(err),
+ rd_kafka_err2str(expected));
+
+ if (expected != err)
+ TEST_FAIL("Offset commit cb: expected %s, got %s",
+ rd_kafka_err2str(expected), rd_kafka_err2str(err));
+
+ for (i = 0; i < offsets->cnt; i++) {
+ TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n",
+ offsets->elems[i].topic, offsets->elems[i].partition,
+ offsets->elems[i].offset,
+ rd_kafka_err2str(offsets->elems[i].err));
+
+ if (expected == RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_ASSERT(offsets->elems[i].err == expected);
+ if (offsets->elems[i].offset > 0)
+ valid_offsets++;
+ }
+
+ if (expected == RD_KAFKA_RESP_ERR_NO_ERROR) {
+ /* If no error is expected we instead expect one proper offset
+ * to have been committed. */
+ TEST_ASSERT(valid_offsets > 0);
+ }
+}
+
+
+/**
+ * Trigger an empty cgrp commit (issue #803)
+ */
+static void do_empty_commit(void) {
+ rd_kafka_t *rk;
+ char group_id[64];
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_resp_err_t err, expect;
+
+ SUB_TEST_QUICK();
+
+ test_conf_init(&conf, &tconf, 20);
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
+ test_str_id_generate(group_id, sizeof(group_id));
+
+ TEST_SAY(_C_MAG "[ do_empty_commit group.id %s ]\n", group_id);
+
+ rk = test_create_consumer(group_id, NULL, conf, tconf);
+
+ test_consumer_subscribe(rk, topic);
+
+ test_consumer_poll("consume", rk, testid, -1, -1, 100, NULL);
+
+ TEST_SAY("First commit\n");
+ expect = RD_KAFKA_RESP_ERR_NO_ERROR;
+ err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb,
+ &expect);
+ if (err != expect)
+ TEST_FAIL("commit failed: %s", rd_kafka_err2str(err));
+ else
+ TEST_SAY("First commit returned %s\n", rd_kafka_err2str(err));
+
+ TEST_SAY("Second commit, should be empty\n");
+ expect = RD_KAFKA_RESP_ERR__NO_OFFSET;
+ err = rd_kafka_commit_queue(rk, NULL, NULL, empty_offset_commit_cb,
+ &expect);
+ if (err != RD_KAFKA_RESP_ERR__NO_OFFSET)
+ TEST_FAIL("unexpected commit result, wanted NO_OFFSET, got: %s",
+ rd_kafka_err2str(err));
+ else
+ TEST_SAY("Second commit returned %s\n", rd_kafka_err2str(err));
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * Commit non-existent topic (issue #704)
+ */
+static void nonexist_offset_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ int i;
+ int failed_offsets = 0;
+
+ TEST_SAY("Offset commit callback for %d partitions: %s\n",
+ offsets ? offsets->cnt : 0, rd_kafka_err2str(err));
+
+ TEST_ASSERT(offsets != NULL);
+
+ for (i = 0; i < offsets->cnt; i++) {
+ TEST_SAY("committed: %s [%" PRId32 "] offset %" PRId64 ": %s\n",
+ offsets->elems[i].topic, offsets->elems[i].partition,
+ offsets->elems[i].offset,
+ rd_kafka_err2str(offsets->elems[i].err));
+ failed_offsets += offsets->elems[i].err ? 1 : 0;
+ }
+
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+ "expected unknown Topic or partition, not %s",
+ rd_kafka_err2str(err));
+ TEST_ASSERT(offsets->cnt == 2, "expected %d offsets", offsets->cnt);
+ TEST_ASSERT(failed_offsets == offsets->cnt,
+ "expected %d offsets to have failed, got %d", offsets->cnt,
+ failed_offsets);
+}
+
+static void do_nonexist_commit(void) {
+ rd_kafka_t *rk;
+ char group_id[64];
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_topic_partition_list_t *offsets;
+ const char *unk_topic = test_mk_topic_name(__FUNCTION__, 1);
+ rd_kafka_resp_err_t err;
+
+ SUB_TEST_QUICK();
+
+ test_conf_init(&conf, &tconf, 20);
+ /* Offset commit deferrals when the broker is down is limited to
+ * session.timeout.ms. With 0.9 brokers and api.version.request=true
+ * the initial connect to all brokers will take 10*2 seconds
+ * and the commit_queue() below will time out too quickly.
+ * Set the session timeout high here to avoid it. */
+ test_conf_set(conf, "session.timeout.ms", "60000");
+
+ test_str_id_generate(group_id, sizeof(group_id));
+ test_conf_set(conf, "group.id", group_id);
+
+ rd_kafka_conf_set_default_topic_conf(conf, tconf);
+
+ TEST_SAY(_C_MAG "[ do_nonexist_commit group.id %s ]\n", group_id);
+
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+ rd_kafka_poll_set_consumer(rk);
+
+ TEST_SAY("Try nonexist commit\n");
+ offsets = rd_kafka_topic_partition_list_new(2);
+ rd_kafka_topic_partition_list_add(offsets, unk_topic, 0)->offset = 123;
+ rd_kafka_topic_partition_list_add(offsets, unk_topic, 1)->offset = 456;
+
+ err = rd_kafka_commit_queue(rk, offsets, NULL,
+ nonexist_offset_commit_cb, NULL);
+ TEST_SAY("nonexist commit returned %s\n", rd_kafka_err2str(err));
+ if (err != RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART)
+ TEST_FAIL("commit() should give UnknownTopicOrPart, not: %s",
+ rd_kafka_err2str(err));
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0030_offset_commit(int argc, char **argv) {
+
+ topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ testid = test_produce_msgs_easy(topic, 0, partition, msgcnt);
+
+ do_empty_commit();
+
+ do_nonexist_commit();
+
+ do_offset_test("AUTO.COMMIT & AUTO.STORE", 1 /* enable.auto.commit */,
+ 1 /* enable.auto.offset.store */, 0 /* not used. */,
+ 1 /* use subscribe */);
+
+ do_offset_test("MANUAL.COMMIT.ASYNC & AUTO.STORE",
+ 0 /* enable.auto.commit */,
+ 1 /* enable.auto.offset.store */, 1 /* async */,
+ 1 /* use subscribe */);
+
+ do_offset_test("AUTO.COMMIT.ASYNC & AUTO.STORE & ASSIGN",
+ 1 /* enable.auto.commit */,
+ 1 /* enable.auto.offset.store */, 0 /* not used. */,
+ 0 /* use assign */);
+
+ if (test_quick) {
+ rd_free(topic);
+ return 0;
+ }
+
+ do_offset_test("AUTO.COMMIT & MANUAL.STORE", 1 /* enable.auto.commit */,
+ 0 /* enable.auto.offset.store */, 0 /* not used */,
+ 1 /* use subscribe */);
+
+ do_offset_test("MANUAL.COMMIT.SYNC & AUTO.STORE",
+ 0 /* enable.auto.commit */,
+ 1 /* enable.auto.offset.store */, 0 /* async */,
+ 1 /* use subscribe */);
+
+ do_offset_test("MANUAL.COMMIT.ASYNC & MANUAL.STORE",
+ 0 /* enable.auto.commit */,
+ 0 /* enable.auto.offset.store */, 1 /* sync */,
+ 1 /* use subscribe */);
+
+ do_offset_test("MANUAL.COMMIT.SYNC & MANUAL.STORE",
+ 0 /* enable.auto.commit */,
+ 0 /* enable.auto.offset.store */, 0 /* sync */,
+ 1 /* use subscribe */);
+
+ rd_free(topic);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c
new file mode 100644
index 000000000..327be43df
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0031-get_offsets.c
@@ -0,0 +1,119 @@
+
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Verify that rd_kafka_(query|get)_watermark_offsets() works.
+ */
+
+
+int main_0031_get_offsets(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int msgcnt = test_quick ? 10 : 100;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ int64_t qry_low = -1234, qry_high = -1235;
+ int64_t get_low = -1234, get_high = -1235;
+ rd_kafka_resp_err_t err;
+ test_timing_t t_qry, t_get;
+ uint64_t testid;
+
+ /* Produce messages */
+ testid = test_produce_msgs_easy(topic, 0, 0, msgcnt);
+
+ /* Get offsets */
+ rk = test_create_consumer(NULL, NULL, NULL, NULL);
+
+ TIMING_START(&t_qry, "query_watermark_offsets");
+ err = rd_kafka_query_watermark_offsets(
+ rk, topic, 0, &qry_low, &qry_high, tmout_multip(10 * 1000));
+ TIMING_STOP(&t_qry);
+ if (err)
+ TEST_FAIL("query_watermark_offsets failed: %s\n",
+ rd_kafka_err2str(err));
+
+ if (qry_low != 0 && qry_high != msgcnt)
+ TEST_FAIL(
+ "Expected low,high %d,%d, but got "
+ "%" PRId64 ",%" PRId64,
+ 0, msgcnt, qry_low, qry_high);
+
+ TEST_SAY(
+ "query_watermark_offsets: "
+ "offsets %" PRId64 ", %" PRId64 "\n",
+ qry_low, qry_high);
+
+ /* Now start consuming to update the offset cache, then query it
+ * with the get_ API. */
+ rkt = test_create_topic_object(rk, topic, NULL);
+
+ test_consumer_start("get", rkt, 0, RD_KAFKA_OFFSET_BEGINNING);
+ test_consume_msgs("get", rkt, testid, 0, TEST_NO_SEEK, 0, msgcnt, 0);
+ /* After at least one message has been consumed the
+ * watermarks are cached. */
+
+ TIMING_START(&t_get, "get_watermark_offsets");
+ err = rd_kafka_get_watermark_offsets(rk, topic, 0, &get_low, &get_high);
+ TIMING_STOP(&t_get);
+ if (err)
+ TEST_FAIL("get_watermark_offsets failed: %s\n",
+ rd_kafka_err2str(err));
+
+ TEST_SAY(
+ "get_watermark_offsets: "
+ "offsets %" PRId64 ", %" PRId64 "\n",
+ get_low, get_high);
+
+ if (get_high != qry_high)
+ TEST_FAIL(
+ "query/get discrepancies: "
+ "low: %" PRId64 "/%" PRId64 ", high: %" PRId64 "/%" PRId64,
+ qry_low, get_low, qry_high, get_high);
+ if (get_low >= get_high)
+ TEST_FAIL(
+ "get_watermark_offsets: "
+ "low %" PRId64 " >= high %" PRId64,
+ get_low, get_high);
+
+ /* FIXME: We currently dont bother checking the get_low offset
+ * since it requires stats to be enabled. */
+
+ test_consumer_stop("get", rkt, 0);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c
new file mode 100644
index 000000000..f31d33ebc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0033-regex_subscribe.c
@@ -0,0 +1,509 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * KafkaConsumer: regex topic subscriptions
+ */
+
+
+
+struct expect {
+ char *name; /* sub-test name */
+ const char *sub[4]; /* subscriptions */
+ const char *exp[4]; /* expected topics */
+ int exp_err; /* expected error from subscribe() */
+ int stat[4]; /* per exp status */
+ int fails;
+ enum { _EXP_NONE,
+ _EXP_FAIL,
+ _EXP_OK,
+ _EXP_ASSIGN,
+ _EXP_REVOKE,
+ _EXP_ASSIGNED,
+ _EXP_REVOKED,
+ } result;
+};
+
+static struct expect *exp_curr;
+
+static uint64_t testid;
+
+static void expect_match(struct expect *exp,
+ const rd_kafka_topic_partition_list_t *parts) {
+ int i;
+ int e = 0;
+ int fails = 0;
+
+ memset(exp->stat, 0, sizeof(exp->stat));
+
+ for (i = 0; i < parts->cnt; i++) {
+ int found = 0;
+ e = 0;
+ while (exp->exp[e]) {
+ if (!strcmp(parts->elems[i].topic, exp->exp[e])) {
+ exp->stat[e]++;
+ found++;
+ }
+ e++;
+ }
+
+ if (!found) {
+ TEST_WARN("%s: got unexpected topic match: %s\n",
+ exp->name, parts->elems[i].topic);
+ fails++;
+ }
+ }
+
+
+ e = 0;
+ while (exp->exp[e]) {
+ if (!exp->stat[e]) {
+ TEST_WARN(
+ "%s: expected topic not "
+ "found in assignment: %s\n",
+ exp->name, exp->exp[e]);
+ fails++;
+ } else {
+ TEST_SAY("%s: expected topic %s seen in assignment\n",
+ exp->name, exp->exp[e]);
+ }
+ e++;
+ }
+
+ exp->fails += fails;
+ if (fails) {
+ TEST_WARN("%s: see %d previous failures\n", exp->name, fails);
+ exp->result = _EXP_FAIL;
+ } else {
+ TEST_SAY(_C_MAG "[ %s: assignment matched ]\n", exp->name);
+ exp->result = _EXP_OK;
+ }
+}
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ struct expect *exp = exp_curr;
+
+ TEST_ASSERT(exp_curr, "exp_curr not set");
+
+ TEST_SAY("rebalance_cb: %s with %d partition(s)\n",
+ rd_kafka_err2str(err), parts->cnt);
+ test_print_partition_list(parts);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ /* Check that provided partitions match our expectations */
+ if (exp->result != _EXP_ASSIGN) {
+ TEST_WARN(
+ "%s: rebalance called while expecting %d: "
+ "too many or undesired assignment(s?\n",
+ exp->name, exp->result);
+ }
+ expect_match(exp, parts);
+ test_consumer_assign("rebalance", rk, parts);
+ exp->result = _EXP_ASSIGNED;
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ if (exp->result != _EXP_REVOKE) {
+ TEST_WARN(
+ "%s: rebalance called while expecting %d: "
+ "too many or undesired assignment(s?\n",
+ exp->name, exp->result);
+ }
+
+ test_consumer_unassign("rebalance", rk);
+ exp->result = _EXP_REVOKED;
+ break;
+
+ default:
+ TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
+ }
+}
+
+
+/**
+ * @brief Poll the consumer once.
+ */
+static void consumer_poll_once(rd_kafka_t *rk) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consumer_poll(rk, 1000);
+ if (!rkmessage)
+ return;
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ TEST_SAY("%s [%" PRId32
+ "] reached EOF at "
+ "offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+
+ } else if (rkmessage->err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART) {
+ if (strstr(rd_kafka_topic_name(rkmessage->rkt), "NONEXIST"))
+ TEST_SAY("%s: %s: error is expected for this topic\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rd_kafka_message_errstr(rkmessage));
+ else
+ TEST_FAIL(
+ "%s [%" PRId32 "] error (offset %" PRId64 "): %s",
+ rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
+ : "(no-topic)",
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+ }
+
+ rd_kafka_message_destroy(rkmessage);
+}
+
+
+
+static int test_subscribe(rd_kafka_t *rk, struct expect *exp) {
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *tlist;
+ int i;
+ test_timing_t t_sub, t_assign, t_unsub;
+
+ exp_curr = exp;
+
+ test_timeout_set((test_session_timeout_ms / 1000) * 3);
+
+ tlist = rd_kafka_topic_partition_list_new(4);
+ TEST_SAY(_C_MAG "[ %s: begin ]\n", exp->name);
+ i = 0;
+ TEST_SAY("Topic subscription:\n");
+ while (exp->sub[i]) {
+ TEST_SAY("%s: %s\n", exp->name, exp->sub[i]);
+ rd_kafka_topic_partition_list_add(tlist, exp->sub[i],
+ RD_KAFKA_PARTITION_UA);
+ i++;
+ }
+
+ /* Subscribe */
+ TIMING_START(&t_sub, "subscribe");
+ err = rd_kafka_subscribe(rk, tlist);
+ TIMING_STOP(&t_sub);
+ TEST_ASSERT(err == exp->exp_err, "subscribe() failed: %s (expected %s)",
+ rd_kafka_err2str(err), rd_kafka_err2str(exp->exp_err));
+
+ if (exp->exp[0]) {
+ /* Wait for assignment, actual messages are ignored. */
+ exp->result = _EXP_ASSIGN;
+ TEST_SAY("%s: waiting for assignment\n", exp->name);
+ TIMING_START(&t_assign, "assignment");
+ while (exp->result == _EXP_ASSIGN)
+ consumer_poll_once(rk);
+ TIMING_STOP(&t_assign);
+ TEST_ASSERT(exp->result == _EXP_ASSIGNED,
+ "got %d instead of assignment", exp->result);
+
+ } else {
+ /* Not expecting any assignment */
+ int64_t ts_end = test_clock() + 5000;
+ exp->result = _EXP_NONE; /* Not expecting a rebalance */
+ while (exp->result == _EXP_NONE && test_clock() < ts_end)
+ consumer_poll_once(rk);
+ TEST_ASSERT(exp->result == _EXP_NONE);
+ }
+
+ /* Unsubscribe */
+ TIMING_START(&t_unsub, "unsubscribe");
+ err = rd_kafka_unsubscribe(rk);
+ TIMING_STOP(&t_unsub);
+ TEST_ASSERT(!err, "unsubscribe() failed: %s", rd_kafka_err2str(err));
+
+ rd_kafka_topic_partition_list_destroy(tlist);
+
+ if (exp->exp[0]) {
+ /* Wait for revoke, actual messages are ignored. */
+ TEST_SAY("%s: waiting for revoke\n", exp->name);
+ exp->result = _EXP_REVOKE;
+ TIMING_START(&t_assign, "revoke");
+ while (exp->result != _EXP_REVOKED)
+ consumer_poll_once(rk);
+ TIMING_STOP(&t_assign);
+ TEST_ASSERT(exp->result == _EXP_REVOKED,
+ "got %d instead of revoke", exp->result);
+ } else {
+ /* Not expecting any revoke */
+ int64_t ts_end = test_clock() + 5000;
+ exp->result = _EXP_NONE; /* Not expecting a rebalance */
+ while (exp->result == _EXP_NONE && test_clock() < ts_end)
+ consumer_poll_once(rk);
+ TEST_ASSERT(exp->result == _EXP_NONE);
+ }
+
+ TEST_SAY(_C_MAG "[ %s: done with %d failures ]\n", exp->name,
+ exp->fails);
+
+ return exp->fails;
+}
+
+
+static int do_test(const char *assignor) {
+ static char topics[3][128];
+ static char nonexist_topic[128];
+ const int topic_cnt = 3;
+ rd_kafka_t *rk;
+ const int msgcnt = 10;
+ int i;
+ char groupid[64];
+ int fails = 0;
+ rd_kafka_conf_t *conf;
+
+ if (!test_check_builtin("regex")) {
+ TEST_SKIP("regex support not built in\n");
+ return 0;
+ }
+
+ testid = test_id_generate();
+ test_str_id_generate(groupid, sizeof(groupid));
+
+ rd_snprintf(topics[0], sizeof(topics[0]), "%s_%s",
+ test_mk_topic_name("regex_subscribe_TOPIC_0001_UNO", 0),
+ groupid);
+ rd_snprintf(topics[1], sizeof(topics[1]), "%s_%s",
+ test_mk_topic_name("regex_subscribe_topic_0002_dup", 0),
+ groupid);
+ rd_snprintf(topics[2], sizeof(topics[2]), "%s_%s",
+ test_mk_topic_name("regex_subscribe_TOOTHPIC_0003_3", 0),
+ groupid);
+
+ /* To avoid auto topic creation to kick in we use
+ * an invalid topic name. */
+ rd_snprintf(
+ nonexist_topic, sizeof(nonexist_topic), "%s_%s",
+ test_mk_topic_name("regex_subscribe_NONEXISTENT_0004_IV#!", 0),
+ groupid);
+
+ /* Produce messages to topics to ensure creation. */
+ for (i = 0; i < topic_cnt; i++)
+ test_produce_msgs_easy(topics[i], testid, RD_KAFKA_PARTITION_UA,
+ msgcnt);
+
+ test_conf_init(&conf, NULL, 20);
+ test_conf_set(conf, "partition.assignment.strategy", assignor);
+ /* Speed up propagation of new topics */
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
+ test_conf_set(conf, "allow.auto.create.topics", "true");
+
+ /* Create a single consumer to handle all subscriptions.
+ * Has the nice side affect of testing multiple subscriptions. */
+ rk = test_create_consumer(groupid, rebalance_cb, conf, NULL);
+
+ /*
+ * Test cases
+ */
+ {
+ struct expect expect = {.name = rd_strdup(tsprintf(
+ "%s: no regexps (0&1)", assignor)),
+ .sub = {topics[0], topics[1], NULL},
+ .exp = {topics[0], topics[1], NULL}};
+
+ fails += test_subscribe(rk, &expect);
+ rd_free(expect.name);
+ }
+
+ {
+ struct expect expect = {.name =
+ rd_strdup(tsprintf("%s: no regexps "
+ "(no matches)",
+ assignor)),
+ .sub = {nonexist_topic, NULL},
+ .exp = {NULL}};
+
+ fails += test_subscribe(rk, &expect);
+ rd_free(expect.name);
+ }
+
+ {
+ struct expect expect = {
+ .name = rd_strdup(tsprintf("%s: regex all", assignor)),
+ .sub = {rd_strdup(tsprintf("^.*_%s", groupid)), NULL},
+ .exp = {topics[0], topics[1], topics[2], NULL}};
+
+ fails += test_subscribe(rk, &expect);
+ rd_free(expect.name);
+ rd_free((void *)expect.sub[0]);
+ }
+
+ {
+ struct expect expect = {
+ .name = rd_strdup(tsprintf("%s: regex 0&1", assignor)),
+ .sub = {rd_strdup(tsprintf(
+ "^.*[tToOpPiIcC]_0+[12]_[^_]+_%s", groupid)),
+ NULL},
+ .exp = {topics[0], topics[1], NULL}};
+
+ fails += test_subscribe(rk, &expect);
+ rd_free(expect.name);
+ rd_free((void *)expect.sub[0]);
+ }
+
+ {
+ struct expect expect = {
+ .name = rd_strdup(tsprintf("%s: regex 2", assignor)),
+ .sub = {rd_strdup(
+ tsprintf("^.*TOOTHPIC_000._._%s", groupid)),
+ NULL},
+ .exp = {topics[2], NULL}};
+
+ fails += test_subscribe(rk, &expect);
+ rd_free(expect.name);
+ rd_free((void *)expect.sub[0]);
+ }
+
+ {
+ struct expect expect = {
+ .name = rd_strdup(tsprintf("%s: regex 2 and "
+ "nonexistent(not seen)",
+ assignor)),
+ .sub = {rd_strdup(tsprintf("^.*_000[34]_..?_%s", groupid)),
+ NULL},
+ .exp = {topics[2], NULL}};
+
+ fails += test_subscribe(rk, &expect);
+ rd_free(expect.name);
+ rd_free((void *)expect.sub[0]);
+ }
+
+ {
+ struct expect expect = {
+ .name = rd_strdup(
+ tsprintf("%s: broken regex (no matches)", assignor)),
+ .sub = {"^.*[0", NULL},
+ .exp = {NULL},
+ .exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG};
+
+ fails += test_subscribe(rk, &expect);
+ rd_free(expect.name);
+ }
+
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+
+ if (fails)
+ TEST_FAIL("See %d previous failures", fails);
+
+ return 0;
+}
+
+
+int main_0033_regex_subscribe(int argc, char **argv) {
+ do_test("range");
+ do_test("roundrobin");
+ return 0;
+}
+
+
+/**
+ * @brief Subscription API tests that dont require a broker
+ */
+int main_0033_regex_subscribe_local(int argc, char **argv) {
+ rd_kafka_topic_partition_list_t *valids, *invalids, *none, *empty,
+ *alot;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_resp_err_t err;
+ char errstr[256];
+ int i;
+
+ valids = rd_kafka_topic_partition_list_new(0);
+ invalids = rd_kafka_topic_partition_list_new(100);
+ none = rd_kafka_topic_partition_list_new(1000);
+ empty = rd_kafka_topic_partition_list_new(5);
+ alot = rd_kafka_topic_partition_list_new(1);
+
+ rd_kafka_topic_partition_list_add(valids, "not_a_regex", 0);
+ rd_kafka_topic_partition_list_add(valids, "^My[vV]alid..regex+", 0);
+ rd_kafka_topic_partition_list_add(valids, "^another_one$", 55);
+
+ rd_kafka_topic_partition_list_add(invalids, "not_a_regex", 0);
+ rd_kafka_topic_partition_list_add(invalids, "^My[vV]alid..regex+", 0);
+ rd_kafka_topic_partition_list_add(invalids, "^a[b", 99);
+
+ rd_kafka_topic_partition_list_add(empty, "not_a_regex", 0);
+ rd_kafka_topic_partition_list_add(empty, "", 0);
+ rd_kafka_topic_partition_list_add(empty, "^ok", 0);
+
+ for (i = 0; i < 10000; i++) {
+ char topic[32];
+ rd_snprintf(topic, sizeof(topic), "^Va[lLid]_regex_%d$", i);
+ rd_kafka_topic_partition_list_add(alot, topic, i);
+ }
+
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "group.id", "group");
+ test_conf_set(conf, "client.id", test_curr->name);
+
+ rk = rd_kafka_new(RD_KAFKA_CONSUMER, conf, errstr, sizeof(errstr));
+ if (!rk)
+ TEST_FAIL("Failed to create consumer: %s", errstr);
+
+ err = rd_kafka_subscribe(rk, valids);
+ TEST_ASSERT(!err, "valids failed: %s", rd_kafka_err2str(err));
+
+ err = rd_kafka_subscribe(rk, invalids);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "invalids failed with wrong return: %s",
+ rd_kafka_err2str(err));
+
+ err = rd_kafka_subscribe(rk, none);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "none failed with wrong return: %s", rd_kafka_err2str(err));
+
+ err = rd_kafka_subscribe(rk, empty);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "empty failed with wrong return: %s",
+ rd_kafka_err2str(err));
+
+ err = rd_kafka_subscribe(rk, alot);
+ TEST_ASSERT(!err, "alot failed: %s", rd_kafka_err2str(err));
+
+ rd_kafka_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ rd_kafka_topic_partition_list_destroy(valids);
+ rd_kafka_topic_partition_list_destroy(invalids);
+ rd_kafka_topic_partition_list_destroy(none);
+ rd_kafka_topic_partition_list_destroy(empty);
+ rd_kafka_topic_partition_list_destroy(alot);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c
new file mode 100644
index 000000000..9276764c8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0034-offset_reset.c
@@ -0,0 +1,377 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+#include "../src/rdkafka_protocol.h"
+
+
+/**
+ * Issue #559: make sure auto.offset.reset works with invalid offsets.
+ */
+
+
+static void do_test_reset(const char *topic,
+ int partition,
+ const char *reset,
+ int64_t initial_offset,
+ int exp_eofcnt,
+ int exp_msgcnt,
+ int exp_errcnt,
+ int exp_resetcnt) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ int eofcnt = 0, msgcnt = 0, errcnt = 0, resetcnt = 0;
+ rd_kafka_conf_t *conf;
+
+ TEST_SAY(
+ "Test auto.offset.reset=%s, "
+ "expect %d msgs, %d EOFs, %d errors, %d resets\n",
+ reset, exp_msgcnt, exp_eofcnt, exp_errcnt, exp_resetcnt);
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "enable.partition.eof", "true");
+
+ rk = test_create_consumer(NULL, NULL, conf, NULL);
+ rkt = test_create_topic_object(rk, topic, "auto.offset.reset", reset,
+ NULL);
+
+ test_consumer_start(reset, rkt, partition, initial_offset);
+ while (1) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consume(rkt, partition, tmout_multip(1000 * 10));
+ if (!rkm)
+ TEST_FAIL(
+ "%s: no message for 10s: "
+ "%d/%d messages, %d/%d EOFs, %d/%d errors\n",
+ reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt,
+ errcnt, exp_errcnt);
+
+ if (rkm->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ TEST_SAY("%s: received EOF at offset %" PRId64 "\n",
+ reset, rkm->offset);
+ eofcnt++;
+ } else if (rkm->err == RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) {
+ TEST_SAY(
+ "%s: auto.offset.reset error at offset %" PRId64
+ ": %s: %s\n",
+ reset, rkm->offset, rd_kafka_err2name(rkm->err),
+ rd_kafka_message_errstr(rkm));
+ resetcnt++;
+ } else if (rkm->err) {
+ TEST_SAY(
+ "%s: consume error at offset %" PRId64 ": %s\n",
+ reset, rkm->offset, rd_kafka_message_errstr(rkm));
+ errcnt++;
+ } else {
+ msgcnt++;
+ }
+
+ rd_kafka_message_destroy(rkm);
+
+ if (eofcnt == exp_eofcnt && errcnt == exp_errcnt &&
+ msgcnt == exp_msgcnt && resetcnt == exp_resetcnt)
+ break;
+ else if (eofcnt > exp_eofcnt || errcnt > exp_errcnt ||
+ msgcnt > exp_msgcnt || resetcnt > exp_resetcnt)
+ TEST_FAIL(
+ "%s: unexpected: "
+ "%d/%d messages, %d/%d EOFs, %d/%d errors, "
+ "%d/%d resets\n",
+ reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt,
+ errcnt, exp_errcnt, resetcnt, exp_resetcnt);
+ }
+
+ TEST_SAY(
+ "%s: Done: "
+ "%d/%d messages, %d/%d EOFs, %d/%d errors, %d/%d resets\n",
+ reset, msgcnt, exp_msgcnt, eofcnt, exp_eofcnt, errcnt, exp_errcnt,
+ resetcnt, exp_resetcnt);
+
+ test_consumer_stop(reset, rkt, partition);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+}
+
+int main_0034_offset_reset(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int partition = 0;
+ const int msgcnt = test_quick ? 20 : 100;
+
+ /* Produce messages */
+ test_produce_msgs_easy(topic, 0, partition, msgcnt);
+
+ /* auto.offset.reset=latest: Consume messages from invalid offset:
+ * Should return EOF. */
+ do_test_reset(topic, partition, "latest", msgcnt + 5, 1, 0, 0, 0);
+
+ /* auto.offset.reset=earliest: Consume messages from invalid offset:
+ * Should return messages from beginning. */
+ do_test_reset(topic, partition, "earliest", msgcnt + 5, 1, msgcnt, 0,
+ 0);
+
+ /* auto.offset.reset=error: Consume messages from invalid offset:
+ * Should return error. */
+ do_test_reset(topic, partition, "error", msgcnt + 5, 0, 0, 0, 1);
+
+ return 0;
+}
+
+
+/**
+ * @brief Verify auto.offset.reset=error behaviour for a range of different
+ * error cases.
+ */
+static void offset_reset_errors(void) {
+ rd_kafka_t *c;
+ rd_kafka_conf_t *conf;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *bootstraps;
+ const char *topic = "topic";
+ const int32_t partition = 0;
+ const int msgcnt = 10;
+ const int broker_id = 1;
+ rd_kafka_queue_t *queue;
+ int i;
+ struct {
+ rd_kafka_resp_err_t inject;
+ rd_kafka_resp_err_t expect;
+ /** Note: don't use OFFSET_BEGINNING since it might
+ * use the cached low wmark, and thus not be subject to
+ * the injected mock error. Use TAIL(msgcnt) instead.*/
+ int64_t start_offset;
+ int64_t expect_offset;
+ rd_bool_t broker_down; /**< Bring the broker down */
+ } test[] = {
+ {
+ RD_KAFKA_RESP_ERR__TRANSPORT,
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ RD_KAFKA_OFFSET_TAIL(msgcnt),
+ 0,
+ .broker_down = rd_true,
+ },
+ {
+ RD_KAFKA_RESP_ERR__TRANSPORT,
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ RD_KAFKA_OFFSET_TAIL(msgcnt),
+ 0,
+ /* only disconnect on the ListOffsets request */
+ .broker_down = rd_false,
+ },
+ {RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
+ RD_KAFKA_OFFSET_TAIL(msgcnt), -1},
+ {RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR__NO_OFFSET,
+ RD_KAFKA_OFFSET_STORED, /* There's no committed offset */
+ -1},
+
+ };
+
+ SUB_TEST_QUICK();
+
+ mcluster = test_mock_cluster_new(1, &bootstraps);
+
+ /* Seed partition 0 with some messages so we can differ
+ * between beginning and end. */
+ test_produce_msgs_easy_v(topic, 0, partition, 0, msgcnt, 10,
+ "security.protocol", "plaintext",
+ "bootstrap.servers", bootstraps, NULL);
+
+ test_conf_init(&conf, NULL, 60 * 5);
+
+ test_conf_set(conf, "security.protocol", "plaintext");
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "enable.partition.eof", "true");
+ test_conf_set(conf, "enable.auto.commit", "false");
+ /* Speed up reconnects */
+ test_conf_set(conf, "reconnect.backoff.max.ms", "1000");
+
+ /* Raise an error (ERR__AUTO_OFFSET_RESET) so we can verify
+ * if auto.offset.reset is triggered or not. */
+ test_conf_set(conf, "auto.offset.reset", "error");
+
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR);
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ queue = rd_kafka_queue_get_consumer(c);
+
+ for (i = 0; i < (int)RD_ARRAYSIZE(test); i++) {
+ rd_kafka_event_t *ev;
+ rd_bool_t broker_down = rd_false;
+
+ /* Make sure consumer is connected */
+ test_wait_topic_exists(c, topic, 5000);
+
+ TEST_SAY(_C_YEL "#%d: injecting %s, expecting %s\n", i,
+ rd_kafka_err2name(test[i].inject),
+ rd_kafka_err2name(test[i].expect));
+
+ if (test[i].broker_down) {
+ TEST_SAY("Bringing down the broker\n");
+ rd_kafka_mock_broker_set_down(mcluster, broker_id);
+ broker_down = rd_true;
+
+ } else if (test[i].inject) {
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_ListOffsets, 5, test[i].inject,
+ test[i].inject, test[i].inject, test[i].inject,
+ test[i].inject);
+
+ /* mock handler will close the connection on this
+ * request */
+ if (test[i].inject == RD_KAFKA_RESP_ERR__TRANSPORT)
+ broker_down = rd_true;
+ }
+
+ test_consumer_assign_partition("ASSIGN", c, topic, partition,
+ test[i].start_offset);
+
+ while (1) {
+ /* Poll until we see an AUTO_OFFSET_RESET error,
+ * timeout, or a message, depending on what we're
+ * looking for. */
+ ev = rd_kafka_queue_poll(queue, 5000);
+
+ if (!ev) {
+ TEST_ASSERT(broker_down,
+ "#%d: poll timeout, but broker "
+ "was not down",
+ i);
+
+ /* Bring the broker back up and continue */
+ TEST_SAY("Bringing up the broker\n");
+ if (test[i].broker_down)
+ rd_kafka_mock_broker_set_up(mcluster,
+ broker_id);
+
+ broker_down = rd_false;
+
+ } else if (rd_kafka_event_type(ev) ==
+ RD_KAFKA_EVENT_ERROR) {
+
+ if (rd_kafka_event_error(ev) !=
+ RD_KAFKA_RESP_ERR__AUTO_OFFSET_RESET) {
+ TEST_SAY(
+ "#%d: Ignoring %s event: %s\n", i,
+ rd_kafka_event_name(ev),
+ rd_kafka_event_error_string(ev));
+ rd_kafka_event_destroy(ev);
+ continue;
+ }
+
+ TEST_SAY(
+ "#%d: injected %s, got error %s: %s\n", i,
+ rd_kafka_err2name(test[i].inject),
+ rd_kafka_err2name(rd_kafka_event_error(ev)),
+ rd_kafka_event_error_string(ev));
+
+ /* The auto reset error code is always
+ * ERR__AUTO_OFFSET_RESET, and the original
+ * error is provided in the error string.
+ * So use err2str() to compare the error
+ * string to the expected error. */
+ TEST_ASSERT(
+ strstr(rd_kafka_event_error_string(ev),
+ rd_kafka_err2str(test[i].expect)),
+ "#%d: expected %s, got %s", i,
+ rd_kafka_err2name(test[i].expect),
+ rd_kafka_err2name(
+ rd_kafka_event_error(ev)));
+
+ rd_kafka_event_destroy(ev);
+ break;
+
+ } else if (rd_kafka_event_type(ev) ==
+ RD_KAFKA_EVENT_FETCH) {
+ const rd_kafka_message_t *rkm =
+ rd_kafka_event_message_next(ev);
+
+ TEST_ASSERT(rkm, "#%d: got null message", i);
+
+ TEST_SAY("#%d: message at offset %" PRId64
+ " (%s)\n",
+ i, rkm->offset,
+ rd_kafka_err2name(rkm->err));
+
+ TEST_ASSERT(!test[i].expect,
+ "#%d: got message when expecting "
+ "error",
+ i);
+
+ TEST_ASSERT(
+ test[i].expect_offset == rkm->offset,
+ "#%d: expected message offset "
+ "%" PRId64 ", got %" PRId64 " (%s)",
+ i, test[i].expect_offset, rkm->offset,
+ rd_kafka_err2name(rkm->err));
+
+ TEST_SAY(
+ "#%d: got expected message at "
+ "offset %" PRId64 " (%s)\n",
+ i, rkm->offset,
+ rd_kafka_err2name(rkm->err));
+
+ rd_kafka_event_destroy(ev);
+ break;
+
+ } else {
+ TEST_SAY("#%d: Ignoring %s event: %s\n", i,
+ rd_kafka_event_name(ev),
+ rd_kafka_event_error_string(ev));
+ rd_kafka_event_destroy(ev);
+ }
+ }
+
+
+
+ rd_kafka_mock_clear_request_errors(mcluster,
+ RD_KAFKAP_ListOffsets);
+ }
+
+ rd_kafka_queue_destroy(queue);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+int main_0034_offset_reset_mock(int argc, char **argv) {
+ offset_reset_errors();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c
new file mode 100644
index 000000000..d005b1e9e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0035-api_version.c
@@ -0,0 +1,73 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Issue #606: test that api.version.request=true works or reverts to
+ * fallback within reasonable amount of time.
+ * Brokers 0.9.0 and 0.9.0.1 had a regression (wouldnt close the connection)
+ * which caused these requests to time out (slowly) in librdkafka.
+ */
+
+
+int main_0035_api_version(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ const struct rd_kafka_metadata *metadata;
+ rd_kafka_resp_err_t err;
+ test_timing_t t_meta;
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "socket.timeout.ms", "12000");
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("Querying for metadata\n");
+ TIMING_START(&t_meta, "metadata()");
+ err = rd_kafka_metadata(rk, 0, NULL, &metadata, tmout_multip(5 * 1000));
+ TIMING_STOP(&t_meta);
+ if (err)
+ TEST_FAIL("metadata() failed: %s", rd_kafka_err2str(err));
+
+ if (TIMING_DURATION(&t_meta) / 1000 > 15 * 1000)
+ TEST_FAIL("metadata() took too long: %.3fms",
+ (float)TIMING_DURATION(&t_meta) / 1000.0f);
+
+ rd_kafka_metadata_destroy(metadata);
+
+ TEST_SAY("Metadata succeeded\n");
+
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c
new file mode 100644
index 000000000..69ee9864c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0036-partial_fetch.c
@@ -0,0 +1,86 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Issue #641: correct handling of partial messages in FetchResponse
+ *
+ * General idea:
+ * - Produce messages of 1000 bytes each
+ * - Set fetch.message.max.bytes to 1500 so that only one full message
+ * can be fetched per request.
+ * - Make sure all messages are received correctly and in order.
+ */
+
+
+int main_0036_partial_fetch(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int partition = 0;
+ const int msgcnt = 100;
+ const int msgsize = 1000;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+
+ TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt,
+ (int)msgsize, topic, partition);
+ testid = test_id_generate();
+ rk = test_create_producer();
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, msgsize);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY("Creating consumer\n");
+ test_conf_init(&conf, NULL, 0);
+ /* This should fetch 1.5 messages per fetch, thus resulting in
+ * partial fetches, hopefully. */
+ test_conf_set(conf, "fetch.message.max.bytes", "1500");
+ rk = test_create_consumer(NULL, NULL, conf, NULL);
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ test_consumer_start("CONSUME", rkt, partition,
+ RD_KAFKA_OFFSET_BEGINNING);
+ test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0,
+ msgcnt, 1);
+ test_consumer_stop("CONSUME", rkt, partition);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c
new file mode 100644
index 000000000..3b543fb6f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0037-destroy_hang_local.c
@@ -0,0 +1,85 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Various regression tests for hangs on destroy.
+ */
+
+
+
+/**
+ * Issue #530:
+ * "Legacy Consumer. Delete hangs if done right after RdKafka::Consumer::create.
+ * But If I put a start and stop in between, there is no issue."
+ */
+static int legacy_consumer_early_destroy(void) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ int pass;
+ const char *topic = test_mk_topic_name(__FUNCTION__, 0);
+
+ for (pass = 0; pass < 2; pass++) {
+ TEST_SAY("%s: pass #%d\n", __FUNCTION__, pass);
+
+ rk = test_create_handle(RD_KAFKA_CONSUMER, NULL);
+
+ if (pass == 1) {
+ /* Second pass, create a topic too. */
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+ TEST_ASSERT(rkt, "failed to create topic: %s",
+ rd_kafka_err2str(rd_kafka_last_error()));
+ rd_sleep(1);
+ rd_kafka_topic_destroy(rkt);
+ }
+
+ rd_kafka_destroy(rk);
+ }
+
+ return 0;
+}
+
+
+int main_0037_destroy_hang_local(int argc, char **argv) {
+ int fails = 0;
+
+ test_conf_init(NULL, NULL, 30);
+
+ fails += legacy_consumer_early_destroy();
+
+ if (fails > 0)
+ TEST_FAIL("See %d previous error(s)\n", fails);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c
new file mode 100644
index 000000000..674964dc9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0038-performance.c
@@ -0,0 +1,120 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Basic performance tests.
+ * These tests dont fail but provide a throughput rate indication.
+ *
+ * + Produce N messages to one partition, acks=1, size=100
+ */
+
+
+int main_0038_performance(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int partition = 0;
+ const int msgsize = 100;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ test_timing_t t_create, t_produce, t_consume;
+ int totsize = 1024 * 1024 * (test_quick ? 8 : 128);
+ int msgcnt;
+
+ if (!strcmp(test_mode, "valgrind") || !strcmp(test_mode, "helgrind") ||
+ !strcmp(test_mode, "drd"))
+ totsize = 1024 * 1024 * 8; /* 8 meg, valgrind is slow. */
+
+ msgcnt = totsize / msgsize;
+
+ TEST_SAY("Producing %d messages of size %d to %s [%d]\n", msgcnt,
+ (int)msgsize, topic, partition);
+ testid = test_id_generate();
+ test_conf_init(&conf, NULL, 120);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ test_conf_set(conf, "queue.buffering.max.messages", "10000000");
+ test_conf_set(conf, "linger.ms", "100");
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = test_create_producer_topic(rk, topic, "acks", "1", NULL);
+
+ /* First produce one message to create the topic, etc, this might take
+ * a while and we dont want this to affect the throughput timing. */
+ TIMING_START(&t_create, "CREATE TOPIC");
+ test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, msgsize);
+ TIMING_STOP(&t_create);
+
+ TIMING_START(&t_produce, "PRODUCE");
+ test_produce_msgs(rk, rkt, testid, partition, 1, msgcnt - 1, NULL,
+ msgsize);
+ TIMING_STOP(&t_produce);
+
+ TEST_SAY("Destroying producer\n");
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY("Creating consumer\n");
+ test_conf_init(&conf, NULL, 120);
+ rk = test_create_consumer(NULL, NULL, conf, NULL);
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ test_consumer_start("CONSUME", rkt, partition,
+ RD_KAFKA_OFFSET_BEGINNING);
+ TIMING_START(&t_consume, "CONSUME");
+ test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0,
+ msgcnt, 1);
+ TIMING_STOP(&t_consume);
+ test_consumer_stop("CONSUME", rkt, partition);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_REPORT(
+ "{ \"producer\": "
+ " { \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f },"
+ " \"consumer\": "
+ "{ \"mb_per_sec\": %.2f, \"records_per_sec\": %.2f } "
+ "}",
+ (double)(totsize /
+ ((double)TIMING_DURATION(&t_produce) / 1000000.0f)) /
+ 1000000.0f,
+ (float)(msgcnt /
+ ((double)TIMING_DURATION(&t_produce) / 1000000.0f)),
+ (double)(totsize /
+ ((double)TIMING_DURATION(&t_consume) / 1000000.0f)) /
+ 1000000.0f,
+ (float)(msgcnt /
+ ((double)TIMING_DURATION(&t_consume) / 1000000.0f)));
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c
new file mode 100644
index 000000000..8d6b9f0ee
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0039-event.c
@@ -0,0 +1,284 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests event API.
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgid_next = 0;
+static int fails = 0;
+
+/**
+ * Handle delivery reports
+ */
+static void handle_drs(rd_kafka_event_t *rkev) {
+ const rd_kafka_message_t *rkmessage;
+
+ while ((rkmessage = rd_kafka_event_message_next(rkev))) {
+ int32_t broker_id = rd_kafka_message_broker_id(rkmessage);
+ int msgid = *(int *)rkmessage->_private;
+ free(rkmessage->_private);
+
+ TEST_SAYL(3,
+ "Got rkmessage %s [%" PRId32 "] @ %" PRId64
+ ": "
+ "from broker %" PRId32 ": %s\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset, broker_id,
+ rd_kafka_err2str(rkmessage->err));
+
+
+ if (rkmessage->err != RD_KAFKA_RESP_ERR_NO_ERROR)
+ TEST_FAIL("Message delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+
+ if (msgid != msgid_next) {
+ fails++;
+ TEST_FAIL("Delivered msg %i, expected %i\n", msgid,
+ msgid_next);
+ return;
+ }
+
+ TEST_ASSERT(broker_id >= 0, "Message %d has no broker id set",
+ msgid);
+
+ msgid_next = msgid + 1;
+ }
+}
+
+
+/**
+ * @brief Test delivery report events
+ */
+int main_0039_event_dr(int argc, char **argv) {
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char msg[128];
+ int msgcnt = test_quick ? 500 : 50000;
+ int i;
+ test_timing_t t_produce, t_delivery;
+ rd_kafka_queue_t *eventq;
+
+ test_conf_init(&conf, &topic_conf, 10);
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_DR);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ eventq = rd_kafka_queue_get_main(rk);
+
+ rkt = rd_kafka_topic_new(rk, test_mk_topic_name("0005", 0), topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", rd_strerror(errno));
+
+ /* Produce messages */
+ TIMING_START(&t_produce, "PRODUCE");
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
+ i);
+ r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
+ strlen(msg), NULL, 0, msgidp);
+ if (r == -1)
+ TEST_FAIL("Failed to produce message #%i: %s\n", i,
+ rd_strerror(errno));
+ }
+ TIMING_STOP(&t_produce);
+ TEST_SAY("Produced %i messages, waiting for deliveries\n", msgcnt);
+
+ /* Wait for messages to be delivered */
+ TIMING_START(&t_delivery, "DELIVERY");
+ while (rd_kafka_outq_len(rk) > 0) {
+ rd_kafka_event_t *rkev;
+ rkev = rd_kafka_queue_poll(eventq, 1000);
+ switch (rd_kafka_event_type(rkev)) {
+ case RD_KAFKA_EVENT_DR:
+ TEST_SAYL(3, "%s event with %" PRIusz " messages\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_event_message_count(rkev));
+ handle_drs(rkev);
+ break;
+ default:
+ TEST_SAY("Unhandled event: %s\n",
+ rd_kafka_event_name(rkev));
+ break;
+ }
+ rd_kafka_event_destroy(rkev);
+ }
+ TIMING_STOP(&t_delivery);
+
+ if (fails)
+ TEST_FAIL("%i failures, see previous errors", fails);
+
+ if (msgid_next != msgcnt)
+ TEST_FAIL("Still waiting for messages: next %i != end %i\n",
+ msgid_next, msgcnt);
+
+ rd_kafka_queue_destroy(eventq);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
+
+/**
+ * @brief Local test: test log events
+ */
+int main_0039_event_log(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *eventq;
+ int waitevent = 1;
+
+ const char *fac;
+ const char *msg;
+ char ctx[60];
+ int level;
+
+ conf = rd_kafka_conf_new();
+ rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0);
+ rd_kafka_conf_set(conf, "log.queue", "true", NULL, 0);
+ rd_kafka_conf_set(conf, "debug", "all", NULL, 0);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ eventq = rd_kafka_queue_get_main(rk);
+ TEST_CALL_ERR__(rd_kafka_set_log_queue(rk, eventq));
+
+ while (waitevent) {
+ /* reset ctx */
+ memset(ctx, '$', sizeof(ctx) - 2);
+ ctx[sizeof(ctx) - 1] = '\0';
+
+ rd_kafka_event_t *rkev;
+ rkev = rd_kafka_queue_poll(eventq, 1000);
+ switch (rd_kafka_event_type(rkev)) {
+ case RD_KAFKA_EVENT_LOG:
+ rd_kafka_event_log(rkev, &fac, &msg, &level);
+ rd_kafka_event_debug_contexts(rkev, ctx, sizeof(ctx));
+ TEST_SAY(
+ "Got log event: "
+ "level: %d ctx: %s fac: %s: msg: %s\n",
+ level, ctx, fac, msg);
+ if (strchr(ctx, '$')) {
+ TEST_FAIL(
+ "ctx was not set by "
+ "rd_kafka_event_debug_contexts()");
+ }
+ waitevent = 0;
+ break;
+ default:
+ TEST_SAY("Unhandled event: %s\n",
+ rd_kafka_event_name(rkev));
+ break;
+ }
+ rd_kafka_event_destroy(rkev);
+ }
+
+ /* Destroy rdkafka instance */
+ rd_kafka_queue_destroy(eventq);
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
+
+/**
+ * @brief Local test: test event generation
+ */
+int main_0039_event(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *eventq;
+ int waitevent = 1;
+
+ /* Set up a config with ERROR events enabled and
+ * configure an invalid broker so that _TRANSPORT or ALL_BROKERS_DOWN
+ * is promptly generated. */
+
+ conf = rd_kafka_conf_new();
+
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_ERROR);
+ rd_kafka_conf_set(conf, "bootstrap.servers", "0:65534", NULL, 0);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ eventq = rd_kafka_queue_get_main(rk);
+
+ while (waitevent) {
+ rd_kafka_event_t *rkev;
+ rkev = rd_kafka_queue_poll(eventq, 1000);
+ switch (rd_kafka_event_type(rkev)) {
+ case RD_KAFKA_EVENT_ERROR:
+ TEST_SAY("Got %s%s event: %s: %s\n",
+ rd_kafka_event_error_is_fatal(rkev) ? "FATAL "
+ : "",
+ rd_kafka_event_name(rkev),
+ rd_kafka_err2name(rd_kafka_event_error(rkev)),
+ rd_kafka_event_error_string(rkev));
+ waitevent = 0;
+ break;
+ default:
+ TEST_SAY("Unhandled event: %s\n",
+ rd_kafka_event_name(rkev));
+ break;
+ }
+ rd_kafka_event_destroy(rkev);
+ }
+
+ rd_kafka_queue_destroy(eventq);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c
new file mode 100644
index 000000000..d47da5206
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0040-io_event.c
@@ -0,0 +1,251 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests the queue IO event signalling.
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+#include <fcntl.h>
+#ifdef _WIN32
+#include <io.h>
+#pragma comment(lib, "ws2_32.lib")
+#else
+#include <unistd.h>
+#include <poll.h>
+#endif
+
+
+
+int main_0040_io_event(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_t *rk_p, *rk_c;
+ const char *topic;
+ rd_kafka_topic_t *rkt_p;
+ rd_kafka_queue_t *queue;
+ uint64_t testid;
+ int msgcnt = test_quick ? 10 : 100;
+ int recvd = 0;
+ int fds[2];
+ int wait_multiplier = 1;
+ struct pollfd pfd;
+ int r;
+ rd_kafka_resp_err_t err;
+ enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE;
+
+#ifdef _WIN32
+ TEST_SKIP("WSAPoll and pipes are not reliable on Win32 (FIXME)\n");
+ return 0;
+#endif
+ testid = test_id_generate();
+ topic = test_mk_topic_name(__FUNCTION__, 1);
+
+ rk_p = test_create_producer();
+ rkt_p = test_create_producer_topic(rk_p, topic, NULL);
+ err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000));
+ TEST_ASSERT(!err, "Topic auto creation failed: %s",
+ rd_kafka_err2str(err));
+
+ test_conf_init(&conf, &tconf, 0);
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "enable.partition.eof", "false");
+ /* Speed up propagation of new topics */
+ test_conf_set(conf, "metadata.max.age.ms", "1000");
+ test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
+ rk_c = test_create_consumer(topic, NULL, conf, tconf);
+
+ queue = rd_kafka_queue_get_consumer(rk_c);
+
+ test_consumer_subscribe(rk_c, topic);
+
+#ifndef _WIN32
+ r = pipe(fds);
+#else
+ r = _pipe(fds, 2, _O_BINARY);
+#endif
+ if (r == -1)
+ TEST_FAIL("pipe() failed: %s\n", strerror(errno));
+
+ rd_kafka_queue_io_event_enable(queue, fds[1], "1", 1);
+
+ pfd.fd = fds[0];
+ pfd.events = POLLIN;
+ pfd.revents = 0;
+
+ /**
+ * 1) Wait for rebalance event
+ * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
+ * 3) Produce half the messages
+ * 4) Expect IO
+ * 5) Consume the available messages
+ * 6) Wait 1 interval expecting no IO.
+ * 7) Produce remaing half
+ * 8) Expect IO
+ * 9) Done.
+ */
+ while (recvd < msgcnt) {
+#ifndef _WIN32
+ r = poll(&pfd, 1, 1000 * wait_multiplier);
+#else
+ r = WSAPoll(&pfd, 1, 1000 * wait_multiplier);
+#endif
+ if (r == -1) {
+ TEST_FAIL("poll() failed: %s", strerror(errno));
+
+ } else if (r == 1) {
+ rd_kafka_event_t *rkev;
+ char b;
+ int eventcnt = 0;
+
+ if (pfd.events & POLLERR)
+ TEST_FAIL("Poll error\n");
+ if (!(pfd.events & POLLIN)) {
+ TEST_SAY("Stray event 0x%x\n", (int)pfd.events);
+ continue;
+ }
+
+ TEST_SAY("POLLIN\n");
+ /* Read signaling token to purge socket queue and
+ * eventually silence POLLIN */
+#ifndef _WIN32
+ r = read(pfd.fd, &b, 1);
+#else
+ r = _read((int)pfd.fd, &b, 1);
+#endif
+ if (r == -1)
+ TEST_FAIL("read failed: %s\n", strerror(errno));
+
+ if (!expecting_io)
+ TEST_WARN(
+ "Got unexpected IO after %d/%d msgs\n",
+ recvd, msgcnt);
+
+ while ((rkev = rd_kafka_queue_poll(queue, 0))) {
+ eventcnt++;
+ switch (rd_kafka_event_type(rkev)) {
+ case RD_KAFKA_EVENT_REBALANCE:
+ TEST_SAY(
+ "Got %s: %s\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_err2str(
+ rd_kafka_event_error(rkev)));
+ if (expecting_io != _REBALANCE)
+ TEST_FAIL(
+ "Got Rebalance when "
+ "expecting message\n");
+ if (rd_kafka_event_error(rkev) ==
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
+ rd_kafka_assign(
+ rk_c,
+ rd_kafka_event_topic_partition_list(
+ rkev));
+ expecting_io = _NOPE;
+ } else
+ rd_kafka_assign(rk_c, NULL);
+ break;
+
+ case RD_KAFKA_EVENT_FETCH:
+ if (expecting_io != _YEP)
+ TEST_FAIL(
+ "Did not expect more "
+ "messages at %d/%d\n",
+ recvd, msgcnt);
+ recvd++;
+ if (recvd == (msgcnt / 2) ||
+ recvd == msgcnt)
+ expecting_io = _NOPE;
+ break;
+
+ case RD_KAFKA_EVENT_ERROR:
+ TEST_FAIL(
+ "Error: %s\n",
+ rd_kafka_event_error_string(rkev));
+ break;
+
+ default:
+ TEST_SAY("Ignoring event %s\n",
+ rd_kafka_event_name(rkev));
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+ TEST_SAY("%d events, Consumed %d/%d messages\n",
+ eventcnt, recvd, msgcnt);
+
+ wait_multiplier = 1;
+
+ } else {
+ if (expecting_io == _REBALANCE) {
+ continue;
+ } else if (expecting_io == _YEP) {
+ TEST_FAIL(
+ "Did not see expected IO after %d/%d "
+ "msgs\n",
+ recvd, msgcnt);
+ }
+
+ TEST_SAY("IO poll timeout (good)\n");
+
+ TEST_SAY("Got idle period, producing\n");
+ test_produce_msgs(rk_p, rkt_p, testid, 0, recvd,
+ msgcnt / 2, NULL, 10);
+
+ expecting_io = _YEP;
+ /* When running slowly (e.g., valgrind) it might take
+ * some time before the first message is received
+ * after producing. */
+ wait_multiplier = 3;
+ }
+ }
+ TEST_SAY("Done\n");
+
+ rd_kafka_topic_destroy(rkt_p);
+ rd_kafka_destroy(rk_p);
+
+ rd_kafka_queue_destroy(queue);
+ rd_kafka_consumer_close(rk_c);
+ rd_kafka_destroy(rk_c);
+
+#ifndef _WIN32
+ close(fds[0]);
+ close(fds[1]);
+#else
+ _close(fds[0]);
+ _close(fds[1]);
+#endif
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c
new file mode 100644
index 000000000..e243dc8ac
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0041-fetch_max_bytes.c
@@ -0,0 +1,96 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Issue #597: increase fetch.message.max.bytes until large messages can
+ * be fetched.
+ *
+ * General idea:
+ * - Produce 1000 small messages < MAX_BYTES
+ * - Produce 1000 large messages > MAX_BYTES
+ * - Create consumer with fetch.message.max.bytes=MAX_BYTES
+ * - Consume from beginning
+ * - All messages should be received.
+ */
+
+
+int main_0041_fetch_max_bytes(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int partition = 0;
+ const int msgcnt = 2 * 1000;
+ const int MAX_BYTES = 100000;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+
+ test_conf_init(NULL, NULL, 60);
+
+ testid = test_id_generate();
+ rk = test_create_producer();
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt / 2, NULL,
+ MAX_BYTES / 10);
+ test_produce_msgs(rk, rkt, testid, partition, msgcnt / 2, msgcnt / 2,
+ NULL, MAX_BYTES * 5);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY("Creating consumer\n");
+ test_conf_init(&conf, NULL, 0);
+
+ test_conf_set(conf, "fetch.message.max.bytes",
+ tsprintf("%d", MAX_BYTES));
+
+ /* This test may be slower when running with SSL or Helgrind,
+ * restart the timeout. */
+ test_timeout_set(60);
+
+ rk = test_create_consumer(NULL, NULL, conf, NULL);
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ test_consumer_start("CONSUME", rkt, partition,
+ RD_KAFKA_OFFSET_BEGINNING);
+ test_consume_msgs("CONSUME", rkt, testid, partition, TEST_NO_SEEK, 0,
+ msgcnt, 1);
+ test_consumer_stop("CONSUME", rkt, partition);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c
new file mode 100644
index 000000000..6ea5aa669
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0042-many_topics.c
@@ -0,0 +1,252 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * #781: handle many (?) topics.
+ */
+
+
+const int msgs_per_topic = 100;
+
+
+/**
+ * Request offset for nonexisting partition.
+ * Will cause rd_kafka_destroy() to hang.
+ */
+
+
+
+static void produce_many(char **topics, int topic_cnt, uint64_t testid) {
+ rd_kafka_t *rk;
+ test_timing_t t_rkt_create;
+ int i;
+ rd_kafka_topic_t **rkts;
+
+ TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
+
+ rk = test_create_producer();
+
+ TEST_SAY("Creating %d topic objects\n", topic_cnt);
+
+ rkts = malloc(sizeof(*rkts) * topic_cnt);
+ TIMING_START(&t_rkt_create, "Topic object create");
+ for (i = 0; i < topic_cnt; i++) {
+ rkts[i] = test_create_topic_object(rk, topics[i], "acks", "all",
+ NULL);
+ }
+ TIMING_STOP(&t_rkt_create);
+
+ TEST_SAY("Producing %d messages to each %d topics\n", msgs_per_topic,
+ topic_cnt);
+ /* Produce messages to each topic (so they are created) */
+ for (i = 0; i < topic_cnt; i++) {
+ test_produce_msgs(rk, rkts[i], testid, 0, i * msgs_per_topic,
+ msgs_per_topic, NULL, 100);
+ }
+
+ TEST_SAY("Destroying %d topic objects\n", topic_cnt);
+ for (i = 0; i < topic_cnt; i++) {
+ rd_kafka_topic_destroy(rkts[i]);
+ }
+ free(rkts);
+
+ test_flush(rk, 30000);
+
+ rd_kafka_destroy(rk);
+}
+
+
+static void legacy_consume_many(char **topics, int topic_cnt, uint64_t testid) {
+ rd_kafka_t *rk;
+ test_timing_t t_rkt_create;
+ int i;
+ rd_kafka_topic_t **rkts;
+ int msg_base = 0;
+
+ TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
+
+ test_conf_init(NULL, NULL, 60);
+
+ rk = test_create_consumer(NULL, NULL, NULL, NULL);
+
+ TEST_SAY("Creating %d topic objects\n", topic_cnt);
+
+ rkts = malloc(sizeof(*rkts) * topic_cnt);
+ TIMING_START(&t_rkt_create, "Topic object create");
+ for (i = 0; i < topic_cnt; i++)
+ rkts[i] = test_create_topic_object(rk, topics[i], NULL);
+ TIMING_STOP(&t_rkt_create);
+
+ TEST_SAY("Start consumer for %d topics\n", topic_cnt);
+ for (i = 0; i < topic_cnt; i++)
+ test_consumer_start("legacy", rkts[i], 0,
+ RD_KAFKA_OFFSET_BEGINNING);
+
+ TEST_SAY("Consuming from %d messages from each %d topics\n",
+ msgs_per_topic, topic_cnt);
+ for (i = 0; i < topic_cnt; i++) {
+ test_consume_msgs("legacy", rkts[i], testid, 0, TEST_NO_SEEK,
+ msg_base, msgs_per_topic, 1);
+ msg_base += msgs_per_topic;
+ }
+
+ TEST_SAY("Stopping consumers\n");
+ for (i = 0; i < topic_cnt; i++)
+ test_consumer_stop("legacy", rkts[i], 0);
+
+
+ TEST_SAY("Destroying %d topic objects\n", topic_cnt);
+ for (i = 0; i < topic_cnt; i++)
+ rd_kafka_topic_destroy(rkts[i]);
+
+ free(rkts);
+
+ rd_kafka_destroy(rk);
+}
+
+
+
+static void
+subscribe_consume_many(char **topics, int topic_cnt, uint64_t testid) {
+ rd_kafka_t *rk;
+ int i;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_topic_partition_list_t *parts;
+ rd_kafka_resp_err_t err;
+ test_msgver_t mv;
+
+ TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
+
+ test_conf_init(NULL, &tconf, 60);
+ test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
+ rk = test_create_consumer(__FUNCTION__, NULL, NULL, tconf);
+
+ parts = rd_kafka_topic_partition_list_new(topic_cnt);
+ for (i = 0; i < topic_cnt; i++)
+ rd_kafka_topic_partition_list_add(parts, topics[i],
+ RD_KAFKA_PARTITION_UA);
+
+ TEST_SAY("Subscribing to %d topics\n", topic_cnt);
+ err = rd_kafka_subscribe(rk, parts);
+ if (err)
+ TEST_FAIL("subscribe() failed: %s\n", rd_kafka_err2str(err));
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ test_msgver_init(&mv, testid);
+ test_consumer_poll("consume.subscribe", rk, testid, -1, 0,
+ msgs_per_topic * topic_cnt, &mv);
+
+ for (i = 0; i < topic_cnt; i++)
+ test_msgver_verify_part("subscribe", &mv, TEST_MSGVER_ALL_PART,
+ topics[i], 0, i * msgs_per_topic,
+ msgs_per_topic);
+ test_msgver_clear(&mv);
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+}
+
+
+
+static void assign_consume_many(char **topics, int topic_cnt, uint64_t testid) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_partition_list_t *parts;
+ int i;
+ test_msgver_t mv;
+
+ TEST_SAY(_C_MAG "%s\n" _C_CLR, __FUNCTION__);
+
+ test_conf_init(NULL, NULL, 60);
+ rk = test_create_consumer(__FUNCTION__, NULL, NULL, NULL);
+
+ parts = rd_kafka_topic_partition_list_new(topic_cnt);
+ for (i = 0; i < topic_cnt; i++)
+ rd_kafka_topic_partition_list_add(parts, topics[i], 0)->offset =
+ RD_KAFKA_OFFSET_TAIL(msgs_per_topic);
+
+ test_consumer_assign("consume.assign", rk, parts);
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ test_msgver_init(&mv, testid);
+ test_consumer_poll("consume.assign", rk, testid, -1, 0,
+ msgs_per_topic * topic_cnt, &mv);
+
+ for (i = 0; i < topic_cnt; i++)
+ test_msgver_verify_part("assign", &mv, TEST_MSGVER_ALL_PART,
+ topics[i], 0, i * msgs_per_topic,
+ msgs_per_topic);
+ test_msgver_clear(&mv);
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+}
+
+
+
+int main_0042_many_topics(int argc, char **argv) {
+ char **topics;
+ int topic_cnt = test_quick ? 4 : 20; /* up this as needed,
+ * topic creation takes time so
+ * unless hunting a bug
+ * we keep this low to keep the
+ * test suite run time down. */
+ uint64_t testid;
+ int i;
+
+ test_conf_init(NULL, NULL, 60);
+
+ testid = test_id_generate();
+
+ /* Generate unique topic names */
+ topics = malloc(sizeof(*topics) * topic_cnt);
+ for (i = 0; i < topic_cnt; i++)
+ topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+
+ produce_many(topics, topic_cnt, testid);
+ legacy_consume_many(topics, topic_cnt, testid);
+ if (test_broker_version >= TEST_BRKVER(0, 9, 0, 0)) {
+ subscribe_consume_many(topics, topic_cnt, testid);
+ assign_consume_many(topics, topic_cnt, testid);
+ }
+
+ for (i = 0; i < topic_cnt; i++)
+ free(topics[i]);
+ free(topics);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c
new file mode 100644
index 000000000..3470c4ae1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0043-no_connection.c
@@ -0,0 +1,77 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+
+/**
+ * Make sure library behaves even if there is no broker connection.
+ */
+
+
+
+static void test_producer_no_connection(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ int i;
+ const int partition_cnt = 2;
+ int msgcnt = 0;
+ test_timing_t t_destroy;
+
+ test_conf_init(&conf, NULL, 20);
+
+ test_conf_set(conf, "bootstrap.servers", NULL);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms",
+ "5000", NULL);
+
+ test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0, 100,
+ NULL, 100, 0, &msgcnt);
+ for (i = 0; i < partition_cnt; i++)
+ test_produce_msgs_nowait(rk, rkt, 0, i, 0, 100, NULL, 100, 0,
+ &msgcnt);
+
+ rd_kafka_poll(rk, 1000);
+
+ TEST_SAY("%d messages in queue\n", rd_kafka_outq_len(rk));
+
+ rd_kafka_topic_destroy(rkt);
+
+ TIMING_START(&t_destroy, "rd_kafka_destroy()");
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_destroy);
+}
+
+int main_0043_no_connection(int argc, char **argv) {
+ test_producer_no_connection();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c
new file mode 100644
index 000000000..51ef318c3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0044-partition_cnt.c
@@ -0,0 +1,93 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+
+/**
+ * Make sure library behaves when the partition count for a topic changes.
+ * This test requires to be run under trivup to be able to use kafka-topics.sh
+ */
+
+
+
+/**
+ * - Create topic with 2 partitions
+ * - Start producing messages to UA partition
+ * - Change to 4 partitions
+ * - Produce more messages to UA partition
+ * - Wait for DRs
+ * - Close
+ */
+
+static void test_producer_partition_cnt_change(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const int partition_cnt = 4;
+ int msgcnt = test_quick ? 500 : 100000;
+ test_timing_t t_destroy;
+ int produced = 0;
+
+ test_conf_init(&conf, NULL, 20);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ test_create_topic(rk, topic, partition_cnt / 2, 1);
+
+ rkt =
+ test_create_topic_object(rk, __FUNCTION__, "message.timeout.ms",
+ tsprintf("%d", tmout_multip(10000)), NULL);
+
+ test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt / 2, NULL, 100, 0, &produced);
+
+ test_create_partitions(rk, topic, partition_cnt);
+
+ test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, msgcnt / 2,
+ msgcnt / 2, NULL, 100, 0, &produced);
+
+ test_wait_delivery(rk, &produced);
+
+ rd_kafka_topic_destroy(rkt);
+
+ TIMING_START(&t_destroy, "rd_kafka_destroy()");
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_destroy);
+}
+
+int main_0044_partition_cnt(int argc, char **argv) {
+ if (!test_can_create_topics(1))
+ return 0;
+
+ test_producer_partition_cnt_change();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c
new file mode 100644
index 000000000..f804613d7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0045-subscribe_update.c
@@ -0,0 +1,459 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Verify that subscription is updated on metadata changes:
+ * - topic additions
+ * - topic deletions
+ * - partition count changes
+ */
+
+
+
+/**
+ * Wait for REBALANCE ASSIGN event and perform assignment
+ *
+ * Va-args are \p topic_cnt tuples of the expected assignment:
+ * { const char *topic, int partition_cnt }
+ */
+static void await_assignment(const char *pfx,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *queue,
+ int topic_cnt,
+ ...) {
+ rd_kafka_event_t *rkev;
+ rd_kafka_topic_partition_list_t *tps;
+ int i;
+ va_list ap;
+ int fails = 0;
+ int exp_part_cnt = 0;
+
+ TEST_SAY("%s: waiting for assignment\n", pfx);
+ rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
+ if (!rkev)
+ TEST_FAIL("timed out waiting for assignment");
+ TEST_ASSERT(rd_kafka_event_error(rkev) ==
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ "expected ASSIGN, got %s",
+ rd_kafka_err2str(rd_kafka_event_error(rkev)));
+ tps = rd_kafka_event_topic_partition_list(rkev);
+
+ TEST_SAY("%s: assignment:\n", pfx);
+ test_print_partition_list(tps);
+
+ va_start(ap, topic_cnt);
+ for (i = 0; i < topic_cnt; i++) {
+ const char *topic = va_arg(ap, const char *);
+ int partition_cnt = va_arg(ap, int);
+ int p;
+ TEST_SAY("%s: expecting %s with %d partitions\n", pfx, topic,
+ partition_cnt);
+ for (p = 0; p < partition_cnt; p++) {
+ if (!rd_kafka_topic_partition_list_find(tps, topic,
+ p)) {
+ TEST_FAIL_LATER(
+ "%s: expected partition %s [%d] "
+ "not found in assginment",
+ pfx, topic, p);
+ fails++;
+ }
+ }
+ exp_part_cnt += partition_cnt;
+ }
+ va_end(ap);
+
+ TEST_ASSERT(exp_part_cnt == tps->cnt,
+ "expected assignment of %d partitions, got %d",
+ exp_part_cnt, tps->cnt);
+
+ if (fails > 0)
+ TEST_FAIL("%s: assignment mismatch: see above", pfx);
+
+ rd_kafka_assign(rk, tps);
+ rd_kafka_event_destroy(rkev);
+}
+
+
+/**
+ * Wait for REBALANCE REVOKE event and perform unassignment.
+ */
+static void
+await_revoke(const char *pfx, rd_kafka_t *rk, rd_kafka_queue_t *queue) {
+ rd_kafka_event_t *rkev;
+
+ TEST_SAY("%s: waiting for revoke\n", pfx);
+ rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, 30000);
+ if (!rkev)
+ TEST_FAIL("timed out waiting for revoke");
+ TEST_ASSERT(rd_kafka_event_error(rkev) ==
+ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ "expected REVOKE, got %s",
+ rd_kafka_err2str(rd_kafka_event_error(rkev)));
+ rd_kafka_assign(rk, NULL);
+ rd_kafka_event_destroy(rkev);
+}
+
+/**
+ * Wait \p timeout_ms to make sure no rebalance was triggered.
+ */
+static void await_no_rebalance(const char *pfx,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *queue,
+ int timeout_ms) {
+ rd_kafka_event_t *rkev;
+
+ TEST_SAY("%s: waiting for %d ms to not see rebalance\n", pfx,
+ timeout_ms);
+ rkev = test_wait_event(queue, RD_KAFKA_EVENT_REBALANCE, timeout_ms);
+ if (!rkev)
+ return;
+ TEST_ASSERT(rkev, "did not expect %s: %s", rd_kafka_event_name(rkev),
+ rd_kafka_err2str(rd_kafka_event_error(rkev)));
+ rd_kafka_event_destroy(rkev);
+}
+
+static void do_test_non_exist_and_partchange(void) {
+ char *topic_a = rd_strdup(test_mk_topic_name("topic_a", 1));
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *queue;
+
+ /**
+ * Test #1:
+ * - Subscribe to non-existing topic.
+ * - Verify empty assignment
+ * - Create topic
+ * - Verify new assignment containing topic
+ */
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+
+ /* Decrease metadata interval to speed up topic change discovery. */
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
+
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
+ rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL);
+ queue = rd_kafka_queue_get_consumer(rk);
+
+ TEST_SAY("#1: Subscribing to %s\n", topic_a);
+ test_consumer_subscribe(rk, topic_a);
+
+ /* Should not see a rebalance since no topics are matched. */
+ await_no_rebalance("#1: empty", rk, queue, 10000);
+
+ TEST_SAY("#1: creating topic %s\n", topic_a);
+ test_create_topic(NULL, topic_a, 2, 1);
+
+ await_assignment("#1: proper", rk, queue, 1, topic_a, 2);
+
+
+ /**
+ * Test #2 (continue with #1 consumer)
+ * - Increase the partition count
+ * - Verify updated assignment
+ */
+ test_kafka_topics("--alter --topic %s --partitions 4", topic_a);
+ await_revoke("#2", rk, queue);
+
+ await_assignment("#2: more partitions", rk, queue, 1, topic_a, 4);
+
+ test_consumer_close(rk);
+ rd_kafka_queue_destroy(queue);
+ rd_kafka_destroy(rk);
+
+ rd_free(topic_a);
+
+ SUB_TEST_PASS();
+}
+
+
+
+static void do_test_regex(void) {
+ char *base_topic = rd_strdup(test_mk_topic_name("topic", 1));
+ char *topic_b = rd_strdup(tsprintf("%s_b", base_topic));
+ char *topic_c = rd_strdup(tsprintf("%s_c", base_topic));
+ char *topic_d = rd_strdup(tsprintf("%s_d", base_topic));
+ char *topic_e = rd_strdup(tsprintf("%s_e", base_topic));
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *queue;
+
+ /**
+ * Regex test:
+ * - Create topic b
+ * - Subscribe to b & d & e
+ * - Verify b assignment
+ * - Create topic c
+ * - Verify no rebalance
+ * - Create topic d
+ * - Verify b & d assignment
+ */
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+
+ /* Decrease metadata interval to speed up topic change discovery. */
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
+
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
+ rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL);
+ queue = rd_kafka_queue_get_consumer(rk);
+
+ TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_b);
+ test_create_topic(NULL, topic_b, 2, 1);
+ rd_sleep(1); // FIXME: do check&wait loop instead
+
+ TEST_SAY("Regex: Subscribing to %s & %s & %s\n", topic_b, topic_d,
+ topic_e);
+ test_consumer_subscribe(rk, tsprintf("^%s_[bde]$", base_topic));
+
+ await_assignment("Regex: just one topic exists", rk, queue, 1, topic_b,
+ 2);
+
+ TEST_SAY("Regex: creating topic %s (not subscribed)\n", topic_c);
+ test_create_topic(NULL, topic_c, 4, 1);
+
+ /* Should not see a rebalance since no topics are matched. */
+ await_no_rebalance("Regex: empty", rk, queue, 10000);
+
+ TEST_SAY("Regex: creating topic %s (subscribed)\n", topic_d);
+ test_create_topic(NULL, topic_d, 1, 1);
+
+ await_revoke("Regex: rebalance after topic creation", rk, queue);
+
+ await_assignment("Regex: two topics exist", rk, queue, 2, topic_b, 2,
+ topic_d, 1);
+
+ test_consumer_close(rk);
+ rd_kafka_queue_destroy(queue);
+ rd_kafka_destroy(rk);
+
+ rd_free(base_topic);
+ rd_free(topic_b);
+ rd_free(topic_c);
+ rd_free(topic_d);
+ rd_free(topic_e);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @remark Requires scenario=noautocreate.
+ */
+static void do_test_topic_remove(void) {
+ char *topic_f = rd_strdup(test_mk_topic_name("topic_f", 1));
+ char *topic_g = rd_strdup(test_mk_topic_name("topic_g", 1));
+ int parts_f = 5;
+ int parts_g = 9;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *queue;
+ rd_kafka_topic_partition_list_t *topics;
+ rd_kafka_resp_err_t err;
+
+ /**
+ * Topic removal test:
+ * - Create topic f & g
+ * - Subscribe to f & g
+ * - Verify f & g assignment
+ * - Remove topic f
+ * - Verify g assignment
+ * - Remove topic g
+ * - Verify empty assignment
+ */
+
+ SUB_TEST("Topic removal testing");
+
+ test_conf_init(&conf, NULL, 60);
+
+ /* Decrease metadata interval to speed up topic change discovery. */
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "5000");
+
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
+ rk = test_create_consumer(test_str_id_generate_tmp(), NULL, conf, NULL);
+ queue = rd_kafka_queue_get_consumer(rk);
+
+ TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_f);
+ test_create_topic(NULL, topic_f, parts_f, 1);
+
+ TEST_SAY("Topic removal: creating topic %s (subscribed)\n", topic_g);
+ test_create_topic(NULL, topic_g, parts_g, 1);
+
+ rd_sleep(1); // FIXME: do check&wait loop instead
+
+ TEST_SAY("Topic removal: Subscribing to %s & %s\n", topic_f, topic_g);
+ topics = rd_kafka_topic_partition_list_new(2);
+ rd_kafka_topic_partition_list_add(topics, topic_f,
+ RD_KAFKA_PARTITION_UA);
+ rd_kafka_topic_partition_list_add(topics, topic_g,
+ RD_KAFKA_PARTITION_UA);
+ err = rd_kafka_subscribe(rk, topics);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR, "%s",
+ rd_kafka_err2str(err));
+ rd_kafka_topic_partition_list_destroy(topics);
+
+ await_assignment("Topic removal: both topics exist", rk, queue, 2,
+ topic_f, parts_f, topic_g, parts_g);
+
+ TEST_SAY("Topic removal: removing %s\n", topic_f);
+ test_kafka_topics("--delete --topic %s", topic_f);
+
+ await_revoke("Topic removal: rebalance after topic removal", rk, queue);
+
+ await_assignment("Topic removal: one topic exists", rk, queue, 1,
+ topic_g, parts_g);
+
+ TEST_SAY("Topic removal: removing %s\n", topic_g);
+ test_kafka_topics("--delete --topic %s", topic_g);
+
+ await_revoke("Topic removal: rebalance after 2nd topic removal", rk,
+ queue);
+
+ /* Should not see another rebalance since all topics now removed */
+ await_no_rebalance("Topic removal: empty", rk, queue, 10000);
+
+ test_consumer_close(rk);
+ rd_kafka_queue_destroy(queue);
+ rd_kafka_destroy(rk);
+
+ rd_free(topic_f);
+ rd_free(topic_g);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Subscribe to a regex and continually create a lot of matching topics,
+ * triggering many rebalances.
+ *
+ * This is using the mock cluster.
+ *
+ */
+static void do_test_regex_many_mock(const char *assignment_strategy,
+ rd_bool_t lots_of_topics) {
+ const char *base_topic = "topic";
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *bootstraps;
+ int topic_cnt = lots_of_topics ? 300 : 50;
+ int await_assignment_every = lots_of_topics ? 150 : 15;
+ int i;
+
+ SUB_TEST("%s with %d topics", assignment_strategy, topic_cnt);
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+ test_conf_init(&conf, NULL, 60 * 5);
+
+ test_conf_set(conf, "security.protocol", "plaintext");
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "partition.assignment.strategy",
+ assignment_strategy);
+ /* Decrease metadata interval to speed up topic change discovery. */
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "3000");
+
+ rk = test_create_consumer("mygroup", test_rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(rk, tsprintf("^%s_.*", base_topic));
+
+ for (i = 0; i < topic_cnt; i++) {
+ char topic[256];
+
+ rd_snprintf(topic, sizeof(topic), "%s_%d", base_topic, i);
+
+
+ TEST_SAY("Creating topic %s\n", topic);
+ TEST_CALL_ERR__(rd_kafka_mock_topic_create(mcluster, topic,
+ 1 + (i % 8), 1));
+
+ test_consumer_poll_no_msgs("POLL", rk, 0,
+ lots_of_topics ? 100 : 300);
+
+ /* Wait for an assignment to let the consumer catch up on
+ * all rebalancing. */
+ if (i % await_assignment_every == await_assignment_every - 1)
+ test_consumer_wait_assignment(rk, rd_true /*poll*/);
+ else if (!lots_of_topics)
+ rd_usleep(100 * 1000, NULL);
+ }
+
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+
+
+int main_0045_subscribe_update(int argc, char **argv) {
+
+ if (!test_can_create_topics(1))
+ return 0;
+
+ do_test_regex();
+
+ return 0;
+}
+
+int main_0045_subscribe_update_non_exist_and_partchange(int argc, char **argv) {
+
+ do_test_non_exist_and_partchange();
+
+ return 0;
+}
+
+int main_0045_subscribe_update_topic_remove(int argc, char **argv) {
+
+ if (!test_can_create_topics(1))
+ return 0;
+
+ do_test_topic_remove();
+
+ return 0;
+}
+
+
+int main_0045_subscribe_update_mock(int argc, char **argv) {
+ do_test_regex_many_mock("range", rd_false);
+ do_test_regex_many_mock("cooperative-sticky", rd_false);
+ do_test_regex_many_mock("cooperative-sticky", rd_true);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c
new file mode 100644
index 000000000..541c03037
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0046-rkt_cache.c
@@ -0,0 +1,65 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Issue #345, #821
+ * Test that topic_new() + topic_destroy() can be used as a topic-lookup cache,
+ * i.e., as long as the app topic refcount stays above 1 the app can call
+ * new() and destroy() any number of times (symetrically).
+ */
+
+
+int main_0046_rkt_cache(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ const char *topic = test_mk_topic_name(__FUNCTION__, 0);
+ int i;
+
+ rk = test_create_producer();
+
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ for (i = 0; i < 100; i++) {
+ rd_kafka_topic_t *rkt2;
+
+ rkt2 = rd_kafka_topic_new(rk, topic, NULL);
+ TEST_ASSERT(rkt2 != NULL);
+
+ rd_kafka_topic_destroy(rkt2);
+ }
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c
new file mode 100644
index 000000000..d90004a3a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0047-partial_buf_tmout.c
@@ -0,0 +1,97 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Issue #756
+ *
+ * Partially sent buffers that timeout would cause the next request sent
+ * to appear inside the partially sent buffer, eventually leading to an
+ * InvalidReceiveException exception on the broker.
+ *
+ * This is easily triggered by:
+ * - decrease socket buffers
+ * - decrease message timeout
+ * - produce a bunch of large messages that will need to be partially sent
+ * - requests should timeout which should cause the connection to be closed
+ * by librdkafka.
+ *
+ * How do we monitor for correctness?
+ * - the broker shall not close the connection (but we might)
+ */
+
+static int got_timeout_err = 0;
+
+static void
+my_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ got_timeout_err += (err == RD_KAFKA_RESP_ERR__TIMED_OUT);
+
+ if (err == RD_KAFKA_RESP_ERR__TIMED_OUT ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)
+ TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err),
+ reason);
+ else
+ TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err),
+ reason);
+}
+
+int main_0047_partial_buf_tmout(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ const char *topic = test_mk_topic_name(__FUNCTION__, 0);
+ rd_kafka_conf_t *conf;
+ const size_t msg_size = 10000;
+ int msgcounter = 0;
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "socket.send.buffer.bytes", "1000");
+ test_conf_set(conf, "batch.num.messages", "100");
+ test_conf_set(conf, "queue.buffering.max.messages", "10000000");
+ rd_kafka_conf_set_error_cb(conf, my_error_cb);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = test_create_producer_topic(rk, topic, "message.timeout.ms", "300",
+ NULL);
+
+ while (got_timeout_err == 0) {
+ test_produce_msgs_nowait(rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0,
+ 10000, NULL, msg_size, 0, &msgcounter);
+ rd_kafka_flush(rk, 100);
+ }
+
+ TEST_ASSERT(got_timeout_err > 0);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c
new file mode 100644
index 000000000..84efee7db
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0048-partitioner.c
@@ -0,0 +1,283 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Various partitioner tests
+ *
+ * - Issue #797 - deadlock on failed partitioning
+ * - Verify that partitioning works across partitioners.
+ */
+
+int32_t my_invalid_partitioner(const rd_kafka_topic_t *rkt,
+ const void *keydata,
+ size_t keylen,
+ int32_t partition_cnt,
+ void *rkt_opaque,
+ void *msg_opaque) {
+ int32_t partition = partition_cnt + 10;
+ TEST_SAYL(4, "partition \"%.*s\" to %" PRId32 "\n", (int)keylen,
+ (const char *)keydata, partition);
+ return partition;
+}
+
+
+/* FIXME: This doesn't seem to trigger the bug in #797.
+ * Still a useful test though. */
+static void do_test_failed_partitioning(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_topic_conf_t *tconf;
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ int i;
+ int msgcnt = test_quick ? 100 : 10000;
+
+ test_conf_init(&conf, &tconf, 0);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ test_conf_set(conf, "sticky.partitioning.linger.ms", "0");
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rd_kafka_topic_conf_set_partitioner_cb(tconf, my_invalid_partitioner);
+ test_topic_conf_set(tconf, "message.timeout.ms",
+ tsprintf("%d", tmout_multip(10000)));
+ rkt = rd_kafka_topic_new(rk, topic, tconf);
+ TEST_ASSERT(rkt != NULL, "%s", rd_kafka_err2str(rd_kafka_last_error()));
+
+ /* Produce some messages (to p 0) to create topic */
+ test_produce_msgs(rk, rkt, 0, 0, 0, 2, NULL, 0);
+
+ /* Now use partitioner */
+ for (i = 0; i < msgcnt; i++) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ if (rd_kafka_produce(rkt, RD_KAFKA_PARTITION_UA, 0, NULL, 0,
+ NULL, 0, NULL) == -1)
+ err = rd_kafka_last_error();
+ if (err != RD_KAFKA_RESP_ERR__UNKNOWN_PARTITION)
+ TEST_FAIL(
+ "produce(): "
+ "Expected UNKNOWN_PARTITION, got %s\n",
+ rd_kafka_err2str(err));
+ }
+ test_flush(rk, 5000);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+}
+
+
+static void part_dr_msg_cb(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ int32_t *partp = rkmessage->_private;
+ int *remainsp = opaque;
+
+ if (rkmessage->err) {
+ /* Will fail later */
+ TEST_WARN("Delivery failed: %s\n",
+ rd_kafka_err2str(rkmessage->err));
+ *partp = -1;
+ } else {
+ *partp = rkmessage->partition;
+ }
+
+ (*remainsp)--;
+}
+
+/**
+ * @brief Test single \p partitioner
+ */
+static void do_test_partitioner(const char *topic,
+ const char *partitioner,
+ int msgcnt,
+ const char **keys,
+ const int32_t *exp_part) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ int i;
+ int32_t *parts;
+ int remains = msgcnt;
+ int randcnt = 0;
+ int fails = 0;
+
+ TEST_SAY(_C_MAG "Test partitioner \"%s\"\n", partitioner);
+
+ test_conf_init(&conf, NULL, 30);
+ rd_kafka_conf_set_opaque(conf, &remains);
+ rd_kafka_conf_set_dr_msg_cb(conf, part_dr_msg_cb);
+ test_conf_set(conf, "partitioner", partitioner);
+ test_conf_set(conf, "sticky.partitioning.linger.ms", "0");
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ parts = malloc(msgcnt * sizeof(*parts));
+ for (i = 0; i < msgcnt; i++)
+ parts[i] = -1;
+
+ /*
+ * Produce messages
+ */
+ for (i = 0; i < msgcnt; i++) {
+ rd_kafka_resp_err_t err;
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_KEY(keys[i], keys[i] ? strlen(keys[i]) : 0),
+ RD_KAFKA_V_OPAQUE(&parts[i]), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev() failed: %s",
+ rd_kafka_err2str(err));
+
+ randcnt += exp_part[i] == -1;
+ }
+
+ rd_kafka_flush(rk, tmout_multip(10000));
+
+ TEST_ASSERT(remains == 0, "Expected remains=%d, not %d for %d messages",
+ 0, remains, msgcnt);
+
+ /*
+ * Verify produced partitions to expected partitions.
+ */
+
+ /* First look for produce failures */
+ for (i = 0; i < msgcnt; i++) {
+ if (parts[i] == -1) {
+ TEST_WARN("Message #%d (exp part %" PRId32
+ ") "
+ "was not successfully produced\n",
+ i, exp_part[i]);
+ fails++;
+ }
+ }
+
+ TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
+
+
+ if (randcnt == msgcnt) {
+ /* If all expected partitions are random make sure
+ * the produced partitions have some form of
+ * random distribution */
+ int32_t last_part = parts[0];
+ int samecnt = 0;
+
+ for (i = 0; i < msgcnt; i++) {
+ samecnt += parts[i] == last_part;
+ last_part = parts[i];
+ }
+
+ TEST_ASSERT(samecnt < msgcnt,
+ "No random distribution, all on partition %" PRId32,
+ last_part);
+ } else {
+ for (i = 0; i < msgcnt; i++) {
+ if (exp_part[i] != -1 && parts[i] != exp_part[i]) {
+ TEST_WARN(
+ "Message #%d expected partition "
+ "%" PRId32 " but got %" PRId32 ": %s\n",
+ i, exp_part[i], parts[i], keys[i]);
+ fails++;
+ }
+ }
+
+
+ TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
+ }
+
+ free(parts);
+
+ rd_kafka_destroy(rk);
+
+ TEST_SAY(_C_GRN "Test partitioner \"%s\": PASS\n", partitioner);
+}
+
+extern uint32_t rd_crc32(const char *, size_t);
+
+/**
+ * @brief Test all builtin partitioners
+ */
+static void do_test_partitioners(void) {
+ int part_cnt = test_quick ? 7 : 17;
+#define _MSG_CNT 5
+ const char *unaligned = "123456";
+ /* Message keys */
+ const char *keys[_MSG_CNT] = {
+ NULL,
+ "", // empty
+ unaligned + 1,
+ "this is another string with more length to it perhaps", "hejsan"};
+ struct {
+ const char *partitioner;
+ /* Expected partition per message (see keys above) */
+ int32_t exp_part[_MSG_CNT];
+ } ptest[] = {{"random", {-1, -1, -1, -1, -1}},
+ {"consistent",
+ {/* These constants were acquired using
+ * the 'crc32' command on OSX */
+ 0x0 % part_cnt, 0x0 % part_cnt, 0xb1b451d7 % part_cnt,
+ 0xb0150df7 % part_cnt, 0xd077037e % part_cnt}},
+ {"consistent_random",
+ {-1, -1, 0xb1b451d7 % part_cnt, 0xb0150df7 % part_cnt,
+ 0xd077037e % part_cnt}},
+ {"murmur2",
+ {/* .. using tests/java/Murmur2Cli */
+ 0x106e08d9 % part_cnt, 0x106e08d9 % part_cnt,
+ 0x058d780f % part_cnt, 0x4f7703da % part_cnt,
+ 0x5ec19395 % part_cnt}},
+ {"murmur2_random",
+ {-1, 0x106e08d9 % part_cnt, 0x058d780f % part_cnt,
+ 0x4f7703da % part_cnt, 0x5ec19395 % part_cnt}},
+ {"fnv1a",
+ {/* .. using https://play.golang.org/p/hRkA4xtYyJ6 */
+ 0x7ee3623b % part_cnt, 0x7ee3623b % part_cnt,
+ 0x27e6f469 % part_cnt, 0x155e3e5f % part_cnt,
+ 0x17b1e27a % part_cnt}},
+ {"fnv1a_random",
+ {-1, 0x7ee3623b % part_cnt, 0x27e6f469 % part_cnt,
+ 0x155e3e5f % part_cnt, 0x17b1e27a % part_cnt}},
+ {NULL}};
+ int pi;
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+
+ test_create_topic(NULL, topic, part_cnt, 1);
+
+ for (pi = 0; ptest[pi].partitioner; pi++) {
+ do_test_partitioner(topic, ptest[pi].partitioner, _MSG_CNT,
+ keys, ptest[pi].exp_part);
+ }
+}
+
+int main_0048_partitioner(int argc, char **argv) {
+ if (test_can_create_topics(0))
+ do_test_partitioners();
+ do_test_failed_partitioning();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c
new file mode 100644
index 000000000..6083a1a76
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0049-consume_conn_close.c
@@ -0,0 +1,162 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#if WITH_SOCKEM
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Verify that consumtion continues after broker connectivity failure.
+ */
+
+static int simulate_network_down = 0;
+
+/**
+ * @brief Sockem connect, called from **internal librdkafka thread** through
+ * librdkafka's connect_cb
+ */
+static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
+ int r;
+
+ TEST_LOCK();
+ r = simulate_network_down;
+ TEST_UNLOCK();
+
+ if (r) {
+ sockem_close(skm);
+ return ECONNREFUSED;
+ } else {
+ /* Let it go real slow so we dont consume all
+ * the messages right away. */
+ sockem_set(skm, "rx.thruput", 100000, NULL);
+ }
+ return 0;
+}
+
+static int
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ /* Ignore connectivity errors since we'll be bringing down
+ * .. connectivity.
+ * SASL auther will think a connection-down even in the auth
+ * state means the broker doesn't support SASL PLAIN. */
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
+ err == RD_KAFKA_RESP_ERR__AUTHENTICATION)
+ return 0;
+ return 1;
+}
+
+
+int main_0049_consume_conn_close(int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char *topic = test_mk_topic_name("0049_consume_conn_close", 1);
+ uint64_t testid;
+ int msgcnt = test_quick ? 100 : 10000;
+ test_msgver_t mv;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_topic_partition_list_t *assignment;
+ rd_kafka_resp_err_t err;
+
+ if (!test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
+ TEST_SKIP(
+ "KNOWN ISSUE: ApiVersionRequest+SaslHandshake "
+ "will not play well with sudden disconnects\n");
+ return 0;
+ }
+
+ test_conf_init(&conf, &tconf, 60);
+ /* Want an even number so it is divisable by two without surprises */
+ msgcnt = (msgcnt / (int)test_timeout_multiplier) & ~1;
+
+ testid = test_id_generate();
+ test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);
+
+
+ test_socket_enable(conf);
+ test_curr->connect_cb = connect_cb;
+ test_curr->is_fatal_cb = is_fatal_cb;
+
+ test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
+
+ rk = test_create_consumer(topic, NULL, conf, tconf);
+
+ test_consumer_subscribe(rk, topic);
+
+ test_msgver_init(&mv, testid);
+
+ test_consumer_poll("consume.up", rk, testid, -1, 0, msgcnt / 2, &mv);
+
+ err = rd_kafka_assignment(rk, &assignment);
+ TEST_ASSERT(!err, "assignment() failed: %s", rd_kafka_err2str(err));
+ TEST_ASSERT(assignment->cnt > 0, "empty assignment");
+
+ TEST_SAY("Bringing down the network\n");
+
+ TEST_LOCK();
+ simulate_network_down = 1;
+ TEST_UNLOCK();
+ test_socket_close_all(test_curr, 1 /*reinit*/);
+
+ TEST_SAY("Waiting for session timeout to expire (6s), and then some\n");
+
+ /* Commit an offset, which should fail, to trigger the offset commit
+ * callback fallback (CONSUMER_ERR) */
+ assignment->elems[0].offset = 123456789;
+ TEST_SAY("Committing offsets while down, should fail eventually\n");
+ err = rd_kafka_commit(rk, assignment, 1 /*async*/);
+ TEST_ASSERT(!err, "async commit failed: %s", rd_kafka_err2str(err));
+ rd_kafka_topic_partition_list_destroy(assignment);
+
+ rd_sleep(10);
+
+ TEST_SAY("Bringing network back up\n");
+ TEST_LOCK();
+ simulate_network_down = 0;
+ TEST_UNLOCK();
+
+ TEST_SAY("Continuing to consume..\n");
+ test_consumer_poll("consume.up2", rk, testid, -1, msgcnt / 2,
+ msgcnt / 2, &mv);
+
+ test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP,
+ 0, msgcnt);
+
+ test_msgver_clear(&mv);
+
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
+
+
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c
new file mode 100644
index 000000000..d55e6e09a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0050-subscribe_adds.c
@@ -0,0 +1,124 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Verify that quick subscription additions work.
+ * * Create topics T1,T2,T3
+ * * Create consumer
+ * * Subscribe to T1
+ * * Subscribe to T1,T2
+ * * Subscribe to T1,T2,T3
+ * * Verify that all messages from all three topics are consumed
+ * * Subscribe to T1,T3
+ * * Verify that there were no duplicate messages.
+ */
+
+int main_0050_subscribe_adds(int argc, char **argv) {
+ rd_kafka_t *rk;
+#define TOPIC_CNT 3
+ char *topic[TOPIC_CNT] = {
+ rd_strdup(test_mk_topic_name("0050_subscribe_adds_1", 1)),
+ rd_strdup(test_mk_topic_name("0050_subscribe_adds_2", 1)),
+ rd_strdup(test_mk_topic_name("0050_subscribe_adds_3", 1)),
+ };
+ uint64_t testid;
+ int msgcnt = test_quick ? 100 : 10000;
+ test_msgver_t mv;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ int i;
+ rd_kafka_topic_partition_list_t *tlist;
+ rd_kafka_resp_err_t err;
+
+ msgcnt = (msgcnt / TOPIC_CNT) * TOPIC_CNT;
+ testid = test_id_generate();
+
+ rk = test_create_producer();
+ for (i = 0; i < TOPIC_CNT; i++) {
+ rd_kafka_topic_t *rkt;
+
+ rkt = test_create_producer_topic(rk, topic[i], NULL);
+
+ test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA,
+ (msgcnt / TOPIC_CNT) * i,
+ (msgcnt / TOPIC_CNT), NULL, 1000);
+
+ rd_kafka_topic_destroy(rkt);
+ }
+
+ rd_kafka_destroy(rk);
+
+ test_conf_init(&conf, &tconf, 60);
+ test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
+
+ rk = test_create_consumer(topic[0], NULL, conf, tconf);
+
+ tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT);
+ for (i = 0; i < TOPIC_CNT; i++) {
+ rd_kafka_topic_partition_list_add(tlist, topic[i],
+ RD_KAFKA_PARTITION_UA);
+ TEST_SAY("Subscribe to %d topic(s):\n", tlist->cnt);
+ test_print_partition_list(tlist);
+
+ err = rd_kafka_subscribe(rk, tlist);
+ TEST_ASSERT(!err, "subscribe() failed: %s",
+ rd_kafka_err2str(err));
+ }
+
+ test_msgver_init(&mv, testid);
+
+ test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv);
+
+ /* Now remove T2 */
+ rd_kafka_topic_partition_list_del(tlist, topic[1],
+ RD_KAFKA_PARTITION_UA);
+ err = rd_kafka_subscribe(rk, tlist);
+ TEST_ASSERT(!err, "subscribe() failed: %s", rd_kafka_err2str(err));
+
+ test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5));
+
+
+ test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP,
+ 0, msgcnt);
+
+ test_msgver_clear(&mv);
+
+ rd_kafka_topic_partition_list_destroy(tlist);
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ for (i = 0; i < TOPIC_CNT; i++)
+ rd_free(topic[i]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c
new file mode 100644
index 000000000..6f97b2ee4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0051-assign_adds.c
@@ -0,0 +1,125 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Verify that quick assignment additions work.
+ * * Create topics T1,T2,T3
+ * * Create consumer
+ * * Assign T1
+ * * Assign T1,T2
+ * * Assign T1,T2,T3
+ * * Verify that all messages from all three topics are consumed
+ * * Assign T1,T3
+ * * Verify that there were no duplicate messages.
+ */
+
+int main_0051_assign_adds(int argc, char **argv) {
+ rd_kafka_t *rk;
+#define TOPIC_CNT 3
+ char *topic[TOPIC_CNT] = {
+ rd_strdup(test_mk_topic_name("0051_assign_adds_1", 1)),
+ rd_strdup(test_mk_topic_name("0051_assign_adds_2", 1)),
+ rd_strdup(test_mk_topic_name("0051_assign_adds_3", 1)),
+ };
+ uint64_t testid;
+ int msgcnt = test_quick ? 100 : 1000;
+ test_msgver_t mv;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ int i;
+ rd_kafka_topic_partition_list_t *tlist;
+ rd_kafka_resp_err_t err;
+
+ msgcnt = (msgcnt / TOPIC_CNT) * TOPIC_CNT;
+ testid = test_id_generate();
+
+ rk = test_create_producer();
+ for (i = 0; i < TOPIC_CNT; i++) {
+ rd_kafka_topic_t *rkt;
+
+ rkt = test_create_producer_topic(rk, topic[i], NULL);
+
+ test_produce_msgs(rk, rkt, testid, 0, (msgcnt / TOPIC_CNT) * i,
+ (msgcnt / TOPIC_CNT), NULL, 100);
+
+ rd_kafka_topic_destroy(rkt);
+ }
+
+ rd_kafka_destroy(rk);
+
+ test_conf_init(&conf, &tconf, 60);
+ test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
+
+ rk = test_create_consumer(topic[0], NULL, conf, tconf);
+
+ tlist = rd_kafka_topic_partition_list_new(TOPIC_CNT);
+ for (i = 0; i < TOPIC_CNT; i++) {
+ rd_kafka_topic_partition_list_add(tlist, topic[i], 0);
+ TEST_SAY("Assign %d topic(s):\n", tlist->cnt);
+ test_print_partition_list(tlist);
+
+ err = rd_kafka_assign(rk, tlist);
+ TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err));
+ }
+
+ test_msgver_init(&mv, testid);
+
+ TEST_SAY("Expecting to consume all %d messages from %d topics\n",
+ msgcnt, TOPIC_CNT);
+
+ test_consumer_poll("consume", rk, testid, -1, 0, msgcnt, &mv);
+
+ /* Now remove T2 */
+ rd_kafka_topic_partition_list_del(tlist, topic[1], 0);
+ err = rd_kafka_assign(rk, tlist);
+ TEST_ASSERT(!err, "assign() failed: %s", rd_kafka_err2str(err));
+
+ TEST_SAY(
+ "Should not see any messages for session.timeout.ms+some more\n");
+ test_consumer_poll_no_msgs("consume", rk, testid, (int)(6000 * 1.5));
+
+ test_msgver_verify("consume", &mv, TEST_MSGVER_ORDER | TEST_MSGVER_DUP,
+ 0, msgcnt);
+
+ test_msgver_clear(&mv);
+
+ rd_kafka_topic_partition_list_destroy(tlist);
+
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ for (i = 0; i < TOPIC_CNT; i++)
+ rd_free(topic[i]);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c
new file mode 100644
index 000000000..ef9b89878
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0052-msg_timestamps.c
@@ -0,0 +1,220 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * Verify message timestamp behaviour on supporting brokers (>=0.10.0.0).
+ * Issue #858
+ */
+struct timestamp_range {
+ int64_t min;
+ int64_t max;
+};
+
+static const struct timestamp_range invalid_timestamp = {-1, -1};
+static struct timestamp_range broker_timestamp;
+static struct timestamp_range my_timestamp;
+
+static void prepare_timestamps(void) {
+ struct timeval ts;
+ rd_gettimeofday(&ts, NULL);
+
+ /* broker timestamps expected to be within 600 seconds */
+ broker_timestamp.min = (int64_t)ts.tv_sec * 1000LLU;
+ broker_timestamp.max = broker_timestamp.min + (600 * 1000LLU);
+
+ /* client timestamps: set in the future (24 hours)
+ * to be outside of broker timestamps */
+ my_timestamp.min = my_timestamp.max =
+ (int64_t)ts.tv_sec + (24 * 3600 * 1000LLU);
+}
+
+/**
+ * @brief Produce messages according to compress \p codec
+ */
+static void produce_msgs(const char *topic,
+ int partition,
+ uint64_t testid,
+ int msgcnt,
+ const char *broker_version,
+ const char *codec) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ int i;
+ char key[128], buf[100];
+ int msgcounter = msgcnt;
+
+ test_conf_init(&conf, NULL, 0);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ test_conf_set(conf, "compression.codec", codec);
+ test_conf_set(conf, "broker.version.fallback", broker_version);
+ if (!strncmp(broker_version, "0.8", 3) ||
+ !strncmp(broker_version, "0.9", 3)) {
+ test_conf_set(conf, "api.version.request", "false");
+ test_conf_set(conf, "enable.idempotence", "false");
+ }
+
+ /* Make sure to trigger a bunch of MessageSets */
+ test_conf_set(conf, "batch.num.messages", tsprintf("%d", msgcnt / 5));
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ for (i = 0; i < msgcnt; i++) {
+ rd_kafka_resp_err_t err;
+
+ test_prepare_msg(testid, partition, i, buf, sizeof(buf), key,
+ sizeof(key));
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_VALUE(buf, sizeof(buf)),
+ RD_KAFKA_V_KEY(key, sizeof(key)),
+ RD_KAFKA_V_TIMESTAMP(my_timestamp.min),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
+ if (err)
+ TEST_FAIL("producev() failed at msg #%d/%d: %s", i,
+ msgcnt, rd_kafka_err2str(err));
+ }
+
+ TEST_SAY("Waiting for %d messages to be produced\n", msgcounter);
+ while (msgcounter > 0)
+ rd_kafka_poll(rk, 100);
+
+ rd_kafka_destroy(rk);
+}
+
+static void
+consume_msgs_verify_timestamps(const char *topic,
+ int partition,
+ uint64_t testid,
+ int msgcnt,
+ const struct timestamp_range *exp_timestamp) {
+ test_msgver_t mv;
+
+ test_msgver_init(&mv, testid);
+ test_consume_msgs_easy_mv(topic, topic, -1, testid, -1, msgcnt, NULL,
+ &mv);
+
+ test_msgver_verify0(
+ __FUNCTION__, __LINE__, topic, &mv,
+ TEST_MSGVER_RANGE | TEST_MSGVER_BY_MSGID | TEST_MSGVER_BY_TIMESTAMP,
+ (struct test_mv_vs) {.msg_base = 0,
+ .exp_cnt = msgcnt,
+ .timestamp_min = exp_timestamp->min,
+ .timestamp_max = exp_timestamp->max});
+
+ test_msgver_clear(&mv);
+}
+
+
+
+static void test_timestamps(const char *broker_tstype,
+ const char *broker_version,
+ const char *codec,
+ const struct timestamp_range *exp_timestamps) {
+ const char *topic =
+ test_mk_topic_name(tsprintf("0052_msg_timestamps_%s_%s_%s",
+ broker_tstype, broker_version, codec),
+ 1);
+ const int msgcnt = 20;
+ uint64_t testid = test_id_generate();
+
+ if ((!strncmp(broker_version, "0.9", 3) ||
+ !strncmp(broker_version, "0.8", 3)) &&
+ !test_conf_match(NULL, "sasl.mechanisms", "GSSAPI")) {
+ TEST_SAY(_C_YEL
+ "Skipping %s, %s test: "
+ "SaslHandshake not supported by broker v%s" _C_CLR
+ "\n",
+ broker_tstype, codec, broker_version);
+ return;
+ }
+
+ TEST_SAY(_C_MAG "Timestamp test using %s\n", topic);
+ test_timeout_set(30);
+
+ test_kafka_topics(
+ "--create --topic \"%s\" "
+ "--replication-factor 1 --partitions 1 "
+ "--config message.timestamp.type=%s",
+ topic, broker_tstype);
+
+ TEST_SAY(_C_MAG "Producing %d messages to %s\n", msgcnt, topic);
+ produce_msgs(topic, 0, testid, msgcnt, broker_version, codec);
+
+ TEST_SAY(_C_MAG
+ "Consuming and verifying %d messages from %s "
+ "with expected timestamps %" PRId64 "..%" PRId64 "\n",
+ msgcnt, topic, exp_timestamps->min, exp_timestamps->max);
+
+ consume_msgs_verify_timestamps(topic, 0, testid, msgcnt,
+ exp_timestamps);
+}
+
+
+int main_0052_msg_timestamps(int argc, char **argv) {
+
+ if (!test_can_create_topics(1))
+ return 0;
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Test cluster requires authentication/SSL\n");
+ return 0;
+ }
+
+ /* Broker version limits the producer's feature set,
+ * for 0.9.0.0 no timestamp will be transmitted,
+ * but for 0.10.1.0 (or newer, api.version.request will be true)
+ * the producer will set the timestamp.
+ * In all cases we want a reasonable timestamp back.
+ *
+ * Explicit broker LogAppendTime setting will overwrite
+ * any producer-provided offset.
+ *
+ * Using the old non-timestamp-aware protocol without
+ * LogAppendTime will cause unset/invalid timestamps .
+ *
+ * Any other option should honour the producer create timestamps.
+ */
+ prepare_timestamps();
+
+ test_timestamps("CreateTime", "0.10.1.0", "none", &my_timestamp);
+ test_timestamps("LogAppendTime", "0.10.1.0", "none", &broker_timestamp);
+ test_timestamps("CreateTime", "0.9.0.0", "none", &invalid_timestamp);
+ test_timestamps("LogAppendTime", "0.9.0.0", "none", &broker_timestamp);
+#if WITH_ZLIB
+ test_timestamps("CreateTime", "0.10.1.0", "gzip", &my_timestamp);
+ test_timestamps("LogAppendTime", "0.10.1.0", "gzip", &broker_timestamp);
+ test_timestamps("CreateTime", "0.9.0.0", "gzip", &invalid_timestamp);
+ test_timestamps("LogAppendTime", "0.9.0.0", "gzip", &broker_timestamp);
+#endif
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp
new file mode 100644
index 000000000..a61755c30
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0053-stats_cb.cpp
@@ -0,0 +1,535 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <fstream>
+#include <iterator>
+#include <string>
+#include "testcpp.h"
+
+#if WITH_RAPIDJSON
+#include <rapidjson/document.h>
+#include <rapidjson/schema.h>
+#include <rapidjson/filereadstream.h>
+#include <rapidjson/stringbuffer.h>
+#include <rapidjson/error/en.h>
+#include <rapidjson/prettywriter.h>
+#endif
+
+static const char *stats_schema_path = "../src/statistics_schema.json";
+
+#if WITH_RAPIDJSON
+/**
+ * @brief Statistics schema validator
+ */
+class TestSchemaValidator {
+ public:
+ TestSchemaValidator() {
+ }
+ TestSchemaValidator(const std::string schema_path) {
+ /* Read schema from file */
+ schema_path_ = schema_path;
+
+ std::ifstream f(schema_path.c_str());
+ if (!f.is_open())
+ Test::Fail(tostr() << "Failed to open schema " << schema_path << ": "
+ << strerror(errno));
+ std::string schema_str((std::istreambuf_iterator<char>(f)),
+ (std::istreambuf_iterator<char>()));
+
+ /* Parse schema */
+ sd_ = new rapidjson::Document();
+ if (sd_->Parse(schema_str.c_str()).HasParseError())
+ Test::Fail(tostr() << "Failed to parse statistics schema: "
+ << rapidjson::GetParseError_En(sd_->GetParseError())
+ << " at " << sd_->GetErrorOffset());
+
+ schema_ = new rapidjson::SchemaDocument(*sd_);
+ validator_ = new rapidjson::SchemaValidator(*schema_);
+ }
+
+ ~TestSchemaValidator() {
+ if (sd_)
+ delete sd_;
+ if (schema_)
+ delete schema_;
+ if (validator_)
+ delete validator_;
+ }
+
+ void validate(const std::string &json_doc) {
+ /* Parse JSON to validate */
+ rapidjson::Document d;
+ if (d.Parse(json_doc.c_str()).HasParseError())
+ Test::Fail(tostr() << "Failed to parse stats JSON: "
+ << rapidjson::GetParseError_En(d.GetParseError())
+ << " at " << d.GetErrorOffset());
+
+ /* Validate using schema */
+ if (!d.Accept(*validator_)) {
+ rapidjson::StringBuffer sb;
+
+ validator_->GetInvalidSchemaPointer().StringifyUriFragment(sb);
+ Test::Say(tostr() << "Schema: " << sb.GetString() << "\n");
+ Test::Say(tostr() << "Invalid keyword: "
+ << validator_->GetInvalidSchemaKeyword() << "\n");
+ sb.Clear();
+
+ validator_->GetInvalidDocumentPointer().StringifyUriFragment(sb);
+ Test::Say(tostr() << "Invalid document: " << sb.GetString() << "\n");
+ sb.Clear();
+
+ Test::Fail(tostr() << "JSON validation using schema " << schema_path_
+ << " failed");
+ }
+
+ Test::Say(3, "JSON document validated using schema " + schema_path_ + "\n");
+ }
+
+ private:
+ std::string schema_path_;
+ rapidjson::Document *sd_;
+ rapidjson::SchemaDocument *schema_;
+ rapidjson::SchemaValidator *validator_;
+};
+
+
+#else
+
+/* Dummy validator doing nothing when RapidJSON is unavailable */
+class TestSchemaValidator {
+ public:
+ TestSchemaValidator() {
+ }
+ TestSchemaValidator(const std::string schema_path) {
+ }
+
+ ~TestSchemaValidator() {
+ }
+
+ void validate(const std::string &json_doc) {
+ }
+};
+
+#endif
+
+class myEventCb : public RdKafka::EventCb {
+ public:
+ myEventCb(const std::string schema_path) :
+ validator_(TestSchemaValidator(schema_path)) {
+ stats_cnt = 0;
+ }
+
+ int stats_cnt;
+ std::string last; /**< Last stats document */
+
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_STATS:
+ if (!(stats_cnt % 10))
+ Test::Say(tostr() << "Stats (#" << stats_cnt << "): " << event.str()
+ << "\n");
+ if (event.str().length() > 20)
+ stats_cnt += 1;
+ validator_.validate(event.str());
+ last = event.str();
+ break;
+ default:
+ break;
+ }
+ }
+
+ private:
+ TestSchemaValidator validator_;
+};
+
+
+/**
+ * @brief Verify that stats are emitted according to statistics.interval.ms
+ */
+void test_stats_timing() {
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ myEventCb my_event = myEventCb(stats_schema_path);
+ std::string errstr;
+
+ if (conf->set("statistics.interval.ms", "100", errstr) !=
+ RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ if (conf->set("event_cb", &my_event, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ int64_t t_start = test_clock();
+
+ while (my_event.stats_cnt < 12)
+ p->poll(1000);
+
+ int elapsed = (int)((test_clock() - t_start) / 1000);
+ const int expected_time = 1200;
+
+ Test::Say(tostr() << my_event.stats_cnt
+ << " (expected 12) stats callbacks received in " << elapsed
+ << "ms (expected " << expected_time << "ms +-25%)\n");
+
+ if (elapsed < expected_time * 0.75 || elapsed > expected_time * 1.25) {
+ /* We can't rely on CIs giving our test job enough CPU to finish
+ * in time, so don't error out even if the time is outside the window */
+ if (test_on_ci)
+ Test::Say(tostr() << "WARNING: Elapsed time " << elapsed
+ << "ms outside +-25% window (" << expected_time
+ << "ms), cnt " << my_event.stats_cnt);
+ else
+ Test::Fail(tostr() << "Elapsed time " << elapsed
+ << "ms outside +-25% window (" << expected_time
+ << "ms), cnt " << my_event.stats_cnt);
+ }
+ delete p;
+}
+
+
+
+#if WITH_RAPIDJSON
+
+/**
+ * @brief Expected partition stats
+ */
+struct exp_part_stats {
+ std::string topic; /**< Topic */
+ int32_t part; /**< Partition id */
+ int msgcnt; /**< Expected message count */
+ int msgsize; /**< Expected per message size.
+ * This includes both key and value lengths */
+
+ /* Calculated */
+ int64_t totsize; /**< Message size sum */
+};
+
+/**
+ * @brief Verify end-to-end producer and consumer stats.
+ */
+static void verify_e2e_stats(const std::string &prod_stats,
+ const std::string &cons_stats,
+ struct exp_part_stats *exp_parts,
+ int partcnt) {
+ /**
+ * Parse JSON stats
+ * These documents are already validated in the Event callback.
+ */
+ rapidjson::Document p;
+ if (p.Parse<rapidjson::kParseValidateEncodingFlag>(prod_stats.c_str())
+ .HasParseError())
+ Test::Fail(tostr() << "Failed to parse producer stats JSON: "
+ << rapidjson::GetParseError_En(p.GetParseError())
+ << " at " << p.GetErrorOffset());
+
+ rapidjson::Document c;
+ if (c.Parse<rapidjson::kParseValidateEncodingFlag>(cons_stats.c_str())
+ .HasParseError())
+ Test::Fail(tostr() << "Failed to parse consumer stats JSON: "
+ << rapidjson::GetParseError_En(c.GetParseError())
+ << " at " << c.GetErrorOffset());
+
+ assert(p.HasMember("name"));
+ assert(c.HasMember("name"));
+ assert(p.HasMember("type"));
+ assert(c.HasMember("type"));
+
+ Test::Say(tostr() << "Verifying stats from Producer " << p["name"].GetString()
+ << " and Consumer " << c["name"].GetString() << "\n");
+
+ assert(!strcmp(p["type"].GetString(), "producer"));
+ assert(!strcmp(c["type"].GetString(), "consumer"));
+
+ int64_t exp_tot_txmsgs = 0;
+ int64_t exp_tot_txmsg_bytes = 0;
+ int64_t exp_tot_rxmsgs = 0;
+ int64_t exp_tot_rxmsg_bytes = 0;
+
+ for (int part = 0; part < partcnt; part++) {
+ /*
+ * Find partition stats.
+ */
+
+ /* Construct the partition path. */
+ char path[256];
+ rd_snprintf(path, sizeof(path), "/topics/%s/partitions/%d",
+ exp_parts[part].topic.c_str(), exp_parts[part].part);
+ Test::Say(tostr() << "Looking up partition " << exp_parts[part].part
+ << " with path " << path << "\n");
+
+ /* Even though GetValueByPointer() takes a "char[]" it can only be used
+ * with perfectly sized char buffers or string literals since it
+ * does not respect NUL terminators.
+ * So instead convert the path to a Pointer.*/
+ rapidjson::Pointer jpath((const char *)path);
+
+ rapidjson::Value *pp = rapidjson::GetValueByPointer(p, jpath);
+ if (!pp)
+ Test::Fail(tostr() << "Producer: could not find " << path << " in "
+ << prod_stats << "\n");
+
+ rapidjson::Value *cp = rapidjson::GetValueByPointer(c, jpath);
+ if (!pp)
+ Test::Fail(tostr() << "Consumer: could not find " << path << " in "
+ << cons_stats << "\n");
+
+ assert(pp->HasMember("partition"));
+ assert(pp->HasMember("txmsgs"));
+ assert(pp->HasMember("txbytes"));
+
+ assert(cp->HasMember("partition"));
+ assert(cp->HasMember("rxmsgs"));
+ assert(cp->HasMember("rxbytes"));
+
+ Test::Say(tostr() << "partition: " << (*pp)["partition"].GetInt() << "\n");
+
+ int64_t txmsgs = (*pp)["txmsgs"].GetInt();
+ int64_t txbytes = (*pp)["txbytes"].GetInt();
+ int64_t rxmsgs = (*cp)["rxmsgs"].GetInt();
+ int64_t rxbytes = (*cp)["rxbytes"].GetInt();
+
+ exp_tot_txmsgs += txmsgs;
+ exp_tot_txmsg_bytes += txbytes;
+ exp_tot_rxmsgs += rxmsgs;
+ exp_tot_rxmsg_bytes += rxbytes;
+
+ Test::Say(tostr() << "Producer partition: " << (*pp)["partition"].GetInt()
+ << ": "
+ << "txmsgs: " << txmsgs << " vs "
+ << exp_parts[part].msgcnt << ", "
+ << "txbytes: " << txbytes << " vs "
+ << exp_parts[part].totsize << "\n");
+ Test::Say(tostr() << "Consumer partition: " << (*cp)["partition"].GetInt()
+ << ": "
+ << "rxmsgs: " << rxmsgs << " vs "
+ << exp_parts[part].msgcnt << ", "
+ << "rxbytes: " << rxbytes << " vs "
+ << exp_parts[part].totsize << "\n");
+ }
+
+ /* Check top-level total stats */
+
+ assert(p.HasMember("txmsgs"));
+ assert(p.HasMember("txmsg_bytes"));
+ assert(p.HasMember("rxmsgs"));
+ assert(p.HasMember("rxmsg_bytes"));
+
+ int64_t tot_txmsgs = p["txmsgs"].GetInt();
+ int64_t tot_txmsg_bytes = p["txmsg_bytes"].GetInt();
+ int64_t tot_rxmsgs = c["rxmsgs"].GetInt();
+ int64_t tot_rxmsg_bytes = c["rxmsg_bytes"].GetInt();
+
+ Test::Say(tostr() << "Producer total: "
+ << "txmsgs: " << tot_txmsgs << " vs " << exp_tot_txmsgs
+ << ", "
+ << "txbytes: " << tot_txmsg_bytes << " vs "
+ << exp_tot_txmsg_bytes << "\n");
+ Test::Say(tostr() << "Consumer total: "
+ << "rxmsgs: " << tot_rxmsgs << " vs " << exp_tot_rxmsgs
+ << ", "
+ << "rxbytes: " << tot_rxmsg_bytes << " vs "
+ << exp_tot_rxmsg_bytes << "\n");
+}
+
+/**
+ * @brief Verify stats JSON structure and individual metric fields.
+ *
+ * To capture as much verifiable data as possible we run a full
+ * producer - consumer end to end test and verify that counters
+ * and states are emitted accordingly.
+ *
+ * Requires RapidJSON (for parsing the stats).
+ */
+static void test_stats() {
+ std::string errstr;
+ RdKafka::Conf *conf;
+ myEventCb producer_event(stats_schema_path);
+ myEventCb consumer_event(stats_schema_path);
+
+ std::string topic = Test::mk_topic_name("0053_stats", 1);
+
+ const int partcnt = 2;
+ int msgcnt = (test_quick ? 10 : 100) * partcnt;
+ const int msgsize = 6 * 1024;
+
+ /*
+ * Common config for producer and consumer
+ */
+ Test::conf_init(&conf, NULL, 60);
+ if (conf->set("statistics.interval.ms", "1000", errstr) !=
+ RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+
+ /*
+ * Create Producer
+ */
+ if (conf->set("event_cb", &producer_event, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+
+ /*
+ * Create Consumer
+ */
+ conf->set("group.id", topic, errstr);
+ conf->set("auto.offset.reset", "earliest", errstr);
+ conf->set("enable.partition.eof", "false", errstr);
+ if (conf->set("event_cb", &consumer_event, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /*
+ * Set up consumer assignment (but assign after producing
+ * since there will be no topics now) and expected partitions
+ * for later verification.
+ */
+ std::vector<RdKafka::TopicPartition *> toppars;
+ struct exp_part_stats exp_parts[partcnt] = {};
+
+ for (int32_t part = 0; part < (int32_t)partcnt; part++) {
+ toppars.push_back(RdKafka::TopicPartition::create(
+ topic, part, RdKafka::Topic::OFFSET_BEGINNING));
+ exp_parts[part].topic = topic;
+ exp_parts[part].part = part;
+ exp_parts[part].msgcnt = msgcnt / partcnt;
+ exp_parts[part].msgsize = msgsize;
+ exp_parts[part].totsize = 0;
+ }
+
+ /*
+ * Produce messages
+ */
+ uint64_t testid = test_id_generate();
+
+ char key[256];
+ char *buf = (char *)malloc(msgsize);
+
+ for (int32_t part = 0; part < (int32_t)partcnt; part++) {
+ for (int i = 0; i < msgcnt / partcnt; i++) {
+ test_prepare_msg(testid, part, i, buf, msgsize, key, sizeof(key));
+ RdKafka::ErrorCode err =
+ p->produce(topic, part, RdKafka::Producer::RK_MSG_COPY, buf, msgsize,
+ key, sizeof(key), -1, NULL);
+ if (err)
+ Test::Fail("Produce failed: " + RdKafka::err2str(err));
+ exp_parts[part].totsize += msgsize + sizeof(key);
+ p->poll(0);
+ }
+ }
+
+ free(buf);
+
+ Test::Say("Waiting for final message delivery\n");
+ /* Wait for delivery */
+ p->flush(15 * 1000);
+
+ /*
+ * Start consuming partitions
+ */
+ c->assign(toppars);
+ RdKafka::TopicPartition::destroy(toppars);
+
+ /*
+ * Consume the messages
+ */
+ int recvcnt = 0;
+ Test::Say(tostr() << "Consuming " << msgcnt << " messages\n");
+ while (recvcnt < msgcnt) {
+ RdKafka::Message *msg = c->consume(-1);
+ if (msg->err())
+ Test::Fail("Consume failed: " + msg->errstr());
+
+ int msgid;
+ TestMessageVerify(testid, -1, &msgid, msg);
+ recvcnt++;
+ delete msg;
+ }
+
+ /*
+ * Producer:
+ * Wait for one last stats emit when all messages have been delivered.
+ */
+ int prev_cnt = producer_event.stats_cnt;
+ while (prev_cnt == producer_event.stats_cnt) {
+ Test::Say("Waiting for final producer stats event\n");
+ p->poll(100);
+ }
+
+ /*
+ * Consumer:
+ * Wait for a one last stats emit when all messages have been received,
+ * since previous stats may have been enqueued but not served we
+ * skip the first 2.
+ */
+ prev_cnt = consumer_event.stats_cnt;
+ while (prev_cnt + 2 >= consumer_event.stats_cnt) {
+ Test::Say(tostr() << "Waiting for final consumer stats event: "
+ << consumer_event.stats_cnt << "\n");
+ c->poll(100);
+ }
+
+
+ verify_e2e_stats(producer_event.last, consumer_event.last, exp_parts,
+ partcnt);
+
+
+ c->close();
+
+ delete p;
+ delete c;
+}
+#endif
+
+extern "C" {
+int main_0053_stats_timing(int argc, char **argv) {
+ test_stats_timing();
+ return 0;
+}
+
+int main_0053_stats(int argc, char **argv) {
+#if WITH_RAPIDJSON
+ test_stats();
+#else
+ Test::Skip("RapidJSON >=1.1.0 not available\n");
+#endif
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp
new file mode 100644
index 000000000..58c88b4a1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0054-offset_time.cpp
@@ -0,0 +1,236 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+/**
+ * Test offset_for_times (KIP-79): time-based offset lookups.
+ */
+
+
+static int verify_offset(const RdKafka::TopicPartition *tp,
+ int64_t timestamp,
+ int64_t exp_offset,
+ RdKafka::ErrorCode exp_err) {
+ int fails = 0;
+ if (tp->err() != exp_err) {
+ Test::FailLater(tostr()
+ << " " << tp->topic() << " [" << tp->partition() << "] "
+ << "expected error " << RdKafka::err2str(exp_err)
+ << ", got " << RdKafka::err2str(tp->err()) << "\n");
+ fails++;
+ }
+
+ if (!exp_err && tp->offset() != exp_offset) {
+ Test::FailLater(tostr()
+ << " " << tp->topic() << " [" << tp->partition() << "] "
+ << "expected offset " << exp_offset << " for timestamp "
+ << timestamp << ", got " << tp->offset() << "\n");
+ fails++;
+ }
+
+ return fails;
+}
+
+
+static void test_offset_time(void) {
+ std::vector<RdKafka::TopicPartition *> query_parts;
+ std::string topic = Test::mk_topic_name("0054-offset_time", 1);
+ RdKafka::Conf *conf, *tconf;
+ int64_t timestamps[] = {
+ /* timestamp, expected offset */
+ 1234,
+ 0,
+ 999999999999,
+ 1,
+ };
+ const int timestamp_cnt = 2;
+ int fails = 0;
+ std::string errstr;
+
+ Test::conf_init(&conf, &tconf, 0);
+
+ /* Need acks=all to make sure OffsetRequest correctly reads fully
+ * written Produce record. */
+ Test::conf_set(tconf, "acks", "all");
+ Test::conf_set(conf, "api.version.request", "true");
+ conf->set("dr_cb", &Test::DrCb, errstr);
+ conf->set("default_topic_conf", tconf, errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 97, timestamps[0]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 98, timestamps[0]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 99, timestamps[0]));
+
+ /* First query timestamps before topic exists, should fail. */
+ Test::Say("Attempting first offsetsForTimes() query (should fail)\n");
+ RdKafka::ErrorCode err = p->offsetsForTimes(query_parts, tmout_multip(10000));
+ Test::Say("offsetsForTimes #1 with non-existing partitions returned " +
+ RdKafka::err2str(err) + "\n");
+ Test::print_TopicPartitions("offsetsForTimes #1", query_parts);
+
+ if (err != RdKafka::ERR__UNKNOWN_PARTITION)
+ Test::Fail(
+ "offsetsForTimes #1 should have failed with UNKNOWN_PARTITION, "
+ "not " +
+ RdKafka::err2str(err));
+
+ Test::Say("Producing to " + topic + "\n");
+ for (int partition = 0; partition < 2; partition++) {
+ for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
+ err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
+ (void *)topic.c_str(), topic.size(), NULL, 0,
+ timestamps[ti], NULL);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("Produce failed: " + RdKafka::err2str(err));
+ }
+ }
+
+ if (p->flush(tmout_multip(5000)) != 0)
+ Test::Fail("Not all messages flushed");
+
+
+ for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
+ RdKafka::TopicPartition::destroy(query_parts);
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
+
+ Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp "
+ << timestamps[ti] << "\n");
+ err = p->offsetsForTimes(query_parts, tmout_multip(5000));
+ Test::print_TopicPartitions("offsetsForTimes", query_parts);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err));
+
+ fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1],
+ RdKafka::ERR_NO_ERROR);
+ fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1],
+ RdKafka::ERR_NO_ERROR);
+ }
+
+ /* repeat test with -1 timeout */
+ for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
+ RdKafka::TopicPartition::destroy(query_parts);
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
+
+ Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp "
+ << timestamps[ti] << " with a timeout of -1\n");
+ err = p->offsetsForTimes(query_parts, -1);
+ Test::print_TopicPartitions("offsetsForTimes", query_parts);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("offsetsForTimes failed: " + RdKafka::err2str(err));
+
+ fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1],
+ RdKafka::ERR_NO_ERROR);
+ fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1],
+ RdKafka::ERR_NO_ERROR);
+ }
+
+ /* And a negative test with a request that should timeout instantly. */
+ for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
+ RdKafka::TopicPartition::destroy(query_parts);
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
+
+ Test::Say(tostr() << "Attempting offsetsForTimes() for timestamp "
+ << timestamps[ti]
+ << " with minimal timeout (should fail)\n");
+ err = p->offsetsForTimes(query_parts, 0);
+ Test::print_TopicPartitions("offsetsForTimes", query_parts);
+ if (err != RdKafka::ERR__TIMED_OUT)
+ Test::Fail(
+ "expected offsetsForTimes(timeout=0) to fail with TIMED_OUT, not " +
+ RdKafka::err2str(err));
+ }
+
+ /* Include non-existent partitions */
+ for (int ti = 0; ti < timestamp_cnt * 2; ti += 2) {
+ RdKafka::TopicPartition::destroy(query_parts);
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 0, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 1, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 2, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 20, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 3, timestamps[ti]));
+ query_parts.push_back(
+ RdKafka::TopicPartition::create(topic, 21, timestamps[ti]));
+ Test::Say("Attempting offsetsForTimes() with non-existent partitions\n");
+ err = p->offsetsForTimes(query_parts, -1);
+ Test::print_TopicPartitions("offsetsForTimes", query_parts);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("expected offsetsForTimes(timeout=0) to succeed, not " +
+ RdKafka::err2str(err));
+ fails += verify_offset(query_parts[0], timestamps[ti], timestamps[ti + 1],
+ RdKafka::ERR_NO_ERROR);
+ fails += verify_offset(query_parts[1], timestamps[ti], timestamps[ti + 1],
+ RdKafka::ERR_NO_ERROR);
+ fails += verify_offset(query_parts[2], timestamps[ti], -1,
+ RdKafka::ERR_NO_ERROR);
+ fails += verify_offset(query_parts[3], timestamps[ti], -1,
+ RdKafka::ERR__UNKNOWN_PARTITION);
+ fails += verify_offset(query_parts[4], timestamps[ti], -1,
+ RdKafka::ERR_NO_ERROR);
+ fails += verify_offset(query_parts[5], timestamps[ti], -1,
+ RdKafka::ERR__UNKNOWN_PARTITION);
+ }
+
+
+ if (fails > 0)
+ Test::Fail(tostr() << "See " << fails << " previous error(s)");
+
+ RdKafka::TopicPartition::destroy(query_parts);
+
+ delete p;
+ delete conf;
+ delete tconf;
+}
+
+extern "C" {
+int main_0054_offset_time(int argc, char **argv) {
+ test_offset_time();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c
new file mode 100644
index 000000000..e0244cec9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0055-producer_latency.c
@@ -0,0 +1,366 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+
+#define _MSG_COUNT 10
+struct latconf {
+ const char *name;
+ const char *conf[16];
+ int min; /* Minimum expected latency */
+ int max; /* Maximum expected latency */
+
+ float rtt; /* Network+broker latency */
+
+
+ char linger_ms_conf[32]; /**< Read back to show actual value */
+
+ /* Result vector */
+ rd_bool_t passed;
+ float latency[_MSG_COUNT];
+ float sum;
+ int cnt;
+ int wakeups;
+};
+
+static int tot_wakeups = 0;
+
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ struct latconf *latconf = opaque;
+ int64_t *ts_send = (int64_t *)rkmessage->_private;
+ float delivery_time;
+
+ if (rkmessage->err)
+ TEST_FAIL("%s: delivery failed: %s\n", latconf->name,
+ rd_kafka_err2str(rkmessage->err));
+
+ if (!rkmessage->_private)
+ return; /* Priming message, ignore. */
+
+ delivery_time = (float)(test_clock() - *ts_send) / 1000.0f;
+
+ free(ts_send);
+
+ TEST_ASSERT(latconf->cnt < _MSG_COUNT, "");
+
+ TEST_SAY("%s: Message %d delivered in %.3fms\n", latconf->name,
+ latconf->cnt, delivery_time);
+
+ latconf->latency[latconf->cnt++] = delivery_time;
+ latconf->sum += delivery_time;
+}
+
+
+/**
+ * @brief A stats callback to get the per-broker wakeup counts.
+ *
+ * The JSON "parsing" here is crude..
+ */
+static int stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
+ const char *t = json;
+ int cnt = 0;
+ int total = 0;
+
+ /* Since we're only producing to one partition there will only be
+ * one broker, the leader, who's wakeup counts we're interested in, but
+ * we also want to know that other broker threads aren't spinning
+ * like crazy. So just summarize all the wakeups from all brokers. */
+ while ((t = strstr(t, "\"wakeups\":"))) {
+ int wakeups;
+ const char *next;
+
+ t += strlen("\"wakeups\":");
+ while (isspace((int)*t))
+ t++;
+ wakeups = strtol(t, (char **)&next, 0);
+
+ TEST_ASSERT(t != next, "No wakeup number found at \"%.*s...\"",
+ 16, t);
+
+ total += wakeups;
+ cnt++;
+
+ t = next;
+ }
+
+ TEST_ASSERT(cnt > 0, "No brokers found in stats");
+
+ tot_wakeups = total;
+
+ return 0;
+}
+
+
+static int verify_latency(struct latconf *latconf) {
+ float avg;
+ int fails = 0;
+ double ext_overhead =
+ latconf->rtt + 5.0 /* broker ProduceRequest handling time, maybe */;
+
+ ext_overhead *= test_timeout_multiplier;
+
+ avg = latconf->sum / (float)latconf->cnt;
+
+ TEST_SAY(
+ "%s: average latency %.3fms, allowed range %d..%d +%.0fms, "
+ "%d wakeups\n",
+ latconf->name, avg, latconf->min, latconf->max, ext_overhead,
+ tot_wakeups);
+
+ if (avg < (float)latconf->min ||
+ avg > (float)latconf->max + ext_overhead) {
+ TEST_FAIL_LATER(
+ "%s: average latency %.3fms is "
+ "outside range %d..%d +%.0fms",
+ latconf->name, avg, latconf->min, latconf->max,
+ ext_overhead);
+ fails++;
+ }
+
+ latconf->wakeups = tot_wakeups;
+ if (latconf->wakeups < 10 || latconf->wakeups > 1000) {
+ TEST_FAIL_LATER(
+ "%s: broker wakeups out of range: %d, "
+ "expected 10..1000",
+ latconf->name, latconf->wakeups);
+ fails++;
+ }
+
+
+ return fails;
+}
+
+static void measure_rtt(struct latconf *latconf, rd_kafka_t *rk) {
+ rd_kafka_resp_err_t err;
+ const struct rd_kafka_metadata *md;
+ int64_t ts = test_clock();
+
+ err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ latconf->rtt = (float)(test_clock() - ts) / 1000.0f;
+
+ TEST_SAY("%s: broker base RTT is %.3fms\n", latconf->name,
+ latconf->rtt);
+ rd_kafka_metadata_destroy(md);
+}
+
+
+
+static void test_producer_latency(const char *topic, struct latconf *latconf) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_resp_err_t err;
+ int i;
+ size_t sz;
+ rd_bool_t with_transactions = rd_false;
+
+ SUB_TEST("%s (linger.ms=%d)", latconf->name);
+
+ test_conf_init(&conf, NULL, 60);
+
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+ rd_kafka_conf_set_opaque(conf, latconf);
+ rd_kafka_conf_set_stats_cb(conf, stats_cb);
+ test_conf_set(conf, "statistics.interval.ms", "100");
+ tot_wakeups = 0;
+
+ for (i = 0; latconf->conf[i]; i += 2) {
+ TEST_SAY("%s: set conf %s = %s\n", latconf->name,
+ latconf->conf[i], latconf->conf[i + 1]);
+ test_conf_set(conf, latconf->conf[i], latconf->conf[i + 1]);
+ if (!strcmp(latconf->conf[i], "transactional.id"))
+ with_transactions = rd_true;
+ }
+
+ sz = sizeof(latconf->linger_ms_conf);
+ rd_kafka_conf_get(conf, "linger.ms", latconf->linger_ms_conf, &sz);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ if (with_transactions) {
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 10 * 1000));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+ }
+
+ TEST_SAY("%s: priming producer\n", latconf->name);
+ /* Send a priming message to make sure everything is up
+ * and functional before starting measurements */
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("priming", 7),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+ if (err)
+ TEST_FAIL("%s: priming producev failed: %s", latconf->name,
+ rd_kafka_err2str(err));
+
+ if (with_transactions) {
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ } else {
+ /* Await delivery */
+ rd_kafka_flush(rk, tmout_multip(5000));
+ }
+
+ /* Get a network+broker round-trip-time base time. */
+ measure_rtt(latconf, rk);
+
+ TEST_SAY("%s: producing %d messages\n", latconf->name, _MSG_COUNT);
+ for (i = 0; i < _MSG_COUNT; i++) {
+ int64_t *ts_send;
+ int pre_cnt = latconf->cnt;
+
+ if (with_transactions)
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ ts_send = malloc(sizeof(*ts_send));
+ *ts_send = test_clock();
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_OPAQUE(ts_send), RD_KAFKA_V_END);
+ if (err)
+ TEST_FAIL("%s: producev #%d failed: %s", latconf->name,
+ i, rd_kafka_err2str(err));
+
+ /* Await delivery */
+ while (latconf->cnt == pre_cnt)
+ rd_kafka_poll(rk, 5000);
+
+ if (with_transactions) {
+ test_timing_t timing;
+ TIMING_START(&timing, "commit_transaction");
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ TIMING_ASSERT_LATER(&timing, 0,
+ (int)(latconf->rtt + 50.0));
+ }
+ }
+
+ while (tot_wakeups == 0)
+ rd_kafka_poll(rk, 100); /* Get final stats_cb */
+
+ rd_kafka_destroy(rk);
+
+ if (verify_latency(latconf))
+ return; /* verify_latency() has already
+ * called TEST_FAIL_LATER() */
+
+
+ latconf->passed = rd_true;
+
+ SUB_TEST_PASS();
+}
+
+
+static float find_min(const struct latconf *latconf) {
+ int i;
+ float v = 1000000;
+
+ for (i = 0; i < latconf->cnt; i++)
+ if (latconf->latency[i] < v)
+ v = latconf->latency[i];
+
+ return v;
+}
+
+static float find_max(const struct latconf *latconf) {
+ int i;
+ float v = 0;
+
+ for (i = 0; i < latconf->cnt; i++)
+ if (latconf->latency[i] > v)
+ v = latconf->latency[i];
+
+ return v;
+}
+
+int main_0055_producer_latency(int argc, char **argv) {
+ const char *topic = test_mk_topic_name("0055_producer_latency", 1);
+ struct latconf latconfs[] = {
+ {"standard settings", {NULL}, 5, 5}, /* default is now 5ms */
+ {"low linger.ms (0ms)", {"linger.ms", "0", NULL}, 0, 0},
+ {"microsecond linger.ms (0.001ms)",
+ {"linger.ms", "0.001", NULL},
+ 0,
+ 1},
+ {"high linger.ms (3000ms)",
+ {"linger.ms", "3000", NULL},
+ 3000,
+ 3100},
+ {"linger.ms < 1000 (500ms)", /* internal block_max_ms */
+ {"linger.ms", "500", NULL},
+ 500,
+ 600},
+ {"no acks (0ms)",
+ {"linger.ms", "0", "acks", "0", "enable.idempotence", "false",
+ NULL},
+ 0,
+ 0},
+ {"idempotence (10ms)",
+ {"linger.ms", "10", "enable.idempotence", "true", NULL},
+ 10,
+ 10},
+ {"transactions (35ms)",
+ {"linger.ms", "35", "transactional.id", topic, NULL},
+ 35,
+ 50 + 35 /* extra time for AddPartitions..*/},
+ {NULL}};
+ struct latconf *latconf;
+
+ if (test_on_ci) {
+ TEST_SKIP("Latency measurements not reliable on CI\n");
+ return 0;
+ }
+
+ /* Create topic without replicas to keep broker-side latency down */
+ test_create_topic(NULL, topic, 1, 1);
+
+ for (latconf = latconfs; latconf->name; latconf++)
+ test_producer_latency(topic, latconf);
+
+ TEST_SAY(_C_YEL "Latency tests summary:\n" _C_CLR);
+ TEST_SAY("%-40s %9s %6s..%-6s %7s %9s %9s %9s %8s\n", "Name",
+ "linger.ms", "MinExp", "MaxExp", "RTT", "Min", "Average",
+ "Max", "Wakeups");
+
+ for (latconf = latconfs; latconf->name; latconf++)
+ TEST_SAY("%-40s %9s %6d..%-6d %7g %9g %9g %9g %8d%s\n",
+ latconf->name, latconf->linger_ms_conf, latconf->min,
+ latconf->max, latconf->rtt, find_min(latconf),
+ latconf->sum / latconf->cnt, find_max(latconf),
+ latconf->wakeups,
+ latconf->passed ? "" : _C_RED " FAILED");
+
+
+ TEST_LATER_CHECK("");
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c
new file mode 100644
index 000000000..e6205ddb6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0056-balanced_group_mt.c
@@ -0,0 +1,311 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+/**
+ * KafkaConsumer balanced group with multithreading tests
+ *
+ * Runs a consumer subscribing to a topic with multiple partitions and farms
+ * consuming of each partition to a separate thread.
+ */
+
+#define MAX_THRD_CNT 4
+
+static int assign_cnt = 0;
+static int consumed_msg_cnt = 0;
+static int consumers_running = 0;
+static int exp_msg_cnt;
+
+static mtx_t lock;
+static thrd_t tids[MAX_THRD_CNT];
+
+typedef struct part_consume_info_s {
+ rd_kafka_queue_t *rkqu;
+ int partition;
+} part_consume_info_t;
+
+static int is_consuming() {
+ int result;
+ mtx_lock(&lock);
+ result = consumers_running;
+ mtx_unlock(&lock);
+ return result;
+}
+
+static int partition_consume(void *args) {
+ part_consume_info_t *info = (part_consume_info_t *)args;
+ rd_kafka_queue_t *rkqu = info->rkqu;
+ int partition = info->partition;
+ int64_t ts_start = test_clock();
+ int max_time = (test_session_timeout_ms + 3000) * 1000;
+ int running = 1;
+
+ free(args); /* Free the parameter struct dynamically allocated for us */
+
+ while (ts_start + max_time > test_clock() && running &&
+ is_consuming()) {
+ rd_kafka_message_t *rkmsg;
+
+ rkmsg = rd_kafka_consume_queue(rkqu, 500);
+
+ if (!rkmsg)
+ continue;
+ else if (rkmsg->err == RD_KAFKA_RESP_ERR__PARTITION_EOF)
+ running = 0;
+ else if (rkmsg->err) {
+ mtx_lock(&lock);
+ TEST_FAIL(
+ "Message error "
+ "(at offset %" PRId64
+ " after "
+ "%d/%d messages and %dms): %s",
+ rkmsg->offset, consumed_msg_cnt, exp_msg_cnt,
+ (int)(test_clock() - ts_start) / 1000,
+ rd_kafka_message_errstr(rkmsg));
+ mtx_unlock(&lock);
+ } else {
+ if (rkmsg->partition != partition) {
+ mtx_lock(&lock);
+ TEST_FAIL(
+ "Message consumed has partition %d "
+ "but we expected partition %d.",
+ rkmsg->partition, partition);
+ mtx_unlock(&lock);
+ }
+ }
+ rd_kafka_message_destroy(rkmsg);
+
+ mtx_lock(&lock);
+ if (running && ++consumed_msg_cnt >= exp_msg_cnt) {
+ TEST_SAY("All messages consumed\n");
+ running = 0;
+ }
+ mtx_unlock(&lock);
+ }
+
+ rd_kafka_queue_destroy(rkqu);
+
+ return thrd_success;
+}
+
+static thrd_t spawn_thread(rd_kafka_queue_t *rkqu, int partition) {
+ thrd_t thr;
+ part_consume_info_t *info = malloc(sizeof(part_consume_info_t));
+
+ info->rkqu = rkqu;
+ info->partition = partition;
+
+ if (thrd_create(&thr, &partition_consume, info) != thrd_success) {
+ TEST_FAIL("Failed to create consumer thread.");
+ }
+ return thr;
+}
+
+static int rebalanced = 0;
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque) {
+ int i;
+ char *memberid = rd_kafka_memberid(rk);
+
+ TEST_SAY("%s: MemberId \"%s\": Consumer group rebalanced: %s\n",
+ rd_kafka_name(rk), memberid, rd_kafka_err2str(err));
+
+ if (memberid)
+ free(memberid);
+
+ test_print_partition_list(partitions);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ assign_cnt++;
+
+ rd_kafka_assign(rk, partitions);
+ mtx_lock(&lock);
+ consumers_running = 1;
+ mtx_unlock(&lock);
+
+ for (i = 0; i < partitions->cnt && i < MAX_THRD_CNT; ++i) {
+ rd_kafka_topic_partition_t part = partitions->elems[i];
+ rd_kafka_queue_t *rkqu;
+ /* This queue is loosed in partition-consume. */
+ rkqu = rd_kafka_queue_get_partition(rk, part.topic,
+ part.partition);
+
+ rd_kafka_queue_forward(rkqu, NULL);
+ tids[part.partition] =
+ spawn_thread(rkqu, part.partition);
+ }
+
+ rebalanced = 1;
+
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ if (assign_cnt == 0)
+ TEST_FAIL("asymetric rebalance_cb");
+ assign_cnt--;
+ rd_kafka_assign(rk, NULL);
+ mtx_lock(&lock);
+ consumers_running = 0;
+ mtx_unlock(&lock);
+
+ break;
+
+ default:
+ TEST_FAIL("rebalance failed: %s", rd_kafka_err2str(err));
+ break;
+ }
+}
+
+static void get_assignment(rd_kafka_t *rk_c) {
+ while (!rebalanced) {
+ rd_kafka_message_t *rkmsg;
+ rkmsg = rd_kafka_consumer_poll(rk_c, 500);
+ if (rkmsg)
+ rd_kafka_message_destroy(rkmsg);
+ }
+}
+
+int main_0056_balanced_group_mt(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ rd_kafka_t *rk_p, *rk_c;
+ rd_kafka_topic_t *rkt_p;
+ int msg_cnt = test_quick ? 100 : 1000;
+ int msg_base = 0;
+ int partition_cnt = 2;
+ int partition;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *default_topic_conf;
+ rd_kafka_topic_partition_list_t *sub, *topics;
+ rd_kafka_resp_err_t err;
+ test_timing_t t_assign, t_close, t_consume;
+ int i;
+
+ exp_msg_cnt = msg_cnt * partition_cnt;
+
+ testid = test_id_generate();
+
+ /* Produce messages */
+ rk_p = test_create_producer();
+ rkt_p = test_create_producer_topic(rk_p, topic, NULL);
+
+ for (partition = 0; partition < partition_cnt; partition++) {
+ test_produce_msgs(rk_p, rkt_p, testid, partition,
+ msg_base + (partition * msg_cnt), msg_cnt,
+ NULL, 0);
+ }
+
+ rd_kafka_topic_destroy(rkt_p);
+ rd_kafka_destroy(rk_p);
+
+ if (mtx_init(&lock, mtx_plain) != thrd_success)
+ TEST_FAIL("Cannot create mutex.");
+
+ test_conf_init(&conf, &default_topic_conf,
+ (test_session_timeout_ms * 3) / 1000);
+
+ test_conf_set(conf, "enable.partition.eof", "true");
+
+ test_topic_conf_set(default_topic_conf, "auto.offset.reset",
+ "smallest");
+
+ /* Fill in topic subscription set */
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);
+
+ /* Create consumers and start subscription */
+ rk_c = test_create_consumer(topic /*group_id*/, rebalance_cb, conf,
+ default_topic_conf);
+
+ test_consumer_subscribe(rk_c, topic);
+
+ rd_kafka_topic_partition_list_destroy(topics);
+
+ /* Wait for both consumers to get an assignment */
+ TIMING_START(&t_assign, "WAIT.ASSIGN");
+ get_assignment(rk_c);
+ TIMING_STOP(&t_assign);
+
+ TIMING_START(&t_consume, "CONSUME.WAIT");
+ for (i = 0; i < MAX_THRD_CNT; ++i) {
+ int res;
+ if (tids[i] != 0)
+ thrd_join(tids[i], &res);
+ }
+ TIMING_STOP(&t_consume);
+
+ TEST_SAY("Closing remaining consumers\n");
+ /* Query subscription */
+ err = rd_kafka_subscription(rk_c, &sub);
+ TEST_ASSERT(!err, "%s: subscription () failed: %s", rd_kafka_name(rk_c),
+ rd_kafka_err2str(err));
+ TEST_SAY("%s: subscription (%d):\n", rd_kafka_name(rk_c), sub->cnt);
+ for (i = 0; i < sub->cnt; ++i)
+ TEST_SAY(" %s\n", sub->elems[i].topic);
+ rd_kafka_topic_partition_list_destroy(sub);
+
+ /* Run an explicit unsubscribe () (async) prior to close ()
+ * to trigger race condition issues on termination. */
+ TEST_SAY("Unsubscribing instance %s\n", rd_kafka_name(rk_c));
+ err = rd_kafka_unsubscribe(rk_c);
+ TEST_ASSERT(!err, "%s: unsubscribe failed: %s", rd_kafka_name(rk_c),
+ rd_kafka_err2str(err));
+
+ TEST_SAY("Closing %s\n", rd_kafka_name(rk_c));
+ TIMING_START(&t_close, "CONSUMER.CLOSE");
+ err = rd_kafka_consumer_close(rk_c);
+ TIMING_STOP(&t_close);
+ TEST_ASSERT(!err, "consumer_close failed: %s", rd_kafka_err2str(err));
+
+ rd_kafka_destroy(rk_c);
+ rk_c = NULL;
+
+ TEST_SAY("%d/%d messages consumed\n", consumed_msg_cnt, exp_msg_cnt);
+ TEST_ASSERT(consumed_msg_cnt >= exp_msg_cnt,
+ "Only %d/%d messages were consumed", consumed_msg_cnt,
+ exp_msg_cnt);
+
+ if (consumed_msg_cnt > exp_msg_cnt)
+ TEST_SAY(
+ "At least %d/%d messages were consumed "
+ "multiple times\n",
+ consumed_msg_cnt - exp_msg_cnt, exp_msg_cnt);
+
+ mtx_destroy(&lock);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp
new file mode 100644
index 000000000..0b50b40ad
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0057-invalid_topic.cpp
@@ -0,0 +1,112 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+/**
+ * Proper handling of invalid topic names, not by local client enforcement
+ * but by proper propagation of broker errors.
+ *
+ * E.g.: produce messages to invalid topic should fail quickly, not by timeout.
+ */
+
+
+
+#define check_err(ERR, EXP) \
+ do { \
+ if ((ERR) != (EXP)) \
+ Test::Fail(tostr() << __FUNCTION__ << ":" << __LINE__ << ": " \
+ << "Expected " << RdKafka::err2str(EXP) << ", got " \
+ << RdKafka::err2str(ERR)); \
+ } while (0)
+
+class DrCb0057 : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &msg) {
+ std::string val((const char *)msg.payload());
+
+ Test::Say(tostr() << "DeliveryReport for " << val << " message on "
+ << msg.topic_name() << " [" << msg.partition()
+ << "]: " << msg.errstr() << "\n");
+
+ if (val == "good")
+ check_err(msg.err(), RdKafka::ERR_NO_ERROR);
+ else if (val == "bad") {
+ if (test_broker_version >= TEST_BRKVER(0, 8, 2, 2))
+ check_err(msg.err(), RdKafka::ERR_TOPIC_EXCEPTION);
+ else
+ check_err(msg.err(), RdKafka::ERR_UNKNOWN);
+ }
+ }
+};
+
+static void test_invalid_topic(void) {
+ std::string topic_bad = Test::mk_topic_name("0057-invalid_topic$#!", 1);
+ std::string topic_good = Test::mk_topic_name("0057-invalid_topic_good", 1);
+ RdKafka::Conf *conf;
+ std::string errstr;
+
+ Test::conf_init(&conf, NULL, 0);
+
+ DrCb0057 MyDr;
+ conf->set("dr_cb", &MyDr, errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+ RdKafka::ErrorCode err;
+
+ for (int i = -1; i < 3; i++) {
+ err = p->produce(topic_bad, i, RdKafka::Producer::RK_MSG_COPY,
+ (void *)"bad", 4, NULL, 0, 0, NULL);
+ if (err) /* Error is probably delayed until delivery report */
+ check_err(err, RdKafka::ERR_TOPIC_EXCEPTION);
+
+ err = p->produce(topic_good, i, RdKafka::Producer::RK_MSG_COPY,
+ (void *)"good", 5, NULL, 0, 0, NULL);
+ check_err(err, RdKafka::ERR_NO_ERROR);
+ }
+
+ p->flush(tmout_multip(10000));
+
+ if (p->outq_len() > 0)
+ Test::Fail(tostr() << "Expected producer to be flushed, " << p->outq_len()
+ << " messages remain");
+
+ delete p;
+ delete conf;
+}
+
+extern "C" {
+int main_0057_invalid_topic(int argc, char **argv) {
+ test_invalid_topic();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp
new file mode 100644
index 000000000..4da46e7f7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0058-log.cpp
@@ -0,0 +1,123 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+
+/**
+ * @brief Test log callbacks and log queues
+ */
+
+class myLogCb : public RdKafka::EventCb {
+ private:
+ enum { _EXP_NONE, _EXP_LOG } state_;
+ int cnt_;
+
+ public:
+ myLogCb() : state_(_EXP_NONE), cnt_(0) {
+ }
+ void expecting(bool b) {
+ state_ = b ? _EXP_LOG : _EXP_NONE;
+ }
+ int count() {
+ return cnt_;
+ }
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_LOG:
+ cnt_++;
+ Test::Say(tostr() << "Log: "
+ << "level " << event.severity() << ", facility "
+ << event.fac() << ", str " << event.str() << "\n");
+ if (state_ != _EXP_LOG)
+ Test::Fail(
+ "Received unexpected "
+ "log message");
+ break;
+ default:
+ break;
+ }
+ }
+};
+
+static void test_log(std::string what, bool main_queue) {
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ myLogCb my_log;
+ std::string errstr;
+
+ Test::conf_set(conf, "client.id", test_curr_name());
+ Test::conf_set(conf, "debug", "generic"); // generate some logs
+ Test::conf_set(conf, "log.queue", "true");
+
+ if (conf->set("event_cb", &my_log, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ Test::Say(what + "Creating producer, not expecting any log messages\n");
+ my_log.expecting(false);
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail(what + "Failed to create Producer: " + errstr);
+ delete conf;
+
+ RdKafka::Queue *queue = NULL;
+ if (!main_queue) {
+ queue = RdKafka::Queue::create(p);
+ queue->poll(1000);
+ } else {
+ p->poll(1000);
+ }
+
+ Test::Say(what + "Setting log queue\n");
+ p->set_log_queue(queue); /* Redirect logs to main queue */
+
+ Test::Say(what + "Expecting at least one log message\n");
+ my_log.expecting(true);
+ if (queue)
+ queue->poll(1000);
+ else
+ p->poll(1000); /* Should not spontaneously call logs */
+
+ Test::Say(tostr() << what << "Saw " << my_log.count() << " logs\n");
+ if (my_log.count() < 1)
+ Test::Fail(what +
+ "No logs seen: expected at least one broker "
+ "failure");
+
+ if (queue)
+ delete queue;
+ delete (p);
+}
+
+extern "C" {
+int main_0058_log(int argc, char **argv) {
+ test_log("main.queue: ", true);
+ test_log("local.queue: ", false);
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp
new file mode 100644
index 000000000..67508ff82
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0059-bsearch.cpp
@@ -0,0 +1,237 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+/**
+ * binary search by timestamp: excercices KafkaConsumer's seek() API.
+ */
+
+
+static std::string topic;
+static const int partition = 0;
+static int64_t golden_timestamp = -1;
+static int64_t golden_offset = -1;
+
+/**
+ * @brief Seek to offset and consume that message.
+ *
+ * Asserts on failure.
+ */
+static RdKafka::Message *get_msg(RdKafka::KafkaConsumer *c,
+ int64_t offset,
+ bool use_seek) {
+ RdKafka::TopicPartition *next =
+ RdKafka::TopicPartition::create(topic, partition, offset);
+ RdKafka::ErrorCode err;
+
+ /* Since seek() can only be used to change the currently consumed
+ * offset we need to start consuming the first time we run this
+ * loop by calling assign() */
+
+ test_timing_t t_seek;
+ TIMING_START(&t_seek, "seek");
+ if (!use_seek) {
+ std::vector<RdKafka::TopicPartition *> parts;
+ parts.push_back(next);
+ err = c->assign(parts);
+ if (err)
+ Test::Fail("assign() failed: " + RdKafka::err2str(err));
+ } else {
+ err = c->seek(*next, tmout_multip(5000));
+ if (err)
+ Test::Fail("seek() failed: " + RdKafka::err2str(err));
+ }
+ TIMING_STOP(&t_seek);
+ delete next;
+
+ test_timing_t t_consume;
+ TIMING_START(&t_consume, "consume");
+
+ RdKafka::Message *msg = c->consume(tmout_multip(5000));
+ if (!msg)
+ Test::Fail("consume() returned NULL");
+ TIMING_STOP(&t_consume);
+
+ if (msg->err())
+ Test::Fail("consume() returned error: " + msg->errstr());
+
+ if (msg->offset() != offset)
+ Test::Fail(tostr() << "seek()ed to offset " << offset
+ << " but consume() returned offset " << msg->offset());
+
+ return msg;
+}
+
+class MyDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &msg) {
+ if (msg.err())
+ Test::Fail("Delivery failed: " + msg.errstr());
+
+ if (!msg.msg_opaque())
+ return;
+
+ RdKafka::MessageTimestamp ts = msg.timestamp();
+ if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
+ Test::Fail(tostr() << "Dr msg timestamp type wrong: " << ts.type);
+
+ golden_timestamp = ts.timestamp;
+ golden_offset = msg.offset();
+ }
+};
+
+static void do_test_bsearch(void) {
+ RdKafka::Conf *conf, *tconf;
+ int msgcnt = 1000;
+ int64_t timestamp;
+ std::string errstr;
+ RdKafka::ErrorCode err;
+ MyDeliveryReportCb my_dr;
+
+ topic = Test::mk_topic_name("0059-bsearch", 1);
+ Test::conf_init(&conf, &tconf, 0);
+ Test::conf_set(tconf, "acks", "all");
+ Test::conf_set(conf, "api.version.request", "true");
+ conf->set("dr_cb", &my_dr, errstr);
+ conf->set("default_topic_conf", tconf, errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+ delete tconf;
+
+ timestamp = 1000;
+ for (int i = 0; i < msgcnt; i++) {
+ err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
+ (void *)topic.c_str(), topic.size(), NULL, 0, timestamp,
+ i == 357 ? (void *)1 /*golden*/ : NULL);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("Produce failed: " + RdKafka::err2str(err));
+ timestamp += 100 + (timestamp % 9);
+ }
+
+ if (p->flush(tmout_multip(5000)) != 0)
+ Test::Fail("Not all messages flushed");
+
+ Test::Say(tostr() << "Produced " << msgcnt << " messages, "
+ << "golden message with timestamp " << golden_timestamp
+ << " at offset " << golden_offset << "\n");
+
+ delete p;
+
+ /*
+ * Now find the golden message using bsearch
+ */
+
+ /* Create consumer */
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "group.id", topic);
+ Test::conf_set(conf, "api.version.request", "true");
+ Test::conf_set(conf, "fetch.wait.max.ms", "1");
+ Test::conf_set(conf, "fetch.error.backoff.ms", "1");
+ Test::conf_set(conf, "queued.min.messages", "1");
+ Test::conf_set(conf, "enable.auto.commit", "false");
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ Test::Say("Find initial middle offset\n");
+ int64_t low, high;
+ test_timing_t t_qr;
+ TIMING_START(&t_qr, "query_watermark_offsets");
+ err = c->query_watermark_offsets(topic, partition, &low, &high,
+ tmout_multip(5000));
+ TIMING_STOP(&t_qr);
+ if (err)
+ Test::Fail("query_watermark_offsets failed: " + RdKafka::err2str(err));
+
+ /* Divide and conquer */
+ test_timing_t t_bsearch;
+ TIMING_START(&t_bsearch, "actual bsearch");
+ int itcnt = 0;
+ do {
+ int64_t mid;
+
+ mid = low + ((high - low) / 2);
+
+ Test::Say(1, tostr() << "Get message at mid point of " << low << ".."
+ << high << " -> " << mid << "\n");
+
+ RdKafka::Message *msg = get_msg(c, mid,
+ /* use assign() on first iteration,
+ * then seek() */
+ itcnt > 0);
+
+ RdKafka::MessageTimestamp ts = msg->timestamp();
+ if (ts.type != RdKafka::MessageTimestamp::MSG_TIMESTAMP_CREATE_TIME)
+ Test::Fail(tostr() << "Expected CreateTime timestamp, not " << ts.type
+ << " at offset " << msg->offset());
+
+ Test::Say(1, tostr() << "Message at offset " << msg->offset()
+ << " with timestamp " << ts.timestamp << "\n");
+
+ if (ts.timestamp == golden_timestamp) {
+ Test::Say(1, tostr() << "Found golden timestamp " << ts.timestamp
+ << " at offset " << msg->offset() << " in "
+ << itcnt + 1 << " iterations\n");
+ delete msg;
+ break;
+ }
+
+ if (low == high) {
+ Test::Fail(tostr() << "Search exhausted at offset " << msg->offset()
+ << " with timestamp " << ts.timestamp
+ << " without finding golden timestamp "
+ << golden_timestamp << " at offset " << golden_offset);
+
+ } else if (ts.timestamp < golden_timestamp)
+ low = msg->offset() + 1;
+ else if (ts.timestamp > golden_timestamp)
+ high = msg->offset() - 1;
+
+ delete msg;
+ itcnt++;
+ } while (true);
+ TIMING_STOP(&t_bsearch);
+
+ c->close();
+
+ delete c;
+}
+
+extern "C" {
+int main_0059_bsearch(int argc, char **argv) {
+ do_test_bsearch();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp
new file mode 100644
index 000000000..156b8a57a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0060-op_prio.cpp
@@ -0,0 +1,163 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+/**
+ * Verify prioritization of non-message ops.
+ * MO:
+ *
+ * - Seed topic with 1000 messages
+ * - Start consumer with auto offset commit disabled,
+ * but with commit and stats callbacks registered,
+ * - Consume one message
+ * - Commit that message manually
+ * - Consume one message per second
+ * - The commit callback should be fired within reasonable time, long before
+ * - The stats callback should behave the same.
+ * all messages are consumed.
+ */
+
+
+
+class MyCbs : public RdKafka::OffsetCommitCb, public RdKafka::EventCb {
+ public:
+ int seen_commit;
+ int seen_stats;
+
+ void offset_commit_cb(RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &offsets) {
+ if (err)
+ Test::Fail("Offset commit failed: " + RdKafka::err2str(err));
+
+ seen_commit++;
+ Test::Say("Got commit callback!\n");
+ }
+
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_STATS:
+ Test::Say("Got stats callback!\n");
+ seen_stats++;
+ break;
+ default:
+ break;
+ }
+ }
+};
+
+
+
+static void do_test_commit_cb(void) {
+ const int msgcnt = test_quick ? 100 : 1000;
+ std::string errstr;
+ RdKafka::ErrorCode err;
+ std::string topic = Test::mk_topic_name("0060-op_prio", 1);
+
+ test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt);
+
+ /*
+ * Create consumer
+ */
+
+ /* Create consumer */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "group.id", topic);
+ Test::conf_set(conf, "socket.timeout.ms", "10000");
+ Test::conf_set(conf, "enable.auto.commit", "false");
+ Test::conf_set(conf, "enable.partition.eof", "false");
+ Test::conf_set(conf, "auto.offset.reset", "earliest");
+ Test::conf_set(conf, "statistics.interval.ms", "1000");
+
+ MyCbs cbs;
+ cbs.seen_commit = 0;
+ cbs.seen_stats = 0;
+ if (conf->set("offset_commit_cb", (RdKafka::OffsetCommitCb *)&cbs, errstr) !=
+ RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to set commit callback: " + errstr);
+ if (conf->set("event_cb", (RdKafka::EventCb *)&cbs, errstr) !=
+ RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to set event callback: " + errstr);
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /* Subscribe */
+ std::vector<std::string> topics;
+ topics.push_back(topic);
+ if ((err = c->subscribe(topics)))
+ Test::Fail("subscribe failed: " + RdKafka::err2str(err));
+
+ /* Wait for messages and commit callback. */
+ Test::Say("Consuming topic " + topic + "\n");
+ int cnt = 0;
+ while (!cbs.seen_commit || !cbs.seen_stats) {
+ RdKafka::Message *msg = c->consume(tmout_multip(1000));
+ if (!msg->err()) {
+ cnt++;
+ Test::Say(tostr() << "Received message #" << cnt << "\n");
+ if (cnt > 10)
+ Test::Fail(tostr() << "Should've seen the "
+ "offset commit ("
+ << cbs.seen_commit
+ << ") and "
+ "stats callbacks ("
+ << cbs.seen_stats << ") by now");
+
+ /* Commit the first message to trigger the offset commit_cb */
+ if (cnt == 1) {
+ err = c->commitAsync(msg);
+ if (err)
+ Test::Fail("commitAsync() failed: " + RdKafka::err2str(err));
+ rd_sleep(1); /* Sleep to simulate slow processing, making sure
+ * that the offset commit callback op gets
+ * inserted on the consume queue in front of
+ * the messages. */
+ }
+
+ } else if (msg->err() == RdKafka::ERR__TIMED_OUT)
+ ; /* Stil rebalancing? */
+ else
+ Test::Fail("consume() failed: " + msg->errstr());
+ delete msg;
+ }
+
+ c->close();
+ delete c;
+}
+
+extern "C" {
+int main_0060_op_prio(int argc, char **argv) {
+ do_test_commit_cb();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp
new file mode 100644
index 000000000..759541583
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0061-consumer_lag.cpp
@@ -0,0 +1,275 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+/**
+ * Verify consumer_lag
+ */
+
+static std::string topic;
+
+class StatsCb : public RdKafka::EventCb {
+ public:
+ int64_t calc_lag; // calculated lag
+ int lag_valid; // number of times lag has been valid
+
+ StatsCb() {
+ calc_lag = -1;
+ lag_valid = 0;
+ }
+
+ /**
+ * @brief Event callback
+ */
+ void event_cb(RdKafka::Event &event) {
+ if (event.type() == RdKafka::Event::EVENT_LOG) {
+ Test::Say(tostr() << "LOG-" << event.severity() << "-" << event.fac()
+ << ": " << event.str() << "\n");
+ return;
+ } else if (event.type() != RdKafka::Event::EVENT_STATS) {
+ Test::Say(tostr() << "Dropping event " << event.type() << "\n");
+ return;
+ }
+
+ int64_t consumer_lag = parse_json(event.str().c_str());
+
+ Test::Say(3, tostr() << "Stats: consumer_lag is " << consumer_lag << "\n");
+ if (consumer_lag == -1) {
+ Test::Say(2, "Skipping old stats with invalid consumer_lag\n");
+ return; /* Old stats generated before first message consumed */
+ } else if (consumer_lag != calc_lag)
+ Test::Fail(tostr() << "Stats consumer_lag " << consumer_lag
+ << ", expected " << calc_lag << "\n");
+ else
+ lag_valid++;
+ }
+
+
+ /**
+ * @brief Naiive JSON parsing, find the consumer_lag for partition 0
+ * and return it.
+ */
+ static int64_t parse_json(const char *json_doc) {
+ const std::string match_topic(std::string("\"") + topic + "\":");
+ const char *search[] = {
+ "\"topics\":", match_topic.c_str(), "\"partitions\":",
+ "\"0\":", "\"consumer_lag_stored\":", NULL};
+ const char *remain = json_doc;
+
+ for (const char **sp = search; *sp; sp++) {
+ const char *t = strstr(remain, *sp);
+ if (!t)
+ Test::Fail(tostr() << "Couldnt find " << *sp
+ << " in remaining stats output:\n"
+ << remain << "\n====================\n"
+ << json_doc << "\n");
+ remain = t + strlen(*sp);
+ }
+
+ while (*remain == ' ')
+ remain++;
+
+ if (!*remain)
+ Test::Fail("Nothing following consumer_lag");
+
+ int64_t lag = strtoull(remain, NULL, 0);
+ if (lag == -1) {
+ Test::Say(tostr() << "Consumer lag " << lag << " is invalid, stats:\n");
+ Test::Say(3, tostr() << json_doc << "\n");
+ }
+ return lag;
+ }
+};
+
+
+/**
+ * @brief Produce \p msgcnt in a transaction that is aborted.
+ */
+static void produce_aborted_txns(const std::string &topic,
+ int32_t partition,
+ int msgcnt) {
+ RdKafka::Producer *p;
+ RdKafka::Conf *conf;
+ RdKafka::Error *error;
+
+ Test::Say(tostr() << "Producing " << msgcnt << " transactional messages "
+ << "which will be aborted\n");
+ Test::conf_init(&conf, NULL, 0);
+
+ Test::conf_set(conf, "transactional.id", "txn_id_" + topic);
+
+ std::string errstr;
+ p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ error = p->init_transactions(-1);
+ if (error)
+ Test::Fail("init_transactions() failed: " + error->str());
+
+ error = p->begin_transaction();
+ if (error)
+ Test::Fail("begin_transaction() failed: " + error->str());
+
+ for (int i = 0; i < msgcnt; i++) {
+ RdKafka::ErrorCode err;
+
+ err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY, &i,
+ sizeof(i), NULL, 0, 0, NULL);
+ if (err)
+ Test::Fail("produce() failed: " + RdKafka::err2str(err));
+ }
+
+ /* Flush is typically not needed for transactions since
+ * commit_transaction() will do it automatically, but in the case of
+ * abort_transaction() nothing might have been sent to the broker yet,
+ * so call flush() here so we know the messages are sent and the
+ * partitions are added to the transaction, so that a control(abort)
+ * message is written to the partition. */
+ p->flush(-1);
+
+ error = p->abort_transaction(-1);
+ if (error)
+ Test::Fail("abort_transaction() failed: " + error->str());
+
+ delete p;
+}
+
+
+static void do_test_consumer_lag(bool with_txns) {
+ int msgcnt = test_quick ? 5 : 10;
+ int txn_msgcnt = 3;
+ int addcnt = 0;
+ std::string errstr;
+ RdKafka::ErrorCode err;
+
+ SUB_TEST("Test consumer lag %s transactions", with_txns ? "with" : "without");
+
+ topic = Test::mk_topic_name("0061-consumer_lag", 1);
+
+ test_produce_msgs_easy(topic.c_str(), 0, 0, msgcnt);
+
+ if (with_txns) {
+ /* After the standard messages have been produced,
+ * produce some transactional messages that are aborted to advance
+ * the end offset with control messages. */
+ produce_aborted_txns(topic, 0, txn_msgcnt);
+ addcnt = txn_msgcnt + 1 /* ctrl msg */;
+ }
+
+ /*
+ * Create consumer
+ */
+
+ /* Create consumer */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 40);
+ StatsCb stats;
+ if (conf->set("event_cb", &stats, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail("set event_cb failed: " + errstr);
+ Test::conf_set(conf, "group.id", topic);
+ Test::conf_set(conf, "enable.auto.commit", "false");
+ Test::conf_set(conf, "auto.offset.reset", "earliest");
+ Test::conf_set(conf, "statistics.interval.ms", "100");
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /* Assign partitions */
+ std::vector<RdKafka::TopicPartition *> parts;
+ parts.push_back(RdKafka::TopicPartition::create(topic, 0));
+ if ((err = c->assign(parts)))
+ Test::Fail("assign failed: " + RdKafka::err2str(err));
+ RdKafka::TopicPartition::destroy(parts);
+
+ /* Start consuming */
+ Test::Say("Consuming topic " + topic + "\n");
+ int cnt = 0;
+ while (cnt < msgcnt + addcnt) {
+ RdKafka::Message *msg = c->consume(1000);
+
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ if (with_txns && cnt >= msgcnt && stats.calc_lag == 0)
+ addcnt = 0; /* done */
+ break;
+ case RdKafka::ERR__PARTITION_EOF:
+ Test::Fail(tostr() << "Unexpected PARTITION_EOF (not enbaled) after "
+ << cnt << "/" << msgcnt
+ << " messages: " << msg->errstr());
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ /* Proper message. Update calculated lag for later
+ * checking in stats callback */
+ if (msg->offset() + 1 >= msgcnt && with_txns)
+ stats.calc_lag = 0;
+ else
+ stats.calc_lag = (msgcnt + addcnt) - (msg->offset() + 1);
+ cnt++;
+ Test::Say(2, tostr() << "Received message #" << cnt << "/" << msgcnt
+ << " at offset " << msg->offset() << " (calc lag "
+ << stats.calc_lag << ")\n");
+ /* Slow down message "processing" to make sure we get
+ * at least one stats callback per message. */
+ if (cnt < msgcnt)
+ rd_sleep(1);
+ break;
+
+ default:
+ Test::Fail("Consume error: " + msg->errstr());
+ break;
+ }
+
+ delete msg;
+ }
+ Test::Say(tostr() << "Done, lag was valid " << stats.lag_valid << " times\n");
+ if (stats.lag_valid == 0)
+ Test::Fail("No valid consumer_lag in statistics seen");
+
+ c->close();
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+extern "C" {
+int main_0061_consumer_lag(int argc, char **argv) {
+ do_test_consumer_lag(false /*no txns*/);
+ if (test_broker_version >= TEST_BRKVER(0, 11, 0, 0))
+ do_test_consumer_lag(true /*txns*/);
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c
new file mode 100644
index 000000000..bdddda5e0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0062-stats_event.c
@@ -0,0 +1,126 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2017, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests messages are produced in order.
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int stats_count = 0;
+
+/**
+ * Handle stats
+ */
+static void handle_stats(rd_kafka_event_t *rkev) {
+ const char *stats_json = NULL;
+ stats_json = rd_kafka_event_stats(rkev);
+ if (stats_json != NULL) {
+ TEST_SAY("Stats: %s\n", stats_json);
+ stats_count++;
+ } else {
+ TEST_FAIL("Stats: failed to get stats\n");
+ }
+}
+
+int main_0062_stats_event(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ test_timing_t t_delivery;
+ rd_kafka_queue_t *eventq;
+ const int iterations = 5;
+ int i;
+ test_conf_init(NULL, NULL, 10);
+
+ /* Set up a global config object */
+ conf = rd_kafka_conf_new();
+ rd_kafka_conf_set(conf, "statistics.interval.ms", "100", NULL, 0);
+
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_STATS);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ eventq = rd_kafka_queue_get_main(rk);
+
+ /* Wait for stats event */
+ for (i = 0; i < iterations; i++) {
+ TIMING_START(&t_delivery, "STATS_EVENT");
+ stats_count = 0;
+ while (stats_count == 0) {
+ rd_kafka_event_t *rkev;
+ rkev = rd_kafka_queue_poll(eventq, 100);
+ switch (rd_kafka_event_type(rkev)) {
+ case RD_KAFKA_EVENT_STATS:
+ TEST_SAY("%s event\n",
+ rd_kafka_event_name(rkev));
+ handle_stats(rkev);
+ break;
+ case RD_KAFKA_EVENT_NONE:
+ break;
+ default:
+ TEST_SAY("Ignore event: %s\n",
+ rd_kafka_event_name(rkev));
+ break;
+ }
+ rd_kafka_event_destroy(rkev);
+ }
+ TIMING_STOP(&t_delivery);
+
+ if (TIMING_DURATION(&t_delivery) < 1000 * 100 * 0.5 ||
+ TIMING_DURATION(&t_delivery) > 1000 * 100 * 1.5) {
+ /* CIs and valgrind are too flaky/slow to
+ * make this failure meaningful. */
+ if (!test_on_ci && !strcmp(test_mode, "bare")) {
+ TEST_FAIL(
+ "Stats duration %.3fms is >= 50%% "
+ "outside statistics.interval.ms 100",
+ (float)TIMING_DURATION(&t_delivery) /
+ 1000.0f);
+ } else {
+ TEST_WARN(
+ "Stats duration %.3fms is >= 50%% "
+ "outside statistics.interval.ms 100\n",
+ (float)TIMING_DURATION(&t_delivery) /
+ 1000.0f);
+ }
+ }
+ }
+
+ rd_kafka_queue_destroy(eventq);
+
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp
new file mode 100644
index 000000000..dda8d6ddb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0063-clusterid.cpp
@@ -0,0 +1,180 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+/**
+ * Test Handle::clusterid() and Handle::controllerid()
+ */
+
+static void do_test_clusterid(void) {
+ Test::Say("[ do_test_clusterid ]\n");
+
+ /*
+ * Create client with appropriate protocol support for
+ * retrieving clusterid
+ */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "api.version.request", "true");
+ std::string errstr;
+ RdKafka::Producer *p_good = RdKafka::Producer::create(conf, errstr);
+ if (!p_good)
+ Test::Fail("Failed to create client: " + errstr);
+ delete conf;
+
+ /*
+ * Create client with lacking protocol support.
+ */
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "api.version.request", "false");
+ Test::conf_set(conf, "broker.version.fallback", "0.9.0");
+ RdKafka::Producer *p_bad = RdKafka::Producer::create(conf, errstr);
+ if (!p_bad)
+ Test::Fail("Failed to create client: " + errstr);
+ delete conf;
+
+
+ std::string clusterid;
+
+ /*
+ * good producer, give the first call a timeout to allow time
+ * for background metadata requests to finish.
+ */
+ std::string clusterid_good_1 = p_good->clusterid(tmout_multip(2000));
+ if (clusterid_good_1.empty())
+ Test::Fail("good producer(w timeout): ClusterId is empty");
+ Test::Say("good producer(w timeout): ClusterId " + clusterid_good_1 + "\n");
+
+ /* Then retrieve a cached copy. */
+ std::string clusterid_good_2 = p_good->clusterid(0);
+ if (clusterid_good_2.empty())
+ Test::Fail("good producer(0): ClusterId is empty");
+ Test::Say("good producer(0): ClusterId " + clusterid_good_2 + "\n");
+
+ if (clusterid_good_1 != clusterid_good_2)
+ Test::Fail("Good ClusterId mismatch: " + clusterid_good_1 +
+ " != " + clusterid_good_2);
+
+ /*
+ * Try bad producer, should return empty string.
+ */
+ std::string clusterid_bad_1 = p_bad->clusterid(tmout_multip(2000));
+ if (!clusterid_bad_1.empty())
+ Test::Fail("bad producer(w timeout): ClusterId should be empty, not " +
+ clusterid_bad_1);
+ std::string clusterid_bad_2 = p_bad->clusterid(0);
+ if (!clusterid_bad_2.empty())
+ Test::Fail("bad producer(0): ClusterId should be empty, not " +
+ clusterid_bad_2);
+
+ delete p_good;
+ delete p_bad;
+}
+
+
+/**
+ * @brief controllerid() testing.
+ * This instantiates its own client to avoid having the value cached
+ * from do_test_clusterid(), but they are basically the same tests.
+ */
+static void do_test_controllerid(void) {
+ Test::Say("[ do_test_controllerid ]\n");
+
+ /*
+ * Create client with appropriate protocol support for
+ * retrieving controllerid
+ */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "api.version.request", "true");
+ std::string errstr;
+ RdKafka::Producer *p_good = RdKafka::Producer::create(conf, errstr);
+ if (!p_good)
+ Test::Fail("Failed to create client: " + errstr);
+ delete conf;
+
+ /*
+ * Create client with lacking protocol support.
+ */
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "api.version.request", "false");
+ Test::conf_set(conf, "broker.version.fallback", "0.9.0");
+ RdKafka::Producer *p_bad = RdKafka::Producer::create(conf, errstr);
+ if (!p_bad)
+ Test::Fail("Failed to create client: " + errstr);
+ delete conf;
+
+ /*
+ * good producer, give the first call a timeout to allow time
+ * for background metadata requests to finish.
+ */
+ int32_t controllerid_good_1 = p_good->controllerid(tmout_multip(2000));
+ if (controllerid_good_1 == -1)
+ Test::Fail("good producer(w timeout): Controllerid is -1");
+ Test::Say(tostr() << "good producer(w timeout): Controllerid "
+ << controllerid_good_1 << "\n");
+
+ /* Then retrieve a cached copy. */
+ int32_t controllerid_good_2 = p_good->controllerid(0);
+ if (controllerid_good_2 == -1)
+ Test::Fail("good producer(0): Controllerid is -1");
+ Test::Say(tostr() << "good producer(0): Controllerid " << controllerid_good_2
+ << "\n");
+
+ if (controllerid_good_1 != controllerid_good_2)
+ Test::Fail(tostr() << "Good Controllerid mismatch: " << controllerid_good_1
+ << " != " << controllerid_good_2);
+
+ /*
+ * Try bad producer, should return -1
+ */
+ int32_t controllerid_bad_1 = p_bad->controllerid(tmout_multip(2000));
+ if (controllerid_bad_1 != -1)
+ Test::Fail(
+ tostr() << "bad producer(w timeout): Controllerid should be -1, not "
+ << controllerid_bad_1);
+ int32_t controllerid_bad_2 = p_bad->controllerid(0);
+ if (controllerid_bad_2 != -1)
+ Test::Fail(tostr() << "bad producer(0): Controllerid should be -1, not "
+ << controllerid_bad_2);
+
+ delete p_good;
+ delete p_bad;
+}
+
+extern "C" {
+int main_0063_clusterid(int argc, char **argv) {
+ do_test_clusterid();
+ do_test_controllerid();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c
new file mode 100644
index 000000000..e5c5b047a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0064-interceptors.c
@@ -0,0 +1,481 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2017, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+#include <ctype.h>
+
+/**
+ * Verify interceptor functionality.
+ *
+ * Producer MO:
+ * - create a chain of N interceptors
+ * - allocate a state struct with unique id for each message produced,
+ * provide as msg_opaque and reference from payload.
+ * - in on_send: verify expected interceptor order by counting number
+ * of consecutive bits.
+ * - in on_acknowledge: same
+ * - produce message to invalid topic which should trigger on_send+on_ack..
+ * from within produce().
+ *
+ * Consumer MO:
+ * - create a chain of M interceptors
+ * - subscribe to the previously produced topic
+ * - in on_consume: find message by id, verify expected order by bit counting.
+ * - on on_commit: just count order per on_commit chain run.
+ */
+
+
+#define msgcnt 100
+static const int producer_ic_cnt = 5;
+static const int consumer_ic_cnt = 10;
+
+/* The base values help differentiating opaque values between interceptors */
+static const int on_send_base = 1 << 24;
+static const int on_ack_base = 1 << 25;
+static const int on_consume_base = 1 << 26;
+static const int on_commit_base = 1 << 27;
+static const int base_mask = 0xff << 24;
+
+#define _ON_SEND 0
+#define _ON_ACK 1
+#define _ON_CONSUME 2
+#define _ON_CNT 3
+struct msg_state {
+ int id;
+ int bits[_ON_CNT]; /* Bit field, one bit per interceptor */
+ mtx_t lock;
+};
+
+/* Per-message state */
+static struct msg_state msgs[msgcnt];
+
+/* on_commit bits */
+static int on_commit_bits = 0;
+
+/**
+ * @brief Verify that \p bits matches the number of expected interceptor
+ * call cnt.
+ *
+ * Verify interceptor order: the lower bits of ic_id
+ * denotes the order in which interceptors were added and it
+ * must be reflected here, meaning that all lower bits must be set,
+ * and no higher ones.
+ */
+static void msg_verify_ic_cnt(const struct msg_state *msg,
+ const char *what,
+ int bits,
+ int exp_cnt) {
+ int exp_bits = exp_cnt ? (1 << exp_cnt) - 1 : 0;
+
+ TEST_ASSERT(bits == exp_bits,
+ "msg #%d: %s: expected bits 0x%x (%d), got 0x%x", msg->id,
+ what, exp_bits, exp_cnt, bits);
+}
+
+/*
+ * @brief Same as msg_verify_ic_cnt() without the msg reliance
+ */
+static void verify_ic_cnt(const char *what, int bits, int exp_cnt) {
+ int exp_bits = exp_cnt ? (1 << exp_cnt) - 1 : 0;
+
+ TEST_ASSERT(bits == exp_bits, "%s: expected bits 0x%x (%d), got 0x%x",
+ what, exp_bits, exp_cnt, bits);
+}
+
+
+
+static void verify_msg(const char *what,
+ int base,
+ int bitid,
+ rd_kafka_message_t *rkmessage,
+ void *ic_opaque) {
+ const char *id_str = rkmessage->key;
+ struct msg_state *msg;
+ int id;
+ int ic_id = (int)(intptr_t)ic_opaque;
+
+ /* Verify opaque (base | ic id) */
+ TEST_ASSERT((ic_id & base_mask) == base);
+ ic_id &= ~base_mask;
+
+ /* Find message by id */
+ TEST_ASSERT(rkmessage->key && rkmessage->key_len > 0 &&
+ id_str[(int)rkmessage->key_len - 1] == '\0' &&
+ strlen(id_str) > 0 && isdigit(*id_str));
+ id = atoi(id_str);
+ TEST_ASSERT(id >= 0 && id < msgcnt, "%s: bad message id %s", what,
+ id_str);
+ msg = &msgs[id];
+
+ mtx_lock(&msg->lock);
+
+ TEST_ASSERT(msg->id == id, "expected msg #%d has wrong id %d", id,
+ msg->id);
+
+ /* Verify message opaque */
+ if (!strcmp(what, "on_send") || !strncmp(what, "on_ack", 6))
+ TEST_ASSERT(rkmessage->_private == (void *)msg);
+
+ TEST_SAYL(3, "%s: interceptor #%d called for message #%d (%d)\n", what,
+ ic_id, id, msg->id);
+
+ msg_verify_ic_cnt(msg, what, msg->bits[bitid], ic_id);
+
+ /* Set this interceptor's bit */
+ msg->bits[bitid] |= 1 << ic_id;
+
+ mtx_unlock(&msg->lock);
+}
+
+
+static rd_kafka_resp_err_t
+on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ TEST_ASSERT(ic_opaque != NULL);
+ verify_msg("on_send", on_send_base, _ON_SEND, rkmessage, ic_opaque);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+static rd_kafka_resp_err_t
+on_ack(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ TEST_ASSERT(ic_opaque != NULL);
+ verify_msg("on_ack", on_ack_base, _ON_ACK, rkmessage, ic_opaque);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+static rd_kafka_resp_err_t
+on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ TEST_ASSERT(ic_opaque != NULL);
+ verify_msg("on_consume", on_consume_base, _ON_CONSUME, rkmessage,
+ ic_opaque);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static rd_kafka_resp_err_t
+on_commit(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque) {
+ int ic_id = (int)(intptr_t)ic_opaque;
+
+ /* Since on_commit is triggered a bit randomly and not per
+ * message we only try to make sure it gets fully set at least once. */
+ TEST_ASSERT(ic_opaque != NULL);
+
+ /* Verify opaque (base | ic id) */
+ TEST_ASSERT((ic_id & base_mask) == on_commit_base);
+ ic_id &= ~base_mask;
+
+ TEST_ASSERT(ic_opaque != NULL);
+
+ TEST_SAYL(3, "on_commit: interceptor #%d called: %s\n", ic_id,
+ rd_kafka_err2str(err));
+ if (test_level >= 4)
+ test_print_partition_list(offsets);
+
+ /* Check for rollover where a previous on_commit stint was
+ * succesful and it just now started over */
+ if (on_commit_bits > 0 && ic_id == 0) {
+ /* Verify completeness of previous stint */
+ verify_ic_cnt("on_commit", on_commit_bits, consumer_ic_cnt);
+ /* Reset */
+ on_commit_bits = 0;
+ }
+
+ verify_ic_cnt("on_commit", on_commit_bits, ic_id);
+
+ /* Set this interceptor's bit */
+ on_commit_bits |= 1 << ic_id;
+
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static void do_test_produce(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int msgid,
+ int exp_fail,
+ int exp_ic_cnt) {
+ rd_kafka_resp_err_t err;
+ char key[16];
+ struct msg_state *msg = &msgs[msgid];
+ int i;
+
+ /* Message state should be empty, no interceptors should have
+ * been called yet.. */
+ for (i = 0; i < _ON_CNT; i++)
+ TEST_ASSERT(msg->bits[i] == 0);
+
+ mtx_init(&msg->lock, mtx_plain);
+ msg->id = msgid;
+ rd_snprintf(key, sizeof(key), "%d", msgid);
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_KEY(key, strlen(key) + 1),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_OPAQUE(msg), RD_KAFKA_V_END);
+
+ mtx_lock(&msg->lock);
+ msg_verify_ic_cnt(msg, "on_send", msg->bits[_ON_SEND], exp_ic_cnt);
+
+ if (err) {
+ msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK],
+ exp_ic_cnt);
+ TEST_ASSERT(exp_fail, "producev() failed: %s",
+ rd_kafka_err2str(err));
+ } else {
+ msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0);
+ TEST_ASSERT(!exp_fail,
+ "expected produce failure for msg #%d, not %s",
+ msgid, rd_kafka_err2str(err));
+ }
+ mtx_unlock(&msg->lock);
+}
+
+
+
+static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ int i;
+
+ for (i = 0; i < producer_ic_cnt; i++) {
+ rd_kafka_resp_err_t err;
+
+ err = rd_kafka_interceptor_add_on_send(
+ rk, tsprintf("on_send:%d", i), on_send,
+ (void *)(intptr_t)(on_send_base | i));
+ TEST_ASSERT(!err, "add_on_send failed: %s",
+ rd_kafka_err2str(err));
+
+ err = rd_kafka_interceptor_add_on_acknowledgement(
+ rk, tsprintf("on_acknowledgement:%d", i), on_ack,
+ (void *)(intptr_t)(on_ack_base | i));
+ TEST_ASSERT(!err, "add_on_ack.. failed: %s",
+ rd_kafka_err2str(err));
+
+
+ /* Add consumer interceptors as well to make sure
+ * they are not called. */
+ err = rd_kafka_interceptor_add_on_consume(
+ rk, tsprintf("on_consume:%d", i), on_consume, NULL);
+ TEST_ASSERT(!err, "add_on_consume failed: %s",
+ rd_kafka_err2str(err));
+
+
+ err = rd_kafka_interceptor_add_on_commit(
+ rk, tsprintf("on_commit:%d", i), on_commit, NULL);
+ TEST_ASSERT(!err, "add_on_commit failed: %s",
+ rd_kafka_err2str(err));
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+static void do_test_producer(const char *topic) {
+ rd_kafka_conf_t *conf;
+ int i;
+ rd_kafka_t *rk;
+
+ TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);
+
+ test_conf_init(&conf, NULL, 0);
+
+ rd_kafka_conf_interceptor_add_on_new(conf, "on_new_prodcer",
+ on_new_producer, NULL);
+
+ /* Create producer */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ for (i = 0; i < msgcnt - 1; i++)
+ do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0,
+ producer_ic_cnt);
+
+ /* Wait for messages to be delivered */
+ test_flush(rk, -1);
+
+ /* Now send a message that will fail in produce()
+ * due to bad partition */
+ do_test_produce(rk, topic, 1234, i, 1, producer_ic_cnt);
+
+
+ /* Verify acks */
+ for (i = 0; i < msgcnt; i++) {
+ struct msg_state *msg = &msgs[i];
+ mtx_lock(&msg->lock);
+ msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK],
+ producer_ic_cnt);
+ mtx_unlock(&msg->lock);
+ }
+
+ rd_kafka_destroy(rk);
+}
+
+
+static rd_kafka_resp_err_t on_new_consumer(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ int i;
+
+ for (i = 0; i < consumer_ic_cnt; i++) {
+ rd_kafka_interceptor_add_on_consume(
+ rk, tsprintf("on_consume:%d", i), on_consume,
+ (void *)(intptr_t)(on_consume_base | i));
+
+ rd_kafka_interceptor_add_on_commit(
+ rk, tsprintf("on_commit:%d", i), on_commit,
+ (void *)(intptr_t)(on_commit_base | i));
+
+ /* Add producer interceptors as well to make sure they
+ * are not called. */
+ rd_kafka_interceptor_add_on_send(rk, tsprintf("on_send:%d", i),
+ on_send, NULL);
+
+ rd_kafka_interceptor_add_on_acknowledgement(
+ rk, tsprintf("on_acknowledgement:%d", i), on_ack, NULL);
+ }
+
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static void do_test_consumer(const char *topic) {
+
+ rd_kafka_conf_t *conf;
+ int i;
+ rd_kafka_t *rk;
+
+ TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);
+
+ test_conf_init(&conf, NULL, 0);
+
+ rd_kafka_conf_interceptor_add_on_new(conf, "on_new_consumer",
+ on_new_consumer, NULL);
+
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ /* Create producer */
+ rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(rk, topic);
+
+ /* Consume messages (-1 for the one that failed producing) */
+ test_consumer_poll("interceptors.consume", rk, 0, -1, -1, msgcnt - 1,
+ NULL);
+
+ /* Verify on_consume */
+ for (i = 0; i < msgcnt - 1; i++) {
+ struct msg_state *msg = &msgs[i];
+ mtx_lock(&msg->lock);
+ msg_verify_ic_cnt(msg, "on_consume", msg->bits[_ON_CONSUME],
+ consumer_ic_cnt);
+ mtx_unlock(&msg->lock);
+ }
+
+ /* Verify that the produce-failed message didnt have
+ * interceptors called */
+ mtx_lock(&msgs[msgcnt - 1].lock);
+ msg_verify_ic_cnt(&msgs[msgcnt - 1], "on_consume",
+ msgs[msgcnt - 1].bits[_ON_CONSUME], 0);
+ mtx_unlock(&msgs[msgcnt - 1].lock);
+
+ test_consumer_close(rk);
+
+ verify_ic_cnt("on_commit", on_commit_bits, consumer_ic_cnt);
+
+ rd_kafka_destroy(rk);
+}
+
+/**
+ * @brief Interceptors must not be copied automatically by conf_dup()
+ * unless the interceptors have added on_conf_dup().
+ * This behaviour makes sure an interceptor's instance
+ * is not duplicated without the interceptor's knowledge or
+ * assistance.
+ */
+static void do_test_conf_copy(const char *topic) {
+ rd_kafka_conf_t *conf, *conf2;
+ int i;
+ rd_kafka_t *rk;
+
+ TEST_SAY(_C_MAG "[ %s ]\n" _C_CLR, __FUNCTION__);
+
+ memset(&msgs[0], 0, sizeof(msgs));
+
+ test_conf_init(&conf, NULL, 0);
+
+ rd_kafka_conf_interceptor_add_on_new(conf, "on_new_conf_copy",
+ on_new_producer, NULL);
+
+ /* Now copy the configuration to verify that interceptors are
+ * NOT copied. */
+ conf2 = conf;
+ conf = rd_kafka_conf_dup(conf2);
+ rd_kafka_conf_destroy(conf2);
+
+ /* Create producer */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ for (i = 0; i < msgcnt - 1; i++)
+ do_test_produce(rk, topic, RD_KAFKA_PARTITION_UA, i, 0, 0);
+
+ /* Wait for messages to be delivered */
+ test_flush(rk, -1);
+
+ /* Verify acks */
+ for (i = 0; i < msgcnt; i++) {
+ struct msg_state *msg = &msgs[i];
+ mtx_lock(&msg->lock);
+ msg_verify_ic_cnt(msg, "on_ack", msg->bits[_ON_ACK], 0);
+ mtx_unlock(&msg->lock);
+ }
+
+ rd_kafka_destroy(rk);
+}
+
+
+int main_0064_interceptors(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+
+ do_test_producer(topic);
+
+ do_test_consumer(topic);
+
+ do_test_conf_copy(topic);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp
new file mode 100644
index 000000000..6f2dbb0ac
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0065-yield.cpp
@@ -0,0 +1,140 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+/**
+ * Verify that yield() works.
+ *
+ * In two iterations, do:
+ * - Register a DR callback that counts the number of messages and
+ * calls yield() in iteration 1, and not in iteration 2.
+ * - Produce 100 messages quickly (to ensure same-batch)
+ * - Verify that only one DR callback is triggered per poll() call
+ * in iteration 1, and all messages in iteration 2.
+ */
+
+class DrCb0065 : public RdKafka::DeliveryReportCb {
+ public:
+ int cnt; // dr messages seen
+ bool do_yield; // whether to yield for each message or not
+ RdKafka::Producer *p;
+
+ DrCb0065(bool yield) : cnt(0), do_yield(yield), p(NULL) {
+ }
+
+ void dr_cb(RdKafka::Message &message) {
+ if (message.err())
+ Test::Fail("DR: message failed: " + RdKafka::err2str(message.err()));
+
+ Test::Say(3, tostr() << "DR #" << cnt << "\n");
+ cnt++;
+
+ if (do_yield)
+ p->yield();
+ }
+};
+
+
+static void do_test_producer(bool do_yield) {
+ int msgcnt = 100;
+ std::string errstr;
+ RdKafka::ErrorCode err;
+ std::string topic = Test::mk_topic_name("0065_yield", 1);
+
+ /*
+ * Create Producer
+ */
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 10);
+ DrCb0065 dr(do_yield);
+ conf->set("dr_cb", &dr, errstr);
+ /* Make sure messages are produced in batches of 100 */
+ conf->set("batch.num.messages", "100", errstr);
+ conf->set("linger.ms", "10000", errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create producer: " + errstr);
+ delete conf;
+
+ dr.p = p;
+
+ Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ") << "Producing "
+ << msgcnt << " messages to " << topic << "\n");
+
+ for (int i = 0; i < msgcnt; i++) {
+ err = p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"hi", 2,
+ NULL, 0, 0, NULL);
+ if (err)
+ Test::Fail("produce() failed: " + RdKafka::err2str(err));
+ }
+
+
+ int exp_msgs_per_poll = do_yield ? 1 : msgcnt;
+
+ while (dr.cnt < msgcnt) {
+ int pre_cnt = dr.cnt;
+ p->poll(1000);
+
+ int this_dr_cnt = dr.cnt - pre_cnt;
+ if (this_dr_cnt == 0) {
+ /* Other callbacks may cause poll() to return early
+ * before DRs are available, ignore these. */
+ Test::Say(3, "Zero DRs called, ignoring\n");
+ continue;
+ }
+
+ if (this_dr_cnt != exp_msgs_per_poll)
+ Test::Fail(tostr() << "Expected " << exp_msgs_per_poll
+ << " DRs per poll() call, got " << this_dr_cnt);
+ else
+ Test::Say(3, tostr() << dr.cnt << "/" << msgcnt << "\n");
+ }
+
+ if (dr.cnt != msgcnt)
+ Test::Fail(tostr() << "Expected " << msgcnt << " DRs, got " << dr.cnt);
+
+ Test::Say(tostr() << (do_yield ? "Yield: " : "Dont Yield: ")
+ << "Success: " << dr.cnt << " DRs received in batches of "
+ << exp_msgs_per_poll << "\n");
+
+ delete p;
+}
+
+extern "C" {
+int main_0065_yield(int argc, char **argv) {
+ do_test_producer(1 /*yield*/);
+ do_test_producer(0 /*dont yield*/);
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp
new file mode 100644
index 000000000..9f9f31240
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0066-plugins.cpp
@@ -0,0 +1,129 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+#ifdef _WIN32
+#include <direct.h>
+#endif
+
+
+extern "C" {
+#include "interceptor_test/interceptor_test.h"
+
+struct ictest ictest;
+};
+
+
+/**
+ * Verify plugin.library.paths and interceptors
+ * using interceptor_test/...
+ *
+ */
+
+
+static void do_test_plugin() {
+ std::string errstr;
+ std::string topic = Test::mk_topic_name("0066_plugins", 1);
+ static const char *config[] = {
+ "session.timeout.ms",
+ "6000", /* Before plugin */
+ "plugin.library.paths",
+ "interceptor_test/interceptor_test",
+ "socket.timeout.ms",
+ "12", /* After plugin */
+ "interceptor_test.config1",
+ "one",
+ "interceptor_test.config2",
+ "two",
+ "topic.metadata.refresh.interval.ms",
+ "1234",
+ NULL,
+ };
+
+ char cwd[512], *pcwd;
+#ifdef _WIN32
+ pcwd = _getcwd(cwd, sizeof(cwd) - 1);
+#else
+ pcwd = getcwd(cwd, sizeof(cwd) - 1);
+#endif
+ if (pcwd)
+ Test::Say(tostr() << "running test from cwd " << cwd << "\n");
+
+ /* Interceptor back-channel config */
+ ictest_init(&ictest);
+ ictest_cnt_init(&ictest.conf_init, 1, 1000);
+ ictest_cnt_init(&ictest.on_new, 1, 1);
+
+ /* Config for intercepted client */
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ for (int i = 0; config[i]; i += 2) {
+ Test::Say(tostr() << "set(" << config[i] << ", " << config[i + 1] << ")\n");
+ if (conf->set(config[i], config[i + 1], errstr))
+ Test::Fail(tostr() << "set(" << config[i] << ") failed: " << errstr);
+ }
+
+ /* Create producer */
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create producer: " + errstr);
+
+ if (ictest.on_new.cnt < ictest.on_new.min ||
+ ictest.on_new.cnt > ictest.on_new.max)
+ Test::Fail(tostr() << "on_new.cnt " << ictest.on_new.cnt
+ << " not within range " << ictest.on_new.min << ".."
+ << ictest.on_new.max);
+
+ /* Verification */
+ if (!ictest.config1 || strcmp(ictest.config1, "one"))
+ Test::Fail(tostr() << "config1 was " << ictest.config1);
+ if (!ictest.config2 || strcmp(ictest.config2, "two"))
+ Test::Fail(tostr() << "config2 was " << ictest.config2);
+ if (!ictest.session_timeout_ms || strcmp(ictest.session_timeout_ms, "6000"))
+ Test::Fail(tostr() << "session.timeout.ms was "
+ << ictest.session_timeout_ms);
+ if (!ictest.socket_timeout_ms || strcmp(ictest.socket_timeout_ms, "12"))
+ Test::Fail(tostr() << "socket.timeout.ms was " << ictest.socket_timeout_ms);
+
+ delete conf;
+
+ delete p;
+
+ ictest_free(&ictest);
+}
+
+extern "C" {
+int main_0066_plugins(int argc, char **argv) {
+ do_test_plugin();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp
new file mode 100644
index 000000000..f71489fa1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0067-empty_topic.cpp
@@ -0,0 +1,148 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+
+
+/**
+ * Issue #1306
+ *
+ * Consume from an empty topic using Consumer and KafkaConsumer.
+ */
+
+
+static void do_test_empty_topic_consumer() {
+ std::string errstr;
+ std::string topic = Test::mk_topic_name("0067_empty_topic", 1);
+ const int32_t partition = 0;
+
+ RdKafka::Conf *conf;
+
+ Test::conf_init(&conf, NULL, 0);
+
+ Test::conf_set(conf, "enable.partition.eof", "true");
+ Test::conf_set(conf, "allow.auto.create.topics", "true");
+
+ /* Create simple consumer */
+ RdKafka::Consumer *consumer = RdKafka::Consumer::create(conf, errstr);
+ if (!consumer)
+ Test::Fail("Failed to create Consumer: " + errstr);
+
+ RdKafka::Topic *rkt = RdKafka::Topic::create(consumer, topic, NULL, errstr);
+ if (!rkt)
+ Test::Fail("Simple Topic failed: " + errstr);
+
+
+ /* Create the topic through a metadata request. */
+ Test::Say("Creating empty topic " + topic + "\n");
+ RdKafka::Metadata *md;
+ RdKafka::ErrorCode err =
+ consumer->metadata(false, rkt, &md, tmout_multip(10 * 1000));
+ if (err)
+ Test::Fail("Failed to create topic " + topic + ": " +
+ RdKafka::err2str(err));
+ delete md;
+
+ /* Start consumer */
+ err = consumer->start(rkt, partition, RdKafka::Topic::OFFSET_BEGINNING);
+ if (err)
+ Test::Fail("Consume start() failed: " + RdKafka::err2str(err));
+
+ /* Consume using legacy consumer, should give an EOF and nothing else. */
+ Test::Say("Simple Consumer: consuming\n");
+ RdKafka::Message *msg =
+ consumer->consume(rkt, partition, tmout_multip(10 * 1000));
+ if (msg->err() != RdKafka::ERR__PARTITION_EOF)
+ Test::Fail("Simple consume() expected EOF, got " +
+ RdKafka::err2str(msg->err()));
+ delete msg;
+
+ /* Nothing else should come now, just a consume() timeout */
+ msg = consumer->consume(rkt, partition, 1 * 1000);
+ if (msg->err() != RdKafka::ERR__TIMED_OUT)
+ Test::Fail("Simple consume() expected timeout, got " +
+ RdKafka::err2str(msg->err()));
+ delete msg;
+
+ consumer->stop(rkt, partition);
+
+ delete rkt;
+ delete consumer;
+
+
+ /*
+ * Now do the same thing using the high-level KafkaConsumer.
+ */
+
+ Test::conf_set(conf, "group.id", topic);
+
+ Test::conf_set(conf, "enable.partition.eof", "true");
+ Test::conf_set(conf, "allow.auto.create.topics", "true");
+
+ RdKafka::KafkaConsumer *kconsumer =
+ RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!kconsumer)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+
+ std::vector<RdKafka::TopicPartition *> part;
+ part.push_back(RdKafka::TopicPartition::create(topic, partition));
+
+ err = kconsumer->assign(part);
+ if (err)
+ Test::Fail("assign() failed: " + RdKafka::err2str(err));
+
+ RdKafka::TopicPartition::destroy(part);
+
+ Test::Say("KafkaConsumer: consuming\n");
+ msg = kconsumer->consume(tmout_multip(5 * 1000));
+ if (msg->err() != RdKafka::ERR__PARTITION_EOF)
+ Test::Fail("KafkaConsumer consume() expected EOF, got " +
+ RdKafka::err2str(msg->err()));
+ delete msg;
+
+ /* Nothing else should come now, just a consume() timeout */
+ msg = kconsumer->consume(1 * 1000);
+ if (msg->err() != RdKafka::ERR__TIMED_OUT)
+ Test::Fail("KafkaConsumer consume() expected timeout, got " +
+ RdKafka::err2str(msg->err()));
+ delete msg;
+
+ kconsumer->close();
+
+ delete kconsumer;
+ delete conf;
+}
+
+extern "C" {
+int main_0067_empty_topic(int argc, char **argv) {
+ do_test_empty_topic_consumer();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c
new file mode 100644
index 000000000..a7ad37e16
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0068-produce_timeout.c
@@ -0,0 +1,138 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#if WITH_SOCKEM
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * Force produce requests to timeout to test error handling.
+ */
+
+/**
+ * @brief Sockem connect, called from **internal librdkafka thread** through
+ * librdkafka's connect_cb
+ */
+static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
+
+ /* Let delay be high to trigger the local timeout */
+ sockem_set(skm, "delay", 10000, NULL);
+ return 0;
+}
+
+static int
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ /* Ignore connectivity errors since we'll be bringing down
+ * .. connectivity.
+ * SASL auther will think a connection-down even in the auth
+ * state means the broker doesn't support SASL PLAIN. */
+ TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
+ err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+ err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ return 0;
+ return 1;
+}
+
+static int msg_dr_cnt = 0;
+static int msg_dr_fail_cnt = 0;
+
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ msg_dr_cnt++;
+ if (rkmessage->err != RD_KAFKA_RESP_ERR__MSG_TIMED_OUT)
+ TEST_FAIL_LATER(
+ "Expected message to fail with MSG_TIMED_OUT, "
+ "got: %s",
+ rd_kafka_err2str(rkmessage->err));
+ else {
+ TEST_ASSERT_LATER(rd_kafka_message_status(rkmessage) ==
+ RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED,
+ "Message should have status "
+ "PossiblyPersisted (%d), not %d",
+ RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED,
+ rd_kafka_message_status(rkmessage));
+ msg_dr_fail_cnt++;
+ }
+}
+
+
+
+int main_0068_produce_timeout(int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char *topic = test_mk_topic_name("0068_produce_timeout", 1);
+ uint64_t testid;
+ const int msgcnt = 10;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ int msgcounter = 0;
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 60);
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ test_socket_enable(conf);
+ test_curr->connect_cb = connect_cb;
+ test_curr->is_fatal_cb = is_fatal_cb;
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = test_create_producer_topic(rk, topic, "message.timeout.ms",
+ "2000", NULL);
+
+ TEST_SAY("Auto-creating topic %s\n", topic);
+ test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
+
+ TEST_SAY("Producing %d messages that should timeout\n", msgcnt);
+ test_produce_msgs_nowait(rk, rkt, testid, 0, 0, msgcnt, NULL, 0, 0,
+ &msgcounter);
+
+
+ TEST_SAY("Flushing..\n");
+ rd_kafka_flush(rk, 10000);
+
+ TEST_SAY("%d/%d delivery reports, where of %d with proper error\n",
+ msg_dr_cnt, msgcnt, msg_dr_fail_cnt);
+
+ TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt,
+ msg_dr_cnt);
+ TEST_ASSERT(msg_dr_fail_cnt == msgcnt, "expected %d, got %d", msgcnt,
+ msg_dr_fail_cnt);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
+
+
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c
new file mode 100644
index 000000000..933e53775
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0069-consumer_add_parts.c
@@ -0,0 +1,123 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/**
+ * Issue #1371:
+ * Run two consumers in the same group for a 2-partition topic,
+ * alter the topic to have 4 partitions, kill off the first consumer,
+ * the second consumer will segfault.
+ */
+
+#include "rdkafka.h"
+
+
+static rd_kafka_t *c1, *c2;
+static rd_kafka_resp_err_t state1, state2;
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ rd_kafka_resp_err_t *statep = NULL;
+
+ if (rk == c1)
+ statep = &state1;
+ else if (rk == c2)
+ statep = &state2;
+ else
+ TEST_FAIL("Invalid rk %p", rk);
+
+ TEST_SAY("Rebalance for %s: %s:\n", rd_kafka_name(rk),
+ rd_kafka_err2str(err));
+ test_print_partition_list(parts);
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+ rd_kafka_assign(rk, parts);
+ else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS)
+ rd_kafka_assign(rk, NULL);
+
+ *statep = err;
+}
+
+
+int main_0069_consumer_add_parts(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1);
+ int64_t ts_start;
+ int wait_sec;
+
+ test_conf_init(NULL, NULL, 60);
+
+ TEST_SAY("Creating 2 consumers\n");
+ c1 = test_create_consumer(topic, rebalance_cb, NULL, NULL);
+ c2 = test_create_consumer(topic, rebalance_cb, NULL, NULL);
+
+ TEST_SAY("Creating topic %s with 2 partitions\n", topic);
+ test_create_topic(c1, topic, 2, 1);
+
+ test_wait_topic_exists(c1, topic, 10 * 1000);
+
+ TEST_SAY("Subscribing\n");
+ test_consumer_subscribe(c1, topic);
+ test_consumer_subscribe(c2, topic);
+
+
+ TEST_SAY("Waiting for initial assignment for both consumers\n");
+ while (state1 != RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS ||
+ state2 != RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
+ test_consumer_poll_no_msgs("wait-rebalance", c1, 0, 1000);
+ test_consumer_poll_no_msgs("wait-rebalance", c2, 0, 1000);
+ }
+
+
+ TEST_SAY("Changing partition count for topic %s\n", topic);
+ test_create_partitions(NULL, topic, 4);
+
+ TEST_SAY(
+ "Closing consumer 1 (to quickly trigger rebalance with new "
+ "partitions)\n");
+ test_consumer_close(c1);
+ rd_kafka_destroy(c1);
+
+ TEST_SAY("Wait 10 seconds for consumer 2 not to crash\n");
+ wait_sec = test_quick ? 5 : 10;
+ ts_start = test_clock();
+ do {
+ test_consumer_poll_no_msgs("wait-stable", c2, 0, 1000);
+ } while (test_clock() < ts_start + (wait_sec * 1000000));
+
+ TEST_ASSERT(state2 == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ "Expected consumer 2 to have assignment, not in state %s",
+ rd_kafka_err2str(state2));
+
+ test_consumer_close(c2);
+ rd_kafka_destroy(c2);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp
new file mode 100644
index 000000000..fac48185c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0070-null_empty.cpp
@@ -0,0 +1,197 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "testcpp.h"
+#include <cstring>
+
+/**
+ * Verification of difference between empty and null Key and Value
+ */
+
+
+static int check_equal(const char *exp,
+ const char *actual,
+ size_t len,
+ std::string what) {
+ size_t exp_len = exp ? strlen(exp) : 0;
+ int failures = 0;
+
+ if (!actual && len != 0) {
+ Test::FailLater(tostr()
+ << what << ": expected length 0 for Null, not " << len);
+ failures++;
+ }
+
+ if (exp) {
+ if (!actual) {
+ Test::FailLater(tostr()
+ << what << ": expected \"" << exp << "\", not Null");
+ failures++;
+
+ } else if (len != exp_len || strncmp(exp, actual, exp_len)) {
+ Test::FailLater(tostr() << what << ": expected \"" << exp << "\", not \""
+ << actual << "\" (" << len << " bytes)");
+ failures++;
+ }
+
+ } else {
+ if (actual) {
+ Test::FailLater(tostr() << what << ": expected Null, not \"" << actual
+ << "\" (" << len << " bytes)");
+ failures++;
+ }
+ }
+
+ if (!failures)
+ Test::Say(3, tostr() << what << ": matched expectation\n");
+
+ return failures;
+}
+
+
+static void do_test_null_empty(bool api_version_request) {
+ std::string topic = Test::mk_topic_name("0070_null_empty", 1);
+ const int partition = 0;
+
+ Test::Say(tostr() << "Testing with api.version.request="
+ << api_version_request << " on topic " << topic
+ << " partition " << partition << "\n");
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 0);
+ Test::conf_set(conf, "api.version.request",
+ api_version_request ? "true" : "false");
+ Test::conf_set(conf, "acks", "all");
+
+
+ std::string errstr;
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ const int msgcnt = 8;
+ static const char *msgs[msgcnt * 2] = {NULL, NULL, "key2", NULL, "key3",
+ "val3", NULL, "val4", "", NULL,
+ NULL, "", "", ""};
+
+ RdKafka::ErrorCode err;
+
+ for (int i = 0; i < msgcnt * 2; i += 2) {
+ Test::Say(3, tostr() << "Produce message #" << (i / 2) << ": key=\""
+ << (msgs[i] ? msgs[i] : "Null") << "\", value=\""
+ << (msgs[i + 1] ? msgs[i + 1] : "Null") << "\"\n");
+ err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
+ /* Value */
+ (void *)msgs[i + 1], msgs[i + 1] ? strlen(msgs[i + 1]) : 0,
+ /* Key */
+ (void *)msgs[i], msgs[i] ? strlen(msgs[i]) : 0, 0, NULL);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("Produce failed: " + RdKafka::err2str(err));
+ }
+
+ if (p->flush(tmout_multip(3 * 5000)) != 0)
+ Test::Fail("Not all messages flushed");
+
+ Test::Say(tostr() << "Produced " << msgcnt << " messages to " << topic
+ << "\n");
+
+ delete p;
+
+ /*
+ * Now consume messages from the beginning, making sure they match
+ * what was produced.
+ */
+
+ /* Create consumer */
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "group.id", topic);
+ Test::conf_set(conf, "api.version.request",
+ api_version_request ? "true" : "false");
+ Test::conf_set(conf, "enable.auto.commit", "false");
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /* Assign the partition */
+ std::vector<RdKafka::TopicPartition *> parts;
+ parts.push_back(RdKafka::TopicPartition::create(
+ topic, partition, RdKafka::Topic::OFFSET_BEGINNING));
+ err = c->assign(parts);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("assign() failed: " + RdKafka::err2str(err));
+ RdKafka::TopicPartition::destroy(parts);
+
+ /* Start consuming */
+ int failures = 0;
+ for (int i = 0; i < msgcnt * 2; i += 2) {
+ RdKafka::Message *msg = c->consume(tmout_multip(5000));
+ if (msg->err())
+ Test::Fail(tostr() << "consume() failed at message " << (i / 2) << ": "
+ << msg->errstr());
+
+ /* verify key */
+ failures += check_equal(msgs[i], msg->key() ? msg->key()->c_str() : NULL,
+ msg->key_len(),
+ tostr() << "message #" << (i / 2) << " (offset "
+ << msg->offset() << ") key");
+ /* verify key_pointer() API as too */
+ failures +=
+ check_equal(msgs[i], (const char *)msg->key_pointer(), msg->key_len(),
+ tostr() << "message #" << (i / 2) << " (offset "
+ << msg->offset() << ") key");
+
+ /* verify value */
+ failures +=
+ check_equal(msgs[i + 1], (const char *)msg->payload(), msg->len(),
+ tostr() << "message #" << (i / 2) << " (offset "
+ << msg->offset() << ") value");
+ delete msg;
+ }
+
+ Test::Say(tostr() << "Done consuming, closing. " << failures
+ << " test failures\n");
+ if (failures)
+ Test::Fail(tostr() << "See " << failures << " previous test failure(s)");
+
+ c->close();
+ delete c;
+}
+
+
+extern "C" {
+int main_0070_null_empty(int argc, char **argv) {
+ if (test_broker_version >= TEST_BRKVER(0, 10, 0, 0))
+ do_test_null_empty(true);
+ do_test_null_empty(false);
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c
new file mode 100644
index 000000000..0576d611a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0072-headers_ut.c
@@ -0,0 +1,448 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * Local (no broker) unit-like tests of Message Headers
+ */
+
+
+
+static int exp_msgid = 0;
+
+struct expect {
+ const char *name;
+ const char *value;
+};
+
+/**
+ * @brief returns the message id
+ */
+static int expect_check(const char *what,
+ const struct expect *expected,
+ const rd_kafka_message_t *rkmessage) {
+ const struct expect *exp;
+ rd_kafka_resp_err_t err;
+ size_t idx = 0;
+ const char *name;
+ const char *value;
+ size_t size;
+ rd_kafka_headers_t *hdrs;
+ int msgid;
+
+ if (rkmessage->len != sizeof(msgid))
+ TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)",
+ what, rkmessage->len);
+
+ memcpy(&msgid, rkmessage->payload, rkmessage->len);
+
+ if ((err = rd_kafka_message_headers(rkmessage, &hdrs))) {
+ if (msgid == 0)
+ return 0; /* No headers expected for first message */
+
+ TEST_FAIL("%s: Expected headers in message %d: %s", what, msgid,
+ rd_kafka_err2str(err));
+ } else {
+ TEST_ASSERT(msgid != 0,
+ "%s: first message should have no headers", what);
+ }
+
+ /* msgid should always be first and has a variable value so hard to
+ * match with the expect struct. */
+ for (idx = 0, exp = expected; !rd_kafka_header_get_all(
+ hdrs, idx, &name, (const void **)&value, &size);
+ idx++, exp++) {
+
+ TEST_SAYL(3,
+ "%s: Msg #%d: "
+ "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n",
+ what, msgid, idx, name, value ? value : "(NULL)",
+ exp->name, exp->value ? exp->value : "(NULL)");
+
+ if (strcmp(name, exp->name))
+ TEST_FAIL("%s: Expected header %s at idx #%" PRIusz
+ ", not %s",
+ what, exp->name, idx - 1, name);
+
+ if (!strcmp(name, "msgid")) {
+ int vid;
+
+ /* Special handling: compare msgid header value
+ * to message body, should be identical */
+ if (size != rkmessage->len || size != sizeof(int))
+ TEST_FAIL(
+ "%s: "
+ "Expected msgid/int-sized payload "
+ "%" PRIusz ", got %" PRIusz,
+ what, size, rkmessage->len);
+
+ /* Copy to avoid unaligned access (by cast) */
+ memcpy(&vid, value, size);
+
+ if (vid != msgid)
+ TEST_FAIL("%s: Header msgid %d != payload %d",
+ what, vid, msgid);
+
+ if (exp_msgid != vid)
+ TEST_FAIL("%s: Expected msgid %d, not %d", what,
+ exp_msgid, vid);
+ continue;
+ }
+
+ if (!exp->value) {
+ /* Expected NULL value */
+ TEST_ASSERT(!value,
+ "%s: Expected NULL value for %s, got %s",
+ what, exp->name, value);
+
+ } else {
+ TEST_ASSERT(value,
+ "%s: "
+ "Expected non-NULL value for %s, got NULL",
+ what, exp->name);
+
+ TEST_ASSERT(size == strlen(exp->value),
+ "%s: Expected size %" PRIusz
+ " for %s, "
+ "not %" PRIusz,
+ what, strlen(exp->value), exp->name, size);
+
+ TEST_ASSERT(value[size] == '\0',
+ "%s: "
+ "Expected implicit null-terminator for %s",
+ what, exp->name);
+
+ TEST_ASSERT(!strcmp(exp->value, value),
+ "%s: "
+ "Expected value %s for %s, not %s",
+ what, exp->value, exp->name, value);
+ }
+ }
+
+ TEST_ASSERT(exp->name == NULL,
+ "%s: Expected the expected, but stuck at %s which was "
+ "unexpected",
+ what, exp->name);
+
+ return msgid;
+}
+
+
+/**
+ * @brief Delivery report callback
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ const struct expect expected[] = {
+ {"msgid", NULL}, /* special handling */
+ {"static", "hey"}, {"null", NULL}, {"empty", ""},
+ {"send1", "1"}, {"multi", "multi5"}, {NULL}};
+ const struct expect replace_expected[] = {
+ {"msgid", NULL}, {"new", "one"},
+ {"this is the", NULL}, {"replaced headers\"", ""},
+ {"new", "right?"}, {NULL}};
+ const struct expect *exp;
+ rd_kafka_headers_t *new_hdrs;
+ int msgid;
+
+ TEST_ASSERT(rkmessage->err == RD_KAFKA_RESP_ERR__MSG_TIMED_OUT,
+ "Expected message to fail with MSG_TIMED_OUT, not %s",
+ rd_kafka_err2str(rkmessage->err));
+
+ msgid = expect_check(__FUNCTION__, expected, rkmessage);
+
+ /* Replace entire headers list */
+ if (msgid > 0) {
+ new_hdrs = rd_kafka_headers_new(1);
+ rd_kafka_header_add(new_hdrs, "msgid", -1, &msgid,
+ sizeof(msgid));
+ for (exp = &replace_expected[1]; exp->name; exp++)
+ rd_kafka_header_add(new_hdrs, exp->name, -1, exp->value,
+ -1);
+
+ rd_kafka_message_set_headers((rd_kafka_message_t *)rkmessage,
+ new_hdrs);
+
+ expect_check(__FUNCTION__, replace_expected, rkmessage);
+ }
+
+ exp_msgid++;
+}
+
+static void expect_iter(const char *what,
+ const rd_kafka_headers_t *hdrs,
+ const char *name,
+ const char **expected,
+ size_t cnt) {
+ size_t idx;
+ rd_kafka_resp_err_t err;
+ const void *value;
+ size_t size;
+
+ for (idx = 0;
+ !(err = rd_kafka_header_get(hdrs, idx, name, &value, &size));
+ idx++) {
+ TEST_ASSERT(idx < cnt,
+ "%s: too many headers matching '%s', "
+ "expected %" PRIusz,
+ what, name, cnt);
+ TEST_SAYL(3,
+ "%s: get(%" PRIusz
+ ", '%s') "
+ "expecting '%s' =? '%s'\n",
+ what, idx, name, expected[idx], (const char *)value);
+
+
+ TEST_ASSERT(
+ !strcmp((const char *)value, expected[idx]),
+ "%s: get(%" PRIusz ", '%s') expected '%s', not '%s'", what,
+ idx, name, expected[idx], (const char *)value);
+ }
+
+ TEST_ASSERT(idx == cnt,
+ "%s: expected %" PRIusz
+ " headers matching '%s', not %" PRIusz,
+ what, cnt, name, idx);
+}
+
+
+
+/**
+ * @brief First on_send() interceptor
+ */
+static rd_kafka_resp_err_t
+on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ const struct expect expected[] = {
+ {"msgid", NULL}, /* special handling */
+ {"static", "hey"},
+ {"multi", "multi1"},
+ {"multi", "multi2"},
+ {"multi", "multi3"},
+ {"null", NULL},
+ {"empty", ""},
+ {NULL}};
+ const char *expect_iter_multi[4] = {
+ "multi1", "multi2", "multi3", "multi4" /* added below */
+ };
+ const char *expect_iter_static[1] = {"hey"};
+ rd_kafka_headers_t *hdrs;
+ size_t header_cnt;
+ rd_kafka_resp_err_t err;
+ const void *value;
+ size_t size;
+
+ expect_check(__FUNCTION__, expected, rkmessage);
+
+ err = rd_kafka_message_headers(rkmessage, &hdrs);
+ if (err) /* First message has no headers. */
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ header_cnt = rd_kafka_header_cnt(hdrs);
+ TEST_ASSERT(header_cnt == 7, "Expected 7 length got %" PRIusz "",
+ header_cnt);
+
+ rd_kafka_header_add(hdrs, "multi", -1, "multi4", -1);
+
+ header_cnt = rd_kafka_header_cnt(hdrs);
+ TEST_ASSERT(header_cnt == 8, "Expected 8 length got %" PRIusz "",
+ header_cnt);
+
+ /* test iter() */
+ expect_iter(__FUNCTION__, hdrs, "multi", expect_iter_multi, 4);
+ expect_iter(__FUNCTION__, hdrs, "static", expect_iter_static, 1);
+ expect_iter(__FUNCTION__, hdrs, "notexists", NULL, 0);
+
+ rd_kafka_header_add(hdrs, "send1", -1, "1", -1);
+
+ header_cnt = rd_kafka_header_cnt(hdrs);
+ TEST_ASSERT(header_cnt == 9, "Expected 9 length got %" PRIusz "",
+ header_cnt);
+
+ rd_kafka_header_remove(hdrs, "multi");
+
+ header_cnt = rd_kafka_header_cnt(hdrs);
+ TEST_ASSERT(header_cnt == 5, "Expected 5 length got %" PRIusz "",
+ header_cnt);
+
+ rd_kafka_header_add(hdrs, "multi", -1, "multi5", -1);
+
+ header_cnt = rd_kafka_header_cnt(hdrs);
+ TEST_ASSERT(header_cnt == 6, "Expected 6 length got %" PRIusz "",
+ header_cnt);
+
+ /* test get_last() */
+ err = rd_kafka_header_get_last(hdrs, "multi", &value, &size);
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ TEST_ASSERT(size == strlen("multi5") &&
+ !strcmp((const char *)value, "multi5"),
+ "expected 'multi5', not '%s'", (const char *)value);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Second on_send() interceptor
+ */
+static rd_kafka_resp_err_t
+on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ const struct expect expected[] = {
+ {"msgid", NULL}, /* special handling */
+ {"static", "hey"}, {"null", NULL}, {"empty", ""},
+ {"send1", "1"}, {"multi", "multi5"}, {NULL}};
+
+ expect_check(__FUNCTION__, expected, rkmessage);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief on_new() interceptor to set up message interceptors
+ * from rd_kafka_new().
+ */
+static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL);
+ rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+int main_0072_headers_ut(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 0);
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ int i;
+ size_t header_cnt;
+ const int msgcnt = 10;
+ rd_kafka_resp_err_t err;
+
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "message.timeout.ms", "1");
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, NULL);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ /* First message is without headers (negative testing) */
+ i = 0;
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_VALUE(&i, sizeof(i)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err));
+ exp_msgid++;
+
+ for (i = 1; i < msgcnt; i++, exp_msgid++) {
+ /* Use headers list on one message */
+ if (i == 3) {
+ rd_kafka_headers_t *hdrs = rd_kafka_headers_new(4);
+
+ header_cnt = rd_kafka_header_cnt(hdrs);
+ TEST_ASSERT(header_cnt == 0,
+ "Expected 0 length got %" PRIusz "",
+ header_cnt);
+
+ rd_kafka_headers_t *copied;
+
+ rd_kafka_header_add(hdrs, "msgid", -1, &i, sizeof(i));
+ rd_kafka_header_add(hdrs, "static", -1, "hey", -1);
+ rd_kafka_header_add(hdrs, "multi", -1, "multi1", -1);
+ rd_kafka_header_add(hdrs, "multi", -1, "multi2", 6);
+ rd_kafka_header_add(hdrs, "multi", -1, "multi3",
+ strlen("multi3"));
+ rd_kafka_header_add(hdrs, "null", -1, NULL, 0);
+
+ /* Make a copy of the headers to verify copy() */
+ copied = rd_kafka_headers_copy(hdrs);
+
+ header_cnt = rd_kafka_header_cnt(hdrs);
+ TEST_ASSERT(header_cnt == 6,
+ "Expected 6 length got %" PRIusz "",
+ header_cnt);
+
+ rd_kafka_headers_destroy(hdrs);
+
+ /* Last header ("empty") is added below */
+
+ /* Try unsupported _V_HEADER() and _V_HEADERS() mix,
+ * must fail with CONFLICT */
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_VALUE(&i, sizeof(i)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_HEADER("will_be_removed", "yep", -1),
+ RD_KAFKA_V_HEADERS(copied),
+ RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__CONFLICT,
+ "producev(): expected CONFLICT, got %s",
+ rd_kafka_err2str(err));
+
+ /* Proper call using only _V_HEADERS() */
+ rd_kafka_header_add(copied, "empty", -1, "", -1);
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_VALUE(&i, sizeof(i)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_HEADERS(copied), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev() failed: %s",
+ rd_kafka_err2str(err));
+
+ } else {
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_VALUE(&i, sizeof(i)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)),
+ RD_KAFKA_V_HEADER("static", "hey", -1),
+ RD_KAFKA_V_HEADER("multi", "multi1", -1),
+ RD_KAFKA_V_HEADER("multi", "multi2", 6),
+ RD_KAFKA_V_HEADER("multi", "multi3",
+ strlen("multi3")),
+ RD_KAFKA_V_HEADER("null", NULL, 0),
+ RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev() failed: %s",
+ rd_kafka_err2str(err));
+ }
+ }
+
+ /* Reset expected message id for dr */
+ exp_msgid = 0;
+
+ /* Wait for timeouts and delivery reports */
+ rd_kafka_flush(rk, 5000);
+
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c
new file mode 100644
index 000000000..e7e5c4074
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0073-headers.c
@@ -0,0 +1,381 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * Message Headers end-to-end tests
+ */
+
+
+
+static int exp_msgid = 0;
+
+struct expect {
+ const char *name;
+ const char *value;
+};
+
+
+
+static void expect_check(const char *what,
+ const struct expect *expected,
+ rd_kafka_message_t *rkmessage,
+ int is_const) {
+ const struct expect *exp;
+ rd_kafka_resp_err_t err;
+ size_t idx = 0;
+ const char *name;
+ const char *value;
+ size_t size;
+ rd_kafka_headers_t *hdrs;
+ int msgid;
+
+ if (rkmessage->len != sizeof(msgid))
+ TEST_FAIL("%s: expected message len %" PRIusz " == sizeof(int)",
+ what, rkmessage->len);
+
+ memcpy(&msgid, rkmessage->payload, rkmessage->len);
+
+ if ((err = rd_kafka_message_headers(rkmessage, &hdrs))) {
+ if (msgid == 0) {
+ rd_kafka_resp_err_t err2;
+ TEST_SAYL(3, "%s: Msg #%d: no headers, good\n", what,
+ msgid);
+
+ err2 =
+ rd_kafka_message_detach_headers(rkmessage, &hdrs);
+ TEST_ASSERT(err == err2,
+ "expected detach_headers() error %s "
+ "to match headers() error %s",
+ rd_kafka_err2str(err2),
+ rd_kafka_err2str(err));
+
+ return; /* No headers expected for first message */
+ }
+
+ TEST_FAIL("%s: Expected headers in message %d: %s", what, msgid,
+ rd_kafka_err2str(err));
+ } else {
+ TEST_ASSERT(msgid != 0,
+ "%s: first message should have no headers", what);
+ }
+
+ test_headers_dump(what, 3, hdrs);
+
+ for (idx = 0, exp = expected; !rd_kafka_header_get_all(
+ hdrs, idx, &name, (const void **)&value, &size);
+ idx++, exp++) {
+
+ TEST_SAYL(3,
+ "%s: Msg #%d: "
+ "Header #%" PRIusz ": %s='%s' (expecting %s='%s')\n",
+ what, msgid, idx, name, value ? value : "(NULL)",
+ exp->name, exp->value ? exp->value : "(NULL)");
+
+ if (strcmp(name, exp->name))
+ TEST_FAIL(
+ "%s: Msg #%d: "
+ "Expected header %s at idx #%" PRIusz
+ ", not '%s' (%" PRIusz ")",
+ what, msgid, exp->name, idx, name, strlen(name));
+
+ if (!strcmp(name, "msgid")) {
+ int vid;
+
+ /* Special handling: compare msgid header value
+ * to message body, should be identical */
+ if (size != rkmessage->len || size != sizeof(int))
+ TEST_FAIL(
+ "%s: "
+ "Expected msgid/int-sized payload "
+ "%" PRIusz ", got %" PRIusz,
+ what, size, rkmessage->len);
+
+ /* Copy to avoid unaligned access (by cast) */
+ memcpy(&vid, value, size);
+
+ if (vid != msgid)
+ TEST_FAIL("%s: Header msgid %d != payload %d",
+ what, vid, msgid);
+
+ if (exp_msgid != vid)
+ TEST_FAIL("%s: Expected msgid %d, not %d", what,
+ exp_msgid, vid);
+ continue;
+ }
+
+ if (!exp->value) {
+ /* Expected NULL value */
+ TEST_ASSERT(!value,
+ "%s: Expected NULL value for %s, got %s",
+ what, exp->name, value);
+
+ } else {
+ TEST_ASSERT(value,
+ "%s: "
+ "Expected non-NULL value for %s, got NULL",
+ what, exp->name);
+
+ TEST_ASSERT(size == strlen(exp->value),
+ "%s: Expected size %" PRIusz
+ " for %s, "
+ "not %" PRIusz,
+ what, strlen(exp->value), exp->name, size);
+
+ TEST_ASSERT(value[size] == '\0',
+ "%s: "
+ "Expected implicit null-terminator for %s",
+ what, exp->name);
+
+ TEST_ASSERT(!strcmp(exp->value, value),
+ "%s: "
+ "Expected value %s for %s, not %s",
+ what, exp->value, exp->name, value);
+ }
+ }
+
+ TEST_ASSERT(exp->name == NULL,
+ "%s: Expected the expected, but stuck at %s which was "
+ "unexpected",
+ what, exp->name);
+
+ if (!strcmp(what, "handle_consumed_msg") && !is_const &&
+ (msgid % 3) == 0) {
+ rd_kafka_headers_t *dhdrs;
+
+ err = rd_kafka_message_detach_headers(rkmessage, &dhdrs);
+ TEST_ASSERT(!err, "detach_headers() should not fail, got %s",
+ rd_kafka_err2str(err));
+ TEST_ASSERT(hdrs == dhdrs);
+
+ /* Verify that a new headers object can be obtained */
+ err = rd_kafka_message_headers(rkmessage, &hdrs);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR);
+ TEST_ASSERT(hdrs != dhdrs);
+ rd_kafka_headers_destroy(dhdrs);
+
+ expect_check("post_detach_headers", expected, rkmessage,
+ is_const);
+ }
+}
+
+
+/**
+ * @brief Final (as in no more header modifications) message check.
+ */
+static void
+msg_final_check(const char *what, rd_kafka_message_t *rkmessage, int is_const) {
+ const struct expect expected[] = {
+ {"msgid", NULL}, /* special handling */
+ {"static", "hey"}, {"null", NULL}, {"empty", ""},
+ {"send1", "1"}, {"multi", "multi5"}, {NULL}};
+
+ expect_check(what, expected, rkmessage, is_const);
+
+ exp_msgid++;
+}
+
+/**
+ * @brief Handle consumed message, must be identical to dr_msg_cb
+ */
+static void handle_consumed_msg(rd_kafka_message_t *rkmessage) {
+ msg_final_check(__FUNCTION__, rkmessage, 0);
+}
+
+/**
+ * @brief Delivery report callback
+ */
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ TEST_ASSERT(!rkmessage->err, "Message delivery failed: %s",
+ rd_kafka_err2str(rkmessage->err));
+
+ msg_final_check(__FUNCTION__, (rd_kafka_message_t *)rkmessage, 1);
+}
+
+
+/**
+ * @brief First on_send() interceptor
+ */
+static rd_kafka_resp_err_t
+on_send1(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ const struct expect expected[] = {
+ {"msgid", NULL}, /* special handling */
+ {"static", "hey"},
+ {"multi", "multi1"},
+ {"multi", "multi2"},
+ {"multi", "multi3"},
+ {"null", NULL},
+ {"empty", ""},
+ {NULL}};
+ rd_kafka_headers_t *hdrs;
+ rd_kafka_resp_err_t err;
+
+ expect_check(__FUNCTION__, expected, rkmessage, 0);
+
+ err = rd_kafka_message_headers(rkmessage, &hdrs);
+ if (err) /* First message has no headers. */
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ rd_kafka_header_add(hdrs, "multi", -1, "multi4", -1);
+ rd_kafka_header_add(hdrs, "send1", -1, "1", -1);
+ rd_kafka_header_remove(hdrs, "multi");
+ rd_kafka_header_add(hdrs, "multi", -1, "multi5", -1);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Second on_send() interceptor
+ */
+static rd_kafka_resp_err_t
+on_send2(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ const struct expect expected[] = {
+ {"msgid", NULL}, /* special handling */
+ {"static", "hey"}, {"null", NULL}, {"empty", ""},
+ {"send1", "1"}, {"multi", "multi5"}, {NULL}};
+
+ expect_check(__FUNCTION__, expected, rkmessage, 0);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief on_new() interceptor to set up message interceptors
+ * from rd_kafka_new().
+ */
+static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send1, NULL);
+ rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send2, NULL);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static void do_produce(const char *topic, int msgcnt) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ int i;
+ rd_kafka_resp_err_t err;
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "acks", "all");
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, NULL);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ /* First message is without headers (negative testing) */
+ i = 0;
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE(&i, sizeof(i)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev() failed: %s", rd_kafka_err2str(err));
+ exp_msgid++;
+
+ for (i = 1; i < msgcnt; i++, exp_msgid++) {
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE(&i, sizeof(i)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_HEADER("msgid", &i, sizeof(i)),
+ RD_KAFKA_V_HEADER("static", "hey", -1),
+ RD_KAFKA_V_HEADER("multi", "multi1", -1),
+ RD_KAFKA_V_HEADER("multi", "multi2", 6),
+ RD_KAFKA_V_HEADER("multi", "multi3", strlen("multi3")),
+ RD_KAFKA_V_HEADER("null", NULL, 0),
+ RD_KAFKA_V_HEADER("empty", "", 0), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev() failed: %s",
+ rd_kafka_err2str(err));
+ }
+
+ /* Reset expected message id for dr */
+ exp_msgid = 0;
+
+ /* Wait for timeouts and delivery reports */
+ rd_kafka_flush(rk, tmout_multip(5000));
+
+ rd_kafka_destroy(rk);
+}
+
+static void do_consume(const char *topic, int msgcnt) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_partition_list_t *parts;
+
+ rk = test_create_consumer(topic, NULL, NULL, NULL);
+
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, 0)->offset =
+ RD_KAFKA_OFFSET_BEGINNING;
+
+ test_consumer_assign("assign", rk, parts);
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ exp_msgid = 0;
+
+ while (exp_msgid < msgcnt) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(rk, 1000);
+ if (!rkm)
+ continue;
+
+ if (rkm->err)
+ TEST_FAIL(
+ "consume error while expecting msgid %d/%d: "
+ "%s",
+ exp_msgid, msgcnt, rd_kafka_message_errstr(rkm));
+
+ handle_consumed_msg(rkm);
+
+ rd_kafka_message_destroy(rkm);
+ }
+
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+}
+
+
+int main_0073_headers(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1);
+ const int msgcnt = 10;
+
+ do_produce(topic, msgcnt);
+ do_consume(topic, msgcnt);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c
new file mode 100644
index 000000000..544a84734
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0074-producev.c
@@ -0,0 +1,87 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * @brief Simple producev() and produceva() verification
+ */
+
+/**
+ * @brief Verify #1478: The internal shared rkt reference was not destroyed
+ * when producev() failed.
+ */
+static void do_test_srkt_leak(void) {
+ rd_kafka_conf_t *conf;
+ char buf[2000];
+ rd_kafka_t *rk;
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+ rd_kafka_vu_t vus[3];
+
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "message.max.bytes", "1000");
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"),
+ RD_KAFKA_V_VALUE(buf, sizeof(buf)),
+ RD_KAFKA_V_END);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
+ "expected MSG_SIZE_TOO_LARGE, not %s",
+ rd_kafka_err2str(err));
+
+ vus[0].vtype = RD_KAFKA_VTYPE_TOPIC;
+ vus[0].u.cstr = "test";
+ vus[1].vtype = RD_KAFKA_VTYPE_VALUE;
+ vus[1].u.mem.ptr = buf;
+ vus[1].u.mem.size = sizeof(buf);
+ vus[2].vtype = RD_KAFKA_VTYPE_HEADER;
+ vus[2].u.header.name = "testheader";
+ vus[2].u.header.val = "test value";
+ vus[2].u.header.size = -1;
+
+ error = rd_kafka_produceva(rk, vus, 3);
+ TEST_ASSERT(error, "expected failure");
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR_MSG_SIZE_TOO_LARGE,
+ "expected MSG_SIZE_TOO_LARGE, not %s",
+ rd_kafka_error_string(error));
+ TEST_SAY("produceva() error (expected): %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_destroy(rk);
+}
+
+
+int main_0074_producev(int argc, char **argv) {
+ do_test_srkt_leak();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c
new file mode 100644
index 000000000..7e1e4f0f5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0075-retry.c
@@ -0,0 +1,252 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#if WITH_SOCKEM
+#include "rdkafka.h"
+
+#include <stdarg.h>
+#include <errno.h>
+
+/**
+ * Request retry testing
+ */
+
+/* Hang on to the first broker socket we see in connect_cb,
+ * reject all the rest (connection refused) to make sure we're only
+ * playing with one single broker for this test. */
+static struct {
+ mtx_t lock;
+ cnd_t cnd;
+ sockem_t *skm;
+ thrd_t thrd;
+ struct {
+ int64_t ts_at; /* to ctrl thread: at this time, set delay */
+ int delay;
+ int ack; /* from ctrl thread: new delay acked */
+ } cmd;
+ struct {
+ int64_t ts_at; /* to ctrl thread: at this time, set delay */
+ int delay;
+
+ } next;
+ int term;
+} ctrl;
+
+static int ctrl_thrd_main(void *arg) {
+
+
+ mtx_lock(&ctrl.lock);
+ while (!ctrl.term) {
+ int64_t now;
+
+ cnd_timedwait_ms(&ctrl.cnd, &ctrl.lock, 10);
+
+ if (ctrl.cmd.ts_at) {
+ ctrl.next.ts_at = ctrl.cmd.ts_at;
+ ctrl.next.delay = ctrl.cmd.delay;
+ ctrl.cmd.ts_at = 0;
+ ctrl.cmd.ack = 1;
+ printf(_C_CYA
+ "## %s: sockem: "
+ "receieved command to set delay "
+ "to %d in %dms\n" _C_CLR,
+ __FILE__, ctrl.next.delay,
+ (int)(ctrl.next.ts_at - test_clock()) / 1000);
+ }
+
+ now = test_clock();
+ if (ctrl.next.ts_at && now > ctrl.next.ts_at) {
+ assert(ctrl.skm);
+ printf(_C_CYA
+ "## %s: "
+ "sockem: setting socket delay to %d\n" _C_CLR,
+ __FILE__, ctrl.next.delay);
+ sockem_set(ctrl.skm, "delay", ctrl.next.delay, NULL);
+ ctrl.next.ts_at = 0;
+ cnd_signal(&ctrl.cnd); /* signal back to caller */
+ }
+ }
+ mtx_unlock(&ctrl.lock);
+
+ return 0;
+}
+
+
+/**
+ * @brief Sockem connect, called from **internal librdkafka thread** through
+ * librdkafka's connect_cb
+ */
+static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
+
+ mtx_lock(&ctrl.lock);
+ if (ctrl.skm) {
+ /* Reject all but the first connect */
+ mtx_unlock(&ctrl.lock);
+ return ECONNREFUSED;
+ }
+
+ ctrl.skm = skm;
+
+ /* signal wakeup to main thread */
+ cnd_broadcast(&ctrl.cnd);
+ mtx_unlock(&ctrl.lock);
+
+ return 0;
+}
+
+static int
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ /* Ignore connectivity errors since we'll be bringing down
+ * .. connectivity.
+ * SASL auther will think a connection-down even in the auth
+ * state means the broker doesn't support SASL PLAIN. */
+ TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
+ err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+ err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ return 0;
+ return 1;
+}
+
+/**
+ * @brief Set socket delay to kick in after \p after ms
+ */
+static void set_delay(int after, int delay) {
+ TEST_SAY("Set delay to %dms (after %dms)\n", delay, after);
+
+ mtx_lock(&ctrl.lock);
+ ctrl.cmd.ts_at = test_clock() + (after * 1000);
+ ctrl.cmd.delay = delay;
+ ctrl.cmd.ack = 0;
+ cnd_broadcast(&ctrl.cnd);
+
+ /* Wait for ack from sockem thread */
+ while (!ctrl.cmd.ack) {
+ TEST_SAY("Waiting for sockem control ack\n");
+ cnd_timedwait_ms(&ctrl.cnd, &ctrl.lock, 1000);
+ }
+ mtx_unlock(&ctrl.lock);
+}
+
+/**
+ * @brief Test that Metadata requests are retried properly when
+ * timing out due to high broker rtt.
+ */
+static void do_test_low_socket_timeout(const char *topic) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_resp_err_t err;
+ const struct rd_kafka_metadata *md;
+ int res;
+
+ mtx_init(&ctrl.lock, mtx_plain);
+ cnd_init(&ctrl.cnd);
+
+ TEST_SAY("Test Metadata request retries on timeout\n");
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "socket.timeout.ms", "1000");
+ test_conf_set(conf, "socket.max.fails", "12345");
+ test_conf_set(conf, "retry.backoff.ms", "5000");
+ /* Avoid api version requests (with their own timeout) to get in
+ * the way of our test */
+ test_conf_set(conf, "api.version.request", "false");
+ test_socket_enable(conf);
+ test_curr->connect_cb = connect_cb;
+ test_curr->is_fatal_cb = is_fatal_cb;
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ TEST_SAY("Waiting for sockem connect..\n");
+ mtx_lock(&ctrl.lock);
+ while (!ctrl.skm)
+ cnd_wait(&ctrl.cnd, &ctrl.lock);
+ mtx_unlock(&ctrl.lock);
+
+ TEST_SAY(
+ "Connected, fire off a undelayed metadata() to "
+ "make sure connection is up\n");
+
+ err = rd_kafka_metadata(rk, 0, rkt, &md, tmout_multip(2000));
+ TEST_ASSERT(!err, "metadata(undelayed) failed: %s",
+ rd_kafka_err2str(err));
+ rd_kafka_metadata_destroy(md);
+
+ if (thrd_create(&ctrl.thrd, ctrl_thrd_main, NULL) != thrd_success)
+ TEST_FAIL("Failed to create sockem ctrl thread");
+
+ set_delay(0, 3000); /* Takes effect immediately */
+
+ /* After two retries, remove the delay, the third retry
+ * should kick in and work. */
+ set_delay(
+ ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) -
+ 2000,
+ 0);
+
+ TEST_SAY(
+ "Calling metadata() again which should succeed after "
+ "3 internal retries\n");
+ /* Metadata should be returned after the third retry */
+ err = rd_kafka_metadata(
+ rk, 0, rkt, &md,
+ ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) +
+ 5000);
+ TEST_SAY("metadata() returned %s\n", rd_kafka_err2str(err));
+ TEST_ASSERT(!err, "metadata(undelayed) failed: %s",
+ rd_kafka_err2str(err));
+ rd_kafka_metadata_destroy(md);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ /* Join controller thread */
+ mtx_lock(&ctrl.lock);
+ ctrl.term = 1;
+ mtx_unlock(&ctrl.lock);
+ thrd_join(ctrl.thrd, &res);
+
+ cnd_destroy(&ctrl.cnd);
+ mtx_destroy(&ctrl.lock);
+}
+
+int main_0075_retry(int argc, char **argv) {
+ const char *topic = test_mk_topic_name("0075_retry", 1);
+
+ do_test_low_socket_timeout(topic);
+
+ return 0;
+}
+
+
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c
new file mode 100644
index 000000000..16d6f602c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0076-produce_retry.c
@@ -0,0 +1,350 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#include <stdarg.h>
+#include <errno.h>
+
+static int
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ /* Ignore connectivity errors since we'll be bringing down
+ * .. connectivity.
+ * SASL auther will think a connection-down even in the auth
+ * state means the broker doesn't support SASL PLAIN. */
+ TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
+ err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+ err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ return 0;
+ return 1;
+}
+
+
+#if WITH_SOCKEM
+/**
+ * Producer message retry testing
+ */
+
+/* Hang on to the first broker socket we see in connect_cb,
+ * reject all the rest (connection refused) to make sure we're only
+ * playing with one single broker for this test. */
+
+#include "sockem_ctrl.h"
+
+
+/**
+ * @brief Test produce retries.
+ *
+ * @param should_fail If true, do negative testing which should fail.
+ */
+static void do_test_produce_retries(const char *topic,
+ int idempotence,
+ int try_fail,
+ int should_fail) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ uint64_t testid;
+ rd_kafka_resp_err_t err;
+ int msgcnt = 1;
+ sockem_ctrl_t ctrl;
+
+ TEST_SAY(_C_BLU
+ "Test produce retries "
+ "(idempotence=%d,try_fail=%d,should_fail=%d)\n",
+ idempotence, try_fail, should_fail);
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 60);
+
+ if (should_fail &&
+ !strcmp(test_conf_get(conf, "enable.sparse.connections"), "true")) {
+ rd_kafka_conf_destroy(conf);
+ TEST_SAY(_C_YEL
+ "Sparse connections enabled: "
+ "skipping connection-timing related test\n");
+ return;
+ }
+
+ sockem_ctrl_init(&ctrl);
+
+ test_conf_set(conf, "socket.timeout.ms", "1000");
+ /* Avoid disconnects on request timeouts */
+ test_conf_set(conf, "socket.max.fails", "100");
+ test_conf_set(conf, "enable.idempotence",
+ idempotence ? "true" : "false");
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED;
+ if (!try_fail) {
+ test_conf_set(conf, "retries", "5");
+ } else {
+ /* enable.idempotence=true request retries >= 1 which
+ * makes the test pass. Adjust expected error accordingly. */
+ if (idempotence)
+ test_conf_set(conf, "retries", "5");
+ else
+ test_conf_set(conf, "retries", "0");
+ if (should_fail) {
+ test_curr->exp_dr_err =
+ RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+ test_curr->exp_dr_status =
+ RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED;
+ }
+ }
+ test_conf_set(conf, "retry.backoff.ms", "5000");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ test_socket_enable(conf);
+ test_curr->is_fatal_cb = is_fatal_cb;
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ /* Create the topic to make sure connections are up and ready. */
+ err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
+ TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err));
+
+ /* Set initial delay to 3s */
+ sockem_ctrl_set_delay(&ctrl, 0, 3000); /* Takes effect immediately */
+
+ /* After two retries, remove the delay, the third retry
+ * should kick in and work. */
+ sockem_ctrl_set_delay(
+ &ctrl,
+ ((1000 /*socket.timeout.ms*/ + 5000 /*retry.backoff.ms*/) * 2) -
+ 2000,
+ 0);
+
+ test_produce_msgs(rk, rkt, testid, RD_KAFKA_PARTITION_UA, 0, msgcnt,
+ NULL, 0);
+
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ if (!should_fail) {
+ TEST_SAY("Verifying messages with consumer\n");
+ test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL);
+ }
+
+ sockem_ctrl_term(&ctrl);
+
+ TEST_SAY(_C_GRN
+ "Test produce retries "
+ "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n",
+ idempotence, try_fail, should_fail);
+}
+#endif
+
+
+
+/**
+ * @brief Simple on_request_sent interceptor that simply disconnects
+ * the socket when first ProduceRequest is seen.
+ * Sub-sequent ProduceRequests will not trigger a disconnect, to allow
+ * for retries.
+ */
+static mtx_t produce_disconnect_lock;
+static int produce_disconnects = 0;
+static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ void *ic_opaque) {
+
+ /* Ignore if not a ProduceRequest */
+ if (ApiKey != 0)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ mtx_lock(&produce_disconnect_lock);
+ if (produce_disconnects == 0) {
+ char buf[512];
+ ssize_t r;
+ printf(_C_CYA "%s:%d: shutting down socket %d (%s)\n" _C_CLR,
+ __FILE__, __LINE__, sockfd, brokername);
+#ifdef _WIN32
+ closesocket(sockfd);
+#else
+ close(sockfd);
+#endif
+ /* There is a chance the broker responded in the
+ * time it took us to get here, so purge the
+ * socket recv buffer to make sure librdkafka does not see
+ * the response. */
+ while ((r = recv(sockfd, buf, sizeof(buf), 0)) > 0)
+ printf(_C_CYA
+ "%s:%d: "
+ "purged %" PRIdsz " bytes from socket\n",
+ __FILE__, __LINE__, r);
+ produce_disconnects = 1;
+ }
+ mtx_unlock(&produce_disconnect_lock);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ return rd_kafka_interceptor_add_on_request_sent(
+ rk, "disconnect_on_send", on_request_sent, NULL);
+}
+
+/**
+ * @brief Test produce retries by disconnecting right after ProduceRequest
+ * has been sent.
+ *
+ * @param should_fail If true, do negative testing which should fail.
+ */
+static void do_test_produce_retries_disconnect(const char *topic,
+ int idempotence,
+ int try_fail,
+ int should_fail) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ uint64_t testid;
+ rd_kafka_resp_err_t err;
+ int msgcnt = 1;
+ int partition_cnt;
+
+ TEST_SAY(_C_BLU
+ "Test produce retries by disconnect "
+ "(idempotence=%d,try_fail=%d,should_fail=%d)\n",
+ idempotence, try_fail, should_fail);
+
+ test_curr->is_fatal_cb = is_fatal_cb;
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 60);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ test_conf_set(conf, "socket.timeout.ms", test_quick ? "3000" : "10000");
+ test_conf_set(conf, "message.timeout.ms",
+ test_quick ? "9000" : "30000");
+ test_conf_set(conf, "enable.idempotence",
+ idempotence ? "true" : "false");
+ if (!try_fail) {
+ test_conf_set(conf, "retries", "1");
+ } else {
+ /* enable.idempotence=true request retries >= 1 which
+ * makes the test pass. */
+ if (!idempotence)
+ test_conf_set(conf, "retries", "0");
+ }
+
+ mtx_init(&produce_disconnect_lock, mtx_plain);
+ produce_disconnects = 0;
+
+ rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
+ on_new_producer, NULL);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ err = test_produce_sync(rk, rkt, testid, 0);
+
+ if (should_fail) {
+ if (!err)
+ TEST_FAIL("Expected produce to fail\n");
+ else
+ TEST_SAY("Produced message failed as expected: %s\n",
+ rd_kafka_err2str(err));
+ } else {
+ if (err)
+ TEST_FAIL("Produced message failed: %s\n",
+ rd_kafka_err2str(err));
+ else
+ TEST_SAY("Produced message delivered\n");
+ }
+
+ mtx_lock(&produce_disconnect_lock);
+ TEST_ASSERT(produce_disconnects == 1, "expected %d disconnects, not %d",
+ 1, produce_disconnects);
+ mtx_unlock(&produce_disconnect_lock);
+
+
+ partition_cnt = test_get_partition_count(rk, topic, tmout_multip(5000));
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY("Verifying messages with consumer\n");
+ test_consume_msgs_easy(NULL, topic, testid, partition_cnt,
+ /* Since we don't know the number of
+ * messages that got thru on the socket
+ * before disconnect we can't let the
+ * expected message count be 0 in case of
+ * should_fail, so instead ignore the message
+ * count (-1). */
+ should_fail ? -1 : msgcnt, NULL);
+
+ TEST_SAY(_C_GRN
+ "Test produce retries by disconnect "
+ "(idempotence=%d,try_fail=%d,should_fail=%d): PASS\n",
+ idempotence, try_fail, should_fail);
+}
+
+
+int main_0076_produce_retry(int argc, char **argv) {
+ const char *topic = test_mk_topic_name("0076_produce_retry", 1);
+ const rd_bool_t has_idempotence =
+ test_broker_version >= TEST_BRKVER(0, 11, 0, 0);
+
+#if WITH_SOCKEM
+ if (has_idempotence) {
+ /* Idempotence, no try fail, should succeed. */
+ do_test_produce_retries(topic, 1, 0, 0);
+ /* Idempotence, try fail, should succeed. */
+ do_test_produce_retries(topic, 1, 1, 0);
+ }
+ /* No idempotence, try fail, should fail. */
+ do_test_produce_retries(topic, 0, 1, 1);
+#endif
+
+ if (has_idempotence) {
+ /* Idempotence, no try fail, should succeed. */
+ do_test_produce_retries_disconnect(topic, 1, 0, 0);
+ /* Idempotence, try fail, should succeed. */
+ do_test_produce_retries_disconnect(topic, 1, 1, 0);
+ }
+ /* No idempotence, try fail, should fail. */
+ do_test_produce_retries_disconnect(topic, 0, 1, 1);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c
new file mode 100644
index 000000000..01667114c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0077-compaction.c
@@ -0,0 +1,357 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * @brief Verify handling of compacted topics.
+ *
+ * General idea:
+ * - create a compacted topic with a low cleanup interval to promote quick
+ * compaction.
+ * - produce messages for 3 keys and interleave with unkeyed messages.
+ * interleave tombstones for k1 and k2, but not k3.
+ * - consume before compaction - verify all messages in place
+ * - wait for compaction
+ * - consume after compaction - verify expected messages.
+ */
+
+
+
+/**
+ * @brief Get low watermark in partition, we use this see if compaction
+ * has kicked in.
+ */
+static int64_t
+get_low_wmark(rd_kafka_t *rk, const char *topic, int32_t partition) {
+ rd_kafka_resp_err_t err;
+ int64_t low, high;
+
+ err = rd_kafka_query_watermark_offsets(rk, topic, partition, &low,
+ &high, tmout_multip(10000));
+
+ TEST_ASSERT(!err, "query_warmark_offsets(%s, %d) failed: %s", topic,
+ (int)partition, rd_kafka_err2str(err));
+
+ return low;
+}
+
+
+/**
+ * @brief Wait for compaction by checking for
+ * partition low-watermark increasing */
+static void wait_compaction(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int64_t low_offset,
+ int timeout_ms) {
+ int64_t low = -1;
+ int64_t ts_start = test_clock();
+
+ TEST_SAY(
+ "Waiting for compaction to kick in and increase the "
+ "Low watermark offset from %" PRId64 " on %s [%" PRId32 "]\n",
+ low_offset, topic, partition);
+
+ while (1) {
+ low = get_low_wmark(rk, topic, partition);
+
+ TEST_SAY("Low watermark offset for %s [%" PRId32
+ "] is "
+ "%" PRId64 " (want > %" PRId64 ")\n",
+ topic, partition, low, low_offset);
+
+ if (low > low_offset)
+ break;
+
+ if (ts_start + (timeout_ms * 1000) < test_clock())
+ break;
+
+ rd_sleep(5);
+ }
+}
+
+static void produce_compactable_msgs(const char *topic,
+ int32_t partition,
+ uint64_t testid,
+ int msgcnt,
+ size_t msgsize) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ int i;
+ char *val;
+ char key[16];
+ rd_kafka_resp_err_t err;
+ int msgcounter = msgcnt;
+
+ if (!testid)
+ testid = test_id_generate();
+
+ test_str_id_generate(key, sizeof(key));
+
+ val = calloc(1, msgsize);
+
+ TEST_SAY("Producing %d messages (total of %" PRIusz
+ " bytes) of "
+ "compactable messages\n",
+ msgcnt, (size_t)msgcnt * msgsize);
+
+ test_conf_init(&conf, NULL, 0);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ /* Make sure batch size does not exceed segment.bytes since that
+ * will make the ProduceRequest fail. */
+ test_conf_set(conf, "batch.num.messages", "1");
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ for (i = 0; i < msgcnt - 1; i++) {
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_KEY(key, sizeof(key) - 1),
+ RD_KAFKA_V_VALUE(val, msgsize),
+ RD_KAFKA_V_OPAQUE(&msgcounter),
+ RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev(): %s", rd_kafka_err2str(err));
+ }
+
+ /* Final message is the tombstone */
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_KEY(key, sizeof(key) - 1),
+ RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev(): %s", rd_kafka_err2str(err));
+
+ test_flush(rk, tmout_multip(10000));
+ TEST_ASSERT(msgcounter == 0, "%d messages unaccounted for", msgcounter);
+
+ rd_kafka_destroy(rk);
+
+ free(val);
+}
+
+
+
+static void do_test_compaction(int msgs_per_key, const char *compression) {
+ const char *topic = test_mk_topic_name(__FILE__, 1);
+#define _KEY_CNT 4
+ const char *keys[_KEY_CNT] = {"k1", "k2", "k3",
+ NULL /*generate unique*/};
+ int msgcnt = msgs_per_key * _KEY_CNT;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ uint64_t testid;
+ int32_t partition = 0;
+ int cnt = 0;
+ test_msgver_t mv;
+ test_msgver_t mv_correct;
+ int msgcounter = 0;
+ const int fillcnt = 20;
+
+ testid = test_id_generate();
+
+ TEST_SAY(
+ _C_MAG
+ "Test compaction on topic %s with %s compression (%d messages)\n",
+ topic, compression ? compression : "no", msgcnt);
+
+ test_kafka_topics(
+ "--create --topic \"%s\" "
+ "--partitions %d "
+ "--replication-factor 1 "
+ "--config cleanup.policy=compact "
+ "--config segment.ms=10000 "
+ "--config segment.bytes=10000 "
+ "--config min.cleanable.dirty.ratio=0.01 "
+ "--config delete.retention.ms=86400 "
+ "--config file.delete.delay.ms=10000 "
+ "--config max.compaction.lag.ms=100",
+ topic, partition + 1);
+
+ test_conf_init(&conf, NULL, 120);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ if (compression)
+ test_conf_set(conf, "compression.codec", compression);
+ /* Limit max batch size below segment.bytes to avoid messages
+ * to accumulate into a batch that will be rejected by the broker. */
+ test_conf_set(conf, "message.max.bytes", "6000");
+ test_conf_set(conf, "linger.ms", "10");
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ /* The low watermark is not updated on message deletion(compaction)
+ * but on segment deletion, so fill up the first segment with
+ * random messages eligible for hasty compaction. */
+ produce_compactable_msgs(topic, 0, partition, fillcnt, 1000);
+
+ /* Populate a correct msgver for later comparison after compact. */
+ test_msgver_init(&mv_correct, testid);
+
+ TEST_SAY("Producing %d messages for %d keys\n", msgcnt, _KEY_CNT);
+ for (cnt = 0; cnt < msgcnt;) {
+ int k;
+
+ for (k = 0; k < _KEY_CNT; k++) {
+ rd_kafka_resp_err_t err;
+ int is_last = cnt + _KEY_CNT >= msgcnt;
+ /* Let keys[0] have some tombstones */
+ int is_tombstone = (k == 0 && (is_last || !(cnt % 7)));
+ char *valp;
+ size_t valsize;
+ char rdk_msgid[256];
+ char unique_key[16];
+ const void *key;
+ size_t keysize;
+ int64_t offset = fillcnt + cnt;
+
+ test_msg_fmt(rdk_msgid, sizeof(rdk_msgid), testid,
+ partition, cnt);
+
+ if (is_tombstone) {
+ valp = NULL;
+ valsize = 0;
+ } else {
+ valp = rdk_msgid;
+ valsize = strlen(valp);
+ }
+
+ if (!(key = keys[k])) {
+ rd_snprintf(unique_key, sizeof(unique_key),
+ "%d", cnt);
+ key = unique_key;
+ }
+ keysize = strlen(key);
+
+ /* All unique-key messages should remain intact
+ * after compaction. */
+ if (!keys[k] || is_last) {
+ TEST_SAYL(4,
+ "Add to correct msgvec: "
+ "msgid: %d: %s is_last=%d, "
+ "is_tomb=%d\n",
+ cnt, (const char *)key, is_last,
+ is_tombstone);
+ test_msgver_add_msg00(
+ __FUNCTION__, __LINE__, rd_kafka_name(rk),
+ &mv_correct, testid, topic, partition,
+ offset, -1, -1, 0, cnt);
+ }
+
+
+ msgcounter++;
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_KEY(key, keysize),
+ RD_KAFKA_V_VALUE(valp, valsize),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_HEADER("rdk_msgid", rdk_msgid, -1),
+ /* msgcounter as msg_opaque is used
+ * by test delivery report callback to
+ * count number of messages. */
+ RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev(#%d) failed: %s", cnt,
+ rd_kafka_err2str(err));
+
+ cnt++;
+ }
+ }
+
+ TEST_ASSERT(cnt == msgcnt, "cnt %d != msgcnt %d", cnt, msgcnt);
+
+ msgcounter = cnt;
+ test_wait_delivery(rk, &msgcounter);
+
+ /* Trigger compaction by filling up the segment with dummy messages,
+ * do it in chunks to avoid too good compression which then won't
+ * fill up the segments..
+ * We can't reuse the existing producer instance because it
+ * might be using compression which makes it hard to know how
+ * much data we need to produce to trigger compaction. */
+ produce_compactable_msgs(topic, 0, partition, 20, 1024);
+
+ /* Wait for compaction:
+ * this doesn't really work because the low watermark offset
+ * is not updated on compaction if the first segment is not deleted.
+ * But it serves as a pause to let compaction kick in
+ * which is triggered by the dummy produce above. */
+ wait_compaction(rk, topic, partition, 0, 20 * 1000);
+
+ TEST_SAY(_C_YEL "Verify messages after compaction\n");
+ /* After compaction we expect the following messages:
+ * last message for each of k1, k2, k3, all messages for unkeyed. */
+ test_msgver_init(&mv, testid);
+ mv.msgid_hdr = "rdk_msgid";
+ test_consume_msgs_easy_mv(NULL, topic, -1, testid, 1, -1, NULL, &mv);
+ test_msgver_verify_compare("post-compaction", &mv, &mv_correct,
+ TEST_MSGVER_BY_MSGID |
+ TEST_MSGVER_BY_OFFSET);
+ test_msgver_clear(&mv);
+
+ test_msgver_clear(&mv_correct);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY(_C_GRN "Compaction test with %s compression: PASS\n",
+ compression ? compression : "no");
+}
+
+int main_0077_compaction(int argc, char **argv) {
+
+ if (!test_can_create_topics(1))
+ return 0;
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Test cluster requires authentication/SSL\n");
+ return 0;
+ }
+
+ do_test_compaction(10, NULL);
+
+ if (test_quick) {
+ TEST_SAY(
+ "Skipping further compaction tests "
+ "due to quick mode\n");
+ return 0;
+ }
+
+ do_test_compaction(1000, NULL);
+#if WITH_SNAPPY
+ do_test_compaction(10, "snappy");
+#endif
+#if WITH_ZSTD
+ do_test_compaction(10, "zstd");
+#endif
+#if WITH_ZLIB
+ do_test_compaction(10000, "gzip");
+#endif
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp
new file mode 100644
index 000000000..41d6886cb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0078-c_from_cpp.cpp
@@ -0,0 +1,96 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "rdkafka.h" /* Include before rdkafkacpp.h (from testcpp.h) */
+#include "testcpp.h"
+#include <cstring>
+
+/**
+ * @name Verify that the c_ptr()'s returned from C++ can be used
+ * to interact directly with the C API.
+ */
+
+
+extern "C" {
+int main_0078_c_from_cpp(int argc, char **argv) {
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ std::string errstr;
+
+ if (conf->set("client.id", "myclient", errstr))
+ Test::Fail("conf->set() failed: " + errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+ delete conf;
+
+ /*
+ * Acquire rd_kafka_t and compare its name to the configured client.id
+ */
+ rd_kafka_t *rk = p->c_ptr();
+ if (!rk)
+ Test::Fail("Failed to acquire c_ptr");
+
+ std::string name = p->name();
+ std::string c_name = rd_kafka_name(rk);
+
+ Test::Say("Compare C name " + c_name + " to C++ name " + name + "\n");
+ if (c_name != name)
+ Test::Fail("Expected C client name " + c_name + " to match C++ " + name);
+
+ /*
+ * Create topic object, acquire rd_kafka_topic_t and compare
+ * its topic name.
+ */
+
+ RdKafka::Topic *topic = RdKafka::Topic::create(p, "mytopic", NULL, errstr);
+ if (!topic)
+ Test::Fail("Failed to create Topic: " + errstr);
+
+ rd_kafka_topic_t *rkt = topic->c_ptr();
+ if (!rkt)
+ Test::Fail("Failed to acquire topic c_ptr");
+
+ std::string topicname = topic->name();
+ std::string c_topicname = rd_kafka_topic_name(rkt);
+
+ Test::Say("Compare C topic " + c_topicname + " to C++ topic " + topicname +
+ "\n");
+ if (c_topicname != topicname)
+ Test::Fail("Expected C topic " + c_topicname + " to match C++ topic " +
+ topicname);
+
+ delete topic;
+ delete p;
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c
new file mode 100644
index 000000000..506dd62a3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0079-fork.c
@@ -0,0 +1,93 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#ifndef _WIN32
+#include <unistd.h>
+#include <sys/wait.h>
+#endif
+
+/**
+ * @brief Forking a threaded process will not transfer threads (such as
+ * librdkafka's background threads) to the child process.
+ * There is no way such a forked client instance will work
+ * in the child process, but it should not crash on destruction: #1674
+ */
+
+int main_0079_fork(int argc, char **argv) {
+
+#if __SANITIZE_ADDRESS__
+ TEST_SKIP(
+ "AddressSanitizer is enabled: this test leaks memory (due to "
+ "fork())\n");
+ return 0;
+#endif
+#ifdef _WIN32
+ TEST_SKIP("No fork() support on Windows");
+ return 0;
+#else
+ pid_t pid;
+ rd_kafka_t *rk;
+ int status;
+
+ rk = test_create_producer();
+
+ rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+
+ pid = fork();
+ TEST_ASSERT(pid != 1, "fork() failed: %s", strerror(errno));
+
+ if (pid == 0) {
+ /* Child process */
+
+ /* This call will enqueue the message on a queue
+ * which is not served by any thread, but it should not crash */
+ rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("atopic"),
+ RD_KAFKA_V_VALUE("hello", 5), RD_KAFKA_V_END);
+
+ /* Don't crash on us */
+ rd_kafka_destroy(rk);
+
+ exit(0);
+ }
+
+ /* Parent process, wait for child to exit cleanly. */
+ if (waitpid(pid, &status, 0) == -1)
+ TEST_FAIL("waitpid(%d) failed: %s", (int)pid, strerror(errno));
+
+ if (!WIFEXITED(status) || WEXITSTATUS(status) != 0)
+ TEST_FAIL("child exited with status %d", WEXITSTATUS(status));
+
+ rd_kafka_destroy(rk);
+
+ return 0;
+#endif
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c
new file mode 100644
index 000000000..9d049e5b1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0080-admin_ut.c
@@ -0,0 +1,2535 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * @brief Admin API local dry-run unit-tests.
+ */
+
+#define MY_SOCKET_TIMEOUT_MS 100
+#define MY_SOCKET_TIMEOUT_MS_STR "100"
+
+
+
+static mtx_t last_event_lock;
+static cnd_t last_event_cnd;
+static rd_kafka_event_t *last_event = NULL;
+
+/**
+ * @brief The background event callback is called automatically
+ * by librdkafka from a background thread.
+ */
+static void
+background_event_cb(rd_kafka_t *rk, rd_kafka_event_t *rkev, void *opaque) {
+ mtx_lock(&last_event_lock);
+ TEST_ASSERT(!last_event,
+ "Multiple events seen in background_event_cb "
+ "(existing %s, new %s)",
+ rd_kafka_event_name(last_event), rd_kafka_event_name(rkev));
+ last_event = rkev;
+ mtx_unlock(&last_event_lock);
+ cnd_broadcast(&last_event_cnd);
+ rd_sleep(1);
+}
+
+static rd_kafka_event_t *wait_background_event_cb(void) {
+ rd_kafka_event_t *rkev;
+ mtx_lock(&last_event_lock);
+ while (!(rkev = last_event))
+ cnd_wait(&last_event_cnd, &last_event_lock);
+ last_event = NULL;
+ mtx_unlock(&last_event_lock);
+
+ return rkev;
+}
+
+
+/**
+ * @brief CreateTopics tests
+ *
+ *
+ *
+ */
+static void do_test_CreateTopics(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_background_event_cb,
+ int with_options) {
+ rd_kafka_queue_t *q;
+#define MY_NEW_TOPICS_CNT 6
+ rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_CreateTopics_result_t *res;
+ const rd_kafka_topic_result_t **restopics;
+ size_t restopic_cnt;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s CreateTopics with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct NewTopic array with different properties for
+ * different partitions.
+ */
+ for (i = 0; i < MY_NEW_TOPICS_CNT; i++) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ int num_parts = i * 51 + 1;
+ int num_replicas = jitter(1, MY_NEW_TOPICS_CNT - 1);
+ int set_config = (i & 2);
+ int set_replicas = !(i % 1);
+
+ new_topics[i] = rd_kafka_NewTopic_new(
+ topic, num_parts, set_replicas ? -1 : num_replicas, NULL,
+ 0);
+
+ if (set_config) {
+ /*
+ * Add various (unverified) configuration properties
+ */
+ err = rd_kafka_NewTopic_set_config(new_topics[i],
+ "dummy.doesntexist",
+ "butThere'sNothing "
+ "to verify that");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ err = rd_kafka_NewTopic_set_config(
+ new_topics[i], "try.a.null.value", NULL);
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ err = rd_kafka_NewTopic_set_config(new_topics[i],
+ "or.empty", "");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ if (set_replicas) {
+ int32_t p;
+ int32_t replicas[MY_NEW_TOPICS_CNT];
+ int j;
+
+ for (j = 0; j < num_replicas; j++)
+ replicas[j] = j;
+
+ /*
+ * Set valid replica assignments
+ */
+ for (p = 0; p < num_parts; p++) {
+ /* Try adding an existing out of order,
+ * should fail */
+ if (p == 1) {
+ err =
+ rd_kafka_NewTopic_set_replica_assignment(
+ new_topics[i], p + 1, replicas,
+ num_replicas, errstr,
+ sizeof(errstr));
+ TEST_ASSERT(
+ err ==
+ RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "%s", rd_kafka_err2str(err));
+ }
+
+ err = rd_kafka_NewTopic_set_replica_assignment(
+ new_topics[i], p, replicas, num_replicas,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+ }
+
+ /* Try to add an existing partition, should fail */
+ err = rd_kafka_NewTopic_set_replica_assignment(
+ new_topics[i], 0, replicas, num_replicas, NULL, 0);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG, "%s",
+ rd_kafka_err2str(err));
+
+ } else {
+ int32_t dummy_replicas[1] = {1};
+
+ /* Test invalid partition */
+ err = rd_kafka_NewTopic_set_replica_assignment(
+ new_topics[i], num_parts + 1, dummy_replicas, 1,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "%s: %s", rd_kafka_err2str(err),
+ err == RD_KAFKA_RESP_ERR_NO_ERROR ? ""
+ : errstr);
+
+ /* Setting replicas with with default replicas != -1
+ * is an error. */
+ err = rd_kafka_NewTopic_set_replica_assignment(
+ new_topics[i], 0, dummy_replicas, 1, errstr,
+ sizeof(errstr));
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "%s: %s", rd_kafka_err2str(err),
+ err == RD_KAFKA_RESP_ERR_NO_ERROR ? ""
+ : errstr);
+ }
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ my_opaque = (void *)123;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+
+ TIMING_START(&timing, "CreateTopics");
+ TEST_SAY("Call CreateTopics, timeout is %dms\n", exp_timeout);
+ rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (with_background_event_cb) {
+ /* Result event will be triggered by callback from
+ * librdkafka background queue thread. */
+ TIMING_START(&timing, "CreateTopics.wait_background_event_cb");
+ rkev = wait_background_event_cb();
+ } else {
+ /* Poll result queue */
+ TIMING_START(&timing, "CreateTopics.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ }
+
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("CreateTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_CreateTopics_result(rkev);
+ TEST_ASSERT(res, "expected CreateTopics_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected CreateTopics to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract topics anyway, should return NULL. */
+ restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);
+ TEST_ASSERT(!restopics && restopic_cnt == 0,
+ "expected no result_topics, got %p cnt %" PRIusz, restopics,
+ restopic_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_NewTopic_destroy_array(new_topics, MY_NEW_TOPICS_CNT);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief DeleteTopics tests
+ *
+ *
+ *
+ */
+static void do_test_DeleteTopics(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options) {
+ rd_kafka_queue_t *q;
+#define MY_DEL_TOPICS_CNT 4
+ rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteTopics_result_t *res;
+ const rd_kafka_topic_result_t **restopics;
+ size_t restopic_cnt;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s DeleteTopics with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ for (i = 0; i < MY_DEL_TOPICS_CNT; i++)
+ del_topics[i] = rd_kafka_DeleteTopic_new(
+ test_mk_topic_name(__FUNCTION__, 1));
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (useq) {
+ my_opaque = (void *)456;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ TIMING_START(&timing, "DeleteTopics");
+ TEST_SAY("Call DeleteTopics, timeout is %dms\n", exp_timeout);
+ rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /* Poll result queue */
+ TIMING_START(&timing, "DeleteTopics.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DeleteTopics: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteTopics_result(rkev);
+ TEST_ASSERT(res, "expected DeleteTopics_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected DeleteTopics to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract topics anyway, should return NULL. */
+ restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt);
+ TEST_ASSERT(!restopics && restopic_cnt == 0,
+ "expected no result_topics, got %p cnt %" PRIusz, restopics,
+ restopic_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_DeleteTopic_destroy_array(del_topics, MY_DEL_TOPICS_CNT);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+#undef MY_DEL_TOPICS_CNT
+
+ SUB_TEST_QUICK();
+}
+
+/**
+ * @brief DeleteGroups tests
+ *
+ *
+ *
+ */
+static void do_test_DeleteGroups(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options,
+ rd_bool_t destroy) {
+ rd_kafka_queue_t *q;
+#define MY_DEL_GROUPS_CNT 4
+ char *group_names[MY_DEL_GROUPS_CNT];
+ rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteGroups_result_t *res;
+ const rd_kafka_group_result_t **resgroups;
+ size_t resgroup_cnt;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s DeleteGroups with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
+ group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ del_groups[i] = rd_kafka_DeleteGroup_new(group_names[i]);
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (useq) {
+ my_opaque = (void *)456;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ TIMING_START(&timing, "DeleteGroups");
+ TEST_SAY("Call DeleteGroups, timeout is %dms\n", exp_timeout);
+ rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (destroy)
+ goto destroy;
+
+ /* Poll result queue */
+ TIMING_START(&timing, "DeleteGroups.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DeleteGroups: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteGroups_result(rkev);
+ TEST_ASSERT(res, "expected DeleteGroups_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting no error (errors will be per-group) */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR_NO_ERROR,
+ "expected DeleteGroups to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Extract groups, should return MY_DEL_GROUPS_CNT groups. */
+ resgroups = rd_kafka_DeleteGroups_result_groups(res, &resgroup_cnt);
+ TEST_ASSERT(resgroups && resgroup_cnt == MY_DEL_GROUPS_CNT,
+ "expected %d result_groups, got %p cnt %" PRIusz,
+ MY_DEL_GROUPS_CNT, resgroups, resgroup_cnt);
+
+ /* The returned groups should be in the original order, and
+ * should all have timed out. */
+ for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
+ TEST_ASSERT(!strcmp(group_names[i],
+ rd_kafka_group_result_name(resgroups[i])),
+ "expected group '%s' at position %d, not '%s'",
+ group_names[i], i,
+ rd_kafka_group_result_name(resgroups[i]));
+ TEST_ASSERT(rd_kafka_error_code(rd_kafka_group_result_error(
+ resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected group '%s' to have timed out, got %s",
+ group_names[i],
+ rd_kafka_error_string(
+ rd_kafka_group_result_error(resgroups[i])));
+ }
+
+ rd_kafka_event_destroy(rkev);
+
+destroy:
+ for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
+ rd_kafka_DeleteGroup_destroy(del_groups[i]);
+ rd_free(group_names[i]);
+ }
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+#undef MY_DEL_GROUPS_CNT
+
+ SUB_TEST_QUICK();
+}
+
+/**
+ * @brief ListConsumerGroups tests
+ *
+ *
+ *
+ */
+static void do_test_ListConsumerGroups(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options,
+ rd_bool_t destroy) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_ListConsumerGroups_result_t *res;
+ const rd_kafka_error_t **errors;
+ size_t errors_cnt, valid_cnt;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s ListConsumerGroups with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (with_options) {
+ rd_kafka_consumer_group_state_t duplicate[2] = {
+ RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY,
+ RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY};
+
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
+
+ /* Test duplicate error on match states */
+ rd_kafka_error_t *error =
+ rd_kafka_AdminOptions_set_match_consumer_group_states(
+ options, duplicate, 2);
+ TEST_ASSERT(error && rd_kafka_error_code(error), "%s",
+ "Expected error on duplicate states,"
+ " got no error");
+ rd_kafka_error_destroy(error);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr)));
+
+ if (useq) {
+ my_opaque = (void *)456;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ TIMING_START(&timing, "ListConsumerGroups");
+ TEST_SAY("Call ListConsumerGroups, timeout is %dms\n", exp_timeout);
+ rd_kafka_ListConsumerGroups(rk, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (destroy)
+ goto destroy;
+
+ /* Poll result queue */
+ TIMING_START(&timing, "ListConsumerGroups.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("ListConsumerGroups: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_ListConsumerGroups_result(rkev);
+ TEST_ASSERT(res, "expected ListConsumerGroups_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting no error here, the real error will be in the error array */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(
+ err == RD_KAFKA_RESP_ERR_NO_ERROR,
+ "expected ListConsumerGroups to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ errors = rd_kafka_ListConsumerGroups_result_errors(rkev, &errors_cnt);
+ TEST_ASSERT(errors_cnt == 1, "expected one error, got %" PRIusz,
+ errors_cnt);
+ rd_kafka_ListConsumerGroups_result_valid(rkev, &valid_cnt);
+ TEST_ASSERT(valid_cnt == 0, "expected zero valid groups, got %" PRIusz,
+ valid_cnt);
+
+ err = rd_kafka_error_code(errors[0]);
+ errstr2 = rd_kafka_error_string(errors[0]);
+ TEST_ASSERT(
+ err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected ListConsumerGroups to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ rd_kafka_event_destroy(rkev);
+
+destroy:
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief DescribeConsumerGroups tests
+ *
+ *
+ *
+ */
+static void do_test_DescribeConsumerGroups(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options,
+ rd_bool_t destroy) {
+ rd_kafka_queue_t *q;
+#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4
+ const char *group_names[TEST_DESCRIBE_CONSUMER_GROUPS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteGroups_result_t *res;
+ const rd_kafka_ConsumerGroupDescription_t **resgroups;
+ size_t resgroup_cnt;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
+ group_names[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (useq) {
+ my_opaque = (void *)456;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ TIMING_START(&timing, "DescribeConsumerGroups");
+ TEST_SAY("Call DescribeConsumerGroups, timeout is %dms\n", exp_timeout);
+ rd_kafka_DescribeConsumerGroups(
+ rk, group_names, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (destroy)
+ goto destroy;
+
+ /* Poll result queue */
+ TIMING_START(&timing, "DescribeConsumerGroups.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DescribeConsumerGroups: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DescribeConsumerGroups_result(rkev);
+ TEST_ASSERT(res, "expected DescribeConsumerGroups_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting no error (errors will be per-group) */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(
+ err == RD_KAFKA_RESP_ERR_NO_ERROR,
+ "expected DescribeConsumerGroups to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR_NO_ERROR), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ /* Extract groups, should return TEST_DESCRIBE_GROUPS_CNT groups. */
+ resgroups =
+ rd_kafka_DescribeConsumerGroups_result_groups(res, &resgroup_cnt);
+ TEST_ASSERT(resgroups &&
+ resgroup_cnt == TEST_DESCRIBE_CONSUMER_GROUPS_CNT,
+ "expected %d result_groups, got %p cnt %" PRIusz,
+ TEST_DESCRIBE_CONSUMER_GROUPS_CNT, resgroups, resgroup_cnt);
+
+ /* The returned groups should be in the original order, and
+ * should all have timed out. */
+ for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
+ TEST_ASSERT(
+ !strcmp(group_names[i],
+ rd_kafka_ConsumerGroupDescription_group_id(
+ resgroups[i])),
+ "expected group '%s' at position %d, not '%s'",
+ group_names[i], i,
+ rd_kafka_ConsumerGroupDescription_group_id(resgroups[i]));
+ TEST_ASSERT(
+ rd_kafka_error_code(rd_kafka_ConsumerGroupDescription_error(
+ resgroups[i])) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected group '%s' to have timed out, got %s",
+ group_names[i],
+ rd_kafka_error_string(
+ rd_kafka_ConsumerGroupDescription_error(resgroups[i])));
+ }
+
+ rd_kafka_event_destroy(rkev);
+
+destroy:
+ for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
+ rd_free((char *)group_names[i]);
+ }
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT
+
+ SUB_TEST_PASS();
+}
+
+static void do_test_DeleteRecords(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options,
+ rd_bool_t destroy) {
+ rd_kafka_queue_t *q;
+#define MY_DEL_RECORDS_CNT 4
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_topic_partition_list_t *offsets = NULL;
+ rd_kafka_DeleteRecords_t *del_records;
+ const rd_kafka_DeleteRecords_result_t *res;
+ char *topics[MY_DEL_RECORDS_CNT];
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s DeleteRecords with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
+ topics[i] = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DELETERECORDS);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (useq) {
+ my_opaque = (void *)4567;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ offsets = rd_kafka_topic_partition_list_new(MY_DEL_RECORDS_CNT);
+
+ for (i = 0; i < MY_DEL_RECORDS_CNT; i++)
+ rd_kafka_topic_partition_list_add(offsets, topics[i], i)
+ ->offset = RD_KAFKA_OFFSET_END;
+
+ del_records = rd_kafka_DeleteRecords_new(offsets);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ TIMING_START(&timing, "DeleteRecords");
+ TEST_SAY("Call DeleteRecords, timeout is %dms\n", exp_timeout);
+ rd_kafka_DeleteRecords(rk, &del_records, 1, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 10);
+
+ rd_kafka_DeleteRecords_destroy(del_records);
+
+ if (destroy)
+ goto destroy;
+
+ /* Poll result queue */
+ TIMING_START(&timing, "DeleteRecords.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DeleteRecords: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteRecords_result(rkev);
+ TEST_ASSERT(res, "expected DeleteRecords_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error (pre-fanout leader_req will fail) */
+ err = rd_kafka_event_error(rkev);
+ TEST_ASSERT(err, "expected DeleteRecords to fail");
+
+ rd_kafka_event_destroy(rkev);
+
+destroy:
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ for (i = 0; i < MY_DEL_RECORDS_CNT; i++)
+ rd_free(topics[i]);
+
+#undef MY_DEL_RECORDS_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_test_DeleteConsumerGroupOffsets(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options) {
+ rd_kafka_queue_t *q;
+#define MY_DEL_CGRPOFFS_CNT 1
+ rd_kafka_AdminOptions_t *options = NULL;
+ const rd_kafka_DeleteConsumerGroupOffsets_result_t *res;
+ rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets[MY_DEL_CGRPOFFS_CNT];
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s DeleteConsumerGroupOffsets with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ for (i = 0; i < MY_DEL_CGRPOFFS_CNT; i++) {
+ rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_topic_partition_list_new(3);
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
+ rd_kafka_topic_partition_list_add(partitions, "topic3", 15);
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 1);
+ cgoffsets[i] = rd_kafka_DeleteConsumerGroupOffsets_new(
+ "mygroup", partitions);
+ rd_kafka_topic_partition_list_destroy(partitions);
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (useq) {
+ my_opaque = (void *)99981;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ TIMING_START(&timing, "DeleteConsumerGroupOffsets");
+ TEST_SAY("Call DeleteConsumerGroupOffsets, timeout is %dms\n",
+ exp_timeout);
+ rd_kafka_DeleteConsumerGroupOffsets(rk, cgoffsets, MY_DEL_CGRPOFFS_CNT,
+ options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 10);
+
+ /* Poll result queue */
+ TIMING_START(&timing, "DeleteConsumerGroupOffsets.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected DeleteConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ TEST_ASSERT(err, "expected DeleteConsumerGroupOffsets to fail");
+
+ rd_kafka_event_destroy(rkev);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ rd_kafka_DeleteConsumerGroupOffsets_destroy_array(cgoffsets,
+ MY_DEL_CGRPOFFS_CNT);
+
+#undef MY_DEL_CGRPOFFS_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief AclBinding tests
+ *
+ *
+ *
+ */
+static void do_test_AclBinding() {
+ int i;
+ char errstr[512];
+ rd_kafka_AclBinding_t *new_acl;
+
+ rd_bool_t valid_resource_types[] = {rd_false, rd_false, rd_true,
+ rd_true, rd_true, rd_false};
+ rd_bool_t valid_resource_pattern_types[] = {
+ rd_false, rd_false, rd_false, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_operation[] = {
+ rd_false, rd_false, rd_true, rd_true, rd_true, rd_true, rd_true,
+ rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_permission_type[] = {rd_false, rd_false, rd_true,
+ rd_true, rd_false};
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK();
+
+ // Valid acl binding
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid resource name"),
+ "expected error string \"Invalid resource name\", not %s",
+ errstr);
+
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ NULL, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid principal"),
+ "expected error string \"Invalid principal\", not %s",
+ errstr);
+
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, NULL, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(!new_acl && !strcmp(errstr, "Invalid host"),
+ "expected error string \"Invalid host\", not %s", errstr);
+
+ for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal,
+ host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_types[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(
+ !new_acl &&
+ !strcmp(errstr, "Invalid resource type"),
+ "expected error string \"Invalid resource type\", "
+ "not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_pattern_types[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(
+ !new_acl &&
+ !strcmp(errstr,
+ "Invalid resource pattern type"),
+ "expected error string \"Invalid resource pattern "
+ "type\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_operation[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(!new_acl &&
+ !strcmp(errstr, "Invalid operation"),
+ "expected error string \"Invalid "
+ "operation\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_permission_type[i]) {
+ TEST_ASSERT(new_acl, "expected AclBinding");
+ rd_kafka_AclBinding_destroy(new_acl);
+ } else
+ TEST_ASSERT(
+ !new_acl &&
+ !strcmp(errstr, "Invalid permission type"),
+ "expected error string \"permission type\", not %s",
+ errstr);
+ }
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief AclBindingFilter tests
+ *
+ *
+ *
+ */
+static void do_test_AclBindingFilter() {
+ int i;
+ char errstr[512];
+ rd_kafka_AclBindingFilter_t *new_acl_filter;
+
+ rd_bool_t valid_resource_types[] = {rd_false, rd_true, rd_true,
+ rd_true, rd_true, rd_false};
+ rd_bool_t valid_resource_pattern_types[] = {
+ rd_false, rd_true, rd_true, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_operation[] = {
+ rd_false, rd_true, rd_true, rd_true, rd_true, rd_true, rd_true,
+ rd_true, rd_true, rd_true, rd_true, rd_true, rd_true, rd_false};
+ rd_bool_t valid_acl_permission_type[] = {rd_false, rd_true, rd_true,
+ rd_true, rd_false};
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK();
+
+ // Valid acl binding
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, NULL, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ NULL, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ principal, NULL, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ TEST_ASSERT(new_acl_filter, "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+
+ for (i = -1; i <= RD_KAFKA_RESOURCE__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ i, topic, RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal,
+ host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_types[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(
+ !new_acl_filter &&
+ !strcmp(errstr, "Invalid resource type"),
+ "expected error string \"Invalid resource type\", "
+ "not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_RESOURCE_PATTERN_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, i, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_resource_pattern_types[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(
+ !new_acl_filter &&
+ !strcmp(errstr,
+ "Invalid resource pattern type"),
+ "expected error string \"Invalid resource pattern "
+ "type\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_OPERATION__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host, i,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_operation[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(!new_acl_filter &&
+ !strcmp(errstr, "Invalid operation"),
+ "expected error string \"Invalid "
+ "operation\", not %s",
+ errstr);
+ }
+ for (i = -1; i <= RD_KAFKA_ACL_PERMISSION_TYPE__CNT; i++) {
+ *errstr = '\0';
+ new_acl_filter = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL, i, errstr, sizeof(errstr));
+ if (i >= 0 && valid_acl_permission_type[i]) {
+ TEST_ASSERT(new_acl_filter,
+ "expected AclBindingFilter");
+ rd_kafka_AclBinding_destroy(new_acl_filter);
+ } else
+ TEST_ASSERT(
+ !new_acl_filter &&
+ !strcmp(errstr, "Invalid permission type"),
+ "expected error string \"permission type\", not %s",
+ errstr);
+ }
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief CreateAcls tests
+ *
+ *
+ *
+ */
+static void do_test_CreateAcls(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_bool_t with_background_event_cb,
+ rd_bool_t with_options) {
+ rd_kafka_queue_t *q;
+#define MY_NEW_ACLS_CNT 2
+ rd_kafka_AclBinding_t *new_acls[MY_NEW_ACLS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_CreateAcls_result_t *res;
+ const rd_kafka_acl_result_t **resacls;
+ size_t resacls_cnt;
+ void *my_opaque = NULL, *opaque;
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK("%s CreaetAcls with %s, timeout %dms", rd_kafka_name(rk),
+ what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct AclBinding array
+ */
+ for (i = 0; i < MY_NEW_ACLS_CNT; i++) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ new_acls[i] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ my_opaque = (void *)123;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+
+ TIMING_START(&timing, "CreateAcls");
+ TEST_SAY("Call CreateAcls, timeout is %dms\n", exp_timeout);
+ rd_kafka_CreateAcls(rk, new_acls, MY_NEW_ACLS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (with_background_event_cb) {
+ /* Result event will be triggered by callback from
+ * librdkafka background queue thread. */
+ TIMING_START(&timing, "CreateAcls.wait_background_event_cb");
+ rkev = wait_background_event_cb();
+ } else {
+ /* Poll result queue */
+ TIMING_START(&timing, "CreateAcls.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ }
+
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("CreateAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_CreateAcls_result(rkev);
+ TEST_ASSERT(res, "expected CreateAcls_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected CreateAcls to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract acls results anyway, should return NULL. */
+ resacls = rd_kafka_CreateAcls_result_acls(res, &resacls_cnt);
+ TEST_ASSERT(!resacls && resacls_cnt == 0,
+ "expected no acl result, got %p cnt %" PRIusz, resacls,
+ resacls_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_AclBinding_destroy_array(new_acls, MY_NEW_ACLS_CNT);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+#undef MY_NEW_ACLS_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief DescribeAcls tests
+ *
+ *
+ *
+ */
+static void do_test_DescribeAcls(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_bool_t with_background_event_cb,
+ rd_bool_t with_options) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AclBindingFilter_t *describe_acls;
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DescribeAcls_result_t *res;
+ const rd_kafka_AclBinding_t **res_acls;
+ size_t res_acls_cnt;
+ void *my_opaque = NULL, *opaque;
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK("%s DescribeAcls with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct AclBindingFilter
+ */
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ describe_acls = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic, RD_KAFKA_RESOURCE_PATTERN_PREFIXED,
+ principal, host, RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ my_opaque = (void *)123;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+
+ TIMING_START(&timing, "DescribeAcls");
+ TEST_SAY("Call DescribeAcls, timeout is %dms\n", exp_timeout);
+ rd_kafka_DescribeAcls(rk, describe_acls, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (with_background_event_cb) {
+ /* Result event will be triggered by callback from
+ * librdkafka background queue thread. */
+ TIMING_START(&timing, "DescribeAcls.wait_background_event_cb");
+ rkev = wait_background_event_cb();
+ } else {
+ /* Poll result queue */
+ TIMING_START(&timing, "DescribeAcls.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ }
+
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DescribeAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DescribeAcls_result(rkev);
+ TEST_ASSERT(res, "expected DescribeAcls_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected DescribeAcls to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract result acls anyway, should return NULL. */
+ res_acls = rd_kafka_DescribeAcls_result_acls(res, &res_acls_cnt);
+ TEST_ASSERT(!res_acls && res_acls_cnt == 0,
+ "expected no result acls, got %p cnt %" PRIusz, res_acls,
+ res_acls_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_AclBinding_destroy(describe_acls);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief DeleteAcls tests
+ *
+ *
+ *
+ */
+static void do_test_DeleteAcls(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_bool_t with_background_event_cb,
+ rd_bool_t with_options) {
+#define DELETE_ACLS_FILTERS_CNT 2
+ rd_kafka_queue_t *q;
+ rd_kafka_AclBindingFilter_t *delete_acls[DELETE_ACLS_FILTERS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteAcls_result_t *res;
+ const rd_kafka_DeleteAcls_result_response_t **res_response;
+ size_t res_response_cnt;
+ void *my_opaque = NULL, *opaque;
+ const char *principal = "User:test";
+ const char *host = "*";
+
+ SUB_TEST_QUICK("%s DeleteAcls with %s, timeout %dms", rd_kafka_name(rk),
+ what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct AclBindingFilter array
+ */
+ for (i = 0; i < DELETE_ACLS_FILTERS_CNT; i++) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ delete_acls[i] = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic,
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED, principal, host,
+ RD_KAFKA_ACL_OPERATION_ALL,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, errstr, sizeof(errstr));
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ my_opaque = (void *)123;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+
+ TIMING_START(&timing, "DeleteAcls");
+ TEST_SAY("Call DeleteAcls, timeout is %dms\n", exp_timeout);
+ rd_kafka_DeleteAcls(rk, delete_acls, DELETE_ACLS_FILTERS_CNT, options,
+ q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ if (with_background_event_cb) {
+ /* Result event will be triggered by callback from
+ * librdkafka background queue thread. */
+ TIMING_START(&timing, "DeleteAcls.wait_background_event_cb");
+ rkev = wait_background_event_cb();
+ } else {
+ /* Poll result queue */
+ TIMING_START(&timing, "DeleteAcls.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ }
+
+ TIMING_ASSERT_LATER(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("DeleteAcls: got %s in %.3fs\n", rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteAcls_result(rkev);
+ TEST_ASSERT(res, "expected DeleteAcls_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected DeleteAcls to return error %s, not %s (%s)",
+ rd_kafka_err2str(RD_KAFKA_RESP_ERR__TIMED_OUT),
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ /* Attempt to extract result responses anyway, should return NULL. */
+ res_response =
+ rd_kafka_DeleteAcls_result_responses(res, &res_response_cnt);
+ TEST_ASSERT(!res_response && res_response_cnt == 0,
+ "expected no result response, got %p cnt %" PRIusz,
+ res_response, res_response_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_AclBinding_destroy_array(delete_acls, DELETE_ACLS_FILTERS_CNT);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+#undef DELETE_ACLS_FILTERS_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_test_AlterConsumerGroupOffsets(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options) {
+ rd_kafka_queue_t *q;
+#define MY_ALTER_CGRPOFFS_CNT 1
+ rd_kafka_AdminOptions_t *options = NULL;
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
+ rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets[MY_ALTER_CGRPOFFS_CNT];
+ rd_kafka_AlterConsumerGroupOffsets_t
+ *cgoffsets_empty[MY_ALTER_CGRPOFFS_CNT];
+ rd_kafka_AlterConsumerGroupOffsets_t
+ *cgoffsets_negative[MY_ALTER_CGRPOFFS_CNT];
+ rd_kafka_AlterConsumerGroupOffsets_t
+ *cgoffsets_duplicate[MY_ALTER_CGRPOFFS_CNT];
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ void *my_opaque = NULL, *opaque;
+
+ SUB_TEST_QUICK("%s AlterConsumerGroupOffsets with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ for (i = 0; i < MY_ALTER_CGRPOFFS_CNT; i++) {
+ /* Call with three correct topic partitions. */
+ rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_topic_partition_list_new(3);
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 9)
+ ->offset = 9;
+ rd_kafka_topic_partition_list_add(partitions, "topic3", 15)
+ ->offset = 15;
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 1)
+ ->offset = 1;
+ cgoffsets[i] = rd_kafka_AlterConsumerGroupOffsets_new(
+ "mygroup", partitions);
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ /* Call with empty topic-partition list. */
+ rd_kafka_topic_partition_list_t *partitions_empty =
+ rd_kafka_topic_partition_list_new(0);
+ cgoffsets_empty[i] = rd_kafka_AlterConsumerGroupOffsets_new(
+ "mygroup", partitions_empty);
+ rd_kafka_topic_partition_list_destroy(partitions_empty);
+
+ /* Call with a topic-partition having negative offset. */
+ rd_kafka_topic_partition_list_t *partitions_negative =
+ rd_kafka_topic_partition_list_new(4);
+ rd_kafka_topic_partition_list_add(partitions_negative, "topic1",
+ 9)
+ ->offset = 9;
+ rd_kafka_topic_partition_list_add(partitions_negative, "topic3",
+ 15)
+ ->offset = 15;
+ rd_kafka_topic_partition_list_add(partitions_negative, "topic1",
+ 1)
+ ->offset = 1;
+ rd_kafka_topic_partition_list_add(partitions_negative, "topic1",
+ 2)
+ ->offset = -3;
+ cgoffsets_negative[i] = rd_kafka_AlterConsumerGroupOffsets_new(
+ "mygroup", partitions_negative);
+ rd_kafka_topic_partition_list_destroy(partitions_negative);
+
+ /* Call with duplicate partitions. */
+ rd_kafka_topic_partition_list_t *partitions_duplicate =
+ rd_kafka_topic_partition_list_new(3);
+ rd_kafka_topic_partition_list_add(partitions_duplicate,
+ "topic1", 9)
+ ->offset = 9;
+ rd_kafka_topic_partition_list_add(partitions_duplicate,
+ "topic3", 15)
+ ->offset = 15;
+ rd_kafka_topic_partition_list_add(partitions_duplicate,
+ "topic1", 9)
+ ->offset = 1;
+
+ cgoffsets_duplicate[i] = rd_kafka_AlterConsumerGroupOffsets_new(
+ "mygroup", partitions_duplicate);
+ rd_kafka_topic_partition_list_destroy(partitions_duplicate);
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (useq) {
+ my_opaque = (void *)99981;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ /* Empty topic-partition list */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets");
+ TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
+ exp_timeout);
+ rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_empty,
+ MY_ALTER_CGRPOFFS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 10);
+ rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_empty,
+ MY_ALTER_CGRPOFFS_CNT);
+
+ /* Poll result queue */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT(&timing, 0, 10);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+ /* Convert event to proper result */
+ res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ const char *event_errstr_empty = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(strcmp(event_errstr_empty,
+ "Non-empty topic partition list must be present") ==
+ 0,
+ "expected \"Non-empty topic partition list must be "
+ "present\", not \"%s\"",
+ event_errstr_empty);
+ rd_kafka_event_destroy(rkev);
+
+ /* Negative topic-partition offset */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets");
+ TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
+ exp_timeout);
+ rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_negative,
+ MY_ALTER_CGRPOFFS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 10);
+ rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_negative,
+ MY_ALTER_CGRPOFFS_CNT);
+ /* Poll result queue */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT(&timing, 0, 10);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+ /* Convert event to proper result */
+ res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ const char *event_errstr_negative = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(
+ strcmp(event_errstr_negative,
+ "All topic-partition offsets must be >= 0") == 0,
+ "expected \"All topic-partition offsets must be >= 0\", not \"%s\"",
+ event_errstr_negative);
+ rd_kafka_event_destroy(rkev);
+
+ /* Duplicate topic-partition offset */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets");
+ TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
+ exp_timeout);
+ rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets_duplicate,
+ MY_ALTER_CGRPOFFS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 10);
+ rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets_duplicate,
+ MY_ALTER_CGRPOFFS_CNT);
+ /* Poll result queue */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT(&timing, 0, 10);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+ /* Convert event to proper result */
+ res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ const char *event_errstr_duplicate = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__INVALID_ARG,
+ "expected RD_KAFKA_RESP_ERR__INVALID_ARG, not %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(strcmp(event_errstr_duplicate,
+ "Duplicate partitions not allowed") == 0,
+ "expected \"Duplicate partitions not allowed\", not \"%s\"",
+ event_errstr_duplicate);
+ rd_kafka_event_destroy(rkev);
+
+ /* Correct topic-partition list, local timeout */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets");
+ TEST_SAY("Call AlterConsumerGroupOffsets, timeout is %dms\n",
+ exp_timeout);
+ rd_kafka_AlterConsumerGroupOffsets(rk, cgoffsets, MY_ALTER_CGRPOFFS_CNT,
+ options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 10);
+ /* Poll result queue */
+ TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+ /* Convert event to proper result */
+ res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ const char *event_errstr = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err, "expected AlterConsumerGroupOffsets to fail");
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected RD_KAFKA_RESP_ERR__TIMED_OUT, not %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(strcmp(event_errstr,
+ "Failed while waiting for response from broker: "
+ "Local: Timed out") == 0,
+ "expected \"Failed while waiting for response from broker: "
+ "Local: Timed out\", not \"%s\"",
+ event_errstr);
+ rd_kafka_event_destroy(rkev);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ rd_kafka_AlterConsumerGroupOffsets_destroy_array(cgoffsets,
+ MY_ALTER_CGRPOFFS_CNT);
+
+#undef MY_ALTER_CGRPOFFS_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_test_ListConsumerGroupOffsets(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int with_options,
+ rd_bool_t null_toppars) {
+ rd_kafka_queue_t *q;
+#define MY_LIST_CGRPOFFS_CNT 1
+ rd_kafka_AdminOptions_t *options = NULL;
+ const rd_kafka_ListConsumerGroupOffsets_result_t *res;
+ rd_kafka_ListConsumerGroupOffsets_t *cgoffsets[MY_LIST_CGRPOFFS_CNT];
+ rd_kafka_ListConsumerGroupOffsets_t
+ *cgoffsets_empty[MY_LIST_CGRPOFFS_CNT];
+ rd_kafka_ListConsumerGroupOffsets_t
+ *cgoffsets_duplicate[MY_LIST_CGRPOFFS_CNT];
+ int exp_timeout = MY_SOCKET_TIMEOUT_MS;
+ int i;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ void *my_opaque = NULL, *opaque;
+ const char *errstr_ptr;
+
+ SUB_TEST_QUICK("%s ListConsumerGroupOffsets with %s, timeout %dms",
+ rd_kafka_name(rk), what, exp_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ for (i = 0; i < MY_LIST_CGRPOFFS_CNT; i++) {
+ rd_kafka_topic_partition_list_t *partitions =
+ rd_kafka_topic_partition_list_new(3);
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
+ rd_kafka_topic_partition_list_add(partitions, "topic3", 15);
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 1);
+ if (null_toppars) {
+ cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new(
+ "mygroup", NULL);
+ } else {
+ cgoffsets[i] = rd_kafka_ListConsumerGroupOffsets_new(
+ "mygroup", partitions);
+ }
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ rd_kafka_topic_partition_list_t *partitions_empty =
+ rd_kafka_topic_partition_list_new(0);
+ cgoffsets_empty[i] = rd_kafka_ListConsumerGroupOffsets_new(
+ "mygroup", partitions_empty);
+ rd_kafka_topic_partition_list_destroy(partitions_empty);
+
+ partitions = rd_kafka_topic_partition_list_new(3);
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
+ rd_kafka_topic_partition_list_add(partitions, "topic3", 15);
+ rd_kafka_topic_partition_list_add(partitions, "topic1", 9);
+ cgoffsets_duplicate[i] = rd_kafka_ListConsumerGroupOffsets_new(
+ "mygroup", partitions);
+ rd_kafka_topic_partition_list_destroy(partitions);
+ }
+
+ if (with_options) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);
+
+ exp_timeout = MY_SOCKET_TIMEOUT_MS * 2;
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, exp_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (useq) {
+ my_opaque = (void *)99981;
+ rd_kafka_AdminOptions_set_opaque(options, my_opaque);
+ }
+ }
+
+ TEST_SAY(
+ "Call ListConsumerGroupOffsets with empty topic-partition list.\n");
+ rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_empty,
+ MY_LIST_CGRPOFFS_CNT, options, q);
+ rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_empty,
+ MY_LIST_CGRPOFFS_CNT);
+ /* Poll result queue */
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TEST_SAY("ListConsumerGroupOffsets: got %s\n",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail");
+
+ errstr_ptr = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(
+ !strcmp(errstr_ptr,
+ "NULL or non-empty topic partition list must be passed"),
+ "expected error string \"NULL or non-empty topic partition list "
+ "must be passed\", not %s",
+ errstr_ptr);
+
+ rd_kafka_event_destroy(rkev);
+
+
+ TEST_SAY(
+ "Call ListConsumerGroupOffsets with topic-partition list"
+ "containing duplicates.\n");
+ rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets_duplicate, 1, options,
+ q);
+ rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets_duplicate,
+ MY_LIST_CGRPOFFS_CNT);
+ /* Poll result queue */
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TEST_SAY("ListConsumerGroupOffsets: got %s\n",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail");
+
+ errstr_ptr = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(!strcmp(errstr_ptr, "Duplicate partitions not allowed"),
+ "expected error string \"Duplicate partitions not allowed\""
+ ", not %s",
+ errstr_ptr);
+
+ rd_kafka_event_destroy(rkev);
+
+
+ TIMING_START(&timing, "ListConsumerGroupOffsets");
+ TEST_SAY("Call ListConsumerGroupOffsets, timeout is %dms\n",
+ exp_timeout);
+ rd_kafka_ListConsumerGroupOffsets(rk, cgoffsets, MY_LIST_CGRPOFFS_CNT,
+ options, q);
+ rd_kafka_ListConsumerGroupOffsets_destroy_array(cgoffsets,
+ MY_LIST_CGRPOFFS_CNT);
+ TIMING_ASSERT_LATER(&timing, 0, 10);
+
+ /* Poll result queue */
+ TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll");
+ rkev = rd_kafka_queue_poll(q, exp_timeout + 1000);
+ TIMING_ASSERT(&timing, exp_timeout - 100, exp_timeout + 100);
+ TEST_ASSERT(rkev != NULL, "expected result in %dms", exp_timeout);
+ TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fs\n",
+ rd_kafka_event_name(rkev), TIMING_DURATION(&timing) / 1000.0f);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ opaque = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(opaque == my_opaque, "expected opaque to be %p, not %p",
+ my_opaque, opaque);
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ TEST_ASSERT(err, "expected ListConsumerGroupOffsets to fail");
+
+ errstr_ptr = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(!strcmp(errstr_ptr,
+ "Failed while waiting for response from broker: "
+ "Local: Timed out"),
+ "expected error string \"Failed while waiting for response "
+ "from broker: Local: Timed out\", not %s",
+ errstr_ptr);
+
+ rd_kafka_event_destroy(rkev);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+#undef MY_LIST_CGRPOFFS_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test a mix of APIs using the same replyq.
+ *
+ * - Create topics A,B
+ * - Delete topic B
+ * - Create topic C
+ * - Delete groups A,B,C
+ * - Delete records from A,B,C
+ * - Create extra partitions for topic D
+ */
+static void do_test_mix(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
+ char *topics[] = {"topicA", "topicB", "topicC"};
+ int cnt = 0;
+ struct waiting {
+ rd_kafka_event_type_t evtype;
+ int seen;
+ };
+ struct waiting id1 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
+ struct waiting id2 = {RD_KAFKA_EVENT_DELETETOPICS_RESULT};
+ struct waiting id3 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
+ struct waiting id4 = {RD_KAFKA_EVENT_DELETEGROUPS_RESULT};
+ struct waiting id5 = {RD_KAFKA_EVENT_DELETERECORDS_RESULT};
+ struct waiting id6 = {RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT};
+ struct waiting id7 = {RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT};
+ struct waiting id8 = {RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT};
+ struct waiting id9 = {RD_KAFKA_EVENT_CREATETOPICS_RESULT};
+ rd_kafka_topic_partition_list_t *offsets;
+
+
+ SUB_TEST_QUICK();
+
+ offsets = rd_kafka_topic_partition_list_new(3);
+ rd_kafka_topic_partition_list_add(offsets, topics[0], 0)->offset =
+ RD_KAFKA_OFFSET_END;
+ rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset =
+ RD_KAFKA_OFFSET_END;
+ rd_kafka_topic_partition_list_add(offsets, topics[2], 0)->offset =
+ RD_KAFKA_OFFSET_END;
+
+ test_CreateTopics_simple(rk, rkqu, topics, 2, 1, &id1);
+ test_DeleteTopics_simple(rk, rkqu, &topics[1], 1, &id2);
+ test_CreateTopics_simple(rk, rkqu, &topics[2], 1, 1, &id3);
+ test_DeleteGroups_simple(rk, rkqu, topics, 3, &id4);
+ test_DeleteRecords_simple(rk, rkqu, offsets, &id5);
+ test_CreatePartitions_simple(rk, rkqu, "topicD", 15, &id6);
+ test_DeleteConsumerGroupOffsets_simple(rk, rkqu, "mygroup", offsets,
+ &id7);
+ test_DeleteConsumerGroupOffsets_simple(rk, rkqu, NULL, NULL, &id8);
+ /* Use broker-side defaults for partition count */
+ test_CreateTopics_simple(rk, rkqu, topics, 2, -1, &id9);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ while (cnt < 9) {
+ rd_kafka_event_t *rkev;
+ struct waiting *w;
+
+ rkev = rd_kafka_queue_poll(rkqu, -1);
+ TEST_ASSERT(rkev);
+
+ TEST_SAY("Got event %s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ w = rd_kafka_event_opaque(rkev);
+ TEST_ASSERT(w);
+
+ TEST_ASSERT(w->evtype == rd_kafka_event_type(rkev),
+ "Expected evtype %d, not %d (%s)", w->evtype,
+ rd_kafka_event_type(rkev),
+ rd_kafka_event_name(rkev));
+
+ TEST_ASSERT(w->seen == 0, "Duplicate results");
+
+ w->seen++;
+ cnt++;
+
+ rd_kafka_event_destroy(rkev);
+ }
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test AlterConfigs and DescribeConfigs
+ */
+static void do_test_configs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
+#define MY_CONFRES_CNT RD_KAFKA_RESOURCE__CNT + 2
+ rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_event_t *rkev;
+ rd_kafka_resp_err_t err;
+ const rd_kafka_AlterConfigs_result_t *res;
+ const rd_kafka_ConfigResource_t **rconfigs;
+ size_t rconfig_cnt;
+ char errstr[128];
+ int i;
+
+ SUB_TEST_QUICK();
+
+ /* Check invalids */
+ configs[0] = rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)-1,
+ "something");
+ TEST_ASSERT(!configs[0]);
+
+ configs[0] =
+ rd_kafka_ConfigResource_new((rd_kafka_ResourceType_t)0, NULL);
+ TEST_ASSERT(!configs[0]);
+
+
+ for (i = 0; i < MY_CONFRES_CNT; i++) {
+ int set_config = !(i % 2);
+
+ /* librdkafka shall not limit the use of illogical
+ * or unknown settings, they are enforced by the broker. */
+ configs[i] = rd_kafka_ConfigResource_new(
+ (rd_kafka_ResourceType_t)i, "3");
+ TEST_ASSERT(configs[i] != NULL);
+
+ if (set_config) {
+ rd_kafka_ConfigResource_set_config(configs[i],
+ "some.conf",
+ "which remains "
+ "unchecked");
+ rd_kafka_ConfigResource_set_config(
+ configs[i], "some.conf.null", NULL);
+ }
+ }
+
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+ err = rd_kafka_AdminOptions_set_request_timeout(options, 1000, errstr,
+ sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+ /* AlterConfigs */
+ rd_kafka_AlterConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu);
+
+ rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
+ 2000);
+
+ TEST_ASSERT(rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected timeout, not %s",
+ rd_kafka_event_error_string(rkev));
+
+ res = rd_kafka_event_AlterConfigs_result(rkev);
+ TEST_ASSERT(res);
+
+ rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt);
+ TEST_ASSERT(!rconfigs && !rconfig_cnt,
+ "Expected no result resources, got %" PRIusz, rconfig_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ /* DescribeConfigs: reuse same configs and options */
+ rd_kafka_DescribeConfigs(rk, configs, MY_CONFRES_CNT, options, rkqu);
+
+ rd_kafka_AdminOptions_destroy(options);
+ rd_kafka_ConfigResource_destroy_array(configs, MY_CONFRES_CNT);
+
+ rkev = test_wait_admin_result(
+ rkqu, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 2000);
+
+ TEST_ASSERT(rd_kafka_event_error(rkev) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected timeout, not %s",
+ rd_kafka_event_error_string(rkev));
+
+ res = rd_kafka_event_DescribeConfigs_result(rkev);
+ TEST_ASSERT(res);
+
+ rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt);
+ TEST_ASSERT(!rconfigs && !rconfig_cnt,
+ "Expected no result resources, got %" PRIusz, rconfig_cnt);
+
+ rd_kafka_event_destroy(rkev);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify that an unclean rd_kafka_destroy() does not hang or crash.
+ */
+static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) {
+ rd_kafka_t *rk;
+ char errstr[512];
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *q;
+ rd_kafka_event_t *rkev;
+ rd_kafka_DeleteTopic_t *topic;
+ test_timing_t t_destroy;
+
+ SUB_TEST_QUICK("Test unclean destroy using %s",
+ with_mainq ? "mainq" : "tempq");
+
+ test_conf_init(&conf, NULL, 0);
+ /* Remove brokers, if any, since this is a local test and we
+ * rely on the controller not being found. */
+ test_conf_set(conf, "bootstrap.servers", "");
+ test_conf_set(conf, "socket.timeout.ms", "60000");
+
+ rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);
+
+ if (with_mainq)
+ q = rd_kafka_queue_get_main(rk);
+ else
+ q = rd_kafka_queue_new(rk);
+
+ topic = rd_kafka_DeleteTopic_new("test");
+ rd_kafka_DeleteTopics(rk, &topic, 1, NULL, q);
+ rd_kafka_DeleteTopic_destroy(topic);
+
+ /* We're not expecting a result yet since DeleteTopics will attempt
+ * to look up the controller for socket.timeout.ms (1 minute). */
+ rkev = rd_kafka_queue_poll(q, 100);
+ TEST_ASSERT(!rkev, "Did not expect result: %s",
+ rd_kafka_event_name(rkev));
+
+ rd_kafka_queue_destroy(q);
+
+ TEST_SAY(
+ "Giving rd_kafka_destroy() 5s to finish, "
+ "despite Admin API request being processed\n");
+ test_timeout_set(5);
+ TIMING_START(&t_destroy, "rd_kafka_destroy()");
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_destroy);
+
+ SUB_TEST_PASS();
+
+ /* Restore timeout */
+ test_timeout_set(60);
+}
+
+
+/**
+ * @brief Test AdminOptions
+ */
+static void do_test_options(rd_kafka_t *rk) {
+#define _all_apis \
+ { \
+ RD_KAFKA_ADMIN_OP_CREATETOPICS, \
+ RD_KAFKA_ADMIN_OP_DELETETOPICS, \
+ RD_KAFKA_ADMIN_OP_CREATEPARTITIONS, \
+ RD_KAFKA_ADMIN_OP_ALTERCONFIGS, \
+ RD_KAFKA_ADMIN_OP_DESCRIBECONFIGS, \
+ RD_KAFKA_ADMIN_OP_DELETERECORDS, \
+ RD_KAFKA_ADMIN_OP_CREATEACLS, \
+ RD_KAFKA_ADMIN_OP_DESCRIBEACLS, \
+ RD_KAFKA_ADMIN_OP_DELETEACLS, \
+ RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS, \
+ RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS, \
+ RD_KAFKA_ADMIN_OP_DELETEGROUPS, \
+ RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS, \
+ RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS, \
+ RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS, \
+ RD_KAFKA_ADMIN_OP_ANY /* Must be last */ \
+ }
+ struct {
+ const char *setter;
+ const rd_kafka_admin_op_t valid_apis[16];
+ } matrix[] = {
+ {"request_timeout", _all_apis},
+ {"operation_timeout",
+ {RD_KAFKA_ADMIN_OP_CREATETOPICS, RD_KAFKA_ADMIN_OP_DELETETOPICS,
+ RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
+ RD_KAFKA_ADMIN_OP_DELETERECORDS}},
+ {"validate_only",
+ {RD_KAFKA_ADMIN_OP_CREATETOPICS,
+ RD_KAFKA_ADMIN_OP_CREATEPARTITIONS,
+ RD_KAFKA_ADMIN_OP_ALTERCONFIGS}},
+ {"broker", _all_apis},
+ {"require_stable_offsets",
+ {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS}},
+ {"match_consumer_group_states",
+ {RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS}},
+ {"opaque", _all_apis},
+ {NULL},
+ };
+ int i;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_consumer_group_state_t state[1] = {
+ RD_KAFKA_CONSUMER_GROUP_STATE_STABLE};
+
+ SUB_TEST_QUICK();
+
+ for (i = 0; matrix[i].setter; i++) {
+ static const rd_kafka_admin_op_t all_apis[] = _all_apis;
+ const rd_kafka_admin_op_t *for_api;
+
+ for (for_api = all_apis;; for_api++) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_resp_err_t exp_err =
+ RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_error_t *error = NULL;
+ char errstr[512];
+ int fi;
+
+ options = rd_kafka_AdminOptions_new(rk, *for_api);
+ TEST_ASSERT(options, "AdminOptions_new(%d) failed",
+ *for_api);
+
+ if (!strcmp(matrix[i].setter, "request_timeout"))
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, 1234, errstr, sizeof(errstr));
+ else if (!strcmp(matrix[i].setter, "operation_timeout"))
+ err =
+ rd_kafka_AdminOptions_set_operation_timeout(
+ options, 12345, errstr, sizeof(errstr));
+ else if (!strcmp(matrix[i].setter, "validate_only"))
+ err = rd_kafka_AdminOptions_set_validate_only(
+ options, 1, errstr, sizeof(errstr));
+ else if (!strcmp(matrix[i].setter, "broker"))
+ err = rd_kafka_AdminOptions_set_broker(
+ options, 5, errstr, sizeof(errstr));
+ else if (!strcmp(matrix[i].setter,
+ "require_stable_offsets"))
+ error =
+ rd_kafka_AdminOptions_set_require_stable_offsets(
+ options, 0);
+ else if (!strcmp(matrix[i].setter,
+ "match_consumer_group_states"))
+ error =
+ rd_kafka_AdminOptions_set_match_consumer_group_states(
+ options, state, 1);
+ else if (!strcmp(matrix[i].setter, "opaque")) {
+ rd_kafka_AdminOptions_set_opaque(
+ options, (void *)options);
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ } else
+ TEST_FAIL("Invalid setter: %s",
+ matrix[i].setter);
+
+ if (error) {
+ err = rd_kafka_error_code(error);
+ snprintf(errstr, sizeof(errstr), "%s",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ }
+
+
+ TEST_SAYL(3,
+ "AdminOptions_set_%s on "
+ "RD_KAFKA_ADMIN_OP_%d options "
+ "returned %s: %s\n",
+ matrix[i].setter, *for_api,
+ rd_kafka_err2name(err),
+ err ? errstr : "success");
+
+ /* Scan matrix valid_apis to see if this
+ * setter should be accepted or not. */
+ if (exp_err) {
+ /* An expected error is already set */
+ } else if (*for_api != RD_KAFKA_ADMIN_OP_ANY) {
+ exp_err = RD_KAFKA_RESP_ERR__INVALID_ARG;
+
+ for (fi = 0; matrix[i].valid_apis[fi]; fi++) {
+ if (matrix[i].valid_apis[fi] ==
+ *for_api)
+ exp_err =
+ RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+ } else {
+ exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ if (err != exp_err)
+ TEST_FAIL_LATER(
+ "Expected AdminOptions_set_%s "
+ "for RD_KAFKA_ADMIN_OP_%d "
+ "options to return %s, "
+ "not %s",
+ matrix[i].setter, *for_api,
+ rd_kafka_err2name(exp_err),
+ rd_kafka_err2name(err));
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (*for_api == RD_KAFKA_ADMIN_OP_ANY)
+ break; /* This was the last one */
+ }
+ }
+
+ /* Try an invalid for_api */
+ options = rd_kafka_AdminOptions_new(rk, (rd_kafka_admin_op_t)1234);
+ TEST_ASSERT(!options,
+ "Expected AdminOptions_new() to fail "
+ "with an invalid for_api, didn't.");
+
+ TEST_LATER_CHECK();
+
+ SUB_TEST_PASS();
+}
+
+
+static rd_kafka_t *create_admin_client(rd_kafka_type_t cltype) {
+ rd_kafka_t *rk;
+ char errstr[512];
+ rd_kafka_conf_t *conf;
+
+ test_conf_init(&conf, NULL, 0);
+ /* Remove brokers, if any, since this is a local test and we
+ * rely on the controller not being found. */
+ test_conf_set(conf, "bootstrap.servers", "");
+ test_conf_set(conf, "socket.timeout.ms", MY_SOCKET_TIMEOUT_MS_STR);
+ /* For use with the background queue */
+ rd_kafka_conf_set_background_event_cb(conf, background_event_cb);
+
+ rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);
+
+ return rk;
+}
+
+
+static void do_test_apis(rd_kafka_type_t cltype) {
+ rd_kafka_t *rk;
+ rd_kafka_queue_t *mainq, *backgroundq;
+
+ mtx_init(&last_event_lock, mtx_plain);
+ cnd_init(&last_event_cnd);
+
+ do_test_unclean_destroy(cltype, 0 /*tempq*/);
+ do_test_unclean_destroy(cltype, 1 /*mainq*/);
+
+ rk = create_admin_client(cltype);
+
+ mainq = rd_kafka_queue_get_main(rk);
+ backgroundq = rd_kafka_queue_get_background(rk);
+
+ do_test_options(rk);
+
+ do_test_CreateTopics("temp queue, no options", rk, NULL, 0, 0);
+ do_test_CreateTopics("temp queue, no options, background_event_cb", rk,
+ backgroundq, 1, 0);
+ do_test_CreateTopics("temp queue, options", rk, NULL, 0, 1);
+ do_test_CreateTopics("main queue, options", rk, mainq, 0, 1);
+
+ do_test_DeleteTopics("temp queue, no options", rk, NULL, 0);
+ do_test_DeleteTopics("temp queue, options", rk, NULL, 1);
+ do_test_DeleteTopics("main queue, options", rk, mainq, 1);
+
+ do_test_ListConsumerGroups("temp queue, no options", rk, NULL, 0,
+ rd_false);
+ do_test_ListConsumerGroups("temp queue, options", rk, NULL, 1,
+ rd_false);
+ do_test_ListConsumerGroups("main queue", rk, mainq, 0, rd_false);
+
+ do_test_DescribeConsumerGroups("temp queue, no options", rk, NULL, 0,
+ rd_false);
+ do_test_DescribeConsumerGroups("temp queue, options", rk, NULL, 1,
+ rd_false);
+ do_test_DescribeConsumerGroups("main queue, options", rk, mainq, 1,
+ rd_false);
+
+ do_test_DeleteGroups("temp queue, no options", rk, NULL, 0, rd_false);
+ do_test_DeleteGroups("temp queue, options", rk, NULL, 1, rd_false);
+ do_test_DeleteGroups("main queue, options", rk, mainq, 1, rd_false);
+
+ do_test_DeleteRecords("temp queue, no options", rk, NULL, 0, rd_false);
+ do_test_DeleteRecords("temp queue, options", rk, NULL, 1, rd_false);
+ do_test_DeleteRecords("main queue, options", rk, mainq, 1, rd_false);
+
+ do_test_DeleteConsumerGroupOffsets("temp queue, no options", rk, NULL,
+ 0);
+ do_test_DeleteConsumerGroupOffsets("temp queue, options", rk, NULL, 1);
+ do_test_DeleteConsumerGroupOffsets("main queue, options", rk, mainq, 1);
+
+ do_test_AclBinding();
+ do_test_AclBindingFilter();
+
+ do_test_CreateAcls("temp queue, no options", rk, NULL, rd_false,
+ rd_false);
+ do_test_CreateAcls("temp queue, options", rk, NULL, rd_false, rd_true);
+ do_test_CreateAcls("main queue, options", rk, mainq, rd_false, rd_true);
+
+ do_test_DescribeAcls("temp queue, no options", rk, NULL, rd_false,
+ rd_false);
+ do_test_DescribeAcls("temp queue, options", rk, NULL, rd_false,
+ rd_true);
+ do_test_DescribeAcls("main queue, options", rk, mainq, rd_false,
+ rd_true);
+
+ do_test_DeleteAcls("temp queue, no options", rk, NULL, rd_false,
+ rd_false);
+ do_test_DeleteAcls("temp queue, options", rk, NULL, rd_false, rd_true);
+ do_test_DeleteAcls("main queue, options", rk, mainq, rd_false, rd_true);
+
+ do_test_AlterConsumerGroupOffsets("temp queue, no options", rk, NULL,
+ 0);
+ do_test_AlterConsumerGroupOffsets("temp queue, options", rk, NULL, 1);
+ do_test_AlterConsumerGroupOffsets("main queue, options", rk, mainq, 1);
+
+ do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0,
+ rd_false);
+ do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1,
+ rd_false);
+ do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1,
+ rd_false);
+ do_test_ListConsumerGroupOffsets("temp queue, no options", rk, NULL, 0,
+ rd_true);
+ do_test_ListConsumerGroupOffsets("temp queue, options", rk, NULL, 1,
+ rd_true);
+ do_test_ListConsumerGroupOffsets("main queue, options", rk, mainq, 1,
+ rd_true);
+
+ do_test_mix(rk, mainq);
+
+ do_test_configs(rk, mainq);
+
+ rd_kafka_queue_destroy(backgroundq);
+ rd_kafka_queue_destroy(mainq);
+
+ rd_kafka_destroy(rk);
+
+ /*
+ * Tests which require a unique unused client instance.
+ */
+ rk = create_admin_client(cltype);
+ mainq = rd_kafka_queue_get_main(rk);
+ do_test_DeleteRecords("main queue, options, destroy", rk, mainq, 1,
+ rd_true /*destroy instance before finishing*/);
+ rd_kafka_queue_destroy(mainq);
+ rd_kafka_destroy(rk);
+
+ rk = create_admin_client(cltype);
+ mainq = rd_kafka_queue_get_main(rk);
+ do_test_DeleteGroups("main queue, options, destroy", rk, mainq, 1,
+ rd_true /*destroy instance before finishing*/);
+ rd_kafka_queue_destroy(mainq);
+ rd_kafka_destroy(rk);
+
+
+ /* Done */
+ mtx_destroy(&last_event_lock);
+ cnd_destroy(&last_event_cnd);
+}
+
+
+int main_0080_admin_ut(int argc, char **argv) {
+ do_test_apis(RD_KAFKA_PRODUCER);
+ do_test_apis(RD_KAFKA_CONSUMER);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c
new file mode 100644
index 000000000..7da2dff15
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0081-admin.c
@@ -0,0 +1,3797 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+#include "../src/rdstring.h"
+
+/**
+ * @brief Admin API integration tests.
+ */
+
+
+static int32_t *avail_brokers;
+static size_t avail_broker_cnt;
+
+
+
+static void do_test_CreateTopics(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int op_timeout,
+ rd_bool_t validate_only) {
+ rd_kafka_queue_t *q;
+#define MY_NEW_TOPICS_CNT 7
+ char *topics[MY_NEW_TOPICS_CNT];
+ rd_kafka_NewTopic_t *new_topics[MY_NEW_TOPICS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_resp_err_t exp_topicerr[MY_NEW_TOPICS_CNT] = {0};
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ /* Expected topics in metadata */
+ rd_kafka_metadata_topic_t exp_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
+ int exp_mdtopic_cnt = 0;
+ /* Not expected topics in metadata */
+ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_NEW_TOPICS_CNT] = {{0}};
+ int exp_not_mdtopic_cnt = 0;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_CreateTopics_result_t *res;
+ const rd_kafka_topic_result_t **restopics;
+ size_t restopic_cnt;
+ int metadata_tmout;
+ int num_replicas = (int)avail_broker_cnt;
+ int32_t *replicas;
+
+ SUB_TEST_QUICK(
+ "%s CreateTopics with %s, "
+ "op_timeout %d, validate_only %d",
+ rd_kafka_name(rk), what, op_timeout, validate_only);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /* Set up replicas */
+ replicas = rd_alloca(sizeof(*replicas) * num_replicas);
+ for (i = 0; i < num_replicas; i++)
+ replicas[i] = avail_brokers[i];
+
+ /**
+ * Construct NewTopic array with different properties for
+ * different partitions.
+ */
+ for (i = 0; i < MY_NEW_TOPICS_CNT; i++) {
+ char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ int use_defaults =
+ i == 6 && test_broker_version >= TEST_BRKVER(2, 4, 0, 0);
+ int num_parts = !use_defaults ? (i * 7 + 1) : -1;
+ int set_config = (i & 1);
+ int add_invalid_config = (i == 1);
+ int set_replicas = !use_defaults && !(i % 3);
+ rd_kafka_resp_err_t this_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ topics[i] = topic;
+ new_topics[i] = rd_kafka_NewTopic_new(
+ topic, num_parts, set_replicas ? -1 : num_replicas, NULL,
+ 0);
+
+ if (set_config) {
+ /*
+ * Add various configuration properties
+ */
+ err = rd_kafka_NewTopic_set_config(
+ new_topics[i], "compression.type", "lz4");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ err = rd_kafka_NewTopic_set_config(
+ new_topics[i], "delete.retention.ms", "900");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+ if (add_invalid_config) {
+ /* Add invalid config property */
+ err = rd_kafka_NewTopic_set_config(
+ new_topics[i], "dummy.doesntexist",
+ "broker is verifying this");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ this_exp_err = RD_KAFKA_RESP_ERR_INVALID_CONFIG;
+ }
+
+ TEST_SAY(
+ "Expecting result for topic #%d: %s "
+ "(set_config=%d, add_invalid_config=%d, "
+ "set_replicas=%d, use_defaults=%d)\n",
+ i, rd_kafka_err2name(this_exp_err), set_config,
+ add_invalid_config, set_replicas, use_defaults);
+
+ if (set_replicas) {
+ int32_t p;
+
+ /*
+ * Set valid replica assignments
+ */
+ for (p = 0; p < num_parts; p++) {
+ err = rd_kafka_NewTopic_set_replica_assignment(
+ new_topics[i], p, replicas, num_replicas,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+ }
+ }
+
+ if (this_exp_err || validate_only) {
+ exp_topicerr[i] = this_exp_err;
+ exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
+
+ } else {
+ exp_mdtopics[exp_mdtopic_cnt].topic = topic;
+ exp_mdtopics[exp_mdtopic_cnt].partition_cnt = num_parts;
+ exp_mdtopic_cnt++;
+ }
+ }
+
+ if (op_timeout != -1 || validate_only) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
+
+ if (op_timeout != -1) {
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, op_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+ if (validate_only) {
+ err = rd_kafka_AdminOptions_set_validate_only(
+ options, validate_only, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+ }
+
+ TIMING_START(&timing, "CreateTopics");
+ TEST_SAY("Call CreateTopics\n");
+ rd_kafka_CreateTopics(rk, new_topics, MY_NEW_TOPICS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /* Poll result queue for CreateTopics result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ TIMING_START(&timing, "CreateTopics.queue_poll");
+ do {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
+ TEST_SAY("CreateTopics: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+ } while (rd_kafka_event_type(rkev) !=
+ RD_KAFKA_EVENT_CREATETOPICS_RESULT);
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_CreateTopics_result(rkev);
+ TEST_ASSERT(res, "expected CreateTopics_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == exp_err,
+ "expected CreateTopics to return %s, not %s (%s)",
+ rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ TEST_SAY("CreateTopics: returned %s (%s)\n", rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ /* Extract topics */
+ restopics = rd_kafka_CreateTopics_result_topics(res, &restopic_cnt);
+
+
+ /* Scan topics for proper fields and expected failures. */
+ for (i = 0; i < (int)restopic_cnt; i++) {
+ const rd_kafka_topic_result_t *terr = restopics[i];
+
+ /* Verify that topic order matches our request. */
+ if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
+ TEST_FAIL_LATER(
+ "Topic result order mismatch at #%d: "
+ "expected %s, got %s",
+ i, topics[i], rd_kafka_topic_result_name(terr));
+
+ TEST_SAY("CreateTopics result: #%d: %s: %s: %s\n", i,
+ rd_kafka_topic_result_name(terr),
+ rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
+ rd_kafka_topic_result_error_string(terr));
+ if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
+ TEST_FAIL_LATER("Expected %s, not %d: %s",
+ rd_kafka_err2name(exp_topicerr[i]),
+ rd_kafka_topic_result_error(terr),
+ rd_kafka_err2name(
+ rd_kafka_topic_result_error(terr)));
+ }
+
+ /**
+ * Verify that the expecteded topics are created and the non-expected
+ * are not. Allow it some time to propagate.
+ */
+ if (validate_only) {
+ /* No topics should have been created, give it some time
+ * before checking. */
+ rd_sleep(2);
+ metadata_tmout = 5 * 1000;
+ } else {
+ if (op_timeout > 0)
+ metadata_tmout = op_timeout + 1000;
+ else
+ metadata_tmout = 10 * 1000;
+ }
+
+ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt,
+ exp_not_mdtopics, exp_not_mdtopic_cnt,
+ metadata_tmout);
+
+ rd_kafka_event_destroy(rkev);
+
+ for (i = 0; i < MY_NEW_TOPICS_CNT; i++) {
+ rd_kafka_NewTopic_destroy(new_topics[i]);
+ rd_free(topics[i]);
+ }
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef MY_NEW_TOPICS_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test deletion of topics
+ *
+ *
+ */
+static void do_test_DeleteTopics(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int op_timeout) {
+ rd_kafka_queue_t *q;
+ const int skip_topic_cnt = 2;
+#define MY_DEL_TOPICS_CNT 9
+ char *topics[MY_DEL_TOPICS_CNT];
+ rd_kafka_DeleteTopic_t *del_topics[MY_DEL_TOPICS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_resp_err_t exp_topicerr[MY_DEL_TOPICS_CNT] = {0};
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ /* Expected topics in metadata */
+ rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
+ int exp_mdtopic_cnt = 0;
+ /* Not expected topics in metadata */
+ rd_kafka_metadata_topic_t exp_not_mdtopics[MY_DEL_TOPICS_CNT] = {{0}};
+ int exp_not_mdtopic_cnt = 0;
+ int i;
+ char errstr[512];
+ const char *errstr2;
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteTopics_result_t *res;
+ const rd_kafka_topic_result_t **restopics;
+ size_t restopic_cnt;
+ int metadata_tmout;
+
+ SUB_TEST_QUICK("%s DeleteTopics with %s, op_timeout %d",
+ rd_kafka_name(rk), what, op_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /**
+ * Construct DeleteTopic array
+ */
+ for (i = 0; i < MY_DEL_TOPICS_CNT; i++) {
+ char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ int notexist_topic = i >= MY_DEL_TOPICS_CNT - skip_topic_cnt;
+
+ topics[i] = topic;
+
+ del_topics[i] = rd_kafka_DeleteTopic_new(topic);
+
+ if (notexist_topic)
+ exp_topicerr[i] =
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ else {
+ exp_topicerr[i] = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
+ }
+
+ exp_not_mdtopics[exp_not_mdtopic_cnt++].topic = topic;
+ }
+
+ if (op_timeout != -1) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, op_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ /* Create the topics first, minus the skip count. */
+ test_CreateTopics_simple(rk, NULL, topics,
+ MY_DEL_TOPICS_CNT - skip_topic_cnt,
+ 2 /*num_partitions*/, NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
+ 15 * 1000);
+
+ TIMING_START(&timing, "DeleteTopics");
+ TEST_SAY("Call DeleteTopics\n");
+ rd_kafka_DeleteTopics(rk, del_topics, MY_DEL_TOPICS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /* Poll result queue for DeleteTopics result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ TIMING_START(&timing, "DeleteTopics.queue_poll");
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
+ TEST_SAY("DeleteTopics: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_DELETETOPICS_RESULT)
+ break;
+
+ rd_kafka_event_destroy(rkev);
+ }
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteTopics_result(rkev);
+ TEST_ASSERT(res, "expected DeleteTopics_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == exp_err,
+ "expected DeleteTopics to return %s, not %s (%s)",
+ rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ TEST_SAY("DeleteTopics: returned %s (%s)\n", rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ /* Extract topics */
+ restopics = rd_kafka_DeleteTopics_result_topics(res, &restopic_cnt);
+
+
+ /* Scan topics for proper fields and expected failures. */
+ for (i = 0; i < (int)restopic_cnt; i++) {
+ const rd_kafka_topic_result_t *terr = restopics[i];
+
+ /* Verify that topic order matches our request. */
+ if (strcmp(rd_kafka_topic_result_name(terr), topics[i]))
+ TEST_FAIL_LATER(
+ "Topic result order mismatch at #%d: "
+ "expected %s, got %s",
+ i, topics[i], rd_kafka_topic_result_name(terr));
+
+ TEST_SAY("DeleteTopics result: #%d: %s: %s: %s\n", i,
+ rd_kafka_topic_result_name(terr),
+ rd_kafka_err2name(rd_kafka_topic_result_error(terr)),
+ rd_kafka_topic_result_error_string(terr));
+ if (rd_kafka_topic_result_error(terr) != exp_topicerr[i])
+ TEST_FAIL_LATER("Expected %s, not %d: %s",
+ rd_kafka_err2name(exp_topicerr[i]),
+ rd_kafka_topic_result_error(terr),
+ rd_kafka_err2name(
+ rd_kafka_topic_result_error(terr)));
+ }
+
+ /**
+ * Verify that the expected topics are deleted and the non-expected
+ * are not. Allow it some time to propagate.
+ */
+ if (op_timeout > 0)
+ metadata_tmout = op_timeout + 1000;
+ else
+ metadata_tmout = 10 * 1000;
+
+ test_wait_metadata_update(rk, NULL, 0, exp_not_mdtopics,
+ exp_not_mdtopic_cnt, metadata_tmout);
+
+ rd_kafka_event_destroy(rkev);
+
+ for (i = 0; i < MY_DEL_TOPICS_CNT; i++) {
+ rd_kafka_DeleteTopic_destroy(del_topics[i]);
+ rd_free(topics[i]);
+ }
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef MY_DEL_TOPICS_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test creation of partitions
+ *
+ *
+ */
+static void do_test_CreatePartitions(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int op_timeout) {
+ rd_kafka_queue_t *q;
+#define MY_CRP_TOPICS_CNT 9
+ char *topics[MY_CRP_TOPICS_CNT];
+ rd_kafka_NewTopic_t *new_topics[MY_CRP_TOPICS_CNT];
+ rd_kafka_NewPartitions_t *crp_topics[MY_CRP_TOPICS_CNT];
+ rd_kafka_AdminOptions_t *options = NULL;
+ /* Expected topics in metadata */
+ rd_kafka_metadata_topic_t exp_mdtopics[MY_CRP_TOPICS_CNT] = {{0}};
+ rd_kafka_metadata_partition_t exp_mdparts[2] = {{0}};
+ int exp_mdtopic_cnt = 0;
+ int i;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+ int metadata_tmout;
+ int num_replicas = (int)avail_broker_cnt;
+
+ SUB_TEST_QUICK("%s CreatePartitions with %s, op_timeout %d",
+ rd_kafka_name(rk), what, op_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ /* Set up two expected partitions with different replication sets
+ * so they can be matched by the metadata checker later.
+ * Even partitions use exp_mdparts[0] while odd partitions
+ * use exp_mdparts[1]. */
+
+ /* Set valid replica assignments (even, and odd (reverse) ) */
+ exp_mdparts[0].replicas =
+ rd_alloca(sizeof(*exp_mdparts[0].replicas) * num_replicas);
+ exp_mdparts[1].replicas =
+ rd_alloca(sizeof(*exp_mdparts[1].replicas) * num_replicas);
+ exp_mdparts[0].replica_cnt = num_replicas;
+ exp_mdparts[1].replica_cnt = num_replicas;
+ for (i = 0; i < num_replicas; i++) {
+ exp_mdparts[0].replicas[i] = avail_brokers[i];
+ exp_mdparts[1].replicas[i] =
+ avail_brokers[num_replicas - i - 1];
+ }
+
+ /**
+ * Construct CreatePartitions array
+ */
+ for (i = 0; i < MY_CRP_TOPICS_CNT; i++) {
+ char *topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ int initial_part_cnt = 1 + (i * 2);
+ int new_part_cnt = 1 + (i / 2);
+ int final_part_cnt = initial_part_cnt + new_part_cnt;
+ int set_replicas = !(i % 2);
+ int pi;
+
+ topics[i] = topic;
+
+ /* Topic to create with initial partition count */
+ new_topics[i] = rd_kafka_NewTopic_new(
+ topic, initial_part_cnt, set_replicas ? -1 : num_replicas,
+ NULL, 0);
+
+ /* .. and later add more partitions to */
+ crp_topics[i] = rd_kafka_NewPartitions_new(
+ topic, final_part_cnt, errstr, sizeof(errstr));
+
+ if (set_replicas) {
+ exp_mdtopics[exp_mdtopic_cnt].partitions = rd_alloca(
+ final_part_cnt *
+ sizeof(*exp_mdtopics[exp_mdtopic_cnt].partitions));
+
+ for (pi = 0; pi < final_part_cnt; pi++) {
+ const rd_kafka_metadata_partition_t *exp_mdp =
+ &exp_mdparts[pi & 1];
+
+ exp_mdtopics[exp_mdtopic_cnt].partitions[pi] =
+ *exp_mdp; /* copy */
+
+ exp_mdtopics[exp_mdtopic_cnt]
+ .partitions[pi]
+ .id = pi;
+
+ if (pi < initial_part_cnt) {
+ /* Set replica assignment
+ * for initial partitions */
+ err =
+ rd_kafka_NewTopic_set_replica_assignment(
+ new_topics[i], pi,
+ exp_mdp->replicas,
+ (size_t)exp_mdp->replica_cnt,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(!err,
+ "NewTopic_set_replica_"
+ "assignment: %s",
+ errstr);
+ } else {
+ /* Set replica assignment for new
+ * partitions */
+ err =
+ rd_kafka_NewPartitions_set_replica_assignment(
+ crp_topics[i],
+ pi - initial_part_cnt,
+ exp_mdp->replicas,
+ (size_t)exp_mdp->replica_cnt,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(!err,
+ "NewPartitions_set_replica_"
+ "assignment: %s",
+ errstr);
+ }
+ }
+ }
+
+ TEST_SAY(_C_YEL
+ "Topic %s with %d initial partitions will grow "
+ "by %d to %d total partitions with%s replicas set\n",
+ topics[i], initial_part_cnt, new_part_cnt,
+ final_part_cnt, set_replicas ? "" : "out");
+
+ exp_mdtopics[exp_mdtopic_cnt].topic = topic;
+ exp_mdtopics[exp_mdtopic_cnt].partition_cnt = final_part_cnt;
+
+ exp_mdtopic_cnt++;
+ }
+
+ if (op_timeout != -1) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, op_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+ /*
+ * Create topics with initial partition count
+ */
+ TIMING_START(&timing, "CreateTopics");
+ TEST_SAY("Creating topics with initial partition counts\n");
+ rd_kafka_CreateTopics(rk, new_topics, MY_CRP_TOPICS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, 15000);
+ TEST_ASSERT(!err, "CreateTopics failed: %s", rd_kafka_err2str(err));
+
+ rd_kafka_NewTopic_destroy_array(new_topics, MY_CRP_TOPICS_CNT);
+
+
+ /*
+ * Create new partitions
+ */
+ TIMING_START(&timing, "CreatePartitions");
+ TEST_SAY("Creating partitions\n");
+ rd_kafka_CreatePartitions(rk, crp_topics, MY_CRP_TOPICS_CNT, options,
+ q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, 15000);
+ TEST_ASSERT(!err, "CreatePartitions failed: %s", rd_kafka_err2str(err));
+
+ rd_kafka_NewPartitions_destroy_array(crp_topics, MY_CRP_TOPICS_CNT);
+
+
+ /**
+ * Verify that the expected topics are deleted and the non-expected
+ * are not. Allow it some time to propagate.
+ */
+ if (op_timeout > 0)
+ metadata_tmout = op_timeout + 1000;
+ else
+ metadata_tmout = 10 * 1000;
+
+ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
+ metadata_tmout);
+
+ for (i = 0; i < MY_CRP_TOPICS_CNT; i++)
+ rd_free(topics[i]);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef MY_CRP_TOPICS_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Print the ConfigEntrys in the provided array.
+ */
+static void test_print_ConfigEntry_array(const rd_kafka_ConfigEntry_t **entries,
+ size_t entry_cnt,
+ unsigned int depth) {
+ const char *indent = &" "[4 - (depth > 4 ? 4 : depth)];
+ size_t ei;
+
+ for (ei = 0; ei < entry_cnt; ei++) {
+ const rd_kafka_ConfigEntry_t *e = entries[ei];
+ const rd_kafka_ConfigEntry_t **syns;
+ size_t syn_cnt;
+
+ syns = rd_kafka_ConfigEntry_synonyms(e, &syn_cnt);
+
+#define YN(v) ((v) ? "y" : "n")
+ TEST_SAYL(
+ 3,
+ "%s#%" PRIusz "/%" PRIusz
+ ": Source %s (%d): \"%s\"=\"%s\" "
+ "[is read-only=%s, default=%s, sensitive=%s, "
+ "synonym=%s] with %" PRIusz " synonym(s)\n",
+ indent, ei, entry_cnt,
+ rd_kafka_ConfigSource_name(rd_kafka_ConfigEntry_source(e)),
+ rd_kafka_ConfigEntry_source(e),
+ rd_kafka_ConfigEntry_name(e),
+ rd_kafka_ConfigEntry_value(e)
+ ? rd_kafka_ConfigEntry_value(e)
+ : "(NULL)",
+ YN(rd_kafka_ConfigEntry_is_read_only(e)),
+ YN(rd_kafka_ConfigEntry_is_default(e)),
+ YN(rd_kafka_ConfigEntry_is_sensitive(e)),
+ YN(rd_kafka_ConfigEntry_is_synonym(e)), syn_cnt);
+#undef YN
+
+ if (syn_cnt > 0)
+ test_print_ConfigEntry_array(syns, syn_cnt, depth + 1);
+ }
+}
+
+
+/**
+ * @brief Test AlterConfigs
+ */
+static void do_test_AlterConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
+#define MY_CONFRES_CNT 3
+ char *topics[MY_CONFRES_CNT];
+ rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
+ rd_kafka_event_t *rkev;
+ rd_kafka_resp_err_t err;
+ const rd_kafka_AlterConfigs_result_t *res;
+ const rd_kafka_ConfigResource_t **rconfigs;
+ size_t rconfig_cnt;
+ char errstr[128];
+ const char *errstr2;
+ int ci = 0;
+ int i;
+ int fails = 0;
+
+ SUB_TEST_QUICK();
+
+ /*
+ * Only create one topic, the others will be non-existent.
+ */
+ for (i = 0; i < MY_CONFRES_CNT; i++)
+ rd_strdupa(&topics[i], test_mk_topic_name(__FUNCTION__, 1));
+
+ test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);
+
+ test_wait_topic_exists(rk, topics[0], 10000);
+
+ /*
+ * ConfigResource #0: valid topic config
+ */
+ configs[ci] =
+ rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
+
+ err = rd_kafka_ConfigResource_set_config(configs[ci],
+ "compression.type", "gzip");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ err = rd_kafka_ConfigResource_set_config(configs[ci], "flush.ms",
+ "12345678");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
+ ci++;
+
+
+ if (test_broker_version >= TEST_BRKVER(1, 1, 0, 0)) {
+ /*
+ * ConfigResource #1: valid broker config
+ */
+ configs[ci] = rd_kafka_ConfigResource_new(
+ RD_KAFKA_RESOURCE_BROKER,
+ tsprintf("%" PRId32, avail_brokers[0]));
+
+ err = rd_kafka_ConfigResource_set_config(
+ configs[ci], "sasl.kerberos.min.time.before.relogin",
+ "58000");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
+ ci++;
+ } else {
+ TEST_WARN(
+ "Skipping RESOURCE_BROKER test on unsupported "
+ "broker version\n");
+ }
+
+ /*
+ * ConfigResource #2: valid topic config, non-existent topic
+ */
+ configs[ci] =
+ rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
+
+ err = rd_kafka_ConfigResource_set_config(configs[ci],
+ "compression.type", "lz4");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ err = rd_kafka_ConfigResource_set_config(
+ configs[ci], "offset.metadata.max.bytes", "12345");
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+
+ if (test_broker_version >= TEST_BRKVER(2, 7, 0, 0))
+ exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ else
+ exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN;
+ ci++;
+
+
+ /*
+ * Timeout options
+ */
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ALTERCONFIGS);
+ err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr,
+ sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+
+ /*
+ * Fire off request
+ */
+ rd_kafka_AlterConfigs(rk, configs, ci, options, rkqu);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ /*
+ * Wait for result
+ */
+ rkev = test_wait_admin_result(rkqu, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT,
+ 10000 + 1000);
+
+ /*
+ * Extract result
+ */
+ res = rd_kafka_event_AlterConfigs_result(rkev);
+ TEST_ASSERT(res, "Expected AlterConfigs result, not %s",
+ rd_kafka_event_name(rkev));
+
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(!err, "Expected success, not %s: %s",
+ rd_kafka_err2name(err), errstr2);
+
+ rconfigs = rd_kafka_AlterConfigs_result_resources(res, &rconfig_cnt);
+ TEST_ASSERT((int)rconfig_cnt == ci,
+ "Expected %d result resources, got %" PRIusz "\n", ci,
+ rconfig_cnt);
+
+ /*
+ * Verify status per resource
+ */
+ for (i = 0; i < (int)rconfig_cnt; i++) {
+ const rd_kafka_ConfigEntry_t **entries;
+ size_t entry_cnt;
+
+ err = rd_kafka_ConfigResource_error(rconfigs[i]);
+ errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]);
+
+ entries =
+ rd_kafka_ConfigResource_configs(rconfigs[i], &entry_cnt);
+
+ TEST_SAY(
+ "ConfigResource #%d: type %s (%d), \"%s\": "
+ "%" PRIusz " ConfigEntries, error %s (%s)\n",
+ i,
+ rd_kafka_ResourceType_name(
+ rd_kafka_ConfigResource_type(rconfigs[i])),
+ rd_kafka_ConfigResource_type(rconfigs[i]),
+ rd_kafka_ConfigResource_name(rconfigs[i]), entry_cnt,
+ rd_kafka_err2name(err), errstr2 ? errstr2 : "");
+
+ test_print_ConfigEntry_array(entries, entry_cnt, 1);
+
+ if (rd_kafka_ConfigResource_type(rconfigs[i]) !=
+ rd_kafka_ConfigResource_type(configs[i]) ||
+ strcmp(rd_kafka_ConfigResource_name(rconfigs[i]),
+ rd_kafka_ConfigResource_name(configs[i]))) {
+ TEST_FAIL_LATER(
+ "ConfigResource #%d: "
+ "expected type %s name %s, "
+ "got type %s name %s",
+ i,
+ rd_kafka_ResourceType_name(
+ rd_kafka_ConfigResource_type(configs[i])),
+ rd_kafka_ConfigResource_name(configs[i]),
+ rd_kafka_ResourceType_name(
+ rd_kafka_ConfigResource_type(rconfigs[i])),
+ rd_kafka_ConfigResource_name(rconfigs[i]));
+ fails++;
+ continue;
+ }
+
+
+ if (err != exp_err[i]) {
+ TEST_FAIL_LATER(
+ "ConfigResource #%d: "
+ "expected %s (%d), got %s (%s)",
+ i, rd_kafka_err2name(exp_err[i]), exp_err[i],
+ rd_kafka_err2name(err), errstr2 ? errstr2 : "");
+ fails++;
+ }
+ }
+
+ TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_ConfigResource_destroy_array(configs, ci);
+
+ TEST_LATER_CHECK();
+#undef MY_CONFRES_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test DescribeConfigs
+ */
+static void do_test_DescribeConfigs(rd_kafka_t *rk, rd_kafka_queue_t *rkqu) {
+#define MY_CONFRES_CNT 3
+ char *topics[MY_CONFRES_CNT];
+ rd_kafka_ConfigResource_t *configs[MY_CONFRES_CNT];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_resp_err_t exp_err[MY_CONFRES_CNT];
+ rd_kafka_event_t *rkev;
+ rd_kafka_resp_err_t err;
+ const rd_kafka_DescribeConfigs_result_t *res;
+ const rd_kafka_ConfigResource_t **rconfigs;
+ size_t rconfig_cnt;
+ char errstr[128];
+ const char *errstr2;
+ int ci = 0;
+ int i;
+ int fails = 0;
+ int max_retry_describe = 3;
+
+ SUB_TEST_QUICK();
+
+ /*
+ * Only create one topic, the others will be non-existent.
+ */
+ rd_strdupa(&topics[0], test_mk_topic_name("DescribeConfigs_exist", 1));
+ for (i = 1; i < MY_CONFRES_CNT; i++)
+ rd_strdupa(&topics[i],
+ test_mk_topic_name("DescribeConfigs_notexist", 1));
+
+ test_CreateTopics_simple(rk, NULL, topics, 1, 1, NULL);
+
+ /*
+ * ConfigResource #0: topic config, no config entries.
+ */
+ configs[ci] =
+ rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
+ exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
+ ci++;
+
+ /*
+ * ConfigResource #1:broker config, no config entries
+ */
+ configs[ci] = rd_kafka_ConfigResource_new(
+ RD_KAFKA_RESOURCE_BROKER, tsprintf("%" PRId32, avail_brokers[0]));
+
+ exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
+ ci++;
+
+ /*
+ * ConfigResource #2: topic config, non-existent topic, no config entr.
+ */
+ configs[ci] =
+ rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_TOPIC, topics[ci]);
+ /* FIXME: This is a bug in the broker (<v2.0.0), it returns a full
+ * response for unknown topics.
+ * https://issues.apache.org/jira/browse/KAFKA-6778
+ */
+ if (test_broker_version < TEST_BRKVER(2, 0, 0, 0))
+ exp_err[ci] = RD_KAFKA_RESP_ERR_NO_ERROR;
+ else
+ exp_err[ci] = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ ci++;
+
+
+retry_describe:
+ /*
+ * Timeout options
+ */
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+ err = rd_kafka_AdminOptions_set_request_timeout(options, 10000, errstr,
+ sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+
+ /*
+ * Fire off request
+ */
+ rd_kafka_DescribeConfigs(rk, configs, ci, options, rkqu);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ /*
+ * Wait for result
+ */
+ rkev = test_wait_admin_result(
+ rkqu, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 10000 + 1000);
+
+ /*
+ * Extract result
+ */
+ res = rd_kafka_event_DescribeConfigs_result(rkev);
+ TEST_ASSERT(res, "Expected DescribeConfigs result, not %s",
+ rd_kafka_event_name(rkev));
+
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(!err, "Expected success, not %s: %s",
+ rd_kafka_err2name(err), errstr2);
+
+ rconfigs = rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt);
+ TEST_ASSERT((int)rconfig_cnt == ci,
+ "Expected %d result resources, got %" PRIusz "\n", ci,
+ rconfig_cnt);
+
+ /*
+ * Verify status per resource
+ */
+ for (i = 0; i < (int)rconfig_cnt; i++) {
+ const rd_kafka_ConfigEntry_t **entries;
+ size_t entry_cnt;
+
+ err = rd_kafka_ConfigResource_error(rconfigs[i]);
+ errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[i]);
+
+ entries =
+ rd_kafka_ConfigResource_configs(rconfigs[i], &entry_cnt);
+
+ TEST_SAY(
+ "ConfigResource #%d: type %s (%d), \"%s\": "
+ "%" PRIusz " ConfigEntries, error %s (%s)\n",
+ i,
+ rd_kafka_ResourceType_name(
+ rd_kafka_ConfigResource_type(rconfigs[i])),
+ rd_kafka_ConfigResource_type(rconfigs[i]),
+ rd_kafka_ConfigResource_name(rconfigs[i]), entry_cnt,
+ rd_kafka_err2name(err), errstr2 ? errstr2 : "");
+
+ test_print_ConfigEntry_array(entries, entry_cnt, 1);
+
+ if (rd_kafka_ConfigResource_type(rconfigs[i]) !=
+ rd_kafka_ConfigResource_type(configs[i]) ||
+ strcmp(rd_kafka_ConfigResource_name(rconfigs[i]),
+ rd_kafka_ConfigResource_name(configs[i]))) {
+ TEST_FAIL_LATER(
+ "ConfigResource #%d: "
+ "expected type %s name %s, "
+ "got type %s name %s",
+ i,
+ rd_kafka_ResourceType_name(
+ rd_kafka_ConfigResource_type(configs[i])),
+ rd_kafka_ConfigResource_name(configs[i]),
+ rd_kafka_ResourceType_name(
+ rd_kafka_ConfigResource_type(rconfigs[i])),
+ rd_kafka_ConfigResource_name(rconfigs[i]));
+ fails++;
+ continue;
+ }
+
+
+ if (err != exp_err[i]) {
+ if (err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART &&
+ max_retry_describe-- > 0) {
+ TEST_WARN(
+ "ConfigResource #%d: "
+ "expected %s (%d), got %s (%s): "
+ "this is typically a temporary "
+ "error while the new resource "
+ "is propagating: retrying",
+ i, rd_kafka_err2name(exp_err[i]),
+ exp_err[i], rd_kafka_err2name(err),
+ errstr2 ? errstr2 : "");
+ rd_kafka_event_destroy(rkev);
+ rd_sleep(1);
+ goto retry_describe;
+ }
+
+ TEST_FAIL_LATER(
+ "ConfigResource #%d: "
+ "expected %s (%d), got %s (%s)",
+ i, rd_kafka_err2name(exp_err[i]), exp_err[i],
+ rd_kafka_err2name(err), errstr2 ? errstr2 : "");
+ fails++;
+ }
+ }
+
+ TEST_ASSERT(!fails, "See %d previous failure(s)", fails);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_ConfigResource_destroy_array(configs, ci);
+
+ TEST_LATER_CHECK();
+#undef MY_CONFRES_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Test CreateAcls
+ */
+static void
+do_test_CreateAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
+ rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
+ size_t resacl_cnt;
+ test_timing_t timing;
+ rd_kafka_resp_err_t err;
+ char errstr[128];
+ const char *errstr2;
+ const char *user_test1 = "User:test1";
+ const char *user_test2 = "User:test2";
+ const char *base_topic_name;
+ char topic1_name[512];
+ char topic2_name[512];
+ rd_kafka_AclBinding_t *acl_bindings[2];
+ rd_kafka_ResourcePatternType_t pattern_type_first_topic =
+ RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
+ rd_kafka_AdminOptions_t *admin_options;
+ rd_kafka_event_t *rkev_acl_create;
+ const rd_kafka_CreateAcls_result_t *acl_res;
+ const rd_kafka_acl_result_t **acl_res_acls;
+ unsigned int i;
+
+ SUB_TEST_QUICK();
+
+ if (version == 0)
+ pattern_type_first_topic = RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+
+ base_topic_name = test_mk_topic_name(__FUNCTION__, 1);
+
+ rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name);
+ rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name);
+
+
+ acl_bindings[0] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_first_topic,
+ user_test1, "*", RD_KAFKA_ACL_OPERATION_READ,
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW, NULL, 0);
+ acl_bindings[1] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic2_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, "*",
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+
+
+ admin_options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS);
+ err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+ TIMING_START(&timing, "CreateAcls");
+ TEST_SAY("Call CreateAcls\n");
+ rd_kafka_CreateAcls(rk, acl_bindings, 2, admin_options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_create = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_CREATEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_create);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_create);
+
+ if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "Expected unsupported feature, not: %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(!strcmp(errstr2,
+ "ACLs Admin API (KIP-140) not supported "
+ "by broker, requires broker "
+ "version >= 0.11.0.0"),
+ "Expected a different message, not: %s", errstr2);
+ TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err));
+ }
+
+ if (version > 0 && test_broker_version < TEST_BRKVER(2, 0, 0, 0)) {
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "Expected unsupported feature, not: %s",
+ rd_kafka_err2name(err));
+ TEST_ASSERT(!strcmp(errstr2,
+ "Broker only supports LITERAL "
+ "resource pattern types"),
+ "Expected a different message, not: %s", errstr2);
+ TEST_FAIL("Unexpected error: %s", rd_kafka_err2name(err));
+ }
+
+ TEST_ASSERT(!err, "Expected success, not %s: %s",
+ rd_kafka_err2name(err), errstr2);
+
+ /*
+ * Extract result
+ */
+ acl_res = rd_kafka_event_CreateAcls_result(rkev_acl_create);
+ TEST_ASSERT(acl_res, "Expected CreateAcls result, not %s",
+ rd_kafka_event_name(rkev_acl_create));
+
+ acl_res_acls = rd_kafka_CreateAcls_result_acls(acl_res, &resacl_cnt);
+ TEST_ASSERT(resacl_cnt == 2, "Expected 2, not %zu", resacl_cnt);
+
+ for (i = 0; i < resacl_cnt; i++) {
+ const rd_kafka_acl_result_t *acl_res_acl = *(acl_res_acls + i);
+ const rd_kafka_error_t *error =
+ rd_kafka_acl_result_error(acl_res_acl);
+
+ TEST_ASSERT(!error,
+ "Expected RD_KAFKA_RESP_ERR_NO_ERROR, not %s",
+ rd_kafka_error_string(error));
+ }
+
+ rd_kafka_AdminOptions_destroy(admin_options);
+ rd_kafka_event_destroy(rkev_acl_create);
+ rd_kafka_AclBinding_destroy_array(acl_bindings, 2);
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Test DescribeAcls
+ */
+static void
+do_test_DescribeAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
+ rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
+ size_t acl_binding_results_cntp;
+ test_timing_t timing;
+ rd_kafka_resp_err_t err;
+ uint32_t i;
+ char errstr[128];
+ const char *errstr2;
+ const char *user_test1 = "User:test1";
+ const char *user_test2 = "User:test2";
+ const char *any_host = "*";
+ const char *topic_name;
+ rd_kafka_AclBinding_t *acl_bindings_create[2];
+ rd_kafka_AclBinding_t *acl_bindings_describe;
+ rd_kafka_AclBinding_t *acl;
+ const rd_kafka_DescribeAcls_result_t *acl_describe_result;
+ const rd_kafka_AclBinding_t **acl_binding_results;
+ rd_kafka_ResourcePatternType_t pattern_type_first_topic_create;
+ rd_bool_t broker_version1 =
+ test_broker_version >= TEST_BRKVER(2, 0, 0, 0);
+ rd_kafka_resp_err_t create_err;
+ rd_kafka_AdminOptions_t *admin_options;
+ rd_kafka_event_t *rkev_acl_describe;
+ const rd_kafka_error_t *error;
+
+ SUB_TEST_QUICK();
+
+ if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
+ SUB_TEST_SKIP(
+ "Skipping DESCRIBE_ACLS test on unsupported "
+ "broker version\n");
+ return;
+ }
+
+ pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
+ if (!broker_version1)
+ pattern_type_first_topic_create =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+
+ topic_name = test_mk_topic_name(__FUNCTION__, 1);
+
+ acl_bindings_create[0] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ pattern_type_first_topic_create, user_test1, any_host,
+ RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+ acl_bindings_create[1] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+
+ create_err =
+ test_CreateAcls_simple(rk, NULL, acl_bindings_create, 2, NULL);
+
+ TEST_ASSERT(!create_err, "create error: %s",
+ rd_kafka_err2str(create_err));
+
+ acl_bindings_describe = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ RD_KAFKA_RESOURCE_PATTERN_MATCH, NULL, NULL,
+ RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL,
+ 0);
+
+ admin_options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS);
+ err = rd_kafka_AdminOptions_set_request_timeout(admin_options, 10000,
+ errstr, sizeof(errstr));
+
+ TIMING_START(&timing, "DescribeAcls");
+ TEST_SAY("Call DescribeAcls\n");
+ rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_describe = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_describe);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
+
+ if (!broker_version1) {
+ TEST_ASSERT(
+ err == RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "expected RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE, not %s",
+ rd_kafka_err2str(err));
+ TEST_ASSERT(strcmp(errstr2,
+ "Broker only supports LITERAL and ANY "
+ "resource pattern types") == 0,
+ "expected another message, not %s", errstr2);
+ } else {
+ TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ errstr2);
+ }
+
+ if (!err) {
+
+ acl_describe_result =
+ rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
+
+ TEST_ASSERT(acl_describe_result,
+ "acl_describe_result should not be NULL");
+
+ acl_binding_results_cntp = 0;
+ acl_binding_results = rd_kafka_DescribeAcls_result_acls(
+ acl_describe_result, &acl_binding_results_cntp);
+
+ TEST_ASSERT(acl_binding_results_cntp == 2,
+ "acl_binding_results_cntp should be 2, not %zu",
+ acl_binding_results_cntp);
+
+ for (i = 0; i < acl_binding_results_cntp; i++) {
+ acl = (rd_kafka_AclBinding_t *)acl_binding_results[i];
+
+ if (strcmp(rd_kafka_AclBinding_principal(acl),
+ user_test1) == 0) {
+ TEST_ASSERT(
+ rd_kafka_AclBinding_restype(acl) ==
+ RD_KAFKA_RESOURCE_TOPIC,
+ "acl->restype should be "
+ "RD_KAFKA_RESOURCE_TOPIC, not %s",
+ rd_kafka_ResourceType_name(
+ rd_kafka_AclBinding_restype(acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_name(acl),
+ topic_name) == 0,
+ "acl->name should be %s, not %s",
+ topic_name, rd_kafka_AclBinding_name(acl));
+ TEST_ASSERT(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl) == pattern_type_first_topic_create,
+ "acl->resource_pattern_type should be %s, "
+ "not %s",
+ rd_kafka_ResourcePatternType_name(
+ pattern_type_first_topic_create),
+ rd_kafka_ResourcePatternType_name(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_principal(acl),
+ user_test1) == 0,
+ "acl->principal should be %s, not %s",
+ user_test1,
+ rd_kafka_AclBinding_principal(acl));
+
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_host(acl),
+ any_host) == 0,
+ "acl->host should be %s, not %s", any_host,
+ rd_kafka_AclBinding_host(acl));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_operation(acl) ==
+ RD_KAFKA_ACL_OPERATION_READ,
+ "acl->operation should be %s, not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_READ),
+ rd_kafka_AclOperation_name(
+ rd_kafka_AclBinding_operation(acl)));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_permission_type(acl) ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "acl->permission_type should be %s, not %s",
+ rd_kafka_AclPermissionType_name(
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(
+ rd_kafka_AclBinding_permission_type(
+ acl)));
+
+ error = rd_kafka_AclBinding_error(acl);
+ TEST_ASSERT(!error,
+ "acl->error should be NULL, not %s",
+ rd_kafka_error_string(error));
+
+ } else {
+ TEST_ASSERT(
+ rd_kafka_AclBinding_restype(acl) ==
+ RD_KAFKA_RESOURCE_TOPIC,
+ "acl->restype should be "
+ "RD_KAFKA_RESOURCE_TOPIC, not %s",
+ rd_kafka_ResourceType_name(
+ rd_kafka_AclBinding_restype(acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_name(acl),
+ topic_name) == 0,
+ "acl->name should be %s, not %s",
+ topic_name, rd_kafka_AclBinding_name(acl));
+ TEST_ASSERT(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl) ==
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "acl->resource_pattern_type should be %s, "
+ "not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(
+ rd_kafka_AclBinding_resource_pattern_type(
+ acl)));
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_principal(acl),
+ user_test2) == 0,
+ "acl->principal should be %s, not %s",
+ user_test2,
+ rd_kafka_AclBinding_principal(acl));
+
+ TEST_ASSERT(
+ strcmp(rd_kafka_AclBinding_host(acl),
+ any_host) == 0,
+ "acl->host should be %s, not %s", any_host,
+ rd_kafka_AclBinding_host(acl));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_operation(acl) ==
+ RD_KAFKA_ACL_OPERATION_WRITE,
+ "acl->operation should be %s, not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_WRITE),
+ rd_kafka_AclOperation_name(
+ rd_kafka_AclBinding_operation(acl)));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_permission_type(acl) ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "acl->permission_type should be %s, not %s",
+ rd_kafka_AclPermissionType_name(
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(
+ rd_kafka_AclBinding_permission_type(
+ acl)));
+
+
+ error = rd_kafka_AclBinding_error(acl);
+ TEST_ASSERT(!error,
+ "acl->error should be NULL, not %s",
+ rd_kafka_error_string(error));
+ }
+ }
+ }
+
+ rd_kafka_AclBinding_destroy(acl_bindings_describe);
+ rd_kafka_event_destroy(rkev_acl_describe);
+
+ acl_bindings_describe = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
+ NULL, 0);
+
+ TIMING_START(&timing, "DescribeAcls");
+ rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_describe = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_describe);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
+
+ TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ errstr2);
+
+ acl_describe_result =
+ rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
+
+ TEST_ASSERT(acl_describe_result,
+ "acl_describe_result should not be NULL");
+
+ acl_binding_results_cntp = 0;
+ acl_binding_results = rd_kafka_DescribeAcls_result_acls(
+ acl_describe_result, &acl_binding_results_cntp);
+
+ TEST_ASSERT(acl_binding_results_cntp == 1,
+ "acl_binding_results_cntp should be 1, not %zu",
+ acl_binding_results_cntp);
+
+ acl = (rd_kafka_AclBinding_t *)acl_binding_results[0];
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_restype(acl) == RD_KAFKA_RESOURCE_TOPIC,
+ "acl->restype should be RD_KAFKA_RESOURCE_TOPIC, not %s",
+ rd_kafka_ResourceType_name(rd_kafka_AclBinding_restype(acl)));
+ TEST_ASSERT(strcmp(rd_kafka_AclBinding_name(acl), topic_name) == 0,
+ "acl->name should be %s, not %s", topic_name,
+ rd_kafka_AclBinding_name(acl));
+ TEST_ASSERT(rd_kafka_AclBinding_resource_pattern_type(acl) ==
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "acl->resource_pattern_type should be %s, not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(
+ rd_kafka_AclBinding_resource_pattern_type(acl)));
+ TEST_ASSERT(strcmp(rd_kafka_AclBinding_principal(acl), user_test2) == 0,
+ "acl->principal should be %s, not %s", user_test2,
+ rd_kafka_AclBinding_principal(acl));
+
+ TEST_ASSERT(strcmp(rd_kafka_AclBinding_host(acl), any_host) == 0,
+ "acl->host should be %s, not %s", any_host,
+ rd_kafka_AclBinding_host(acl));
+
+ TEST_ASSERT(
+ rd_kafka_AclBinding_permission_type(acl) ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "acl->permission_type should be %s, not %s",
+ rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(
+ rd_kafka_AclBinding_permission_type(acl)));
+
+ error = rd_kafka_AclBinding_error(acl);
+ TEST_ASSERT(!error, "acl->error should be NULL, not %s",
+ rd_kafka_error_string(error));
+
+ rd_kafka_AclBinding_destroy(acl_bindings_describe);
+ rd_kafka_event_destroy(rkev_acl_describe);
+ rd_kafka_AdminOptions_destroy(admin_options);
+ rd_kafka_AclBinding_destroy_array(acl_bindings_create, 2);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Count acls by acl filter
+ */
+static size_t
+do_test_acls_count(rd_kafka_t *rk,
+ rd_kafka_AclBindingFilter_t *acl_bindings_describe,
+ rd_kafka_queue_t *q) {
+ char errstr[128];
+ rd_kafka_resp_err_t err;
+ rd_kafka_AdminOptions_t *admin_options_describe;
+ rd_kafka_event_t *rkev_acl_describe;
+ const rd_kafka_DescribeAcls_result_t *acl_describe_result;
+ const char *errstr2;
+ size_t acl_binding_results_cntp;
+
+ admin_options_describe =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DESCRIBEACLS);
+ rd_kafka_AdminOptions_set_request_timeout(admin_options_describe, 10000,
+ errstr, sizeof(errstr));
+
+ rd_kafka_DescribeAcls(rk, acl_bindings_describe, admin_options_describe,
+ q);
+ /*
+ * Wait for result
+ */
+ rkev_acl_describe = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DESCRIBEACLS_RESULT, 10000 + 1000);
+
+ err = rd_kafka_event_error(rkev_acl_describe);
+ errstr2 = rd_kafka_event_error_string(rkev_acl_describe);
+
+ TEST_ASSERT(!err, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ errstr2);
+
+ acl_describe_result =
+ rd_kafka_event_DescribeAcls_result(rkev_acl_describe);
+
+ TEST_ASSERT(acl_describe_result,
+ "acl_describe_result should not be NULL");
+
+ acl_binding_results_cntp = 0;
+ rd_kafka_DescribeAcls_result_acls(acl_describe_result,
+ &acl_binding_results_cntp);
+ rd_kafka_event_destroy(rkev_acl_describe);
+ rd_kafka_AdminOptions_destroy(admin_options_describe);
+
+ return acl_binding_results_cntp;
+}
+
+/**
+ * @brief Test DeleteAcls
+ */
+static void
+do_test_DeleteAcls(rd_kafka_t *rk, rd_kafka_queue_t *useq, int version) {
+ rd_kafka_queue_t *q = useq ? useq : rd_kafka_queue_new(rk);
+ test_timing_t timing;
+ uint32_t i;
+ char errstr[128];
+ const char *user_test1 = "User:test1";
+ const char *user_test2 = "User:test2";
+ const char *any_host = "*";
+ const char *base_topic_name;
+ char topic1_name[512];
+ char topic2_name[512];
+ size_t acl_binding_results_cntp;
+ size_t DeleteAcls_result_responses_cntp;
+ size_t matching_acls_cntp;
+ rd_kafka_AclBinding_t *acl_bindings_create[3];
+ rd_kafka_AclBindingFilter_t *acl_bindings_describe;
+ rd_kafka_AclBindingFilter_t *acl_bindings_delete;
+ rd_kafka_event_t *rkev_acl_delete;
+ rd_kafka_AdminOptions_t *admin_options_delete;
+ const rd_kafka_DeleteAcls_result_t *acl_delete_result;
+ const rd_kafka_DeleteAcls_result_response_t *
+ *DeleteAcls_result_responses;
+ const rd_kafka_DeleteAcls_result_response_t *DeleteAcls_result_response;
+ const rd_kafka_AclBinding_t **matching_acls;
+ const rd_kafka_AclBinding_t *matching_acl;
+ rd_kafka_ResourcePatternType_t pattern_type_first_topic_create;
+ rd_kafka_ResourcePatternType_t pattern_type_delete;
+ rd_bool_t broker_version1 =
+ test_broker_version >= TEST_BRKVER(2, 0, 0, 0);
+ rd_kafka_resp_err_t create_err;
+ rd_kafka_ResourceType_t restype;
+ rd_kafka_ResourcePatternType_t resource_pattern_type;
+ rd_kafka_AclOperation_t operation;
+ rd_kafka_AclPermissionType_t permission_type;
+ const char *name;
+ const char *principal;
+ const rd_kafka_error_t *error;
+
+ SUB_TEST_QUICK();
+
+ if (test_broker_version < TEST_BRKVER(0, 11, 0, 0)) {
+ SUB_TEST_SKIP(
+ "Skipping DELETE_ACLS test on unsupported "
+ "broker version\n");
+ return;
+ }
+
+ pattern_type_first_topic_create = RD_KAFKA_RESOURCE_PATTERN_PREFIXED;
+ pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_MATCH;
+ if (!broker_version1) {
+ pattern_type_first_topic_create =
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ pattern_type_delete = RD_KAFKA_RESOURCE_PATTERN_LITERAL;
+ }
+
+ base_topic_name = test_mk_topic_name(__FUNCTION__, 1);
+
+ rd_snprintf(topic1_name, sizeof(topic1_name), "%s_1", base_topic_name);
+ rd_snprintf(topic2_name, sizeof(topic2_name), "%s_2", base_topic_name);
+
+ acl_bindings_create[0] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name,
+ pattern_type_first_topic_create, user_test1, any_host,
+ RD_KAFKA_ACL_OPERATION_READ, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+ acl_bindings_create[1] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+ acl_bindings_create[2] = rd_kafka_AclBinding_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic2_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, user_test2, any_host,
+ RD_KAFKA_ACL_OPERATION_WRITE, RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ NULL, 0);
+
+ acl_bindings_delete = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic1_name, pattern_type_delete, NULL,
+ NULL, RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY,
+ NULL, 0);
+
+ acl_bindings_describe = acl_bindings_delete;
+
+ create_err =
+ test_CreateAcls_simple(rk, NULL, acl_bindings_create, 3, NULL);
+
+ TEST_ASSERT(!create_err, "create error: %s",
+ rd_kafka_err2str(create_err));
+
+ admin_options_delete =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEACLS);
+ rd_kafka_AdminOptions_set_request_timeout(admin_options_delete, 10000,
+ errstr, sizeof(errstr));
+
+ acl_binding_results_cntp =
+ do_test_acls_count(rk, acl_bindings_describe, q);
+ TEST_ASSERT(acl_binding_results_cntp == 2,
+ "acl_binding_results_cntp should not be 2, not %zu\n",
+ acl_binding_results_cntp);
+
+ TIMING_START(&timing, "DeleteAcls");
+ rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete,
+ q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_delete = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000);
+
+ acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete);
+
+ TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL");
+
+ DeleteAcls_result_responses_cntp = 0;
+ DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses(
+ acl_delete_result, &DeleteAcls_result_responses_cntp);
+
+ TEST_ASSERT(DeleteAcls_result_responses_cntp == 1,
+ "DeleteAcls_result_responses_cntp should be 1, not %zu\n",
+ DeleteAcls_result_responses_cntp);
+
+ DeleteAcls_result_response = DeleteAcls_result_responses[0];
+
+ TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error(
+ DeleteAcls_result_response));
+
+ matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls(
+ DeleteAcls_result_response, &matching_acls_cntp);
+
+ TEST_ASSERT(matching_acls_cntp == 2,
+ "matching_acls_cntp should be 2, not %zu\n",
+ matching_acls_cntp);
+
+ for (i = 0; i < matching_acls_cntp; i++) {
+ rd_kafka_ResourceType_t restype;
+ rd_kafka_ResourcePatternType_t resource_pattern_type;
+ rd_kafka_AclOperation_t operation;
+ rd_kafka_AclPermissionType_t permission_type;
+ const char *name;
+ const char *principal;
+
+ matching_acl = matching_acls[i];
+ error = rd_kafka_AclBinding_error(matching_acl);
+ restype = rd_kafka_AclBinding_restype(matching_acl);
+ name = rd_kafka_AclBinding_name(matching_acl);
+ resource_pattern_type =
+ rd_kafka_AclBinding_resource_pattern_type(matching_acl);
+ principal = rd_kafka_AclBinding_principal(matching_acl);
+ operation = rd_kafka_AclBinding_operation(matching_acl);
+ permission_type =
+ rd_kafka_AclBinding_permission_type(matching_acl);
+
+ TEST_ASSERT(!error, "expected success, not %s",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC,
+ "expected RD_KAFKA_RESOURCE_TOPIC not %s",
+ rd_kafka_ResourceType_name(restype));
+ TEST_ASSERT(strcmp(name, topic1_name) == 0,
+ "expected %s not %s", topic1_name, name);
+ TEST_ASSERT(permission_type ==
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "expected %s not %s",
+ rd_kafka_AclPermissionType_name(
+ RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(permission_type));
+
+ if (strcmp(user_test1, principal) == 0) {
+ TEST_ASSERT(resource_pattern_type ==
+ pattern_type_first_topic_create,
+ "expected %s not %s",
+ rd_kafka_ResourcePatternType_name(
+ pattern_type_first_topic_create),
+ rd_kafka_ResourcePatternType_name(
+ resource_pattern_type));
+
+ TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_READ,
+ "expected %s not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_READ),
+ rd_kafka_AclOperation_name(operation));
+
+ } else {
+ TEST_ASSERT(resource_pattern_type ==
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "expected %s not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(
+ resource_pattern_type));
+
+ TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE,
+ "expected %s not %s",
+ rd_kafka_AclOperation_name(
+ RD_KAFKA_ACL_OPERATION_WRITE),
+ rd_kafka_AclOperation_name(operation));
+ }
+ }
+
+ acl_binding_results_cntp =
+ do_test_acls_count(rk, acl_bindings_describe, q);
+ TEST_ASSERT(acl_binding_results_cntp == 0,
+ "acl_binding_results_cntp should be 0, not %zu\n",
+ acl_binding_results_cntp);
+
+ rd_kafka_event_destroy(rkev_acl_delete);
+ rd_kafka_AclBinding_destroy(acl_bindings_delete);
+
+ acl_bindings_delete = rd_kafka_AclBindingFilter_new(
+ RD_KAFKA_RESOURCE_TOPIC, topic2_name,
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL, NULL, NULL,
+ RD_KAFKA_ACL_OPERATION_ANY, RD_KAFKA_ACL_PERMISSION_TYPE_ANY, NULL,
+ 0);
+ acl_bindings_describe = acl_bindings_delete;
+
+ TIMING_START(&timing, "DeleteAcls");
+ rd_kafka_DeleteAcls(rk, &acl_bindings_delete, 1, admin_options_delete,
+ q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ /*
+ * Wait for result
+ */
+ rkev_acl_delete = test_wait_admin_result(
+ q, RD_KAFKA_EVENT_DELETEACLS_RESULT, 10000 + 1000);
+
+ acl_delete_result = rd_kafka_event_DeleteAcls_result(rkev_acl_delete);
+
+ TEST_ASSERT(acl_delete_result, "acl_delete_result should not be NULL");
+
+ DeleteAcls_result_responses_cntp = 0;
+ DeleteAcls_result_responses = rd_kafka_DeleteAcls_result_responses(
+ acl_delete_result, &DeleteAcls_result_responses_cntp);
+
+ TEST_ASSERT(DeleteAcls_result_responses_cntp == 1,
+ "DeleteAcls_result_responses_cntp should be 1, not %zu\n",
+ DeleteAcls_result_responses_cntp);
+
+ DeleteAcls_result_response = DeleteAcls_result_responses[0];
+
+ TEST_CALL_ERROR__(rd_kafka_DeleteAcls_result_response_error(
+ DeleteAcls_result_response));
+
+ matching_acls = rd_kafka_DeleteAcls_result_response_matching_acls(
+ DeleteAcls_result_response, &matching_acls_cntp);
+
+ TEST_ASSERT(matching_acls_cntp == 1,
+ "matching_acls_cntp should be 1, not %zu\n",
+ matching_acls_cntp);
+
+ matching_acl = matching_acls[0];
+ error = rd_kafka_AclBinding_error(matching_acl);
+ restype = rd_kafka_AclBinding_restype(matching_acl);
+ name = rd_kafka_AclBinding_name(matching_acl);
+ resource_pattern_type =
+ rd_kafka_AclBinding_resource_pattern_type(matching_acl);
+ principal = rd_kafka_AclBinding_principal(matching_acl);
+ operation = rd_kafka_AclBinding_operation(matching_acl);
+ permission_type = rd_kafka_AclBinding_permission_type(matching_acl);
+
+ TEST_ASSERT(!error, "expected RD_KAFKA_RESP_ERR_NO_ERROR not %s",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(restype == RD_KAFKA_RESOURCE_TOPIC,
+ "expected RD_KAFKA_RESOURCE_TOPIC not %s",
+ rd_kafka_ResourceType_name(restype));
+ TEST_ASSERT(strcmp(name, topic2_name) == 0, "expected %s not %s",
+ topic2_name, name);
+ TEST_ASSERT(
+ permission_type == RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW,
+ "expected %s not %s",
+ rd_kafka_AclPermissionType_name(RD_KAFKA_ACL_PERMISSION_TYPE_ALLOW),
+ rd_kafka_AclPermissionType_name(permission_type));
+ TEST_ASSERT(strcmp(user_test2, principal) == 0, "expected %s not %s",
+ user_test2, principal);
+ TEST_ASSERT(resource_pattern_type == RD_KAFKA_RESOURCE_PATTERN_LITERAL,
+ "expected %s not %s",
+ rd_kafka_ResourcePatternType_name(
+ RD_KAFKA_RESOURCE_PATTERN_LITERAL),
+ rd_kafka_ResourcePatternType_name(resource_pattern_type));
+
+ TEST_ASSERT(operation == RD_KAFKA_ACL_OPERATION_WRITE,
+ "expected %s not %s",
+ rd_kafka_AclOperation_name(RD_KAFKA_ACL_OPERATION_WRITE),
+ rd_kafka_AclOperation_name(operation));
+
+ acl_binding_results_cntp =
+ do_test_acls_count(rk, acl_bindings_describe, q);
+ TEST_ASSERT(acl_binding_results_cntp == 0,
+ "acl_binding_results_cntp should be 0, not %zu\n",
+ acl_binding_results_cntp);
+
+ rd_kafka_AclBinding_destroy(acl_bindings_delete);
+ rd_kafka_event_destroy(rkev_acl_delete);
+ rd_kafka_AdminOptions_destroy(admin_options_delete);
+
+ rd_kafka_AclBinding_destroy_array(acl_bindings_create, 3);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Verify that an unclean rd_kafka_destroy() does not hang.
+ */
+static void do_test_unclean_destroy(rd_kafka_type_t cltype, int with_mainq) {
+ rd_kafka_t *rk;
+ char errstr[512];
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *q;
+ rd_kafka_NewTopic_t *topic;
+ test_timing_t t_destroy;
+
+ SUB_TEST_QUICK("Test unclean destroy using %s",
+ with_mainq ? "mainq" : "tempq");
+
+ test_conf_init(&conf, NULL, 0);
+
+ rk = rd_kafka_new(cltype, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(rk, "kafka_new(%d): %s", cltype, errstr);
+
+ if (with_mainq)
+ q = rd_kafka_queue_get_main(rk);
+ else
+ q = rd_kafka_queue_new(rk);
+
+ topic = rd_kafka_NewTopic_new(test_mk_topic_name(__FUNCTION__, 1), 3, 1,
+ NULL, 0);
+ rd_kafka_CreateTopics(rk, &topic, 1, NULL, q);
+ rd_kafka_NewTopic_destroy(topic);
+
+ rd_kafka_queue_destroy(q);
+
+ TEST_SAY(
+ "Giving rd_kafka_destroy() 5s to finish, "
+ "despite Admin API request being processed\n");
+ test_timeout_set(5);
+ TIMING_START(&t_destroy, "rd_kafka_destroy()");
+ rd_kafka_destroy(rk);
+ TIMING_STOP(&t_destroy);
+
+ SUB_TEST_PASS();
+
+ /* Restore timeout */
+ test_timeout_set(60);
+}
+
+
+
+/**
+ * @brief Test deletion of records
+ *
+ *
+ */
+static void do_test_DeleteRecords(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int op_timeout) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_topic_partition_list_t *offsets = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ const char *errstr2;
+#define MY_DEL_RECORDS_CNT 3
+ rd_kafka_topic_partition_list_t *results = NULL;
+ int i;
+ const int partitions_cnt = 3;
+ const int msgs_cnt = 100;
+ char *topics[MY_DEL_RECORDS_CNT];
+ rd_kafka_metadata_topic_t exp_mdtopics[MY_DEL_RECORDS_CNT] = {{0}};
+ int exp_mdtopic_cnt = 0;
+ test_timing_t timing;
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_DeleteRecords_t *del_records;
+ const rd_kafka_DeleteRecords_result_t *res;
+
+ SUB_TEST_QUICK("%s DeleteRecords with %s, op_timeout %d",
+ rd_kafka_name(rk), what, op_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (op_timeout != -1) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, op_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
+ char pfx[32];
+ char *topic;
+
+ rd_snprintf(pfx, sizeof(pfx), "DeleteRecords-topic%d", i);
+ topic = rd_strdup(test_mk_topic_name(pfx, 1));
+
+ topics[i] = topic;
+ exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
+ }
+
+ /* Create the topics first. */
+ test_CreateTopics_simple(rk, NULL, topics, MY_DEL_RECORDS_CNT,
+ partitions_cnt /*num_partitions*/, NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
+ 15 * 1000);
+
+ /* Produce 100 msgs / partition */
+ for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
+ int32_t partition;
+ for (partition = 0; partition < partitions_cnt; partition++) {
+ test_produce_msgs_easy(topics[i], 0, partition,
+ msgs_cnt);
+ }
+ }
+
+ offsets = rd_kafka_topic_partition_list_new(10);
+
+ /* Wipe all data from topic 0 */
+ for (i = 0; i < partitions_cnt; i++)
+ rd_kafka_topic_partition_list_add(offsets, topics[0], i)
+ ->offset = RD_KAFKA_OFFSET_END;
+
+ /* Wipe all data from partition 0 in topic 1 */
+ rd_kafka_topic_partition_list_add(offsets, topics[1], 0)->offset =
+ RD_KAFKA_OFFSET_END;
+
+ /* Wipe some data from partition 2 in topic 1 */
+ rd_kafka_topic_partition_list_add(offsets, topics[1], 2)->offset =
+ msgs_cnt / 2;
+
+ /* Not changing the offset (out of range) for topic 2 partition 0 */
+ rd_kafka_topic_partition_list_add(offsets, topics[2], 0);
+
+ /* Offset out of range for topic 2 partition 1 */
+ rd_kafka_topic_partition_list_add(offsets, topics[2], 1)->offset =
+ msgs_cnt + 1;
+
+ del_records = rd_kafka_DeleteRecords_new(offsets);
+
+ TIMING_START(&timing, "DeleteRecords");
+ TEST_SAY("Call DeleteRecords\n");
+ rd_kafka_DeleteRecords(rk, &del_records, 1, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ rd_kafka_DeleteRecords_destroy(del_records);
+
+ TIMING_START(&timing, "DeleteRecords.queue_poll");
+
+ /* Poll result queue for DeleteRecords result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
+ TEST_SAY("DeleteRecords: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_DELETERECORDS_RESULT) {
+ break;
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteRecords_result(rkev);
+ TEST_ASSERT(res, "expected DeleteRecords_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == exp_err,
+ "expected DeleteRecords to return %s, not %s (%s)",
+ rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ TEST_SAY("DeleteRecords: returned %s (%s)\n", rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ results = rd_kafka_topic_partition_list_copy(
+ rd_kafka_DeleteRecords_result_offsets(res));
+
+ /* Sort both input and output list */
+ rd_kafka_topic_partition_list_sort(offsets, NULL, NULL);
+ rd_kafka_topic_partition_list_sort(results, NULL, NULL);
+
+ TEST_SAY("Input partitions:\n");
+ test_print_partition_list(offsets);
+ TEST_SAY("Result partitions:\n");
+ test_print_partition_list(results);
+
+ TEST_ASSERT(offsets->cnt == results->cnt,
+ "expected DeleteRecords_result_offsets to return %d items, "
+ "not %d",
+ offsets->cnt, results->cnt);
+
+ for (i = 0; i < results->cnt; i++) {
+ const rd_kafka_topic_partition_t *input = &offsets->elems[i];
+ const rd_kafka_topic_partition_t *output = &results->elems[i];
+ int64_t expected_offset = input->offset;
+ rd_kafka_resp_err_t expected_err = 0;
+
+ if (expected_offset == RD_KAFKA_OFFSET_END)
+ expected_offset = msgs_cnt;
+
+ /* Expect Offset out of range error */
+ if (input->offset < RD_KAFKA_OFFSET_END ||
+ input->offset > msgs_cnt)
+ expected_err = 1;
+
+ TEST_SAY("DeleteRecords Returned %s for %s [%" PRId32
+ "] "
+ "low-watermark = %d\n",
+ rd_kafka_err2name(output->err), output->topic,
+ output->partition, (int)output->offset);
+
+ if (strcmp(output->topic, input->topic))
+ TEST_FAIL_LATER(
+ "Result order mismatch at #%d: "
+ "expected topic %s, got %s",
+ i, input->topic, output->topic);
+
+ if (output->partition != input->partition)
+ TEST_FAIL_LATER(
+ "Result order mismatch at #%d: "
+ "expected partition %d, got %d",
+ i, input->partition, output->partition);
+
+ if (output->err != expected_err)
+ TEST_FAIL_LATER(
+ "%s [%" PRId32
+ "]: "
+ "expected error code %d (%s), "
+ "got %d (%s)",
+ output->topic, output->partition, expected_err,
+ rd_kafka_err2str(expected_err), output->err,
+ rd_kafka_err2str(output->err));
+
+ if (output->err == 0 && output->offset != expected_offset)
+ TEST_FAIL_LATER("%s [%" PRId32
+ "]: "
+ "expected offset %" PRId64
+ ", "
+ "got %" PRId64,
+ output->topic, output->partition,
+ expected_offset, output->offset);
+ }
+
+ /* Check watermarks for partitions */
+ for (i = 0; i < MY_DEL_RECORDS_CNT; i++) {
+ int32_t partition;
+ for (partition = 0; partition < partitions_cnt; partition++) {
+ const rd_kafka_topic_partition_t *del =
+ rd_kafka_topic_partition_list_find(
+ results, topics[i], partition);
+ int64_t expected_low = 0;
+ int64_t expected_high = msgs_cnt;
+ int64_t low, high;
+
+ if (del && del->err == 0) {
+ expected_low = del->offset;
+ }
+
+ err = rd_kafka_query_watermark_offsets(
+ rk, topics[i], partition, &low, &high,
+ tmout_multip(10000));
+ if (err)
+ TEST_FAIL(
+ "query_watermark_offsets failed: "
+ "%s\n",
+ rd_kafka_err2str(err));
+
+ if (low != expected_low)
+ TEST_FAIL_LATER("For %s [%" PRId32
+ "] expected "
+ "a low watermark of %" PRId64
+ ", got %" PRId64,
+ topics[i], partition,
+ expected_low, low);
+
+ if (high != expected_high)
+ TEST_FAIL_LATER("For %s [%" PRId32
+ "] expected "
+ "a high watermark of %" PRId64
+ ", got %" PRId64,
+ topics[i], partition,
+ expected_high, high);
+ }
+ }
+
+ rd_kafka_event_destroy(rkev);
+
+ for (i = 0; i < MY_DEL_RECORDS_CNT; i++)
+ rd_free(topics[i]);
+
+ if (results)
+ rd_kafka_topic_partition_list_destroy(results);
+
+ if (offsets)
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef MY_DEL_RECORDS_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Test deletion of groups
+ *
+ *
+ */
+
+typedef struct expected_group_result {
+ char *group;
+ rd_kafka_resp_err_t err;
+} expected_group_result_t;
+
+static void do_test_DeleteGroups(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int request_timeout) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ const char *errstr2;
+#define MY_DEL_GROUPS_CNT 4
+ int known_groups = MY_DEL_GROUPS_CNT - 1;
+ int i;
+ const int partitions_cnt = 1;
+ const int msgs_cnt = 100;
+ char *topic;
+ rd_kafka_metadata_topic_t exp_mdtopic = {0};
+ int64_t testid = test_id_generate();
+ test_timing_t timing;
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ const rd_kafka_group_result_t **results = NULL;
+ expected_group_result_t expected[MY_DEL_GROUPS_CNT] = {{0}};
+ rd_kafka_DeleteGroup_t *del_groups[MY_DEL_GROUPS_CNT];
+ const rd_kafka_DeleteGroups_result_t *res;
+
+ SUB_TEST_QUICK("%s DeleteGroups with %s, request_timeout %d",
+ rd_kafka_name(rk), what, request_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (request_timeout != -1) {
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_ANY);
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, request_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ exp_mdtopic.topic = topic;
+
+ /* Create the topics first. */
+ test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000);
+
+ /* Produce 100 msgs */
+ test_produce_msgs_easy(topic, testid, 0, msgs_cnt);
+
+ for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
+ char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ if (i < known_groups) {
+ test_consume_msgs_easy(group, topic, testid, -1,
+ msgs_cnt, NULL);
+ expected[i].group = group;
+ expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ } else {
+ expected[i].group = group;
+ expected[i].err = RD_KAFKA_RESP_ERR_GROUP_ID_NOT_FOUND;
+ }
+ del_groups[i] = rd_kafka_DeleteGroup_new(group);
+ }
+
+ TIMING_START(&timing, "DeleteGroups");
+ TEST_SAY("Call DeleteGroups\n");
+ rd_kafka_DeleteGroups(rk, del_groups, MY_DEL_GROUPS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ TIMING_START(&timing, "DeleteGroups.queue_poll");
+
+ /* Poll result queue for DeleteGroups result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
+ TEST_SAY("DeleteGroups: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_DELETEGROUPS_RESULT) {
+ break;
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteGroups_result(rkev);
+ TEST_ASSERT(res, "expected DeleteGroups_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == exp_err,
+ "expected DeleteGroups to return %s, not %s (%s)",
+ rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ TEST_SAY("DeleteGroups: returned %s (%s)\n", rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ size_t cnt = 0;
+ results = rd_kafka_DeleteGroups_result_groups(res, &cnt);
+
+ TEST_ASSERT(MY_DEL_GROUPS_CNT == cnt,
+ "expected DeleteGroups_result_groups to return %d items, "
+ "not %" PRIusz,
+ MY_DEL_GROUPS_CNT, cnt);
+
+ for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
+ const expected_group_result_t *exp = &expected[i];
+ rd_kafka_resp_err_t exp_err = exp->err;
+ const rd_kafka_group_result_t *act = results[i];
+ rd_kafka_resp_err_t act_err =
+ rd_kafka_error_code(rd_kafka_group_result_error(act));
+ TEST_ASSERT(
+ strcmp(exp->group, rd_kafka_group_result_name(act)) == 0,
+ "Result order mismatch at #%d: expected group name to be "
+ "%s, not %s",
+ i, exp->group, rd_kafka_group_result_name(act));
+ TEST_ASSERT(exp_err == act_err,
+ "expected err=%d for group %s, not %d (%s)",
+ exp_err, exp->group, act_err,
+ rd_kafka_err2str(act_err));
+ }
+
+ rd_kafka_event_destroy(rkev);
+
+ for (i = 0; i < MY_DEL_GROUPS_CNT; i++) {
+ rd_kafka_DeleteGroup_destroy(del_groups[i]);
+ rd_free(expected[i].group);
+ }
+
+ rd_free(topic);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef MY_DEL_GROUPS_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Test list groups, creating consumers for a set of groups,
+ * listing and deleting them at the end.
+ */
+static void do_test_ListConsumerGroups(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int request_timeout,
+ rd_bool_t match_states) {
+#define TEST_LIST_CONSUMER_GROUPS_CNT 4
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t err;
+ size_t valid_cnt, error_cnt;
+ rd_bool_t is_simple_consumer_group;
+ rd_kafka_consumer_group_state_t state;
+ char errstr[512];
+ const char *errstr2, *group_id;
+ char *list_consumer_groups[TEST_LIST_CONSUMER_GROUPS_CNT];
+ const int partitions_cnt = 1;
+ const int msgs_cnt = 100;
+ size_t i, found;
+ char *topic;
+ rd_kafka_metadata_topic_t exp_mdtopic = {0};
+ int64_t testid = test_id_generate();
+ test_timing_t timing;
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ const rd_kafka_ListConsumerGroups_result_t *res;
+ const rd_kafka_ConsumerGroupListing_t **groups;
+ rd_bool_t has_match_states =
+ test_broker_version >= TEST_BRKVER(2, 7, 0, 0);
+
+ SUB_TEST_QUICK(
+ "%s ListConsumerGroups with %s, request_timeout %d"
+ ", match_states %s",
+ rd_kafka_name(rk), what, request_timeout, RD_STR_ToF(match_states));
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (request_timeout != -1) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPS);
+
+ if (match_states) {
+ rd_kafka_consumer_group_state_t empty =
+ RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY;
+
+ TEST_CALL_ERROR__(
+ rd_kafka_AdminOptions_set_match_consumer_group_states(
+ options, &empty, 1));
+ }
+
+ TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
+ options, request_timeout, errstr, sizeof(errstr)));
+ }
+
+
+ topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ exp_mdtopic.topic = topic;
+
+ /* Create the topics first. */
+ test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000);
+
+ /* Produce 100 msgs */
+ test_produce_msgs_easy(topic, testid, 0, msgs_cnt);
+
+ for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) {
+ char *group = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ test_consume_msgs_easy(group, topic, testid, -1, msgs_cnt,
+ NULL);
+ list_consumer_groups[i] = group;
+ }
+
+ TIMING_START(&timing, "ListConsumerGroups");
+ TEST_SAY("Call ListConsumerGroups\n");
+ rd_kafka_ListConsumerGroups(rk, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ TIMING_START(&timing, "ListConsumerGroups.queue_poll");
+
+ /* Poll result queue for ListConsumerGroups result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
+ TEST_SAY("ListConsumerGroups: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) {
+ break;
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+ /* Convert event to proper result */
+ res = rd_kafka_event_ListConsumerGroups_result(rkev);
+ TEST_ASSERT(res, "expected ListConsumerGroups_result, got %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == exp_err,
+ "expected ListConsumerGroups to return %s, got %s (%s)",
+ rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ TEST_SAY("ListConsumerGroups: returned %s (%s)\n",
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ groups = rd_kafka_ListConsumerGroups_result_valid(res, &valid_cnt);
+ rd_kafka_ListConsumerGroups_result_errors(res, &error_cnt);
+
+ /* Other tests could be running */
+ TEST_ASSERT(valid_cnt >= TEST_LIST_CONSUMER_GROUPS_CNT,
+ "expected ListConsumerGroups to return at least %" PRId32
+ " valid groups,"
+ " got %zu",
+ TEST_LIST_CONSUMER_GROUPS_CNT, valid_cnt);
+
+ TEST_ASSERT(error_cnt == 0,
+ "expected ListConsumerGroups to return 0 errors,"
+ " got %zu",
+ error_cnt);
+
+ found = 0;
+ for (i = 0; i < valid_cnt; i++) {
+ int j;
+ const rd_kafka_ConsumerGroupListing_t *group;
+ group = groups[i];
+ group_id = rd_kafka_ConsumerGroupListing_group_id(group);
+ is_simple_consumer_group =
+ rd_kafka_ConsumerGroupListing_is_simple_consumer_group(
+ group);
+ state = rd_kafka_ConsumerGroupListing_state(group);
+ for (j = 0; j < TEST_LIST_CONSUMER_GROUPS_CNT; j++) {
+ if (!strcmp(list_consumer_groups[j], group_id)) {
+ found++;
+ TEST_ASSERT(!is_simple_consumer_group,
+ "expected a normal group,"
+ " got a simple group");
+
+ if (!has_match_states)
+ break;
+
+ TEST_ASSERT(
+ state ==
+ RD_KAFKA_CONSUMER_GROUP_STATE_EMPTY,
+ "expected an Empty state,"
+ " got state %s",
+ rd_kafka_consumer_group_state_name(state));
+ break;
+ }
+ }
+ }
+ TEST_ASSERT(found == TEST_LIST_CONSUMER_GROUPS_CNT,
+ "expected to find %d"
+ " started groups,"
+ " got %" PRIusz,
+ TEST_LIST_CONSUMER_GROUPS_CNT, found);
+
+ rd_kafka_event_destroy(rkev);
+
+ test_DeleteGroups_simple(rk, NULL, (char **)list_consumer_groups,
+ TEST_LIST_CONSUMER_GROUPS_CNT, NULL);
+
+ for (i = 0; i < TEST_LIST_CONSUMER_GROUPS_CNT; i++) {
+ rd_free(list_consumer_groups[i]);
+ }
+
+ rd_free(topic);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef TEST_LIST_CONSUMER_GROUPS_CNT
+
+ SUB_TEST_PASS();
+}
+
+typedef struct expected_DescribeConsumerGroups_result {
+ char *group_id;
+ rd_kafka_resp_err_t err;
+} expected_DescribeConsumerGroups_result_t;
+
+
+/**
+ * @brief Test describe groups, creating consumers for a set of groups,
+ * describing and deleting them at the end.
+ */
+static void do_test_DescribeConsumerGroups(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int request_timeout) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ const char *errstr2;
+#define TEST_DESCRIBE_CONSUMER_GROUPS_CNT 4
+ int known_groups = TEST_DESCRIBE_CONSUMER_GROUPS_CNT - 1;
+ int i;
+ const int partitions_cnt = 1;
+ const int msgs_cnt = 100;
+ char *topic;
+ rd_kafka_metadata_topic_t exp_mdtopic = {0};
+ int64_t testid = test_id_generate();
+ test_timing_t timing;
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ const rd_kafka_ConsumerGroupDescription_t **results = NULL;
+ expected_DescribeConsumerGroups_result_t
+ expected[TEST_DESCRIBE_CONSUMER_GROUPS_CNT] = RD_ZERO_INIT;
+ const char *describe_groups[TEST_DESCRIBE_CONSUMER_GROUPS_CNT];
+ char group_instance_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512];
+ char client_ids[TEST_DESCRIBE_CONSUMER_GROUPS_CNT][512];
+ rd_kafka_t *rks[TEST_DESCRIBE_CONSUMER_GROUPS_CNT];
+ const rd_kafka_DescribeConsumerGroups_result_t *res;
+ rd_bool_t has_group_instance_id =
+ test_broker_version >= TEST_BRKVER(2, 4, 0, 0);
+
+ SUB_TEST_QUICK("%s DescribeConsumerGroups with %s, request_timeout %d",
+ rd_kafka_name(rk), what, request_timeout);
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (request_timeout != -1) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DESCRIBECONSUMERGROUPS);
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, request_timeout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ topic = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ exp_mdtopic.topic = topic;
+
+ /* Create the topics first. */
+ test_CreateTopics_simple(rk, NULL, &topic, 1, partitions_cnt, NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, &exp_mdtopic, 1, NULL, 0, 15 * 1000);
+
+ /* Produce 100 msgs */
+ test_produce_msgs_easy(topic, testid, 0, msgs_cnt);
+
+ for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
+ rd_kafka_conf_t *conf;
+ char *group_id = rd_strdup(test_mk_topic_name(__FUNCTION__, 1));
+ if (i < known_groups) {
+ snprintf(group_instance_ids[i],
+ sizeof(group_instance_ids[i]),
+ "group_instance_id_%" PRId32, i);
+ snprintf(client_ids[i], sizeof(client_ids[i]),
+ "client_id_%" PRId32, i);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "client.id", client_ids[i]);
+ test_conf_set(conf, "group.instance.id",
+ group_instance_ids[i]);
+ test_conf_set(conf, "session.timeout.ms", "5000");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ rks[i] =
+ test_create_consumer(group_id, NULL, conf, NULL);
+ test_consumer_subscribe(rks[i], topic);
+ /* Consume messages */
+ test_consumer_poll("consumer", rks[i], testid, -1, -1,
+ msgs_cnt, NULL);
+ }
+ expected[i].group_id = group_id;
+ expected[i].err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ describe_groups[i] = group_id;
+ }
+
+ TIMING_START(&timing, "DescribeConsumerGroups");
+ TEST_SAY("Call DescribeConsumerGroups\n");
+ rd_kafka_DescribeConsumerGroups(
+ rk, describe_groups, TEST_DESCRIBE_CONSUMER_GROUPS_CNT, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ TIMING_START(&timing, "DescribeConsumerGroups.queue_poll");
+
+ /* Poll result queue for DescribeConsumerGroups result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(20 * 1000));
+ TEST_SAY("DescribeConsumerGroups: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) {
+ break;
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+ /* Convert event to proper result */
+ res = rd_kafka_event_DescribeConsumerGroups_result(rkev);
+ TEST_ASSERT(res, "expected DescribeConsumerGroups_result, got %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(err == exp_err,
+ "expected DescribeConsumerGroups to return %s, got %s (%s)",
+ rd_kafka_err2str(exp_err), rd_kafka_err2str(err),
+ err ? errstr2 : "n/a");
+
+ TEST_SAY("DescribeConsumerGroups: returned %s (%s)\n",
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ size_t cnt = 0;
+ results = rd_kafka_DescribeConsumerGroups_result_groups(res, &cnt);
+
+ TEST_ASSERT(
+ TEST_DESCRIBE_CONSUMER_GROUPS_CNT == cnt,
+ "expected DescribeConsumerGroups_result_groups to return %d items, "
+ "got %" PRIusz,
+ TEST_DESCRIBE_CONSUMER_GROUPS_CNT, cnt);
+
+ for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
+ expected_DescribeConsumerGroups_result_t *exp = &expected[i];
+ rd_kafka_resp_err_t exp_err = exp->err;
+ const rd_kafka_ConsumerGroupDescription_t *act = results[i];
+ rd_kafka_resp_err_t act_err = rd_kafka_error_code(
+ rd_kafka_ConsumerGroupDescription_error(act));
+ rd_kafka_consumer_group_state_t state =
+ rd_kafka_ConsumerGroupDescription_state(act);
+ TEST_ASSERT(
+ strcmp(exp->group_id,
+ rd_kafka_ConsumerGroupDescription_group_id(act)) ==
+ 0,
+ "Result order mismatch at #%d: expected group id to be "
+ "%s, got %s",
+ i, exp->group_id,
+ rd_kafka_ConsumerGroupDescription_group_id(act));
+ if (i < known_groups) {
+ int member_count;
+ const rd_kafka_MemberDescription_t *member;
+ const rd_kafka_MemberAssignment_t *assignment;
+ const char *client_id;
+ const char *group_instance_id;
+ const rd_kafka_topic_partition_list_t *partitions;
+
+ TEST_ASSERT(state ==
+ RD_KAFKA_CONSUMER_GROUP_STATE_STABLE,
+ "Expected Stable state, got %s.",
+ rd_kafka_consumer_group_state_name(state));
+
+ TEST_ASSERT(
+ !rd_kafka_ConsumerGroupDescription_is_simple_consumer_group(
+ act),
+ "Expected a normal consumer group, got a simple "
+ "one.");
+
+ member_count =
+ rd_kafka_ConsumerGroupDescription_member_count(act);
+ TEST_ASSERT(member_count == 1,
+ "Expected one member, got %d.",
+ member_count);
+
+ member =
+ rd_kafka_ConsumerGroupDescription_member(act, 0);
+
+ client_id =
+ rd_kafka_MemberDescription_client_id(member);
+ TEST_ASSERT(!strcmp(client_id, client_ids[i]),
+ "Expected client id \"%s\","
+ " got \"%s\".",
+ client_ids[i], client_id);
+
+ if (has_group_instance_id) {
+ group_instance_id =
+ rd_kafka_MemberDescription_group_instance_id(
+ member);
+ TEST_ASSERT(!strcmp(group_instance_id,
+ group_instance_ids[i]),
+ "Expected group instance id \"%s\","
+ " got \"%s\".",
+ group_instance_ids[i],
+ group_instance_id);
+ }
+
+ assignment =
+ rd_kafka_MemberDescription_assignment(member);
+ TEST_ASSERT(assignment != NULL,
+ "Expected non-NULL member assignment");
+
+ partitions =
+ rd_kafka_MemberAssignment_partitions(assignment);
+ TEST_ASSERT(partitions != NULL,
+ "Expected non-NULL member partitions");
+
+ TEST_SAY(
+ "Member client.id=\"%s\", "
+ "group.instance.id=\"%s\", "
+ "consumer_id=\"%s\", "
+ "host=\"%s\", assignment:\n",
+ rd_kafka_MemberDescription_client_id(member),
+ rd_kafka_MemberDescription_group_instance_id(
+ member),
+ rd_kafka_MemberDescription_consumer_id(member),
+ rd_kafka_MemberDescription_host(member));
+ /* This is just to make sure the returned memory
+ * is valid. */
+ test_print_partition_list(partitions);
+ } else {
+ TEST_ASSERT(state == RD_KAFKA_CONSUMER_GROUP_STATE_DEAD,
+ "Expected Dead state, got %s.",
+ rd_kafka_consumer_group_state_name(state));
+ }
+ TEST_ASSERT(exp_err == act_err,
+ "expected err=%d for group %s, got %d (%s)",
+ exp_err, exp->group_id, act_err,
+ rd_kafka_err2str(act_err));
+ }
+
+ rd_kafka_event_destroy(rkev);
+
+ for (i = 0; i < known_groups; i++) {
+ test_consumer_close(rks[i]);
+ rd_kafka_destroy(rks[i]);
+ }
+
+ /* Wait session timeout + 1s. Because using static group membership */
+ rd_sleep(6);
+
+ test_DeleteGroups_simple(rk, NULL, (char **)describe_groups,
+ known_groups, NULL);
+
+ for (i = 0; i < TEST_DESCRIBE_CONSUMER_GROUPS_CNT; i++) {
+ rd_free(expected[i].group_id);
+ }
+
+ rd_free(topic);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef TEST_DESCRIBE_CONSUMER_GROUPS_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Test deletion of committed offsets.
+ *
+ *
+ */
+static void do_test_DeleteConsumerGroupOffsets(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int req_timeout_ms,
+ rd_bool_t sub_consumer) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_delete,
+ *committed, *deleted, *subscription = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ const char *errstr2;
+#define MY_TOPIC_CNT 3
+ int i;
+ const int partitions_cnt = 3;
+ char *topics[MY_TOPIC_CNT];
+ rd_kafka_metadata_topic_t exp_mdtopics[MY_TOPIC_CNT] = {{0}};
+ int exp_mdtopic_cnt = 0;
+ test_timing_t timing;
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets;
+ const rd_kafka_DeleteConsumerGroupOffsets_result_t *res;
+ const rd_kafka_group_result_t **gres;
+ size_t gres_cnt;
+ rd_kafka_t *consumer;
+ char *groupid;
+
+ SUB_TEST_QUICK(
+ "%s DeleteConsumerGroupOffsets with %s, req_timeout_ms %d%s",
+ rd_kafka_name(rk), what, req_timeout_ms,
+ sub_consumer ? ", with subscribing consumer" : "");
+
+ if (sub_consumer)
+ exp_err = RD_KAFKA_RESP_ERR_GROUP_SUBSCRIBED_TO_TOPIC;
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (req_timeout_ms != -1) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS);
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, req_timeout_ms, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ subscription = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT);
+
+ for (i = 0; i < MY_TOPIC_CNT; i++) {
+ char pfx[64];
+ char *topic;
+
+ rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i);
+ topic = rd_strdup(test_mk_topic_name(pfx, 1));
+
+ topics[i] = topic;
+ exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
+
+ rd_kafka_topic_partition_list_add(subscription, topic,
+ RD_KAFKA_PARTITION_UA);
+ }
+
+ groupid = topics[0];
+
+ /* Create the topics first. */
+ test_CreateTopics_simple(rk, NULL, topics, MY_TOPIC_CNT, partitions_cnt,
+ NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
+ 15 * 1000);
+
+ rd_sleep(1); /* Additional wait time for cluster propagation */
+
+ consumer = test_create_consumer(groupid, NULL, NULL, NULL);
+
+ if (sub_consumer) {
+ TEST_CALL_ERR__(rd_kafka_subscribe(consumer, subscription));
+ test_consumer_wait_assignment(consumer, rd_true);
+ }
+
+ /* Commit some offsets */
+ orig_offsets = rd_kafka_topic_partition_list_new(MY_TOPIC_CNT * 2);
+ for (i = 0; i < MY_TOPIC_CNT * 2; i++)
+ rd_kafka_topic_partition_list_add(orig_offsets, topics[i / 2],
+ i % MY_TOPIC_CNT)
+ ->offset = (i + 1) * 10;
+
+ TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
+
+ /* Verify committed offsets match */
+ committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+ TEST_CALL_ERR__(
+ rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
+
+ if (test_partition_list_and_offsets_cmp(committed, orig_offsets)) {
+ TEST_SAY("commit() list:\n");
+ test_print_partition_list(orig_offsets);
+ TEST_SAY("committed() list:\n");
+ test_print_partition_list(committed);
+ TEST_FAIL("committed offsets don't match");
+ }
+
+ rd_kafka_topic_partition_list_destroy(committed);
+
+ /* Now delete second half of the commits */
+ offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
+ to_delete = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
+ for (i = 0; i < orig_offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar;
+ if (i < orig_offsets->cnt / 2) {
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, orig_offsets->elems[i].topic,
+ orig_offsets->elems[i].partition);
+ rktpar->offset = orig_offsets->elems[i].offset;
+ } else {
+ rktpar = rd_kafka_topic_partition_list_add(
+ to_delete, orig_offsets->elems[i].topic,
+ orig_offsets->elems[i].partition);
+ rktpar->offset = RD_KAFKA_OFFSET_INVALID;
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, orig_offsets->elems[i].topic,
+ orig_offsets->elems[i].partition);
+ rktpar->offset = RD_KAFKA_OFFSET_INVALID;
+ }
+ }
+
+ cgoffsets = rd_kafka_DeleteConsumerGroupOffsets_new(groupid, to_delete);
+
+ TIMING_START(&timing, "DeleteConsumerGroupOffsets");
+ TEST_SAY("Call DeleteConsumerGroupOffsets\n");
+ rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ rd_kafka_DeleteConsumerGroupOffsets_destroy(cgoffsets);
+
+ TIMING_START(&timing, "DeleteConsumerGroupOffsets.queue_poll");
+ /* Poll result queue for DeleteConsumerGroupOffsets result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
+ TEST_SAY("DeleteConsumerGroupOffsets: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT)
+ break;
+
+ rd_kafka_event_destroy(rkev);
+ }
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_DeleteConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected DeleteConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(!err,
+ "expected DeleteConsumerGroupOffsets to succeed, "
+ "got %s (%s)",
+ rd_kafka_err2name(err), err ? errstr2 : "n/a");
+
+ TEST_SAY("DeleteConsumerGroupOffsets: returned %s (%s)\n",
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ gres =
+ rd_kafka_DeleteConsumerGroupOffsets_result_groups(res, &gres_cnt);
+ TEST_ASSERT(gres && gres_cnt == 1,
+ "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
+
+ deleted = rd_kafka_topic_partition_list_copy(
+ rd_kafka_group_result_partitions(gres[0]));
+
+ if (test_partition_list_and_offsets_cmp(deleted, to_delete)) {
+ TEST_SAY("Result list:\n");
+ test_print_partition_list(deleted);
+ TEST_SAY("Partitions passed to DeleteConsumerGroupOffsets:\n");
+ test_print_partition_list(to_delete);
+ TEST_FAIL("deleted/requested offsets don't match");
+ }
+
+ /* Verify expected errors */
+ for (i = 0; i < deleted->cnt; i++) {
+ TEST_ASSERT_LATER(deleted->elems[i].err == exp_err,
+ "Result %s [%" PRId32
+ "] has error %s, "
+ "expected %s",
+ deleted->elems[i].topic,
+ deleted->elems[i].partition,
+ rd_kafka_err2name(deleted->elems[i].err),
+ rd_kafka_err2name(exp_err));
+ }
+
+ TEST_LATER_CHECK();
+
+ rd_kafka_topic_partition_list_destroy(deleted);
+ rd_kafka_topic_partition_list_destroy(to_delete);
+
+ rd_kafka_event_destroy(rkev);
+
+
+ /* Verify committed offsets match */
+ committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+ TEST_CALL_ERR__(
+ rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
+
+ TEST_SAY("Original committed offsets:\n");
+ test_print_partition_list(orig_offsets);
+
+ TEST_SAY("Committed offsets after delete:\n");
+ test_print_partition_list(committed);
+
+ rd_kafka_topic_partition_list_t *expected = offsets;
+ if (sub_consumer)
+ expected = orig_offsets;
+
+ if (test_partition_list_and_offsets_cmp(committed, expected)) {
+ TEST_SAY("expected list:\n");
+ test_print_partition_list(expected);
+ TEST_SAY("committed() list:\n");
+ test_print_partition_list(committed);
+ TEST_FAIL("committed offsets don't match");
+ }
+
+ rd_kafka_topic_partition_list_destroy(committed);
+ rd_kafka_topic_partition_list_destroy(offsets);
+ rd_kafka_topic_partition_list_destroy(orig_offsets);
+ rd_kafka_topic_partition_list_destroy(subscription);
+
+ for (i = 0; i < MY_TOPIC_CNT; i++)
+ rd_free(topics[i]);
+
+ rd_kafka_destroy(consumer);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef MY_TOPIC_CNT
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test altering of committed offsets.
+ *
+ *
+ */
+static void do_test_AlterConsumerGroupOffsets(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int req_timeout_ms,
+ rd_bool_t sub_consumer,
+ rd_bool_t create_topics) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_topic_partition_list_t *orig_offsets, *offsets, *to_alter,
+ *committed, *alterd, *subscription = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ const char *errstr2;
+#define TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3
+ int i;
+ const int partitions_cnt = 3;
+ char *topics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT];
+ rd_kafka_metadata_topic_t
+ exp_mdtopics[TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}};
+ int exp_mdtopic_cnt = 0;
+ test_timing_t timing;
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets;
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
+ const rd_kafka_group_result_t **gres;
+ size_t gres_cnt;
+ rd_kafka_t *consumer = NULL;
+ char *group_id;
+
+ SUB_TEST_QUICK(
+ "%s AlterConsumerGroupOffsets with %s, "
+ "request_timeout %d%s",
+ rd_kafka_name(rk), what, req_timeout_ms,
+ sub_consumer ? ", with subscribing consumer" : "");
+
+ if (!create_topics)
+ exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART;
+ else if (sub_consumer)
+ exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+
+ if (sub_consumer && !create_topics)
+ TEST_FAIL(
+ "Can't use set sub_consumer and unset create_topics at the "
+ "same time");
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (req_timeout_ms != -1) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, req_timeout_ms, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ subscription = rd_kafka_topic_partition_list_new(
+ TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
+
+ for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) {
+ char pfx[64];
+ char *topic;
+
+ rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i);
+ topic = rd_strdup(test_mk_topic_name(pfx, 1));
+
+ topics[i] = topic;
+ exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
+
+ rd_kafka_topic_partition_list_add(subscription, topic,
+ RD_KAFKA_PARTITION_UA);
+ }
+
+ group_id = topics[0];
+
+ /* Create the topics first if needed. */
+ if (create_topics) {
+ test_CreateTopics_simple(
+ rk, NULL, topics,
+ TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT, partitions_cnt,
+ NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt,
+ NULL, 0, 15 * 1000);
+
+ rd_sleep(1); /* Additional wait time for cluster propagation */
+
+ consumer = test_create_consumer(group_id, NULL, NULL, NULL);
+
+ if (sub_consumer) {
+ TEST_CALL_ERR__(
+ rd_kafka_subscribe(consumer, subscription));
+ test_consumer_wait_assignment(consumer, rd_true);
+ }
+ }
+
+ orig_offsets = rd_kafka_topic_partition_list_new(
+ TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt);
+ for (i = 0;
+ i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * partitions_cnt;
+ i++) {
+ rd_kafka_topic_partition_t *rktpar;
+ rktpar = rd_kafka_topic_partition_list_add(
+ orig_offsets, topics[i / partitions_cnt],
+ i % partitions_cnt);
+ rktpar->offset = (i + 1) * 10;
+ rd_kafka_topic_partition_set_leader_epoch(rktpar, 1);
+ }
+
+ /* Commit some offsets, if topics exists */
+ if (create_topics) {
+ TEST_CALL_ERR__(
+ rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
+
+ /* Verify committed offsets match */
+ committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+ TEST_CALL_ERR__(rd_kafka_committed(consumer, committed,
+ tmout_multip(5 * 1000)));
+
+ if (test_partition_list_and_offsets_cmp(committed,
+ orig_offsets)) {
+ TEST_SAY("commit() list:\n");
+ test_print_partition_list(orig_offsets);
+ TEST_SAY("committed() list:\n");
+ test_print_partition_list(committed);
+ TEST_FAIL("committed offsets don't match");
+ }
+ rd_kafka_topic_partition_list_destroy(committed);
+ }
+
+ /* Now alter second half of the commits */
+ offsets = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
+ to_alter = rd_kafka_topic_partition_list_new(orig_offsets->cnt / 2);
+ for (i = 0; i < orig_offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar;
+ if (i < orig_offsets->cnt / 2) {
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, orig_offsets->elems[i].topic,
+ orig_offsets->elems[i].partition);
+ rktpar->offset = orig_offsets->elems[i].offset;
+ rd_kafka_topic_partition_set_leader_epoch(
+ rktpar, rd_kafka_topic_partition_get_leader_epoch(
+ &orig_offsets->elems[i]));
+ } else {
+ rktpar = rd_kafka_topic_partition_list_add(
+ to_alter, orig_offsets->elems[i].topic,
+ orig_offsets->elems[i].partition);
+ rktpar->offset = 5;
+ rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
+ rktpar = rd_kafka_topic_partition_list_add(
+ offsets, orig_offsets->elems[i].topic,
+ orig_offsets->elems[i].partition);
+ rktpar->offset = 5;
+ rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
+ }
+ }
+
+ cgoffsets = rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter);
+
+ TIMING_START(&timing, "AlterConsumerGroupOffsets");
+ TEST_SAY("Call AlterConsumerGroupOffsets\n");
+ rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets);
+
+ TIMING_START(&timing, "AlterConsumerGroupOffsets.queue_poll");
+ /* Poll result queue for AlterConsumerGroupOffsets result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
+ TEST_SAY("AlterConsumerGroupOffsets: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
+ break;
+
+ rd_kafka_event_destroy(rkev);
+ }
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected AlterConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(!err,
+ "expected AlterConsumerGroupOffsets to succeed, "
+ "got %s (%s)",
+ rd_kafka_err2name(err), err ? errstr2 : "n/a");
+
+ TEST_SAY("AlterConsumerGroupOffsets: returned %s (%s)\n",
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ gres = rd_kafka_AlterConsumerGroupOffsets_result_groups(res, &gres_cnt);
+ TEST_ASSERT(gres && gres_cnt == 1,
+ "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
+
+ alterd = rd_kafka_topic_partition_list_copy(
+ rd_kafka_group_result_partitions(gres[0]));
+
+ if (test_partition_list_and_offsets_cmp(alterd, to_alter)) {
+ TEST_SAY("Result list:\n");
+ test_print_partition_list(alterd);
+ TEST_SAY("Partitions passed to AlterConsumerGroupOffsets:\n");
+ test_print_partition_list(to_alter);
+ TEST_FAIL("altered/requested offsets don't match");
+ }
+
+ /* Verify expected errors */
+ for (i = 0; i < alterd->cnt; i++) {
+ TEST_ASSERT_LATER(alterd->elems[i].err == exp_err,
+ "Result %s [%" PRId32
+ "] has error %s, "
+ "expected %s",
+ alterd->elems[i].topic,
+ alterd->elems[i].partition,
+ rd_kafka_err2name(alterd->elems[i].err),
+ rd_kafka_err2name(exp_err));
+ }
+
+ TEST_LATER_CHECK();
+
+ rd_kafka_topic_partition_list_destroy(alterd);
+ rd_kafka_topic_partition_list_destroy(to_alter);
+
+ rd_kafka_event_destroy(rkev);
+
+
+ /* Verify committed offsets match, if topics exist. */
+ if (create_topics) {
+ committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+ TEST_CALL_ERR__(rd_kafka_committed(consumer, committed,
+ tmout_multip(5 * 1000)));
+
+ rd_kafka_topic_partition_list_t *expected = offsets;
+ if (sub_consumer) {
+ /* Alter fails with an active consumer */
+ expected = orig_offsets;
+ }
+ TEST_SAY("Original committed offsets:\n");
+ test_print_partition_list(orig_offsets);
+
+ TEST_SAY("Committed offsets after alter:\n");
+ test_print_partition_list(committed);
+
+ if (test_partition_list_and_offsets_cmp(committed, expected)) {
+ TEST_SAY("expected list:\n");
+ test_print_partition_list(expected);
+ TEST_SAY("committed() list:\n");
+ test_print_partition_list(committed);
+ TEST_FAIL("committed offsets don't match");
+ }
+ rd_kafka_topic_partition_list_destroy(committed);
+ }
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+ rd_kafka_topic_partition_list_destroy(orig_offsets);
+ rd_kafka_topic_partition_list_destroy(subscription);
+
+ for (i = 0; i < TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++)
+ rd_free(topics[i]);
+
+ if (create_topics) /* consumer is created only if topics are. */
+ rd_kafka_destroy(consumer);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+#undef TEST_ALTER_CONSUMER_GROUP_OFFSETS_TOPIC_CNT
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Test listing of committed offsets.
+ *
+ *
+ */
+static void do_test_ListConsumerGroupOffsets(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ int req_timeout_ms,
+ rd_bool_t sub_consumer,
+ rd_bool_t null_toppars) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_topic_partition_list_t *orig_offsets, *to_list, *committed,
+ *listd, *subscription = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ const char *errstr2;
+#define TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT 3
+ int i;
+ const int partitions_cnt = 3;
+ char *topics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT];
+ rd_kafka_metadata_topic_t
+ exp_mdtopics[TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT] = {{0}};
+ int exp_mdtopic_cnt = 0;
+ test_timing_t timing;
+ rd_kafka_resp_err_t exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rd_kafka_ListConsumerGroupOffsets_t *cgoffsets;
+ const rd_kafka_ListConsumerGroupOffsets_result_t *res;
+ const rd_kafka_group_result_t **gres;
+ size_t gres_cnt;
+ rd_kafka_t *consumer;
+ char *group_id;
+
+ SUB_TEST_QUICK(
+ "%s ListConsumerGroupOffsets with %s, "
+ "request timeout %d%s",
+ rd_kafka_name(rk), what, req_timeout_ms,
+ sub_consumer ? ", with subscribing consumer" : "");
+
+ q = useq ? useq : rd_kafka_queue_new(rk);
+
+ if (req_timeout_ms != -1) {
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_LISTCONSUMERGROUPOFFSETS);
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, req_timeout_ms, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ }
+
+
+ subscription = rd_kafka_topic_partition_list_new(
+ TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
+
+ for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++) {
+ char pfx[64];
+ char *topic;
+
+ rd_snprintf(pfx, sizeof(pfx), "DCGO-topic%d", i);
+ topic = rd_strdup(test_mk_topic_name(pfx, 1));
+
+ topics[i] = topic;
+ exp_mdtopics[exp_mdtopic_cnt++].topic = topic;
+
+ rd_kafka_topic_partition_list_add(subscription, topic,
+ RD_KAFKA_PARTITION_UA);
+ }
+
+ group_id = topics[0];
+
+ /* Create the topics first. */
+ test_CreateTopics_simple(rk, NULL, topics,
+ TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT,
+ partitions_cnt, NULL);
+
+ /* Verify that topics are reported by metadata */
+ test_wait_metadata_update(rk, exp_mdtopics, exp_mdtopic_cnt, NULL, 0,
+ 15 * 1000);
+
+ rd_sleep(1); /* Additional wait time for cluster propagation */
+
+ consumer = test_create_consumer(group_id, NULL, NULL, NULL);
+
+ if (sub_consumer) {
+ TEST_CALL_ERR__(rd_kafka_subscribe(consumer, subscription));
+ test_consumer_wait_assignment(consumer, rd_true);
+ }
+
+ /* Commit some offsets */
+ orig_offsets = rd_kafka_topic_partition_list_new(
+ TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2);
+ for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT * 2; i++) {
+ rd_kafka_topic_partition_t *rktpar;
+ rktpar = rd_kafka_topic_partition_list_add(
+ orig_offsets, topics[i / 2],
+ i % TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT);
+ rktpar->offset = (i + 1) * 10;
+ rd_kafka_topic_partition_set_leader_epoch(rktpar, 2);
+ }
+
+ TEST_CALL_ERR__(rd_kafka_commit(consumer, orig_offsets, 0 /*sync*/));
+
+ /* Verify committed offsets match */
+ committed = rd_kafka_topic_partition_list_copy(orig_offsets);
+ TEST_CALL_ERR__(
+ rd_kafka_committed(consumer, committed, tmout_multip(5 * 1000)));
+
+ if (test_partition_list_and_offsets_cmp(committed, orig_offsets)) {
+ TEST_SAY("commit() list:\n");
+ test_print_partition_list(orig_offsets);
+ TEST_SAY("committed() list:\n");
+ test_print_partition_list(committed);
+ TEST_FAIL("committed offsets don't match");
+ }
+
+ rd_kafka_topic_partition_list_destroy(committed);
+
+ to_list = rd_kafka_topic_partition_list_new(orig_offsets->cnt);
+ for (i = 0; i < orig_offsets->cnt; i++) {
+ rd_kafka_topic_partition_list_add(
+ to_list, orig_offsets->elems[i].topic,
+ orig_offsets->elems[i].partition);
+ }
+
+ if (null_toppars) {
+ cgoffsets =
+ rd_kafka_ListConsumerGroupOffsets_new(group_id, NULL);
+ } else {
+ cgoffsets =
+ rd_kafka_ListConsumerGroupOffsets_new(group_id, to_list);
+ }
+
+ TIMING_START(&timing, "ListConsumerGroupOffsets");
+ TEST_SAY("Call ListConsumerGroupOffsets\n");
+ rd_kafka_ListConsumerGroupOffsets(rk, &cgoffsets, 1, options, q);
+ TIMING_ASSERT_LATER(&timing, 0, 50);
+
+ rd_kafka_ListConsumerGroupOffsets_destroy(cgoffsets);
+
+ TIMING_START(&timing, "ListConsumerGroupOffsets.queue_poll");
+ /* Poll result queue for ListConsumerGroupOffsets result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
+ TEST_SAY("ListConsumerGroupOffsets: got %s in %.3fms\n",
+ rd_kafka_event_name(rkev),
+ TIMING_DURATION(&timing) / 1000.0f);
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_LISTCONSUMERGROUPOFFSETS_RESULT)
+ break;
+
+ rd_kafka_event_destroy(rkev);
+ }
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_ListConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res, "expected ListConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ /* Expecting error */
+ err = rd_kafka_event_error(rkev);
+ errstr2 = rd_kafka_event_error_string(rkev);
+ TEST_ASSERT(!err,
+ "expected ListConsumerGroupOffsets to succeed, "
+ "got %s (%s)",
+ rd_kafka_err2name(err), err ? errstr2 : "n/a");
+
+ TEST_SAY("ListConsumerGroupOffsets: returned %s (%s)\n",
+ rd_kafka_err2str(err), err ? errstr2 : "n/a");
+
+ gres = rd_kafka_ListConsumerGroupOffsets_result_groups(res, &gres_cnt);
+ TEST_ASSERT(gres && gres_cnt == 1,
+ "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
+
+ listd = rd_kafka_topic_partition_list_copy(
+ rd_kafka_group_result_partitions(gres[0]));
+
+ if (test_partition_list_and_offsets_cmp(listd, orig_offsets)) {
+ TEST_SAY("Result list:\n");
+ test_print_partition_list(listd);
+ TEST_SAY("Partitions passed to ListConsumerGroupOffsets:\n");
+ test_print_partition_list(orig_offsets);
+ TEST_FAIL("listd/requested offsets don't match");
+ }
+
+ /* Verify expected errors */
+ for (i = 0; i < listd->cnt; i++) {
+ TEST_ASSERT_LATER(listd->elems[i].err == exp_err,
+ "Result %s [%" PRId32
+ "] has error %s, "
+ "expected %s",
+ listd->elems[i].topic,
+ listd->elems[i].partition,
+ rd_kafka_err2name(listd->elems[i].err),
+ rd_kafka_err2name(exp_err));
+ }
+
+ TEST_LATER_CHECK();
+
+ rd_kafka_topic_partition_list_destroy(listd);
+ rd_kafka_topic_partition_list_destroy(to_list);
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_topic_partition_list_destroy(orig_offsets);
+ rd_kafka_topic_partition_list_destroy(subscription);
+
+ for (i = 0; i < TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT; i++)
+ rd_free(topics[i]);
+
+ rd_kafka_destroy(consumer);
+
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (!useq)
+ rd_kafka_queue_destroy(q);
+
+ TEST_LATER_CHECK();
+
+#undef TEST_LIST_CONSUMER_GROUP_OFFSETS_TOPIC_CNT
+
+ SUB_TEST_PASS();
+}
+
+static void do_test_apis(rd_kafka_type_t cltype) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *mainq;
+
+ /* Get the available brokers, but use a separate rd_kafka_t instance
+ * so we don't jinx the tests by having up-to-date metadata. */
+ avail_brokers = test_get_broker_ids(NULL, &avail_broker_cnt);
+ TEST_SAY("%" PRIusz
+ " brokers in cluster "
+ "which will be used for replica sets\n",
+ avail_broker_cnt);
+
+ do_test_unclean_destroy(cltype, 0 /*tempq*/);
+ do_test_unclean_destroy(cltype, 1 /*mainq*/);
+
+ test_conf_init(&conf, NULL, 180);
+ test_conf_set(conf, "socket.timeout.ms", "10000");
+ rk = test_create_handle(cltype, conf);
+
+ mainq = rd_kafka_queue_get_main(rk);
+
+ /* Create topics */
+ do_test_CreateTopics("temp queue, op timeout 0", rk, NULL, 0, 0);
+ do_test_CreateTopics("temp queue, op timeout 15000", rk, NULL, 15000,
+ 0);
+ do_test_CreateTopics(
+ "temp queue, op timeout 300, "
+ "validate only",
+ rk, NULL, 300, rd_true);
+ do_test_CreateTopics("temp queue, op timeout 9000, validate_only", rk,
+ NULL, 9000, rd_true);
+ do_test_CreateTopics("main queue, options", rk, mainq, -1, 0);
+
+ /* Delete topics */
+ do_test_DeleteTopics("temp queue, op timeout 0", rk, NULL, 0);
+ do_test_DeleteTopics("main queue, op timeout 15000", rk, mainq, 1500);
+
+ if (test_broker_version >= TEST_BRKVER(1, 0, 0, 0)) {
+ /* Create Partitions */
+ do_test_CreatePartitions("temp queue, op timeout 6500", rk,
+ NULL, 6500);
+ do_test_CreatePartitions("main queue, op timeout 0", rk, mainq,
+ 0);
+ }
+
+ /* CreateAcls */
+ do_test_CreateAcls(rk, mainq, 0);
+ do_test_CreateAcls(rk, mainq, 1);
+
+ /* DescribeAcls */
+ do_test_DescribeAcls(rk, mainq, 0);
+ do_test_DescribeAcls(rk, mainq, 1);
+
+ /* DeleteAcls */
+ do_test_DeleteAcls(rk, mainq, 0);
+ do_test_DeleteAcls(rk, mainq, 1);
+
+ /* AlterConfigs */
+ do_test_AlterConfigs(rk, mainq);
+
+ /* DescribeConfigs */
+ do_test_DescribeConfigs(rk, mainq);
+
+ /* Delete records */
+ do_test_DeleteRecords("temp queue, op timeout 0", rk, NULL, 0);
+ do_test_DeleteRecords("main queue, op timeout 1500", rk, mainq, 1500);
+
+ /* List groups */
+ do_test_ListConsumerGroups("temp queue", rk, NULL, -1, rd_false);
+ do_test_ListConsumerGroups("main queue", rk, mainq, 1500, rd_true);
+
+ /* Describe groups */
+ do_test_DescribeConsumerGroups("temp queue", rk, NULL, -1);
+ do_test_DescribeConsumerGroups("main queue", rk, mainq, 1500);
+
+ /* Delete groups */
+ do_test_DeleteGroups("temp queue", rk, NULL, -1);
+ do_test_DeleteGroups("main queue", rk, mainq, 1500);
+
+ if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0)) {
+ /* Delete committed offsets */
+ do_test_DeleteConsumerGroupOffsets("temp queue", rk, NULL, -1,
+ rd_false);
+ do_test_DeleteConsumerGroupOffsets("main queue", rk, mainq,
+ 1500, rd_false);
+ do_test_DeleteConsumerGroupOffsets(
+ "main queue", rk, mainq, 1500,
+ rd_true /*with subscribing consumer*/);
+
+ /* Alter committed offsets */
+ do_test_AlterConsumerGroupOffsets("temp queue", rk, NULL, -1,
+ rd_false, rd_true);
+ do_test_AlterConsumerGroupOffsets("main queue", rk, mainq, 1500,
+ rd_false, rd_true);
+ do_test_AlterConsumerGroupOffsets(
+ "main queue, nonexistent topics", rk, mainq, 1500, rd_false,
+ rd_false /* don't create topics */);
+ do_test_AlterConsumerGroupOffsets(
+ "main queue", rk, mainq, 1500,
+ rd_true, /*with subscribing consumer*/
+ rd_true);
+
+ /* List committed offsets */
+ do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1,
+ rd_false, rd_false);
+ do_test_ListConsumerGroupOffsets(
+ "main queue, op timeout "
+ "1500",
+ rk, mainq, 1500, rd_false, rd_false);
+ do_test_ListConsumerGroupOffsets(
+ "main queue", rk, mainq, 1500,
+ rd_true /*with subscribing consumer*/, rd_false);
+ do_test_ListConsumerGroupOffsets("temp queue", rk, NULL, -1,
+ rd_false, rd_true);
+ do_test_ListConsumerGroupOffsets("main queue", rk, mainq, 1500,
+ rd_false, rd_true);
+ do_test_ListConsumerGroupOffsets(
+ "main queue", rk, mainq, 1500,
+ rd_true /*with subscribing consumer*/, rd_true);
+ }
+
+ rd_kafka_queue_destroy(mainq);
+
+ rd_kafka_destroy(rk);
+
+ free(avail_brokers);
+}
+
+
+int main_0081_admin(int argc, char **argv) {
+
+ do_test_apis(RD_KAFKA_PRODUCER);
+
+ if (test_quick) {
+ TEST_SAY("Skipping further 0081 tests due to quick mode\n");
+ return 0;
+ }
+
+ do_test_apis(RD_KAFKA_CONSUMER);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp
new file mode 100644
index 000000000..16eb5a21a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0082-fetch_max_bytes.cpp
@@ -0,0 +1,133 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+/**
+ * @brief Test fetch.max.bytes
+ *
+ * - Produce 1*10 Megs to 3 partitions (~<1 Meg per message)
+ * - Set max.partition.fetch.bytes to 5 Meg
+ * - Set fetch.max.bytes to 2 Meg
+ * - Verify all messages are consumed without error.
+ */
+
+
+static void do_test_fetch_max_bytes(void) {
+ const int partcnt = 3;
+ int msgcnt = 10 * partcnt;
+ const int msgsize = 900 * 1024; /* Less than 1 Meg to account
+ * for batch overhead */
+ std::string errstr;
+ RdKafka::ErrorCode err;
+
+ std::string topic = Test::mk_topic_name("0081-fetch_max_bytes", 1);
+
+ /* Produce messages to partitions */
+ for (int32_t p = 0; p < (int32_t)partcnt; p++)
+ test_produce_msgs_easy_size(topic.c_str(), 0, p, msgcnt, msgsize);
+
+ /* Create consumer */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "group.id", topic);
+ Test::conf_set(conf, "auto.offset.reset", "earliest");
+ /* We try to fetch 20 Megs per partition, but only allow 1 Meg as total
+ * response size, this ends up serving the first batch from the
+ * first partition.
+ * receive.message.max.bytes is set low to trigger the original bug,
+ * but this value is now adjusted upwards automatically by rd_kafka_new()
+ * to hold both fetch.max.bytes and the protocol / batching overhead.
+ * Prior to the introduction of fetch.max.bytes the fetcher code
+ * would use receive.message.max.bytes to limit the total Fetch response,
+ * but due to batching overhead it would result in situations where
+ * the consumer asked for 1000000 bytes and got 1000096 bytes batch, which
+ * was higher than the 1000000 limit.
+ * See https://github.com/edenhill/librdkafka/issues/1616
+ *
+ * With the added configuration strictness checks, a user-supplied
+ * value is no longer over-written:
+ * receive.message.max.bytes must be configured to be at least 512 bytes
+ * larger than fetch.max.bytes.
+ */
+ Test::conf_set(conf, "max.partition.fetch.bytes", "20000000"); /* ~20MB */
+ Test::conf_set(conf, "fetch.max.bytes", "1000000"); /* ~1MB */
+ Test::conf_set(conf, "receive.message.max.bytes", "1000512"); /* ~1MB+512 */
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /* Subscribe */
+ std::vector<std::string> topics;
+ topics.push_back(topic);
+ if ((err = c->subscribe(topics)))
+ Test::Fail("subscribe failed: " + RdKafka::err2str(err));
+
+ /* Start consuming */
+ Test::Say("Consuming topic " + topic + "\n");
+ int cnt = 0;
+ while (cnt < msgcnt) {
+ RdKafka::Message *msg = c->consume(tmout_multip(1000));
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ cnt++;
+ break;
+
+ default:
+ Test::Fail("Consume error: " + msg->errstr());
+ break;
+ }
+
+ delete msg;
+ }
+ Test::Say("Done\n");
+
+ c->close();
+ delete c;
+}
+
+extern "C" {
+int main_0082_fetch_max_bytes(int argc, char **argv) {
+ if (test_quick) {
+ Test::Skip("Test skipped due to quick mode\n");
+ return 0;
+ }
+
+ do_test_fetch_max_bytes();
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c
new file mode 100644
index 000000000..23ce79820
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0083-cb_event.c
@@ -0,0 +1,228 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests the queue callback IO event signalling.
+ */
+
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * @brief Thread safe event counter */
+static struct {
+ mtx_t lock;
+ int count;
+} event_receiver;
+
+/**
+ * @brief Event callback function. Check the opaque pointer and
+ * increase the count of received event. */
+static void event_cb(rd_kafka_t *rk_p, void *opaque) {
+ TEST_ASSERT(opaque == (void *)0x1234,
+ "Opaque pointer is not as expected (got: %p)", opaque);
+ mtx_lock(&event_receiver.lock);
+ event_receiver.count += 1;
+ mtx_unlock(&event_receiver.lock);
+}
+
+/**
+ * @brief Wait for one or more events to be received.
+ * Return 0 if no event was received within the timeout. */
+static int wait_event_cb(int timeout_secs) {
+ int event_count = 0;
+ for (; timeout_secs >= 0; timeout_secs--) {
+ mtx_lock(&event_receiver.lock);
+ event_count = event_receiver.count;
+ event_receiver.count = 0;
+ mtx_unlock(&event_receiver.lock);
+ if (event_count > 0 || timeout_secs == 0)
+ return event_count;
+ rd_sleep(1);
+ }
+ return 0;
+}
+
+
+int main_0083_cb_event(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *tconf;
+ rd_kafka_t *rk_p, *rk_c;
+ const char *topic;
+ rd_kafka_topic_t *rkt_p;
+ rd_kafka_queue_t *queue;
+ uint64_t testid;
+ int msgcnt = 100;
+ int recvd = 0;
+ int wait_multiplier = 1;
+ rd_kafka_resp_err_t err;
+ enum { _NOPE, _YEP, _REBALANCE } expecting_io = _REBALANCE;
+ int callback_event_count;
+ rd_kafka_event_t *rkev;
+ int eventcnt = 0;
+
+ mtx_init(&event_receiver.lock, mtx_plain);
+
+ testid = test_id_generate();
+ topic = test_mk_topic_name(__FUNCTION__, 1);
+
+ rk_p = test_create_producer();
+ rkt_p = test_create_producer_topic(rk_p, topic, NULL);
+ err = test_auto_create_topic_rkt(rk_p, rkt_p, tmout_multip(5000));
+ TEST_ASSERT(!err, "Topic auto creation failed: %s",
+ rd_kafka_err2str(err));
+
+ test_conf_init(&conf, &tconf, 0);
+ rd_kafka_conf_set_events(conf, RD_KAFKA_EVENT_REBALANCE);
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "enable.partition.eof", "false");
+ /* Speed up propagation of new topics */
+ test_conf_set(conf, "metadata.max.age.ms", "5000");
+ test_topic_conf_set(tconf, "auto.offset.reset", "earliest");
+ rk_c = test_create_consumer(topic, NULL, conf, tconf);
+
+ queue = rd_kafka_queue_get_consumer(rk_c);
+
+ test_consumer_subscribe(rk_c, topic);
+
+ rd_kafka_queue_cb_event_enable(queue, event_cb, (void *)0x1234);
+
+ /**
+ * 1) Wait for rebalance event
+ * 2) Wait 1 interval (1s) expecting no IO (nothing produced).
+ * 3) Produce half the messages
+ * 4) Expect CB
+ * 5) Consume the available messages
+ * 6) Wait 1 interval expecting no CB.
+ * 7) Produce remaing half
+ * 8) Expect CB
+ * 9) Done.
+ */
+ while (recvd < msgcnt) {
+ TEST_SAY("Waiting for event\n");
+ callback_event_count = wait_event_cb(1 * wait_multiplier);
+ TEST_ASSERT(callback_event_count <= 1,
+ "Event cb called %d times", callback_event_count);
+
+ if (callback_event_count == 1) {
+ TEST_SAY("Events received: %d\n", callback_event_count);
+
+ while ((rkev = rd_kafka_queue_poll(queue, 0))) {
+ eventcnt++;
+ switch (rd_kafka_event_type(rkev)) {
+ case RD_KAFKA_EVENT_REBALANCE:
+ TEST_SAY(
+ "Got %s: %s\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_err2str(
+ rd_kafka_event_error(rkev)));
+ if (expecting_io != _REBALANCE)
+ TEST_FAIL(
+ "Got Rebalance when "
+ "expecting message\n");
+ if (rd_kafka_event_error(rkev) ==
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
+ rd_kafka_assign(
+ rk_c,
+ rd_kafka_event_topic_partition_list(
+ rkev));
+ expecting_io = _NOPE;
+ } else
+ rd_kafka_assign(rk_c, NULL);
+ break;
+
+ case RD_KAFKA_EVENT_FETCH:
+ if (expecting_io != _YEP)
+ TEST_FAIL(
+ "Did not expect more "
+ "messages at %d/%d\n",
+ recvd, msgcnt);
+ recvd++;
+ if (recvd == (msgcnt / 2) ||
+ recvd == msgcnt)
+ expecting_io = _NOPE;
+ break;
+
+ case RD_KAFKA_EVENT_ERROR:
+ TEST_FAIL(
+ "Error: %s\n",
+ rd_kafka_event_error_string(rkev));
+ break;
+
+ default:
+ TEST_SAY("Ignoring event %s\n",
+ rd_kafka_event_name(rkev));
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+ TEST_SAY("%d events, Consumed %d/%d messages\n",
+ eventcnt, recvd, msgcnt);
+
+ wait_multiplier = 1;
+
+ } else {
+ if (expecting_io == _REBALANCE) {
+ continue;
+ } else if (expecting_io == _YEP) {
+ TEST_FAIL(
+ "Did not see expected IO after %d/%d "
+ "msgs\n",
+ recvd, msgcnt);
+ }
+
+ TEST_SAY("Event wait timeout (good)\n");
+ TEST_SAY("Got idle period, producing\n");
+ test_produce_msgs(rk_p, rkt_p, testid, 0, recvd,
+ msgcnt / 2, NULL, 10);
+
+ expecting_io = _YEP;
+ /* When running slowly (e.g., valgrind) it might take
+ * some time before the first message is received
+ * after producing. */
+ wait_multiplier = 3;
+ }
+ }
+ TEST_SAY("Done\n");
+
+ rd_kafka_topic_destroy(rkt_p);
+ rd_kafka_destroy(rk_p);
+
+ rd_kafka_queue_destroy(queue);
+ rd_kafka_consumer_close(rk_c);
+ rd_kafka_destroy(rk_c);
+
+ mtx_destroy(&event_receiver.lock);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c
new file mode 100644
index 000000000..cd8bbf7de
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0084-destroy_flags.c
@@ -0,0 +1,211 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * @name Test rd_kafka_destroy_flags()
+ */
+
+
+#include "test.h"
+
+
+static RD_TLS int rebalance_cnt = 0;
+
+static void destroy_flags_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ rebalance_cnt++;
+
+ TEST_SAY("rebalance_cb: %s with %d partition(s)\n",
+ rd_kafka_err2str(err), parts->cnt);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ test_consumer_assign("rebalance", rk, parts);
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ test_consumer_unassign("rebalance", rk);
+ break;
+
+ default:
+ TEST_FAIL("rebalance_cb: error: %s", rd_kafka_err2str(err));
+ }
+}
+
+struct df_args {
+ rd_kafka_type_t client_type;
+ int produce_cnt;
+ int consumer_subscribe;
+ int consumer_unsubscribe;
+};
+
+static void do_test_destroy_flags(const char *topic,
+ int destroy_flags,
+ int local_mode,
+ const struct df_args *args) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ test_timing_t t_destroy;
+
+ TEST_SAY(_C_MAG
+ "[ test destroy_flags 0x%x for client_type %d, "
+ "produce_cnt %d, subscribe %d, unsubscribe %d, "
+ "%s mode ]\n" _C_CLR,
+ destroy_flags, args->client_type, args->produce_cnt,
+ args->consumer_subscribe, args->consumer_unsubscribe,
+ local_mode ? "local" : "broker");
+
+ test_conf_init(&conf, NULL, 20);
+
+ if (local_mode)
+ test_conf_set(conf, "bootstrap.servers", "");
+
+ if (args->client_type == RD_KAFKA_PRODUCER) {
+
+ rk = test_create_handle(args->client_type, conf);
+
+ if (args->produce_cnt > 0) {
+ rd_kafka_topic_t *rkt;
+ int msgcounter = 0;
+
+ rkt = test_create_producer_topic(rk, topic, NULL);
+ test_produce_msgs_nowait(
+ rk, rkt, 0, RD_KAFKA_PARTITION_UA, 0,
+ args->produce_cnt, NULL, 100, 0, &msgcounter);
+ rd_kafka_topic_destroy(rkt);
+ }
+
+ } else {
+ int i;
+
+ TEST_ASSERT(args->client_type == RD_KAFKA_CONSUMER);
+
+ rk = test_create_consumer(topic, destroy_flags_rebalance_cb,
+ conf, NULL);
+
+ if (args->consumer_subscribe) {
+ test_consumer_subscribe(rk, topic);
+
+ if (!local_mode) {
+ TEST_SAY("Waiting for assignment\n");
+ while (rebalance_cnt == 0)
+ test_consumer_poll_once(rk, NULL, 1000);
+ }
+ }
+
+ for (i = 0; i < 5; i++)
+ test_consumer_poll_once(rk, NULL, 100);
+
+ if (args->consumer_unsubscribe) {
+ /* Test that calling rd_kafka_unsubscribe immediately
+ * prior to rd_kafka_destroy_flags doesn't cause the
+ * latter to hang. */
+ TEST_SAY(_C_YEL "Calling rd_kafka_unsubscribe\n"_C_CLR);
+ rd_kafka_unsubscribe(rk);
+ }
+ }
+
+ rebalance_cnt = 0;
+ TEST_SAY(_C_YEL "Calling rd_kafka_destroy_flags(0x%x)\n" _C_CLR,
+ destroy_flags);
+ TIMING_START(&t_destroy, "rd_kafka_destroy_flags(0x%x)", destroy_flags);
+ rd_kafka_destroy_flags(rk, destroy_flags);
+ TIMING_STOP(&t_destroy);
+
+ if (destroy_flags & RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE)
+ TIMING_ASSERT_LATER(&t_destroy, 0, 200);
+ else
+ TIMING_ASSERT_LATER(&t_destroy, 0, 1000);
+
+ if (args->consumer_subscribe &&
+ !(destroy_flags & RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE)) {
+ if (!local_mode)
+ TEST_ASSERT(rebalance_cnt > 0,
+ "expected final rebalance callback");
+ } else
+ TEST_ASSERT(rebalance_cnt == 0,
+ "expected no rebalance callbacks, got %d",
+ rebalance_cnt);
+
+ TEST_SAY(_C_GRN
+ "[ test destroy_flags 0x%x for client_type %d, "
+ "produce_cnt %d, subscribe %d, unsubscribe %d, "
+ "%s mode: PASS ]\n" _C_CLR,
+ destroy_flags, args->client_type, args->produce_cnt,
+ args->consumer_subscribe, args->consumer_unsubscribe,
+ local_mode ? "local" : "broker");
+}
+
+
+/**
+ * @brief Destroy with flags
+ */
+static void destroy_flags(int local_mode) {
+ const struct df_args args[] = {
+ {RD_KAFKA_PRODUCER, 0, 0, 0},
+ {RD_KAFKA_PRODUCER, test_quick ? 100 : 10000, 0, 0},
+ {RD_KAFKA_CONSUMER, 0, 1, 0},
+ {RD_KAFKA_CONSUMER, 0, 1, 1},
+ {RD_KAFKA_CONSUMER, 0, 0, 0}};
+ const int flag_combos[] = {0, RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE};
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ const rd_bool_t can_subscribe =
+ test_broker_version >= TEST_BRKVER(0, 9, 0, 0);
+ int i, j;
+
+ /* Create the topic to avoid not-yet-auto-created-topics being
+ * subscribed to (and thus raising an error). */
+ if (!local_mode) {
+ test_create_topic(NULL, topic, 3, 1);
+ test_wait_topic_exists(NULL, topic, 5000);
+ }
+
+ for (i = 0; i < (int)RD_ARRAYSIZE(args); i++) {
+ for (j = 0; j < (int)RD_ARRAYSIZE(flag_combos); j++) {
+ if (!can_subscribe && (args[i].consumer_subscribe ||
+ args[i].consumer_unsubscribe))
+ continue;
+ do_test_destroy_flags(topic, flag_combos[j], local_mode,
+ &args[i]);
+ }
+ }
+}
+
+
+
+int main_0084_destroy_flags_local(int argc, char **argv) {
+ destroy_flags(1 /*no brokers*/);
+ return 0;
+}
+
+int main_0084_destroy_flags(int argc, char **argv) {
+ destroy_flags(0 /*with brokers*/);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp
new file mode 100644
index 000000000..a342478c1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0085-headers.cpp
@@ -0,0 +1,388 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+
+static RdKafka::Producer *producer;
+static RdKafka::KafkaConsumer *consumer;
+static std::string topic;
+
+static void assert_all_headers_match(RdKafka::Headers *actual,
+ const RdKafka::Headers *expected) {
+ if (!actual) {
+ Test::Fail("Expected RdKafka::Message to contain headers");
+ }
+ if (actual->size() != expected->size()) {
+ Test::Fail(tostr() << "Expected headers length to equal "
+ << expected->size() << " instead equals "
+ << actual->size() << "\n");
+ }
+
+ std::vector<RdKafka::Headers::Header> actual_headers = actual->get_all();
+ std::vector<RdKafka::Headers::Header> expected_headers = expected->get_all();
+ Test::Say(3, tostr() << "Header size " << actual_headers.size() << "\n");
+ for (size_t i = 0; i < actual_headers.size(); i++) {
+ RdKafka::Headers::Header actual_header = actual_headers[i];
+ const RdKafka::Headers::Header expected_header = expected_headers[i];
+ std::string actual_key = actual_header.key();
+ std::string actual_value =
+ std::string(actual_header.value_string(), actual_header.value_size());
+ std::string expected_key = expected_header.key();
+ std::string expected_value =
+ std::string(actual_header.value_string(), expected_header.value_size());
+
+ Test::Say(3, tostr() << "Expected Key " << expected_key << ", Expected val "
+ << expected_value << ", Actual key " << actual_key
+ << ", Actual val " << actual_value << "\n");
+
+ if (actual_key != expected_key) {
+ Test::Fail(tostr() << "Header key does not match, expected '"
+ << actual_key << "' but got '" << expected_key
+ << "'\n");
+ }
+ if (actual_value != expected_value) {
+ Test::Fail(tostr() << "Header value does not match, expected '"
+ << actual_value << "' but got '" << expected_value
+ << "'\n");
+ }
+ }
+}
+
+static void test_headers(RdKafka::Headers *produce_headers,
+ const RdKafka::Headers *compare_headers) {
+ RdKafka::ErrorCode err;
+
+ err = producer->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY,
+ (void *)"message", 7, (void *)"key", 3, 0,
+ produce_headers, NULL);
+ if (err)
+ Test::Fail("produce() failed: " + RdKafka::err2str(err));
+
+ producer->flush(tmout_multip(10 * 1000));
+
+ if (producer->outq_len() > 0)
+ Test::Fail(tostr() << "Expected producer to be flushed, "
+ << producer->outq_len() << " messages remain");
+
+ int cnt = 0;
+ bool running = true;
+
+ while (running) {
+ RdKafka::Message *msg = consumer->consume(10 * 1000);
+
+ if (msg->err() == RdKafka::ERR_NO_ERROR) {
+ cnt++;
+ RdKafka::Headers *headers = msg->headers();
+ if (compare_headers->size() > 0) {
+ assert_all_headers_match(headers, compare_headers);
+ } else {
+ if (headers != 0) {
+ Test::Fail("Expected headers to return a NULL pointer");
+ }
+ }
+ running = false;
+ } else {
+ Test::Fail("consume() failed: " + msg->errstr());
+ }
+ delete msg;
+ }
+}
+
+static void test_headers(int num_hdrs) {
+ Test::Say(tostr() << "Test " << num_hdrs
+ << " headers in consumed message.\n");
+ RdKafka::Headers *produce_headers = RdKafka::Headers::create();
+ RdKafka::Headers *compare_headers = RdKafka::Headers::create();
+ for (int i = 0; i < num_hdrs; ++i) {
+ std::stringstream key_s;
+ key_s << "header_" << i;
+ std::string key = key_s.str();
+
+ if ((i % 4) == 0) {
+ /* NULL value */
+ produce_headers->add(key, NULL, 0);
+ compare_headers->add(key, NULL, 0);
+ } else if ((i % 5) == 0) {
+ /* Empty value, use different methods for produce
+ * and compare to make sure they behave the same way. */
+ std::string val = "";
+ produce_headers->add(key, val);
+ compare_headers->add(key, "", 0);
+ } else if ((i % 6) == 0) {
+ /* Binary value (no nul-term) */
+ produce_headers->add(key, "binary", 6);
+ compare_headers->add(key, "binary"); /* auto-nul-terminated */
+ } else {
+ /* Standard string value */
+ std::stringstream val_s;
+ val_s << "value_" << i;
+ std::string val = val_s.str();
+ produce_headers->add(key, val);
+ compare_headers->add(key, val);
+ }
+ }
+ test_headers(produce_headers, compare_headers);
+ delete compare_headers;
+}
+
+static void test_duplicate_keys() {
+ Test::Say("Test multiple headers with duplicate keys.\n");
+ int num_hdrs = 4;
+ RdKafka::Headers *produce_headers = RdKafka::Headers::create();
+ RdKafka::Headers *compare_headers = RdKafka::Headers::create();
+ for (int i = 0; i < num_hdrs; ++i) {
+ std::string dup_key = "dup_key";
+ std::stringstream val_s;
+ val_s << "value_" << i;
+ std::string val = val_s.str();
+ produce_headers->add(dup_key, val);
+ compare_headers->add(dup_key, val);
+ }
+ test_headers(produce_headers, compare_headers);
+ delete compare_headers;
+}
+
+static void test_remove_after_add() {
+ Test::Say("Test removing after adding headers.\n");
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+
+ // Add one unique key
+ std::string key_one = "key1";
+ std::string val_one = "val_one";
+ headers->add(key_one, val_one);
+
+ // Add a second unique key
+ std::string key_two = "key2";
+ std::string val_two = "val_two";
+ headers->add(key_two, val_one);
+
+ // Assert header length is 2
+ size_t expected_size = 2;
+ if (headers->size() != expected_size) {
+ Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
+ << ", instead got " << headers->size() << "\n");
+ }
+
+ // Remove key_one and assert headers == 1
+ headers->remove(key_one);
+ size_t expected_remove_size = 1;
+ if (headers->size() != expected_remove_size) {
+ Test::Fail(tostr() << "Expected header->size() to equal "
+ << expected_remove_size << ", instead got "
+ << headers->size() << "\n");
+ }
+
+ delete headers;
+}
+
+static void test_remove_all_duplicate_keys() {
+ Test::Say("Test removing duplicate keys removes all headers.\n");
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+
+ // Add one unique key
+ std::string key_one = "key1";
+ std::string val_one = "val_one";
+ headers->add(key_one, val_one);
+
+ // Add 2 duplicate keys
+ std::string dup_key = "dup_key";
+ std::string val_two = "val_two";
+ headers->add(dup_key, val_one);
+ headers->add(dup_key, val_two);
+
+ // Assert header length is 3
+ size_t expected_size = 3;
+ if (headers->size() != expected_size) {
+ Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
+ << ", instead got " << headers->size() << "\n");
+ }
+
+ // Remove key_one and assert headers == 1
+ headers->remove(dup_key);
+ size_t expected_size_remove = 1;
+ if (headers->size() != expected_size_remove) {
+ Test::Fail(tostr() << "Expected header->size() to equal "
+ << expected_size_remove << ", instead got "
+ << headers->size() << "\n");
+ }
+
+ delete headers;
+}
+
+static void test_get_last_gives_last_added_val() {
+ Test::Say("Test get_last returns the last added value of duplicate keys.\n");
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+
+ // Add two duplicate keys
+ std::string dup_key = "dup_key";
+ std::string val_one = "val_one";
+ std::string val_two = "val_two";
+ std::string val_three = "val_three";
+ headers->add(dup_key, val_one);
+ headers->add(dup_key, val_two);
+ headers->add(dup_key, val_three);
+
+ // Assert header length is 3
+ size_t expected_size = 3;
+ if (headers->size() != expected_size) {
+ Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
+ << ", instead got " << headers->size() << "\n");
+ }
+
+ // Get last of duplicate key and assert it equals val_two
+ RdKafka::Headers::Header last = headers->get_last(dup_key);
+ std::string value = std::string(last.value_string());
+ if (value != val_three) {
+ Test::Fail(tostr() << "Expected get_last to return " << val_two
+ << " as the value of the header instead got " << value
+ << "\n");
+ }
+
+ delete headers;
+}
+
+static void test_get_of_key_returns_all() {
+ Test::Say("Test get returns all the headers of a duplicate key.\n");
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+
+ // Add two duplicate keys
+ std::string unique_key = "unique";
+ std::string dup_key = "dup_key";
+ std::string val_one = "val_one";
+ std::string val_two = "val_two";
+ std::string val_three = "val_three";
+ headers->add(unique_key, val_one);
+ headers->add(dup_key, val_one);
+ headers->add(dup_key, val_two);
+ headers->add(dup_key, val_three);
+
+ // Assert header length is 4
+ size_t expected_size = 4;
+ if (headers->size() != expected_size) {
+ Test::Fail(tostr() << "Expected header->size() to equal " << expected_size
+ << ", instead got " << headers->size() << "\n");
+ }
+
+ // Get all of the duplicate key
+ std::vector<RdKafka::Headers::Header> get = headers->get(dup_key);
+ size_t expected_get_size = 3;
+ if (get.size() != expected_get_size) {
+ Test::Fail(tostr() << "Expected header->size() to equal "
+ << expected_get_size << ", instead got "
+ << headers->size() << "\n");
+ }
+
+ delete headers;
+}
+
+static void test_failed_produce() {
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+ headers->add("my", "header");
+
+ RdKafka::ErrorCode err;
+
+ err = producer->produce(topic, 999 /* invalid partition */,
+ RdKafka::Producer::RK_MSG_COPY, (void *)"message", 7,
+ (void *)"key", 3, 0, headers, NULL);
+ if (!err)
+ Test::Fail("Expected produce() to fail");
+
+ delete headers;
+}
+
+static void test_assignment_op() {
+ Test::Say("Test Header assignment operator\n");
+
+ RdKafka::Headers *headers = RdKafka::Headers::create();
+
+ headers->add("abc", "123");
+ headers->add("def", "456");
+
+ RdKafka::Headers::Header h = headers->get_last("abc");
+ h = headers->get_last("def");
+ RdKafka::Headers::Header h2 = h;
+ h = headers->get_last("nope");
+ RdKafka::Headers::Header h3 = h;
+ h = headers->get_last("def");
+
+ delete headers;
+}
+
+
+extern "C" {
+int main_0085_headers(int argc, char **argv) {
+ topic = Test::mk_topic_name("0085-headers", 1);
+
+ RdKafka::Conf *conf;
+ std::string errstr;
+
+ Test::conf_init(&conf, NULL, 0);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+ Test::conf_set(conf, "group.id", topic);
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+
+ delete conf;
+
+ std::vector<RdKafka::TopicPartition *> parts;
+ parts.push_back(RdKafka::TopicPartition::create(
+ topic, 0, RdKafka::Topic::OFFSET_BEGINNING));
+ RdKafka::ErrorCode err = c->assign(parts);
+ if (err != RdKafka::ERR_NO_ERROR)
+ Test::Fail("assign() failed: " + RdKafka::err2str(err));
+ RdKafka::TopicPartition::destroy(parts);
+
+ producer = p;
+ consumer = c;
+
+ test_headers(0);
+ test_headers(1);
+ test_headers(261);
+ test_duplicate_keys();
+ test_remove_after_add();
+ test_remove_all_duplicate_keys();
+ test_get_last_gives_last_added_val();
+ test_get_of_key_returns_all();
+ test_failed_produce();
+ test_assignment_op();
+
+ c->close();
+ delete c;
+ delete p;
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c
new file mode 100644
index 000000000..4dbf937f3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0086-purge.c
@@ -0,0 +1,334 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "../src/rdkafka_protocol.h"
+
+/**
+ * @name Test rd_kafka_purge()
+ *
+ * Local test:
+ * - produce 29 messages (that will be held up in queues),
+ * for specific partitions and UA.
+ * - purge(INFLIGHT) => no change in len()
+ * - purge(QUEUE) => len() should drop to 0, dr errs should be ERR__PURGE_QUEUE
+ *
+ * Remote test (WITH_SOCKEM):
+ * - Limit in-flight messages to 10
+ * - Produce 20 messages to the same partition, in batches of 10.
+ * - First batch succeeds, then sets a 50 s delay
+ * - Second batch times out in flight
+ * - Third batch isn't completed an times out in queue
+ * - purge(QUEUE) => len should drop to 10, dr err ERR__PURGE_QUEUE
+ * - purge(INFLIGHT|QUEUE) => len should drop to 0, ERR__PURGE_INFLIGHT
+ */
+
+
+static const int msgcnt = 29;
+struct waitmsgs {
+ rd_kafka_resp_err_t exp_err[29];
+ int cnt;
+};
+
+static mtx_t produce_req_lock;
+static cnd_t produce_req_cnd;
+static int produce_req_cnt = 0;
+
+
+#if WITH_SOCKEM
+
+int test_sockfd = 0;
+
+static rd_kafka_resp_err_t on_request_sent(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ void *ic_opaque) {
+
+ /* Save socket fd to limit ProduceRequest */
+ if (ApiKey == RD_KAFKAP_ApiVersion) {
+ test_sockfd = sockfd;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+static rd_kafka_resp_err_t on_response_received(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque) {
+ /* Add delay to send fd after first batch is received */
+ if (ApiKey == RD_KAFKAP_Produce) {
+ mtx_lock(&produce_req_lock);
+ produce_req_cnt++;
+ cnd_broadcast(&produce_req_cnd);
+ mtx_unlock(&produce_req_lock);
+ test_socket_sockem_set(test_sockfd, "delay", 50000);
+ }
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_resp_err_t err;
+ err = rd_kafka_interceptor_add_on_request_sent(rk, "catch_producer_req",
+ on_request_sent, NULL);
+ if (!err) {
+ rd_kafka_interceptor_add_on_response_received(
+ rk, "catch_api_version_resp", on_response_received, NULL);
+ }
+ return err;
+}
+#endif
+
+
+
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ int msgid;
+ struct waitmsgs *waitmsgs = rkmessage->_private;
+
+ TEST_ASSERT(waitmsgs->cnt > 0, "wait_msg_cnt is zero on DR");
+
+ waitmsgs->cnt--;
+
+ TEST_ASSERT(rkmessage->len == sizeof(msgid),
+ "invalid message size %" PRIusz ", expected sizeof(int)",
+ rkmessage->len);
+
+ memcpy(&msgid, rkmessage->payload, rkmessage->len);
+
+ TEST_ASSERT(msgid >= 0 && msgid < msgcnt, "msgid %d out of range 0..%d",
+ msgid, msgcnt - 1);
+
+ TEST_ASSERT((int)waitmsgs->exp_err[msgid] != 12345,
+ "msgid %d delivered twice", msgid);
+
+ TEST_SAY("DeliveryReport for msg #%d: %s\n", msgid,
+ rd_kafka_err2name(rkmessage->err));
+
+ if (rkmessage->err != waitmsgs->exp_err[msgid]) {
+ TEST_FAIL_LATER("Expected message #%d to fail with %s, not %s",
+ msgid,
+ rd_kafka_err2str(waitmsgs->exp_err[msgid]),
+ rd_kafka_err2str(rkmessage->err));
+ }
+
+ /* Indicate already seen */
+ waitmsgs->exp_err[msgid] = (rd_kafka_resp_err_t)12345;
+}
+
+
+
+static void purge_and_expect(const char *what,
+ int line,
+ rd_kafka_t *rk,
+ int purge_flags,
+ struct waitmsgs *waitmsgs,
+ int exp_remain,
+ const char *reason) {
+ test_timing_t t_purge;
+ rd_kafka_resp_err_t err;
+
+ TEST_SAY(
+ "%s:%d: purge(0x%x): "
+ "expecting %d messages to remain when done\n",
+ what, line, purge_flags, exp_remain);
+ TIMING_START(&t_purge, "%s:%d: purge(0x%x)", what, line, purge_flags);
+ err = rd_kafka_purge(rk, purge_flags);
+ TIMING_STOP(&t_purge);
+
+ TEST_ASSERT(!err, "purge(0x%x) at %d failed: %s", purge_flags, line,
+ rd_kafka_err2str(err));
+
+ rd_kafka_poll(rk, 0);
+ TEST_ASSERT(waitmsgs->cnt == exp_remain,
+ "%s:%d: expected %d messages remaining, not %d", what, line,
+ exp_remain, waitmsgs->cnt);
+}
+
+
+/**
+ * @brief Don't treat ERR__GAPLESS_GUARANTEE as a fatal error
+ */
+static int gapless_is_not_fatal_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *reason) {
+ return err != RD_KAFKA_RESP_ERR__GAPLESS_GUARANTEE;
+}
+
+static void
+do_test_purge(const char *what, int remote, int idempotence, int gapless) {
+ const char *topic = test_mk_topic_name("0086_purge", 0);
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ int i;
+ rd_kafka_resp_err_t err;
+ struct waitmsgs waitmsgs = RD_ZERO_INIT;
+
+#if !WITH_SOCKEM
+ if (remote) {
+ TEST_SKIP("No sockem support\n");
+ return;
+ }
+#endif
+
+ TEST_SAY(_C_MAG "Test rd_kafka_purge(): %s\n" _C_CLR, what);
+
+ test_conf_init(&conf, NULL, 20);
+
+ test_conf_set(conf, "batch.num.messages", "10");
+ test_conf_set(conf, "max.in.flight", "1");
+ test_conf_set(conf, "linger.ms", "5000");
+ test_conf_set(conf, "enable.idempotence",
+ idempotence ? "true" : "false");
+ test_conf_set(conf, "enable.gapless.guarantee",
+ gapless ? "true" : "false");
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+
+ if (remote) {
+#if WITH_SOCKEM
+ test_socket_enable(conf);
+ rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
+ on_new_producer, NULL);
+#endif
+
+ if (idempotence && !gapless)
+ test_curr->is_fatal_cb = gapless_is_not_fatal_cb;
+
+ mtx_init(&produce_req_lock, mtx_plain);
+ cnd_init(&produce_req_cnd);
+ } else {
+ test_conf_set(conf, "bootstrap.servers", NULL);
+ }
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("Producing %d messages to topic %s\n", msgcnt, topic);
+
+ for (i = 0; i < msgcnt; i++) {
+ int32_t partition;
+
+ if (remote) {
+ /* We need all messages in the same partition
+ * so that remaining messages are queued
+ * up behind the first messageset */
+ partition = 0;
+ } else {
+ partition = (i < 20 ? i % 3 : RD_KAFKA_PARTITION_UA);
+ }
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_VALUE((void *)&i, sizeof(i)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_OPAQUE(&waitmsgs), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev(#%d) failed: %s", i,
+ rd_kafka_err2str(err));
+
+ waitmsgs.exp_err[i] =
+ (remote && i < 10
+ ? RD_KAFKA_RESP_ERR_NO_ERROR
+ : remote && i < 20 ? RD_KAFKA_RESP_ERR__PURGE_INFLIGHT
+ : RD_KAFKA_RESP_ERR__PURGE_QUEUE);
+
+ waitmsgs.cnt++;
+ }
+
+
+ if (remote) {
+ /* Wait for ProduceRequest to be sent */
+ mtx_lock(&produce_req_lock);
+ cnd_timedwait_ms(&produce_req_cnd, &produce_req_lock,
+ 15 * 1000);
+ TEST_ASSERT(produce_req_cnt > 0,
+ "First Produce request should've been sent by now");
+ mtx_unlock(&produce_req_lock);
+
+ purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_QUEUE,
+ &waitmsgs, 10,
+ "in-flight messages should not be purged");
+
+ purge_and_expect(
+ what, __LINE__, rk,
+ RD_KAFKA_PURGE_F_INFLIGHT | RD_KAFKA_PURGE_F_QUEUE,
+ &waitmsgs, 0, "all messages should have been purged");
+ } else {
+ purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_INFLIGHT,
+ &waitmsgs, msgcnt,
+ "no messagess should have been purged");
+
+ purge_and_expect(what, __LINE__, rk, RD_KAFKA_PURGE_F_QUEUE,
+ &waitmsgs, 0,
+ "no messagess should have been purged");
+ }
+
+
+ rd_kafka_destroy(rk);
+
+ TEST_LATER_CHECK();
+}
+
+
+int main_0086_purge_remote(int argc, char **argv) {
+ const rd_bool_t has_idempotence =
+ test_broker_version >= TEST_BRKVER(0, 11, 0, 0);
+
+ do_test_purge("remote", 1 /*remote*/, 0 /*idempotence*/,
+ 0 /*!gapless*/);
+
+ if (has_idempotence) {
+ do_test_purge("remote,idempotence", 1 /*remote*/,
+ 1 /*idempotence*/, 0 /*!gapless*/);
+ do_test_purge("remote,idempotence,gapless", 1 /*remote*/,
+ 1 /*idempotence*/, 1 /*!gapless*/);
+ }
+ return 0;
+}
+
+
+int main_0086_purge_local(int argc, char **argv) {
+ do_test_purge("local", 0 /*local*/, 0, 0);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c
new file mode 100644
index 000000000..c71b5a69f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0088-produce_metadata_timeout.c
@@ -0,0 +1,162 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#if WITH_SOCKEM
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * @name Verify #1985:
+ *
+ * Previously known topic transitions to UNKNOWN when metadata times out,
+ * new messages are put on UA, when brokers come up again and metadata
+ * is retrieved the UA messages must be produced.
+ */
+
+static rd_atomic32_t refuse_connect;
+
+
+/**
+ * @brief Sockem connect, called from **internal librdkafka thread** through
+ * librdkafka's connect_cb
+ */
+static int connect_cb(struct test *test, sockem_t *skm, const char *id) {
+ if (rd_atomic32_get(&refuse_connect) > 0)
+ return -1;
+ else
+ return 0;
+}
+
+static int
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ /* Ignore connectivity errors since we'll be bringing down
+ * .. connectivity.
+ * SASL auther will think a connection-down even in the auth
+ * state means the broker doesn't support SASL PLAIN. */
+ TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
+ err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+ err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ return 0;
+ return 1;
+}
+
+static int msg_dr_cnt = 0;
+static int msg_dr_fail_cnt = 0;
+
+static void
+dr_msg_cb(rd_kafka_t *rk, const rd_kafka_message_t *rkmessage, void *opaque) {
+ msg_dr_cnt++;
+ TEST_SAYL(3, "Delivery for message %.*s: %s\n", (int)rkmessage->len,
+ (const char *)rkmessage->payload,
+ rd_kafka_err2name(rkmessage->err));
+
+ if (rkmessage->err) {
+ TEST_FAIL_LATER("Expected message to succeed, got %s",
+ rd_kafka_err2str(rkmessage->err));
+ msg_dr_fail_cnt++;
+ }
+}
+
+
+
+int main_0088_produce_metadata_timeout(int argc, char **argv) {
+ int64_t testid;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ const char *topic =
+ test_mk_topic_name("0088_produce_metadata_timeout", 1);
+ int msgcnt = 0;
+ rd_kafka_conf_t *conf;
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 60);
+ rd_kafka_conf_set_dr_msg_cb(conf, dr_msg_cb);
+ test_conf_set(conf, "metadata.max.age.ms", "10000");
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "-1");
+ test_conf_set(conf, "linger.ms", "5000");
+ test_conf_set(conf, "batch.num.messages", "5");
+
+ test_socket_enable(conf);
+ test_curr->connect_cb = connect_cb;
+ test_curr->is_fatal_cb = is_fatal_cb;
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ /* Create topic with single partition, for simplicity. */
+ test_create_topic(rk, topic, 1, 1);
+
+ rkt = rd_kafka_topic_new(rk, topic, NULL);
+
+ /* Produce first set of messages and wait for delivery */
+ test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt,
+ 20, NULL, 0, 0, &msgcnt);
+ while (msg_dr_cnt < 5)
+ rd_kafka_poll(rk, 1000);
+
+ TEST_SAY(_C_YEL
+ "Disconnecting sockets and "
+ "refusing future connections\n");
+ rd_atomic32_set(&refuse_connect, 1);
+ test_socket_close_all(test_curr, 1 /*reinit*/);
+
+
+ /* Wait for metadata timeout */
+ TEST_SAY("Waiting for metadata timeout\n");
+ rd_sleep(10 + 5);
+
+ /* These messages will be put on the UA queue */
+ test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt,
+ 20, NULL, 0, 0, &msgcnt);
+
+ /* Restore the connection(s) when metadata has timed out. */
+ TEST_SAY(_C_YEL "Allowing connections\n");
+ rd_atomic32_set(&refuse_connect, 0);
+
+ rd_sleep(3);
+ test_produce_msgs_nowait(rk, rkt, testid, RD_KAFKA_PARTITION_UA, msgcnt,
+ 20, NULL, 0, 0, &msgcnt);
+
+ test_flush(rk, 2 * 5 * 1000); /* linger.ms * 2 */
+
+ TEST_ASSERT(msg_dr_cnt == msgcnt, "expected %d, got %d", msgcnt,
+ msg_dr_cnt);
+ TEST_ASSERT(msg_dr_fail_cnt == 0, "expected %d dr failures, got %d", 0,
+ msg_dr_fail_cnt);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c
new file mode 100644
index 000000000..3d7cbf66f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0089-max_poll_interval.c
@@ -0,0 +1,358 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * Verify that long-processing consumer leaves the group during
+ * processing, with or without a log queue.
+ *
+ * MO:
+ * - produce messages to a single partition topic.
+ * - create two consumers, c1 and c2.
+ * - process first message slowly (2 * max.poll.interval.ms)
+ * - verify in other consumer that group rebalances after max.poll.interval.ms
+ * and the partition is assigned to the other consumer.
+ */
+
+/**
+ * @brief Test max.poll.interval.ms without any additional polling.
+ */
+static void do_test(void) {
+ const char *topic = test_mk_topic_name("0089_max_poll_interval", 1);
+ uint64_t testid;
+ const int msgcnt = 10;
+ rd_kafka_t *c[2];
+ rd_kafka_conf_t *conf;
+ int64_t ts_next[2] = {0, 0};
+ int64_t ts_exp_msg[2] = {0, 0};
+ int cmsgcnt = 0;
+ int i;
+ int bad = -1;
+
+ SUB_TEST();
+
+ testid = test_id_generate();
+
+ test_create_topic(NULL, topic, 1, 1);
+
+ test_produce_msgs_easy(topic, testid, -1, msgcnt);
+
+ test_conf_init(&conf, NULL, 60);
+
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+ c[1] = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(c[0], topic);
+ test_consumer_subscribe(c[1], topic);
+
+ while (1) {
+ for (i = 0; i < 2; i++) {
+ int64_t now;
+ rd_kafka_message_t *rkm;
+
+ /* Consumer is "processing" */
+ if (ts_next[i] > test_clock())
+ continue;
+
+ rkm = rd_kafka_consumer_poll(c[i], 100);
+ if (!rkm)
+ continue;
+
+ if (rkm->err) {
+ TEST_WARN(
+ "Consumer %d error: %s: "
+ "ignoring\n",
+ i, rd_kafka_message_errstr(rkm));
+ continue;
+ }
+
+ now = test_clock();
+
+ cmsgcnt++;
+
+ TEST_SAY(
+ "Consumer %d received message (#%d) "
+ "at offset %" PRId64 "\n",
+ i, cmsgcnt, rkm->offset);
+
+ if (ts_exp_msg[i]) {
+ /* This consumer is expecting a message
+ * after a certain time, namely after the
+ * rebalance following max.poll.. being
+ * exceeded in the other consumer */
+ TEST_ASSERT(
+ now > ts_exp_msg[i],
+ "Consumer %d: did not expect "
+ "message for at least %dms",
+ i, (int)((ts_exp_msg[i] - now) / 1000));
+ TEST_ASSERT(
+ now < ts_exp_msg[i] + 10000 * 1000,
+ "Consumer %d: expected message "
+ "within 10s, not after %dms",
+ i, (int)((now - ts_exp_msg[i]) / 1000));
+ TEST_SAY(
+ "Consumer %d: received message "
+ "at offset %" PRId64 " after rebalance\n",
+ i, rkm->offset);
+
+ rd_kafka_message_destroy(rkm);
+ goto done;
+
+ } else if (cmsgcnt == 1) {
+ /* Process this message for 20s */
+ ts_next[i] = now + (20000 * 1000);
+
+ /* Exp message on other consumer after
+ * max.poll.interval.ms */
+ ts_exp_msg[i ^ 1] = now + (10000 * 1000);
+
+ /* This is the bad consumer */
+ bad = i;
+
+ TEST_SAY(
+ "Consumer %d processing message at "
+ "offset %" PRId64 "\n",
+ i, rkm->offset);
+ rd_kafka_message_destroy(rkm);
+ } else {
+ rd_kafka_message_destroy(rkm);
+
+ TEST_FAIL(
+ "Consumer %d did not expect "
+ "a message",
+ i);
+ }
+ }
+ }
+
+done:
+
+ TEST_ASSERT(bad != -1, "Bad consumer not set");
+
+ /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */
+ while (1) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(c[bad], 1000);
+ TEST_ASSERT(rkm, "Expected consumer result within 1s");
+
+ TEST_ASSERT(rkm->err, "Did not expect message on bad consumer");
+
+ TEST_SAY("Consumer error: %s: %s\n",
+ rd_kafka_err2name(rkm->err),
+ rd_kafka_message_errstr(rkm));
+
+ if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
+ rd_kafka_message_destroy(rkm);
+ break;
+ }
+
+ rd_kafka_message_destroy(rkm);
+ }
+
+
+ for (i = 0; i < 2; i++)
+ rd_kafka_destroy_flags(c[i],
+ RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test max.poll.interval.ms while polling log queue.
+ */
+static void do_test_with_log_queue(void) {
+ const char *topic = test_mk_topic_name("0089_max_poll_interval", 1);
+ uint64_t testid;
+ const int msgcnt = 10;
+ rd_kafka_t *c[2];
+ rd_kafka_conf_t *conf;
+ rd_kafka_queue_t *logq[2];
+ int64_t ts_next[2] = {0, 0};
+ int64_t ts_exp_msg[2] = {0, 0};
+ int cmsgcnt = 0;
+ int i;
+ int bad = -1;
+ char errstr[512];
+
+ SUB_TEST();
+
+ testid = test_id_generate();
+
+ test_create_topic(NULL, topic, 1, 1);
+
+ test_produce_msgs_easy(topic, testid, -1, msgcnt);
+
+ test_conf_init(&conf, NULL, 60);
+
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "max.poll.interval.ms", "10000" /*10s*/);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "log.queue", "true");
+
+ c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+ c[1] = test_create_consumer(topic, NULL, conf, NULL);
+
+
+ for (i = 0; i < 2; i++) {
+ logq[i] = rd_kafka_queue_new(c[i]);
+ TEST_CALL__(rd_kafka_set_log_queue(c[i], logq[i]));
+ test_consumer_subscribe(c[i], topic);
+ }
+
+ while (1) {
+ for (i = 0; i < 2; i++) {
+ int64_t now;
+ rd_kafka_message_t *rkm;
+
+ /* Consumer is "processing".
+ * When we are "processing", we poll the log queue. */
+ if (ts_next[i] > test_clock()) {
+ rd_kafka_event_destroy(
+ rd_kafka_queue_poll(logq[i], 100));
+ continue;
+ }
+
+ rkm = rd_kafka_consumer_poll(c[i], 100);
+ if (!rkm)
+ continue;
+
+ if (rkm->err) {
+ TEST_WARN(
+ "Consumer %d error: %s: "
+ "ignoring\n",
+ i, rd_kafka_message_errstr(rkm));
+ continue;
+ }
+
+ now = test_clock();
+
+ cmsgcnt++;
+
+ TEST_SAY(
+ "Consumer %d received message (#%d) "
+ "at offset %" PRId64 "\n",
+ i, cmsgcnt, rkm->offset);
+
+ if (ts_exp_msg[i]) {
+ /* This consumer is expecting a message
+ * after a certain time, namely after the
+ * rebalance following max.poll.. being
+ * exceeded in the other consumer */
+ TEST_ASSERT(
+ now > ts_exp_msg[i],
+ "Consumer %d: did not expect "
+ "message for at least %dms",
+ i, (int)((ts_exp_msg[i] - now) / 1000));
+ TEST_ASSERT(
+ now < ts_exp_msg[i] + 10000 * 1000,
+ "Consumer %d: expected message "
+ "within 10s, not after %dms",
+ i, (int)((now - ts_exp_msg[i]) / 1000));
+ TEST_SAY(
+ "Consumer %d: received message "
+ "at offset %" PRId64 " after rebalance\n",
+ i, rkm->offset);
+
+ rd_kafka_message_destroy(rkm);
+ goto done;
+
+ } else if (cmsgcnt == 1) {
+ /* Process this message for 20s */
+ ts_next[i] = now + (20000 * 1000);
+
+ /* Exp message on other consumer after
+ * max.poll.interval.ms */
+ ts_exp_msg[i ^ 1] = now + (10000 * 1000);
+
+ /* This is the bad consumer */
+ bad = i;
+
+ TEST_SAY(
+ "Consumer %d processing message at "
+ "offset %" PRId64 "\n",
+ i, rkm->offset);
+ rd_kafka_message_destroy(rkm);
+ } else {
+ rd_kafka_message_destroy(rkm);
+
+ TEST_FAIL(
+ "Consumer %d did not expect "
+ "a message",
+ i);
+ }
+ }
+ }
+
+done:
+
+ TEST_ASSERT(bad != -1, "Bad consumer not set");
+
+ /* Wait for error ERR__MAX_POLL_EXCEEDED on the bad consumer. */
+ while (1) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(c[bad], 1000);
+ TEST_ASSERT(rkm, "Expected consumer result within 1s");
+
+ TEST_ASSERT(rkm->err, "Did not expect message on bad consumer");
+
+ TEST_SAY("Consumer error: %s: %s\n",
+ rd_kafka_err2name(rkm->err),
+ rd_kafka_message_errstr(rkm));
+
+ if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED) {
+ rd_kafka_message_destroy(rkm);
+ break;
+ }
+
+ rd_kafka_message_destroy(rkm);
+ }
+
+
+ for (i = 0; i < 2; i++) {
+ rd_kafka_destroy_flags(c[i],
+ RD_KAFKA_DESTROY_F_NO_CONSUMER_CLOSE);
+ rd_kafka_queue_destroy(logq[i]);
+ }
+
+ SUB_TEST_PASS();
+}
+
+int main_0089_max_poll_interval(int argc, char **argv) {
+ do_test();
+ do_test_with_log_queue();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c
new file mode 100644
index 000000000..02d16df56
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0090-idempotence.c
@@ -0,0 +1,172 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include <stdarg.h>
+
+/**
+ * @name Idempotent Producer tests
+ *
+ */
+
+static struct {
+ int batch_cnt;
+ int initial_fail_batch_cnt;
+ rd_atomic32_t produce_cnt;
+} state;
+
+
+
+/**
+ * @brief This is called prior to parsing the ProduceResponse,
+ * we use it to inject errors.
+ *
+ * @locality an internal rdkafka thread
+ */
+static rd_kafka_resp_err_t handle_ProduceResponse(rd_kafka_t *rk,
+ int32_t brokerid,
+ uint64_t msgseq,
+ rd_kafka_resp_err_t err) {
+ rd_kafka_resp_err_t new_err = err;
+ int n;
+
+ if (err == RD_KAFKA_RESP_ERR__RETRY)
+ return err; /* Skip internal retries, such as triggered by
+ * rd_kafka_broker_bufq_purge_by_toppar() */
+
+ n = rd_atomic32_add(&state.produce_cnt, 1);
+
+ /* Let the first N ProduceRequests fail with request timeout.
+ * Do allow the first request through. */
+ if (n > 1 && n <= state.initial_fail_batch_cnt) {
+ if (err)
+ TEST_WARN(
+ "First %d ProduceRequests should not "
+ "have failed, this is #%d with error %s for "
+ "brokerid %" PRId32 " and msgseq %" PRIu64 "\n",
+ state.initial_fail_batch_cnt, n,
+ rd_kafka_err2name(err), brokerid, msgseq);
+ assert(!err &&
+ *"First N ProduceRequests should not have failed");
+ new_err = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ }
+
+ TEST_SAY("handle_ProduceResponse(broker %" PRId32 ", MsgSeq %" PRId64
+ ", Error %s) -> new Error %s\n",
+ brokerid, msgseq, rd_kafka_err2name(err),
+ rd_kafka_err2name(new_err));
+
+ return new_err;
+}
+
+
+/**
+ * @brief Test handling of implicit acks.
+ *
+ * @param batch_cnt Total number of batches, ProduceRequests, sent.
+ * @param initial_fail_batch_cnt How many of the initial batches should
+ * fail with an emulated network timeout.
+ */
+static void do_test_implicit_ack(const char *what,
+ int batch_cnt,
+ int initial_fail_batch_cnt) {
+ rd_kafka_t *rk;
+ const char *topic = test_mk_topic_name("0090_idempotence_impl_ack", 1);
+ const int32_t partition = 0;
+ uint64_t testid;
+ int msgcnt = 10 * batch_cnt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ test_msgver_t mv;
+
+ TEST_SAY(_C_MAG "[ Test implicit ack: %s ]\n", what);
+
+ rd_atomic32_init(&state.produce_cnt, 0);
+ state.batch_cnt = batch_cnt;
+ state.initial_fail_batch_cnt = initial_fail_batch_cnt;
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 60);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ test_conf_set(conf, "enable.idempotence", "true");
+ test_conf_set(conf, "batch.num.messages", "10");
+ test_conf_set(conf, "linger.ms", "500");
+ test_conf_set(conf, "retry.backoff.ms", "10");
+
+ /* The ProduceResponse handler will inject timed-out-in-flight
+ * errors for the first N ProduceRequests, which will trigger retries
+ * that in turn will result in OutOfSequence errors. */
+ test_conf_set(conf, "ut_handle_ProduceResponse",
+ (char *)handle_ProduceResponse);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ test_create_topic(rk, topic, 1, 1);
+
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+
+ TEST_SAY("Producing %d messages\n", msgcnt);
+ test_produce_msgs(rk, rkt, testid, -1, 0, msgcnt, NULL, 0);
+
+ TEST_SAY("Flushing..\n");
+ rd_kafka_flush(rk, 10000);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY("Verifying messages with consumer\n");
+ test_msgver_init(&mv, testid);
+ test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, msgcnt,
+ NULL, &mv);
+ test_msgver_verify("verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
+ test_msgver_clear(&mv);
+
+ TEST_SAY(_C_GRN "[ Test implicit ack: %s : PASS ]\n", what);
+}
+
+
+int main_0090_idempotence(int argc, char **argv) {
+ /* The broker maintains a window of the N last ProduceRequests
+ * per partition and producer to allow ProduceRequest retries
+ * for previously successful requests to return a non-error response.
+ * This limit is currently (AK 2.0) hard coded at 5. */
+ const int broker_req_window = 5;
+
+ do_test_implicit_ack("within broker request window",
+ broker_req_window * 2, broker_req_window);
+
+ do_test_implicit_ack("outside broker request window",
+ broker_req_window + 3, broker_req_window + 3);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c
new file mode 100644
index 000000000..c1506afd9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0091-max_poll_interval_timeout.c
@@ -0,0 +1,297 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+
+/**
+ * Verify that long-processing consumer does not leave the group during
+ * processing when processing time < max.poll.interval.ms but
+ * max.poll.interval.ms > socket.timeout.ms.
+ *
+ * MO:
+ * - produce N*.. messages to two partitions
+ * - create two consumers, c0 and c1.
+ * - subscribe c0, wait for rebalance, poll first message.
+ * - subscribe c1
+ * - have both consumers poll messages and spend T seconds processing
+ * each message.
+ * - wait until both consumers have received N messages each.
+ * - check that no errors (disconnects, etc) or extra rebalances were raised.
+ */
+
+
+const int64_t processing_time = 31 * 1000 * 1000; /*31s*/
+
+struct _consumer {
+ rd_kafka_t *rk;
+ int64_t last;
+ int cnt;
+ int rebalance_cnt;
+ int max_rebalance_cnt;
+};
+
+static void do_consume(struct _consumer *cons, int timeout_s) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(cons->rk, timeout_s * 1000);
+ if (!rkm)
+ return;
+
+ TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)",
+ rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm),
+ (int)((test_clock() - cons->last) / 1000));
+
+ TEST_SAY(
+ "%s: processing message #%d from "
+ "partition %" PRId32 " at offset %" PRId64 "\n",
+ rd_kafka_name(cons->rk), cons->cnt, rkm->partition, rkm->offset);
+
+ rd_kafka_message_destroy(rkm);
+
+ cons->cnt++;
+ cons->last = test_clock();
+
+ TEST_SAY("%s: simulate processing by sleeping for %ds\n",
+ rd_kafka_name(cons->rk), timeout_s);
+ rd_sleep(timeout_s);
+}
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ struct _consumer *cons = opaque;
+
+ cons->rebalance_cnt++;
+
+ TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n",
+ rd_kafka_name(cons->rk), cons->rebalance_cnt,
+ cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt);
+
+ TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt,
+ "%s rebalanced %d times, max was %d",
+ rd_kafka_name(cons->rk), cons->rebalance_cnt,
+ cons->max_rebalance_cnt);
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+ rd_kafka_assign(rk, parts);
+ else
+ rd_kafka_assign(rk, NULL);
+}
+
+
+#define _CONSUMER_CNT 2
+static void do_test_with_subscribe(const char *topic) {
+ int64_t testid;
+ const int msgcnt = 3;
+ struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
+ rd_kafka_conf_t *conf;
+
+ TEST_SAY(_C_MAG "[ Test max.poll.interval.ms with subscribe() ]\n");
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL,
+ 10 + (int)(processing_time / 1000000) * msgcnt);
+
+ /* Produce extra messages since we can't fully rely on the
+ * random partitioner to provide exact distribution. */
+ test_produce_msgs_easy(topic, testid, -1, msgcnt * _CONSUMER_CNT * 2);
+ test_produce_msgs_easy(topic, testid, 1, msgcnt / 2);
+
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "max.poll.interval.ms", "20000" /*20s*/);
+ test_conf_set(conf, "socket.timeout.ms", "15000" /*15s*/);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.partition.eof", "false");
+ /* Trigger other requests often */
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "1000");
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+
+ rd_kafka_conf_set_opaque(conf, &c[0]);
+ c[0].rk =
+ test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+
+ rd_kafka_conf_set_opaque(conf, &c[1]);
+ c[1].rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(c[0].rk, topic);
+
+ /* c0: assign, (c1 joins) revoke, assign */
+ c[0].max_rebalance_cnt = 3;
+ /* c1: assign */
+ c[1].max_rebalance_cnt = 1;
+
+ /* Wait for assignment */
+ while (1) {
+ rd_kafka_topic_partition_list_t *parts = NULL;
+
+ do_consume(&c[0], 1 /*1s*/);
+
+ if (rd_kafka_assignment(c[0].rk, &parts) !=
+ RD_KAFKA_RESP_ERR_NO_ERROR ||
+ !parts || parts->cnt == 0) {
+ if (parts)
+ rd_kafka_topic_partition_list_destroy(parts);
+ continue;
+ }
+
+ TEST_SAY("%s got assignment of %d partition(s)\n",
+ rd_kafka_name(c[0].rk), parts->cnt);
+ rd_kafka_topic_partition_list_destroy(parts);
+ break;
+ }
+
+ test_consumer_subscribe(c[1].rk, topic);
+
+ /* Poll until both consumers have finished reading N messages */
+ while (c[0].cnt < msgcnt && c[1].cnt < msgcnt) {
+ do_consume(&c[0], 0);
+ do_consume(&c[1], 10 /*10s*/);
+ }
+
+ /* Allow the extra revoke rebalance on close() */
+ c[0].max_rebalance_cnt++;
+ c[1].max_rebalance_cnt++;
+
+ test_consumer_close(c[0].rk);
+ test_consumer_close(c[1].rk);
+
+ rd_kafka_destroy(c[0].rk);
+ rd_kafka_destroy(c[1].rk);
+
+ TEST_SAY(_C_GRN
+ "[ Test max.poll.interval.ms with subscribe(): PASS ]\n");
+}
+
+
+/**
+ * @brief Verify that max.poll.interval.ms does NOT kick in
+ * when just using assign() and not subscribe().
+ */
+static void do_test_with_assign(const char *topic) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_message_t *rkm;
+
+ TEST_SAY(_C_MAG "[ Test max.poll.interval.ms with assign() ]\n");
+
+ test_conf_init(&conf, NULL, 60);
+
+ test_create_topic(NULL, topic, 2, 1);
+
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "max.poll.interval.ms", "7000" /*7s*/);
+
+ rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_assign_partition("ASSIGN", rk, topic, 0,
+ RD_KAFKA_OFFSET_END);
+
+
+ /* Sleep for longer than max.poll.interval.ms */
+ rd_sleep(10);
+
+ /* Make sure no error was raised */
+ while ((rkm = rd_kafka_consumer_poll(rk, 0))) {
+ TEST_ASSERT(!rkm->err, "Unexpected consumer error: %s: %s",
+ rd_kafka_err2name(rkm->err),
+ rd_kafka_message_errstr(rkm));
+
+ rd_kafka_message_destroy(rkm);
+ }
+
+
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY(_C_GRN "[ Test max.poll.interval.ms with assign(): PASS ]\n");
+}
+
+
+/**
+ * @brief Verify that max.poll.interval.ms kicks in even if
+ * the application hasn't called poll once.
+ */
+static void do_test_no_poll(const char *topic) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_message_t *rkm;
+ rd_bool_t raised = rd_false;
+
+ TEST_SAY(_C_MAG "[ Test max.poll.interval.ms without calling poll ]\n");
+
+ test_conf_init(&conf, NULL, 60);
+
+ test_create_topic(NULL, topic, 2, 1);
+
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "max.poll.interval.ms", "7000" /*7s*/);
+
+ rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(rk, topic);
+
+ /* Sleep for longer than max.poll.interval.ms */
+ rd_sleep(10);
+
+ /* Make sure the error is raised */
+ while ((rkm = rd_kafka_consumer_poll(rk, 0))) {
+ if (rkm->err == RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED)
+ raised = rd_true;
+
+ rd_kafka_message_destroy(rkm);
+ }
+
+ TEST_ASSERT(raised, "Expected to have seen ERR__MAX_POLL_EXCEEDED");
+
+ test_consumer_close(rk);
+ rd_kafka_destroy(rk);
+
+ TEST_SAY(_C_GRN
+ "[ Test max.poll.interval.ms without calling poll: PASS ]\n");
+}
+
+
+int main_0091_max_poll_interval_timeout(int argc, char **argv) {
+ const char *topic =
+ test_mk_topic_name("0091_max_poll_interval_tmout", 1);
+
+ test_create_topic(NULL, topic, 2, 1);
+
+ do_test_with_subscribe(topic);
+
+ do_test_with_assign(topic);
+
+ do_test_no_poll(topic);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c
new file mode 100644
index 000000000..46308ddf4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0092-mixed_msgver.c
@@ -0,0 +1,97 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * @name Mixed MsgVersions.
+ *
+ * - Create producer.
+ * - Produce N/2 m essages. (with MsgVer2)
+ * - Change the topic message.format.version to a MsgVer1 version.
+ * - Consume the messages to verify all can be read.
+ */
+
+
+
+int main_0092_mixed_msgver(int argc, char **argv) {
+ rd_kafka_t *rk;
+ const char *topic = test_mk_topic_name("0092_mixed_msgver", 1);
+ int32_t partition = 0;
+ const int msgcnt = 60;
+ int cnt;
+ int64_t testid;
+ int msgcounter = msgcnt;
+
+ if (test_idempotent_producer) {
+ TEST_SKIP("Idempotent producer requires MsgVersion >= 2\n");
+ return 0;
+ }
+
+ testid = test_id_generate();
+
+ rk = test_create_producer();
+
+ /* Produce messages */
+ for (cnt = 0; cnt < msgcnt; cnt++) {
+ rd_kafka_resp_err_t err;
+ char buf[230];
+
+ test_msg_fmt(buf, sizeof(buf), testid, partition, cnt);
+
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(partition),
+ RD_KAFKA_V_VALUE(buf, sizeof(buf)),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_OPAQUE(&msgcounter), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "producev() #%d failed: %s", cnt,
+ rd_kafka_err2str(err));
+
+ /* One message per batch */
+ rd_kafka_flush(rk, 30 * 1000);
+
+ if (cnt == msgcnt / 2) {
+ const char *msgconf[] = {"message.format.version",
+ "0.10.0.0"};
+ TEST_SAY("Changing message.format.version\n");
+ err = test_AlterConfigs_simple(
+ rk, RD_KAFKA_RESOURCE_TOPIC, topic, msgconf, 1);
+ TEST_ASSERT(!err, "AlterConfigs failed: %s",
+ rd_kafka_err2str(err));
+ }
+ }
+
+ rd_kafka_destroy(rk);
+
+ /* Consume messages */
+ test_consume_msgs_easy(NULL, topic, testid, -1, msgcnt, NULL);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c
new file mode 100644
index 000000000..366deca32
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0093-holb.c
@@ -0,0 +1,197 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+
+/**
+ * @brief Attempt to verify head-of-line-blocking behaviour.
+ *
+ * - Create two high-level consumers with socket.timeout.ms=low,
+ * and max.poll.interval.ms=high, metadata refresh interval=low.
+ * - Have first consumer join the group (subscribe()), should finish quickly.
+ * - Have second consumer join the group, but don't call poll on
+ * the first consumer for some time to have the second consumer
+ * block on JoinGroup.
+ * - Verify that errors were raised due to timed out (Metadata) requests.
+ */
+
+struct _consumer {
+ rd_kafka_t *rk;
+ int64_t last;
+ int cnt;
+ int rebalance_cnt;
+ int max_rebalance_cnt;
+};
+
+static void do_consume(struct _consumer *cons, int timeout_s) {
+ rd_kafka_message_t *rkm;
+
+ rkm = rd_kafka_consumer_poll(cons->rk, 100 + (timeout_s * 1000));
+ if (!rkm)
+ return;
+
+ TEST_ASSERT(!rkm->err, "%s consumer error: %s (last poll was %dms ago)",
+ rd_kafka_name(cons->rk), rd_kafka_message_errstr(rkm),
+ (int)((test_clock() - cons->last) / 1000));
+
+ rd_kafka_message_destroy(rkm);
+
+ cons->cnt++;
+ cons->last = test_clock();
+
+ if (timeout_s > 0) {
+ TEST_SAY("%s: simulate processing by sleeping for %ds\n",
+ rd_kafka_name(cons->rk), timeout_s);
+ rd_sleep(timeout_s);
+ }
+}
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ struct _consumer *cons = opaque;
+
+ cons->rebalance_cnt++;
+
+ TEST_SAY(_C_BLU "%s rebalance #%d/%d: %s: %d partition(s)\n",
+ rd_kafka_name(cons->rk), cons->rebalance_cnt,
+ cons->max_rebalance_cnt, rd_kafka_err2name(err), parts->cnt);
+
+ TEST_ASSERT(cons->rebalance_cnt <= cons->max_rebalance_cnt,
+ "%s rebalanced %d times, max was %d",
+ rd_kafka_name(cons->rk), cons->rebalance_cnt,
+ cons->max_rebalance_cnt);
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS)
+ rd_kafka_assign(rk, parts);
+ else
+ rd_kafka_assign(rk, NULL);
+}
+
+
+#define _CONSUMER_CNT 2
+int main_0093_holb_consumer(int argc, char **argv) {
+ const char *topic = test_mk_topic_name("0093_holb_consumer", 1);
+ int64_t testid;
+ const int msgcnt = 100;
+ struct _consumer c[_CONSUMER_CNT] = RD_ZERO_INIT;
+ rd_kafka_conf_t *conf;
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 60);
+
+ test_create_topic(NULL, topic, 1, 1);
+
+ test_produce_msgs_easy(topic, testid, 0, msgcnt);
+
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "max.poll.interval.ms", "20000");
+ test_conf_set(conf, "socket.timeout.ms", "3000");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ /* Trigger other requests often */
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+
+ rd_kafka_conf_set_opaque(conf, &c[0]);
+ c[0].rk =
+ test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+
+ rd_kafka_conf_set_opaque(conf, &c[1]);
+ c[1].rk = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(c[0].rk, topic);
+
+ /* c0: assign */
+ c[0].max_rebalance_cnt = 1;
+
+ /* c1: none, hasn't joined yet */
+ c[1].max_rebalance_cnt = 0;
+
+ TEST_SAY("Waiting for c[0] assignment\n");
+ while (1) {
+ rd_kafka_topic_partition_list_t *parts = NULL;
+
+ do_consume(&c[0], 1 /*1s*/);
+
+ if (rd_kafka_assignment(c[0].rk, &parts) !=
+ RD_KAFKA_RESP_ERR_NO_ERROR ||
+ !parts || parts->cnt == 0) {
+ if (parts)
+ rd_kafka_topic_partition_list_destroy(parts);
+ continue;
+ }
+
+ TEST_SAY("%s got assignment of %d partition(s)\n",
+ rd_kafka_name(c[0].rk), parts->cnt);
+ rd_kafka_topic_partition_list_destroy(parts);
+ break;
+ }
+
+ TEST_SAY("c[0] got assignment, consuming..\n");
+ do_consume(&c[0], 5 /*5s*/);
+
+ TEST_SAY("Joining second consumer\n");
+ test_consumer_subscribe(c[1].rk, topic);
+
+ /* Just poll second consumer for 10s, the rebalance will not
+ * finish until the first consumer polls */
+ do_consume(&c[1], 10 /*10s*/);
+
+ /* c0: the next call to do_consume/poll will trigger
+ * its rebalance callback, first revoke then assign. */
+ c[0].max_rebalance_cnt += 2;
+ /* c1: first rebalance */
+ c[1].max_rebalance_cnt++;
+
+ TEST_SAY("Expected rebalances: c[0]: %d/%d, c[1]: %d/%d\n",
+ c[0].rebalance_cnt, c[0].max_rebalance_cnt, c[1].rebalance_cnt,
+ c[1].max_rebalance_cnt);
+
+ /* Let rebalances kick in, then consume messages. */
+ while (c[0].cnt + c[1].cnt < msgcnt) {
+ do_consume(&c[0], 0);
+ do_consume(&c[1], 0);
+ }
+
+ /* Allow the extra revoke rebalance on close() */
+ c[0].max_rebalance_cnt++;
+ c[1].max_rebalance_cnt++;
+
+ test_consumer_close(c[0].rk);
+ test_consumer_close(c[1].rk);
+
+ rd_kafka_destroy(c[0].rk);
+ rd_kafka_destroy(c[1].rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c
new file mode 100644
index 000000000..8704adc09
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0094-idempotence_msg_timeout.c
@@ -0,0 +1,230 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+#if WITH_SOCKEM
+/**
+ * @name Test handling of message timeouts with the idempotent producer.
+ *
+ * - Set message timeout low.
+ * - Set low socket send buffer, promote batching, and use large messages
+ * to make sure requests are partially sent.
+ * - Produce a steady flow of messages
+ * - After some time, set the sockem delay higher than the message timeout.
+ * - Shortly after, remove the sockem delay.
+ * - Verify that all messages were succesfully produced in order.
+ *
+ * https://github.com/confluentinc/confluent-kafka-dotnet/issues/704
+ */
+
+/*
+ * Scenario:
+ *
+ * MsgSets: [ 1 | 2 | 3 | 4 | 5 | 6 ]
+ *
+ * 1. Producer sends MsgSets 1,2,3,4,5.
+ * 2. Producer receives ack for MsgSet 1.
+ * 3. Connection to broker goes down.
+ * 4. The messages in MsgSet 2 are timed out by producer's timeout scanner.
+ * 5. Connection to broker comes back up.
+ * 6. Producer choices:
+ * 6a. Reset the epoch and starting producing MsgSet 3 with reset sequence 0.
+ * Pros: instant recovery.
+ * Cons: a. If MsgSet 2 was persisted by the broker we now have desynch
+ * between producer and broker: Producer thinks the message failed,
+ * while broker wrote them to the log.
+ * b. If MsgSets 3,.. was also persisted then there will be duplicates
+ * as MsgSet 3 is produced with a reset sequence of 0.
+ * 6b. Try to recover within the current epoch, the broker is expecting
+ * sequence 2, 3, 4, or 5, depending on what it managed to persist
+ * before the connection went down.
+ * The producer should produce msg 2 but it no longer exists due to timed
+ * out. If lucky, only 2 was persisted by the broker, which means the Producer
+ * can successfully produce 3.
+ * If 3 was persisted the producer would get a DuplicateSequence error
+ * back, indicating that it was already produced, this would get
+ * the producer back in synch.
+ * If 2+ was not persisted an OutOfOrderSeq would be returned when 3
+ * is produced. The producer should be able to bump the epoch and
+ * start with Msg 3 as reset sequence 0 without risking loss or duplication.
+ * 6c. Try to recover within the current epoch by draining the toppar
+ * and then adjusting its base msgid to the head-of-line message in
+ * the producer queue (after timed out messages were removed).
+ * This avoids bumping the epoch (which grinds all partitions to a halt
+ * while draining, and requires an extra roundtrip).
+ * It is tricky to get the adjustment value correct though.
+ * 6d. Drain all partitions and then bump the epoch, resetting the base
+ * sequence to the first message in the queue.
+ * Pros: simple.
+ * Cons: will grind all partitions to a halt while draining.
+ *
+ * We chose to go with option 6d.
+ */
+
+
+#include <stdarg.h>
+#include <errno.h>
+
+#include "sockem_ctrl.h"
+
+static struct {
+ int dr_ok;
+ int dr_fail;
+ test_msgver_t mv_delivered;
+} counters;
+
+
+static void my_dr_msg_cb(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+
+ if (rd_kafka_message_status(rkmessage) >=
+ RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED)
+ test_msgver_add_msg(rk, &counters.mv_delivered,
+ (rd_kafka_message_t *)rkmessage);
+
+ if (rkmessage->err) {
+ counters.dr_fail++;
+ } else {
+ counters.dr_ok++;
+ }
+}
+
+static int
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ /* Ignore connectivity errors since we'll be bringing down
+ * .. connectivity.
+ * SASL auther will think a connection-down even in the auth
+ * state means the broker doesn't support SASL PLAIN. */
+ TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
+ if (err == RD_KAFKA_RESP_ERR__TRANSPORT ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN ||
+ err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+ err == RD_KAFKA_RESP_ERR__TIMED_OUT)
+ return 0;
+ return 1;
+}
+
+
+static void do_test_produce_timeout(const char *topic, const int msgrate) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_t *rkt;
+ uint64_t testid;
+ rd_kafka_resp_err_t err;
+ const int partition = RD_KAFKA_PARTITION_UA;
+ int msgcnt = msgrate * 20;
+ const int msgsize = 100 * 1000;
+ sockem_ctrl_t ctrl;
+ int msgcounter = 0;
+ test_msgver_t mv;
+
+ TEST_SAY(_C_BLU
+ "Test idempotent producer "
+ "with message timeouts (%d msgs/s)\n",
+ msgrate);
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 60);
+ test_msgver_init(&counters.mv_delivered, testid);
+ sockem_ctrl_init(&ctrl);
+
+ test_conf_set(conf, "enable.idempotence", "true");
+ test_conf_set(conf, "linger.ms", "300");
+ test_conf_set(conf, "reconnect.backoff.ms", "2000");
+ test_conf_set(conf, "socket.send.buffer.bytes", "10000");
+ rd_kafka_conf_set_dr_msg_cb(conf, my_dr_msg_cb);
+
+ test_socket_enable(conf);
+ test_curr->is_fatal_cb = is_fatal_cb;
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ rkt = test_create_producer_topic(rk, topic, "message.timeout.ms",
+ "5000", NULL);
+
+ /* Create the topic to make sure connections are up and ready. */
+ err = test_auto_create_topic_rkt(rk, rkt, tmout_multip(5000));
+ TEST_ASSERT(!err, "topic creation failed: %s", rd_kafka_err2str(err));
+
+ /* After 1 seconds, set socket delay to 2*message.timeout.ms */
+ sockem_ctrl_set_delay(&ctrl, 1000, 2 * 5000);
+
+ /* After 3*message.timeout.ms seconds, remove delay. */
+ sockem_ctrl_set_delay(&ctrl, 3 * 5000, 0);
+
+ test_produce_msgs_nowait(rk, rkt, testid, partition, 0, msgcnt, NULL,
+ msgsize, msgrate, &msgcounter);
+
+ test_flush(rk, 3 * 5000);
+
+ TEST_SAY("%d/%d messages produced, %d delivered, %d failed\n",
+ msgcounter, msgcnt, counters.dr_ok, counters.dr_fail);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ sockem_ctrl_term(&ctrl);
+
+ TEST_SAY("Verifying %d delivered messages with consumer\n",
+ counters.dr_ok);
+
+ test_msgver_init(&mv, testid);
+ test_consume_msgs_easy_mv(NULL, topic, partition, testid, 1, -1, NULL,
+ &mv);
+ test_msgver_verify_compare("delivered", &mv, &counters.mv_delivered,
+ TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
+ TEST_MSGVER_BY_MSGID |
+ TEST_MSGVER_SUBSET);
+ test_msgver_clear(&mv);
+ test_msgver_clear(&counters.mv_delivered);
+
+
+ TEST_SAY(_C_GRN
+ "Test idempotent producer "
+ "with message timeouts (%d msgs/s): SUCCESS\n",
+ msgrate);
+}
+
+int main_0094_idempotence_msg_timeout(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+
+ do_test_produce_timeout(topic, 10);
+
+ if (test_quick) {
+ TEST_SAY("Skipping further tests due to quick mode\n");
+ return 0;
+ }
+
+ do_test_produce_timeout(topic, 100);
+
+ return 0;
+}
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp
new file mode 100644
index 000000000..6ebd5f500
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0095-all_brokers_down.cpp
@@ -0,0 +1,122 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+
+class errorEventCb : public RdKafka::EventCb {
+ public:
+ errorEventCb() : error_seen(false) {
+ }
+
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": "
+ << event.str() << "\n");
+ if (event.err() == RdKafka::ERR__ALL_BROKERS_DOWN)
+ error_seen = true;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ Test::Say(tostr() << "Log: " << event.str() << "\n");
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ bool error_seen;
+};
+
+
+extern "C" {
+int main_0095_all_brokers_down(int argc, char **argv) {
+ RdKafka::Conf *conf;
+ std::string errstr;
+
+ Test::conf_init(&conf, NULL, 20);
+ /* Two broker addresses that will quickly reject the connection */
+ Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1,127.0.0.1:2");
+
+ /*
+ * First test producer
+ */
+ errorEventCb pEvent = errorEventCb();
+
+ if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ Test::Say("Test Producer\n");
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+ /* Wait for all brokers down */
+ while (!pEvent.error_seen)
+ p->poll(1000);
+
+ delete p;
+
+
+ /*
+ * Test high-level consumer that has a logical broker (group coord),
+ * which has caused AllBrokersDown generation problems (#2259)
+ */
+ errorEventCb cEvent = errorEventCb();
+
+ Test::conf_set(conf, "group.id", "test");
+
+ if (conf->set("event_cb", &cEvent, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ Test::Say("Test KafkaConsumer\n");
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+
+ delete conf;
+
+ /* Wait for all brokers down */
+ while (!cEvent.error_seen) {
+ RdKafka::Message *m = c->consume(1000);
+ if (m)
+ delete m;
+ }
+
+ c->close();
+
+ delete c;
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp
new file mode 100644
index 000000000..8a3a0bce5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0097-ssl_verify.cpp
@@ -0,0 +1,466 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include <fstream>
+#include <streambuf>
+#include "testcpp.h"
+#include "tinycthread.h"
+
+static const std::string envname[RdKafka::CERT__CNT][RdKafka::CERT_ENC__CNT] = {
+ /* [RdKafka::CERT_PUBLIC_KEY] = */
+ {
+ "SSL_pkcs",
+ "SSL_pub_der",
+ "SSL_pub_pem",
+ },
+ /* [RdKafka::CERT_PRIVATE_KEY] = */
+ {
+ "SSL_pkcs",
+ "SSL_priv_der",
+ "SSL_priv_pem",
+ },
+ /* [RdKafka::CERT_CA] = */
+ {
+ "SSL_pkcs",
+ "SSL_ca_der",
+ "SSL_all_cas_pem" /* Contains multiple CA certs */,
+ }};
+
+
+static std::vector<char> read_file(const std::string path) {
+ std::ifstream ifs(path.c_str(), std::ios::binary | std::ios::ate);
+ if (ifs.fail())
+ Test::Fail("Failed to open " + path + ": " + strerror(errno));
+ int size = (int)ifs.tellg();
+ ifs.seekg(0, std::ifstream::beg);
+ std::vector<char> buffer;
+ buffer.resize(size);
+ ifs.read(buffer.data(), size);
+ ifs.close();
+ return buffer;
+}
+
+
+/**
+ * @name SslCertVerifyCb verification.
+ *
+ * Requires security.protocol=*SSL
+ */
+
+class TestVerifyCb : public RdKafka::SslCertificateVerifyCb {
+ public:
+ bool verify_ok;
+ int cnt; //< Verify callbacks triggered.
+ mtx_t lock;
+
+ TestVerifyCb(bool verify_ok) : verify_ok(verify_ok), cnt(0) {
+ mtx_init(&lock, mtx_plain);
+ }
+
+ ~TestVerifyCb() {
+ mtx_destroy(&lock);
+ }
+
+ bool ssl_cert_verify_cb(const std::string &broker_name,
+ int32_t broker_id,
+ int *x509_error,
+ int depth,
+ const char *buf,
+ size_t size,
+ std::string &errstr) {
+ mtx_lock(&lock);
+
+ Test::Say(tostr() << "ssl_cert_verify_cb #" << cnt << ": broker_name="
+ << broker_name << ", broker_id=" << broker_id
+ << ", x509_error=" << *x509_error << ", depth=" << depth
+ << ", buf size=" << size << ", verify_ok=" << verify_ok
+ << "\n");
+
+ cnt++;
+ mtx_unlock(&lock);
+
+ if (verify_ok)
+ return true;
+
+ errstr = "This test triggered a verification failure";
+ *x509_error = 26; /*X509_V_ERR_INVALID_PURPOSE*/
+
+ return false;
+ }
+};
+
+
+/**
+ * @brief Set SSL PEM cert/key using configuration property.
+ *
+ * The cert/key is loadded from environment variables set up by trivup.
+ *
+ * @param loc_prop ssl.X.location property that will be cleared.
+ * @param pem_prop ssl.X.pem property that will be set.
+ * @param cert_type Certificate type.
+ */
+static void conf_location_to_pem(RdKafka::Conf *conf,
+ std::string loc_prop,
+ std::string pem_prop,
+ RdKafka::CertificateType cert_type) {
+ std::string loc;
+
+ std::string errstr;
+ if (conf->set(loc_prop, "", errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to reset " + loc_prop + ": " + errstr);
+
+ const char *p;
+ p = test_getenv(envname[cert_type][RdKafka::CERT_ENC_PEM].c_str(), NULL);
+ if (!p)
+ Test::Fail(
+ "Invalid test environment: "
+ "Missing " +
+ envname[cert_type][RdKafka::CERT_ENC_PEM] +
+ " env variable: make sure trivup is up to date");
+
+ loc = p;
+
+
+ /* Read file */
+ std::ifstream ifs(loc.c_str());
+ std::string pem((std::istreambuf_iterator<char>(ifs)),
+ std::istreambuf_iterator<char>());
+
+ Test::Say("Read env " + envname[cert_type][RdKafka::CERT_ENC_PEM] + "=" +
+ loc + " from disk and changed to in-memory " + pem_prop +
+ " string\n");
+
+ if (conf->set(pem_prop, pem, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to set " + pem_prop + ": " + errstr);
+}
+
+/**
+ * @brief Set SSL cert/key using set_ssl_cert() rather than
+ * config string property \p loc_prop (which will be cleared)
+ *
+ * @remark Requires a bunch of SSL_.. env vars to point out where
+ * certs are found. These are set up by trivup.
+ */
+static void conf_location_to_setter(RdKafka::Conf *conf,
+ std::string loc_prop,
+ RdKafka::CertificateType cert_type,
+ RdKafka::CertificateEncoding encoding) {
+ std::string loc;
+ static const std::string encnames[] = {
+ "PKCS#12",
+ "DER",
+ "PEM",
+ };
+
+ /* Clear the config property (e.g., ssl.key.location) */
+ std::string errstr;
+ if (conf->set(loc_prop, "", errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to reset " + loc_prop);
+
+ const char *p;
+ p = test_getenv(envname[cert_type][encoding].c_str(), NULL);
+ if (!p)
+ Test::Fail(
+ "Invalid test environment: "
+ "Missing " +
+ envname[cert_type][encoding] +
+ " env variable: make sure trivup is up to date");
+
+ loc = p;
+
+ Test::Say(tostr() << "Reading " << loc_prop << " file " << loc << " as "
+ << encnames[encoding] << " from env "
+ << envname[cert_type][encoding] << "\n");
+
+ /* Read file */
+ std::ifstream ifs(loc.c_str(), std::ios::binary | std::ios::ate);
+ if (ifs.fail())
+ Test::Fail("Failed to open " + loc + ": " + strerror(errno));
+ int size = (int)ifs.tellg();
+ ifs.seekg(0, std::ifstream::beg);
+ std::vector<char> buffer;
+ buffer.resize(size);
+ ifs.read(buffer.data(), size);
+ ifs.close();
+
+ if (conf->set_ssl_cert(cert_type, encoding, buffer.data(), size, errstr) !=
+ RdKafka::Conf::CONF_OK)
+ Test::Fail(tostr() << "Failed to set " << loc_prop << " from " << loc
+ << " as cert type " << cert_type << " with encoding "
+ << encoding << ": " << errstr << "\n");
+}
+
+
+typedef enum {
+ USE_LOCATION, /* use ssl.X.location */
+ USE_CONF, /* use ssl.X.pem */
+ USE_SETTER, /* use conf->set_ssl_cert(), this supports multiple formats */
+} cert_load_t;
+
+static const std::string load_names[] = {
+ "location",
+ "conf",
+ "setter",
+};
+
+
+static void do_test_verify(const int line,
+ bool verify_ok,
+ cert_load_t load_key,
+ RdKafka::CertificateEncoding key_enc,
+ cert_load_t load_pub,
+ RdKafka::CertificateEncoding pub_enc,
+ cert_load_t load_ca,
+ RdKafka::CertificateEncoding ca_enc) {
+ /*
+ * Create any type of client
+ */
+ std::string teststr = tostr() << line << ": "
+ << "SSL cert verify: verify_ok=" << verify_ok
+ << ", load_key=" << load_names[load_key]
+ << ", load_pub=" << load_names[load_pub]
+ << ", load_ca=" << load_names[load_ca];
+
+ Test::Say(_C_BLU "[ " + teststr + " ]\n" _C_CLR);
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 10);
+
+ std::string val;
+ if (conf->get("ssl.key.location", val) != RdKafka::Conf::CONF_OK ||
+ val.empty()) {
+ Test::Skip("Test requires SSL to be configured\n");
+ delete conf;
+ return;
+ }
+
+ /* Get ssl.key.location, read its contents, and replace with
+ * ssl.key.pem. Same with ssl.certificate.location -> ssl.certificate.pem. */
+ if (load_key == USE_CONF)
+ conf_location_to_pem(conf, "ssl.key.location", "ssl.key.pem",
+ RdKafka::CERT_PRIVATE_KEY);
+ else if (load_key == USE_SETTER)
+ conf_location_to_setter(conf, "ssl.key.location", RdKafka::CERT_PRIVATE_KEY,
+ key_enc);
+
+ if (load_pub == USE_CONF)
+ conf_location_to_pem(conf, "ssl.certificate.location",
+ "ssl.certificate.pem", RdKafka::CERT_PUBLIC_KEY);
+ else if (load_pub == USE_SETTER)
+ conf_location_to_setter(conf, "ssl.certificate.location",
+ RdKafka::CERT_PUBLIC_KEY, pub_enc);
+
+ if (load_ca == USE_CONF)
+ conf_location_to_pem(conf, "ssl.ca.location", "ssl.ca.pem",
+ RdKafka::CERT_CA);
+ else if (load_ca == USE_SETTER)
+ conf_location_to_setter(conf, "ssl.ca.location", RdKafka::CERT_CA, ca_enc);
+
+
+ std::string errstr;
+ conf->set("debug", "security", errstr);
+
+ TestVerifyCb verifyCb(verify_ok);
+ if (conf->set("ssl_cert_verify_cb", &verifyCb, errstr) !=
+ RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to set verifyCb: " + errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create producer: " + errstr);
+ delete conf;
+
+ bool run = true;
+ for (int i = 0; run && i < 10; i++) {
+ p->poll(1000);
+
+ mtx_lock(&verifyCb.lock);
+ if ((verify_ok && verifyCb.cnt > 0) || (!verify_ok && verifyCb.cnt > 3))
+ run = false;
+ mtx_unlock(&verifyCb.lock);
+ }
+
+ mtx_lock(&verifyCb.lock);
+ if (!verifyCb.cnt)
+ Test::Fail("Expected at least one verifyCb invocation");
+ mtx_unlock(&verifyCb.lock);
+
+ /* Retrieving the clusterid allows us to easily check if a
+ * connection could be made. Match this to the expected outcome of
+ * this test. */
+ std::string cluster = p->clusterid(1000);
+
+ if (verify_ok == cluster.empty())
+ Test::Fail("Expected connection to " +
+ (std::string)(verify_ok ? "succeed" : "fail") +
+ ", but got clusterid '" + cluster + "'");
+
+ delete p;
+
+ Test::Say(_C_GRN "[ PASSED: " + teststr + " ]\n" _C_CLR);
+}
+
+
+/**
+ * @brief Verification that some bad combinations of calls behave as expected.
+ * This is simply to verify #2904.
+ */
+static void do_test_bad_calls() {
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ std::string errstr;
+
+ if (conf->set("enable.ssl.certificate.verification", "false", errstr))
+ Test::Fail(errstr);
+
+ if (conf->set("security.protocol", "SSL", errstr))
+ Test::Fail(errstr);
+
+ if (conf->set("ssl.key.password", test_getenv("SSL_password", NULL), errstr))
+ Test::Fail(errstr);
+
+ std::vector<char> certBuffer = read_file(test_getenv(
+ envname[RdKafka::CERT_CA][RdKafka::CERT_ENC_PEM].c_str(), NULL));
+
+ if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM,
+ certBuffer.data(), certBuffer.size(), errstr))
+ Test::Fail(errstr);
+
+ /* Set public-key as CA (over-writing the previous one) */
+ std::vector<char> userBuffer = read_file(test_getenv(
+ envname[RdKafka::CERT_PUBLIC_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL));
+
+ if (conf->set_ssl_cert(RdKafka::CERT_CA, RdKafka::CERT_ENC_PEM,
+ userBuffer.data(), userBuffer.size(), errstr))
+ Test::Fail(errstr);
+
+ std::vector<char> keyBuffer = read_file(test_getenv(
+ envname[RdKafka::CERT_PRIVATE_KEY][RdKafka::CERT_ENC_PEM].c_str(), NULL));
+
+ if (conf->set_ssl_cert(RdKafka::CERT_PRIVATE_KEY, RdKafka::CERT_ENC_PEM,
+ keyBuffer.data(), keyBuffer.size(), errstr))
+ Test::Fail(errstr);
+
+ // Create Kafka producer
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ delete conf;
+ if (producer)
+ Test::Fail("Expected producer creation to fail");
+
+ if (errstr.find("Private key check failed") == std::string::npos)
+ Test::Fail("Expected 'Private key check failed' error, not " + errstr);
+
+ Test::Say("Producer creation failed expectedly: " + errstr + "\n");
+}
+
+extern "C" {
+int main_0097_ssl_verify(int argc, char **argv) {
+ if (!test_check_builtin("ssl")) {
+ Test::Skip("Test requires SSL support\n");
+ return 0;
+ }
+
+ if (!test_getenv("SSL_pkcs", NULL)) {
+ Test::Skip("Test requires SSL_* env-vars set up by trivup\n");
+ return 0;
+ }
+
+
+ do_test_bad_calls();
+
+ do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
+ USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION,
+ RdKafka::CERT_ENC_PEM);
+ do_test_verify(__LINE__, false, USE_LOCATION, RdKafka::CERT_ENC_PEM,
+ USE_LOCATION, RdKafka::CERT_ENC_PEM, USE_LOCATION,
+ RdKafka::CERT_ENC_PEM);
+
+ /* Verify various priv and pub key and CA input formats */
+ do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF,
+ RdKafka::CERT_ENC_PEM, USE_LOCATION, RdKafka::CERT_ENC_PEM);
+ do_test_verify(__LINE__, true, USE_CONF, RdKafka::CERT_ENC_PEM, USE_CONF,
+ RdKafka::CERT_ENC_PEM, USE_CONF, RdKafka::CERT_ENC_PEM);
+ do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PEM, USE_SETTER,
+ RdKafka::CERT_ENC_PEM, USE_SETTER, RdKafka::CERT_ENC_PKCS12);
+ do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
+ USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER,
+ RdKafka::CERT_ENC_DER);
+ do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
+ USE_SETTER, RdKafka::CERT_ENC_DER, USE_SETTER,
+ RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */
+ do_test_verify(__LINE__, true, USE_LOCATION, RdKafka::CERT_ENC_PEM,
+ USE_SETTER, RdKafka::CERT_ENC_DER, USE_CONF,
+ RdKafka::CERT_ENC_PEM); /* env: SSL_all_cas_pem */
+ do_test_verify(__LINE__, true, USE_SETTER, RdKafka::CERT_ENC_PKCS12,
+ USE_SETTER, RdKafka::CERT_ENC_PKCS12, USE_SETTER,
+ RdKafka::CERT_ENC_PKCS12);
+
+ return 0;
+}
+
+
+int main_0097_ssl_verify_local(int argc, char **argv) {
+ if (!test_check_builtin("ssl")) {
+ Test::Skip("Test requires SSL support\n");
+ return 0;
+ }
+
+
+ /* Check that creating a client with an invalid PEM string fails. */
+ const std::string props[] = {"ssl.ca.pem", "ssl.key.pem",
+ "ssl.certificate.pem", ""};
+
+ for (int i = 0; props[i] != ""; i++) {
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ std::string errstr;
+
+ if (conf->set("security.protocol", "SSL", errstr))
+ Test::Fail(errstr);
+ conf->set("debug", "security", errstr);
+ if (conf->set(props[i], "this is \n not a \t PEM!", errstr))
+ Test::Fail("Setting " + props[i] +
+ " to junk should work, "
+ "expecting failure on client creation");
+
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ delete conf;
+ if (producer)
+ Test::Fail("Expected producer creation to fail with " + props[i] +
+ " set to junk");
+ else
+ Test::Say("Failed to create producer with junk " + props[i] +
+ " (as expected): " + errstr + "\n");
+ }
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp
new file mode 100644
index 000000000..1bdb46d0b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0098-consumer-txn.cpp
@@ -0,0 +1,1218 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "testcpp.h"
+
+#if WITH_RAPIDJSON
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include <assert.h>
+#include <sstream>
+#include <string>
+#include <map>
+
+#include <rapidjson/document.h>
+#include <rapidjson/schema.h>
+#include <rapidjson/filereadstream.h>
+#include <rapidjson/stringbuffer.h>
+#include <rapidjson/error/en.h>
+#include <rapidjson/prettywriter.h>
+
+
+/**
+ * @name Consumer Transactions.
+ *
+ * - Uses the TransactionProducerCli Java application to produce messages
+ * that are part of abort and commit transactions in various combinations
+ * and tests that librdkafka consumes them as expected. Refer to
+ * TransactionProducerCli.java for scenarios covered.
+ */
+
+
+class TestEventCb : public RdKafka::EventCb {
+ public:
+ static bool should_capture_stats;
+ static bool has_captured_stats;
+ static int64_t partition_0_hi_offset;
+ static int64_t partition_0_ls_offset;
+ static std::string topic;
+
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_STATS:
+ if (should_capture_stats) {
+ partition_0_hi_offset = -1;
+ partition_0_ls_offset = -1;
+
+ has_captured_stats = true;
+ should_capture_stats = false;
+ char path[256];
+
+ /* Parse JSON to validate */
+ rapidjson::Document d;
+ if (d.Parse(event.str().c_str()).HasParseError())
+ Test::Fail(tostr() << "Failed to parse stats JSON: "
+ << rapidjson::GetParseError_En(d.GetParseError())
+ << " at " << d.GetErrorOffset());
+
+ rd_snprintf(path, sizeof(path), "/topics/%s/partitions/0",
+ topic.c_str());
+
+ rapidjson::Pointer jpath((const char *)path);
+ rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath);
+ if (pp == NULL)
+ return;
+
+ TEST_ASSERT(pp->HasMember("hi_offset"), "hi_offset not found in stats");
+ TEST_ASSERT(pp->HasMember("ls_offset"), "ls_offset not found in stats");
+
+ partition_0_hi_offset = (*pp)["hi_offset"].GetInt();
+ partition_0_ls_offset = (*pp)["ls_offset"].GetInt();
+ }
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ std::cerr << event.str() << "\n";
+ break;
+
+ default:
+ break;
+ }
+ }
+};
+
+bool TestEventCb::should_capture_stats;
+bool TestEventCb::has_captured_stats;
+int64_t TestEventCb::partition_0_hi_offset;
+int64_t TestEventCb::partition_0_ls_offset;
+std::string TestEventCb::topic;
+
+static TestEventCb ex_event_cb;
+
+
+static void execute_java_produce_cli(std::string &bootstrapServers,
+ const std::string &topic,
+ const std::string &testidstr,
+ const char **cmds,
+ size_t cmd_cnt) {
+ const std::string topicCmd = "topic," + topic;
+ const std::string testidCmd = "testid," + testidstr;
+ const char **argv;
+ size_t i = 0;
+
+ argv = (const char **)rd_alloca(sizeof(*argv) * (1 + 1 + 1 + cmd_cnt + 1));
+ argv[i++] = bootstrapServers.c_str();
+ argv[i++] = topicCmd.c_str();
+ argv[i++] = testidCmd.c_str();
+
+ for (size_t j = 0; j < cmd_cnt; j++)
+ argv[i++] = cmds[j];
+
+ argv[i] = NULL;
+
+ int pid = test_run_java("TransactionProducerCli", (const char **)argv);
+ test_waitpid(pid);
+}
+
+static std::vector<RdKafka::Message *>
+consume_messages(RdKafka::KafkaConsumer *c, std::string topic, int partition) {
+ RdKafka::ErrorCode err;
+
+ /* Assign partitions */
+ std::vector<RdKafka::TopicPartition *> parts;
+ parts.push_back(RdKafka::TopicPartition::create(topic, partition));
+ if ((err = c->assign(parts)))
+ Test::Fail("assign failed: " + RdKafka::err2str(err));
+ RdKafka::TopicPartition::destroy(parts);
+
+ Test::Say(tostr() << "Consuming from topic " << topic << " partition "
+ << partition << "\n");
+ std::vector<RdKafka::Message *> result = std::vector<RdKafka::Message *>();
+
+ while (true) {
+ RdKafka::Message *msg = c->consume(tmout_multip(1000));
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ delete msg;
+ continue;
+ case RdKafka::ERR__PARTITION_EOF:
+ delete msg;
+ break;
+ case RdKafka::ERR_NO_ERROR:
+ result.push_back(msg);
+ continue;
+ default:
+ Test::Fail("Error consuming from topic " + topic + ": " + msg->errstr());
+ delete msg;
+ break;
+ }
+ break;
+ }
+
+ Test::Say("Read all messages from topic: " + topic + "\n");
+
+ TestEventCb::should_capture_stats = true;
+
+ /* rely on the test timeout to prevent an infinite loop in
+ * the (unlikely) event that the statistics callback isn't
+ * called. */
+ while (!TestEventCb::has_captured_stats) {
+ RdKafka::Message *msg = c->consume(tmout_multip(500));
+ delete msg;
+ }
+
+ Test::Say("Captured consumer statistics event\n");
+
+ return result;
+}
+
+
+static void delete_messages(std::vector<RdKafka::Message *> &messages) {
+ for (size_t i = 0; i < messages.size(); ++i)
+ delete messages[i];
+}
+
+
+static std::string get_bootstrap_servers() {
+ RdKafka::Conf *conf;
+ std::string bootstrap_servers;
+ Test::conf_init(&conf, NULL, 40);
+ conf->get("bootstrap.servers", bootstrap_servers);
+ delete conf;
+ return bootstrap_servers;
+}
+
+
+static RdKafka::KafkaConsumer *create_consumer(std::string &topic_name,
+ const char *isolation_level) {
+ RdKafka::Conf *conf;
+ std::string errstr;
+
+ Test::conf_init(&conf, NULL, 40);
+ Test::conf_set(conf, "group.id", topic_name);
+ Test::conf_set(conf, "enable.auto.commit", "false");
+ Test::conf_set(conf, "auto.offset.reset", "earliest");
+ Test::conf_set(conf, "enable.partition.eof", "true");
+ Test::conf_set(conf, "isolation.level", isolation_level);
+ Test::conf_set(conf, "statistics.interval.ms", "1000");
+ conf->set("event_cb", &ex_event_cb, errstr);
+ TestEventCb::should_capture_stats = false;
+ TestEventCb::has_captured_stats = false;
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+
+ delete conf;
+
+ return c;
+}
+
+
+static std::vector<std::string> csv_split(const std::string &input) {
+ std::stringstream ss(input);
+ std::vector<std::string> res;
+
+ while (ss.good()) {
+ std::string substr;
+ std::getline(ss, substr, ',');
+ /* Trim */
+ substr.erase(0, substr.find_first_not_of(' '));
+ substr.erase(substr.find_last_not_of(' ') + 1);
+ res.push_back(substr);
+ }
+
+ return res;
+}
+
+
+
+enum TransactionType {
+ TransactionType_None,
+ TransactionType_BeginAbort,
+ TransactionType_BeginCommit,
+ TransactionType_BeginOpen,
+ TransactionType_ContinueAbort,
+ TransactionType_ContinueCommit,
+ TransactionType_ContinueOpen
+};
+
+static TransactionType TransactionType_from_string(std::string str) {
+#define _CHKRET(NAME) \
+ if (!str.compare(#NAME)) \
+ return TransactionType_##NAME
+
+ _CHKRET(None);
+ _CHKRET(BeginAbort);
+ _CHKRET(BeginCommit);
+ _CHKRET(BeginOpen);
+ _CHKRET(ContinueAbort);
+ _CHKRET(ContinueCommit);
+ _CHKRET(ContinueOpen);
+
+ Test::Fail("Unknown TransactionType: " + str);
+
+ return TransactionType_None; /* NOTREACHED */
+}
+
+
+static void txn_producer_makeTestMessages(RdKafka::Producer *producer,
+ const std::string &topic,
+ const std::string &testidstr,
+ int partition,
+ int idStart,
+ int msgcount,
+ TransactionType tt,
+ bool do_flush) {
+ RdKafka::Error *error;
+
+ if (tt != TransactionType_None && tt != TransactionType_ContinueOpen &&
+ tt != TransactionType_ContinueCommit &&
+ tt != TransactionType_ContinueAbort) {
+ error = producer->begin_transaction();
+ if (error) {
+ Test::Fail("begin_transaction() failed: " + error->str());
+ delete error;
+ }
+ }
+
+ for (int i = 0; i < msgcount; i++) {
+ char key[] = {(char)((i + idStart) & 0xff)};
+ char payload[] = {0x10, 0x20, 0x30, 0x40};
+ RdKafka::ErrorCode err;
+
+ err = producer->produce(topic, partition, producer->RK_MSG_COPY, payload,
+ sizeof(payload), key, sizeof(key), 0, NULL);
+ if (err)
+ Test::Fail("produce() failed: " + RdKafka::err2str(err));
+ }
+
+ if (do_flush)
+ producer->flush(-1);
+
+ switch (tt) {
+ case TransactionType_BeginAbort:
+ case TransactionType_ContinueAbort:
+ error = producer->abort_transaction(30 * 1000);
+ if (error) {
+ Test::Fail("abort_transaction() failed: " + error->str());
+ delete error;
+ }
+ break;
+
+ case TransactionType_BeginCommit:
+ case TransactionType_ContinueCommit:
+ error = producer->commit_transaction(30 * 1000);
+ if (error) {
+ Test::Fail("commit_transaction() failed: " + error->str());
+ delete error;
+ }
+ break;
+
+ default:
+ break;
+ }
+}
+
+
+class txnDeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &msg) {
+ switch (msg.err()) {
+ case RdKafka::ERR__PURGE_QUEUE:
+ case RdKafka::ERR__PURGE_INFLIGHT:
+ /* These are expected when transactions are aborted */
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ break;
+
+ default:
+ Test::Fail("Delivery failed: " + msg.errstr());
+ break;
+ }
+ }
+};
+
+
+/**
+ * @brief Transactional producer, performing the commands in \p cmds.
+ * This is the librdkafka counterpart of
+ * java/TransactionProducerCli.java
+ */
+static void txn_producer(const std::string &brokers,
+ const std::string &topic,
+ const std::string &testidstr,
+ const char **cmds,
+ size_t cmd_cnt) {
+ RdKafka::Conf *conf;
+ txnDeliveryReportCb txn_dr;
+
+ Test::conf_init(&conf, NULL, 0);
+ Test::conf_set(conf, "bootstrap.servers", brokers);
+
+
+ std::map<std::string, RdKafka::Producer *> producers;
+
+ for (size_t i = 0; i < cmd_cnt; i++) {
+ std::string cmdstr = std::string(cmds[i]);
+
+ Test::Say(_C_CLR "rdkafka txn producer command: " + cmdstr + "\n");
+
+ std::vector<std::string> cmd = csv_split(cmdstr);
+
+ if (!cmd[0].compare("sleep")) {
+ rd_usleep(atoi(cmd[1].c_str()) * 1000, NULL);
+
+ } else if (!cmd[0].compare("exit")) {
+ break; /* We can't really simulate the Java exit behaviour
+ * from in-process. */
+
+ } else if (cmd[0].find("producer") == 0) {
+ TransactionType txntype = TransactionType_from_string(cmd[4]);
+
+ std::map<std::string, RdKafka::Producer *>::iterator it =
+ producers.find(cmd[0]);
+
+ RdKafka::Producer *producer;
+
+ if (it == producers.end()) {
+ /* Create producer if it doesn't exist */
+ std::string errstr;
+
+ Test::Say(tostr() << "Creating producer " << cmd[0]
+ << " with transactiontype " << txntype << " '"
+ << cmd[4] << "'\n");
+
+ /* Config */
+ Test::conf_set(conf, "enable.idempotence", "true");
+ if (txntype != TransactionType_None)
+ Test::conf_set(conf, "transactional.id",
+ "test-transactional-id-c-" + testidstr + "-" + cmd[0]);
+ else
+ Test::conf_set(conf, "transactional.id", "");
+ Test::conf_set(conf, "linger.ms", "5"); /* ensure batching */
+ conf->set("dr_cb", &txn_dr, errstr);
+
+ /* Create producer */
+ producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer)
+ Test::Fail("Failed to create producer " + cmd[0] + ": " + errstr);
+
+ /* Init transactions if producer is transactional */
+ if (txntype != TransactionType_None) {
+ RdKafka::Error *error = producer->init_transactions(20 * 1000);
+ if (error) {
+ Test::Fail("init_transactions() failed: " + error->str());
+ delete error;
+ }
+ }
+
+
+ producers[cmd[0]] = producer;
+ } else {
+ producer = it->second;
+ }
+
+ txn_producer_makeTestMessages(
+ producer, /* producer */
+ topic, /* topic */
+ testidstr, /* testid */
+ atoi(cmd[1].c_str()), /* partition */
+ (int)strtol(cmd[2].c_str(), NULL, 0), /* idStart */
+ atoi(cmd[3].c_str()), /* msg count */
+ txntype, /* TransactionType */
+ !cmd[5].compare("DoFlush") /* Flush */);
+
+ } else {
+ Test::Fail("Unknown command: " + cmd[0]);
+ }
+ }
+
+ delete conf;
+
+ for (std::map<std::string, RdKafka::Producer *>::iterator it =
+ producers.begin();
+ it != producers.end(); it++)
+ delete it->second;
+}
+
+
+
+static void do_test_consumer_txn_test(bool use_java_producer) {
+ std::string errstr;
+ std::string topic_name;
+ RdKafka::KafkaConsumer *c;
+ std::vector<RdKafka::Message *> msgs;
+ std::string testidstr = test_str_id_generate_tmp();
+
+ std::string bootstrap_servers = get_bootstrap_servers();
+
+ Test::Say(tostr() << _C_BLU "[ Consumer transaction tests using "
+ << (use_java_producer ? "java" : "librdkafka")
+ << " producer with testid " << testidstr << "]\n" _C_CLR);
+
+#define run_producer(CMDS...) \
+ do { \
+ const char *_cmds[] = {CMDS}; \
+ size_t _cmd_cnt = sizeof(_cmds) / sizeof(*_cmds); \
+ if (use_java_producer) \
+ execute_java_produce_cli(bootstrap_servers, topic_name, testidstr, \
+ _cmds, _cmd_cnt); \
+ else \
+ txn_producer(bootstrap_servers, topic_name, testidstr, _cmds, _cmd_cnt); \
+ } while (0)
+
+ if (test_quick) {
+ Test::Say("Skipping consumer_txn tests 0->4 due to quick mode\n");
+ goto test5;
+ }
+
+
+ Test::Say(_C_BLU "Test 0 - basic commit + abort\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, -1, 0x0, 5, BeginCommit, DoFlush",
+ "producer1, -1, 0x10, 5, BeginAbort, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 5,
+ "Consumed unexpected number of messages. "
+ "Expected 5, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+#define expect_msgcnt(msgcnt) \
+ TEST_ASSERT(msgs.size() == msgcnt, "Expected %d messages, got %d", \
+ (int)msgs.size(), msgcnt)
+
+#define expect_key(msgidx, value) \
+ do { \
+ TEST_ASSERT(msgs.size() > msgidx, \
+ "Expected at least %d message(s), only got %d", msgidx + 1, \
+ (int)msgs.size()); \
+ TEST_ASSERT(msgs[msgidx]->key_len() == 1, \
+ "Expected msg #%d key to be of size 1, not %d\n", msgidx, \
+ (int)msgs[msgidx]->key_len()); \
+ TEST_ASSERT(value == (int)msgs[msgidx]->key()->c_str()[0], \
+ "Expected msg #%d key 0x%x, not 0x%x", msgidx, value, \
+ (int)msgs[msgidx]->key()->c_str()[0]); \
+ } while (0)
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ expect_msgcnt(10);
+ expect_key(0, 0x0);
+ expect_key(4, 0x4);
+ expect_key(5, 0x10);
+ expect_key(9, 0x14);
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 0.1\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-0.1", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, -1, 0x0, 5, BeginCommit, DontFlush",
+ "producer1, -1, 0x10, 5, BeginAbort, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 5,
+ "Consumed unexpected number of messages. "
+ "Expected 5, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 10,
+ "Consumed unexpected number of messages. "
+ "Expected 10, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 4 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x10 == msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x14 == msgs[9]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 0.2\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-0.2", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, -1, 0x10, 5, BeginAbort, DoFlush",
+ "producer1, -1, 0x30, 5, BeginCommit, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 5,
+ "Consumed unexpected number of messages. "
+ "Expected 5, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x30 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x34 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 10,
+ "Consumed unexpected number of messages. "
+ "Expected 10, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x30 == msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x34 == msgs[9]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 1 - mixed with non-transactional.\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-1", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+ TestEventCb::topic = topic_name;
+
+ run_producer("producer3, -1, 0x10, 5, None, DoFlush",
+ "producer1, -1, 0x50, 5, BeginCommit, DoFlush",
+ "producer1, -1, 0x80, 5, BeginAbort, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+
+ TEST_ASSERT(TestEventCb::partition_0_ls_offset != -1 &&
+ TestEventCb::partition_0_ls_offset ==
+ TestEventCb::partition_0_hi_offset,
+ "Expected hi_offset to equal ls_offset but "
+ "got hi_offset: %" PRId64 ", ls_offset: %" PRId64,
+ TestEventCb::partition_0_hi_offset,
+ TestEventCb::partition_0_ls_offset);
+
+ TEST_ASSERT(msgs.size() == 10,
+ "Consumed unexpected number of messages. "
+ "Expected 10, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x50 == msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x54 == msgs[9]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+ Test::Say(_C_BLU "Test 1.1\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-1.1", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, -1, 0x30, 5, BeginAbort, DoFlush",
+ "producer3, -1, 0x40, 5, None, DoFlush",
+ "producer1, -1, 0x60, 5, BeginCommit, DoFlush");
+
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 10,
+ "Consumed unexpected number of messages. "
+ "Expected 10, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x40 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x44 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x60 == msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x64 == msgs[9]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 1.2\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-1.2", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, -1, 0x10, 5, BeginCommit, DoFlush",
+ "producer1, -1, 0x20, 5, BeginAbort, DoFlush",
+ "producer3, -1, 0x30, 5, None, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 10,
+ "Consumed unexpected number of messages. "
+ "Expected 10, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 && 0x10 == msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 && 0x14 == msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 && 0x30 == msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[9]->key_len() >= 1 && 0x34 == msgs[9]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 2 - rapid abort / committing.\n" _C_CLR);
+ // note: aborted records never seem to make it to the broker when not flushed.
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-2", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, -1, 0x10, 1, BeginAbort, DontFlush",
+ "producer1, -1, 0x20, 1, BeginCommit, DontFlush",
+ "producer1, -1, 0x30, 1, BeginAbort, DontFlush",
+ "producer1, -1, 0x40, 1, BeginCommit, DontFlush",
+ "producer1, -1, 0x50, 1, BeginAbort, DontFlush",
+ "producer1, -1, 0x60, 1, BeginCommit, DontFlush",
+ "producer1, -1, 0x70, 1, BeginAbort, DontFlush",
+ "producer1, -1, 0x80, 1, BeginCommit, DontFlush",
+ "producer1, -1, 0x90, 1, BeginAbort, DontFlush",
+ "producer1, -1, 0xa0, 1, BeginCommit, DoFlush",
+ "producer3, -1, 0xb0, 1, None, DontFlush",
+ "producer3, -1, 0xc0, 1, None, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 7,
+ "Consumed unexpected number of messages. "
+ "Expected 7, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 &&
+ 0x20 == (unsigned char)msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[1]->key_len() >= 1 &&
+ 0x40 == (unsigned char)msgs[1]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[2]->key_len() >= 1 &&
+ 0x60 == (unsigned char)msgs[2]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[3]->key_len() >= 1 &&
+ 0x80 == (unsigned char)msgs[3]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 &&
+ 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 &&
+ 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[6]->key_len() >= 1 &&
+ 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 2.1\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-2.1", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, -1, 0x10, 1, BeginAbort, DoFlush",
+ "producer1, -1, 0x20, 1, BeginCommit, DoFlush",
+ "producer1, -1, 0x30, 1, BeginAbort, DoFlush",
+ "producer1, -1, 0x40, 1, BeginCommit, DoFlush",
+ "producer1, -1, 0x50, 1, BeginAbort, DoFlush",
+ "producer1, -1, 0x60, 1, BeginCommit, DoFlush",
+ "producer1, -1, 0x70, 1, BeginAbort, DoFlush",
+ "producer1, -1, 0x80, 1, BeginCommit, DoFlush",
+ "producer1, -1, 0x90, 1, BeginAbort, DoFlush",
+ "producer1, -1, 0xa0, 1, BeginCommit, DoFlush",
+ "producer3, -1, 0xb0, 1, None, DoFlush",
+ "producer3, -1, 0xc0, 1, None, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 7,
+ "Consumed unexpected number of messages. "
+ "Expected 7, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 &&
+ 0x20 == (unsigned char)msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[1]->key_len() >= 1 &&
+ 0x40 == (unsigned char)msgs[1]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[2]->key_len() >= 1 &&
+ 0x60 == (unsigned char)msgs[2]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[3]->key_len() >= 1 &&
+ 0x80 == (unsigned char)msgs[3]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 &&
+ 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 &&
+ 0xb0 == (unsigned char)msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[6]->key_len() >= 1 &&
+ 0xc0 == (unsigned char)msgs[6]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 12,
+ "Consumed unexpected number of messages. "
+ "Expected 12, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 &&
+ 0x10 == (unsigned char)msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[1]->key_len() >= 1 &&
+ 0x20 == (unsigned char)msgs[1]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[2]->key_len() >= 1 &&
+ 0x30 == (unsigned char)msgs[2]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[3]->key_len() >= 1 &&
+ 0x40 == (unsigned char)msgs[3]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 &&
+ 0x50 == (unsigned char)msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 &&
+ 0x60 == (unsigned char)msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[6]->key_len() >= 1 &&
+ 0x70 == (unsigned char)msgs[6]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 3 - cross partition (simple).\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-3", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 2, 3);
+
+ run_producer("producer1, 0, 0x10, 3, BeginOpen, DoFlush",
+ "producer1, 1, 0x20, 3, ContinueOpen, DoFlush",
+ "producer1, 0, 0x30, 3, ContinueCommit, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 6,
+ "Consumed unexpected number of messages. "
+ "Expected 6, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+ msgs = consume_messages(c, topic_name, 1);
+ TEST_ASSERT(msgs.size() == 3,
+ "Consumed unexpected number of messages. "
+ "Expected 3, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 6,
+ "Consumed unexpected number of messages. "
+ "Expected 6, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+ msgs = consume_messages(c, topic_name, 1);
+ TEST_ASSERT(msgs.size() == 3,
+ "Consumed unexpected number of messages. "
+ "Expected 3, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 3.1\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-3.1", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 2, 3);
+
+ run_producer("producer1, 0, 0x55, 1, BeginCommit, DoFlush",
+ "producer1, 0, 0x10, 3, BeginOpen, DoFlush",
+ "producer1, 1, 0x20, 3, ContinueOpen, DoFlush",
+ "producer1, 0, 0x30, 3, ContinueAbort, DoFlush",
+ "producer3, 0, 0x00, 1, None, DoFlush",
+ "producer1, 1, 0x44, 1, BeginCommit, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 2,
+ "Consumed unexpected number of messages. "
+ "Expected 2, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 &&
+ 0x55 == (unsigned char)msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[1]->key_len() >= 1 &&
+ 0x00 == (unsigned char)msgs[1]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+ msgs = consume_messages(c, topic_name, 1);
+ TEST_ASSERT(msgs.size() == 1,
+ "Consumed unexpected number of messages. "
+ "Expected 1, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 &&
+ 0x44 == (unsigned char)msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 4 - simultaneous transactions (simple).\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-4", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer3, 0, 0x10, 1, None, DoFlush",
+ "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
+ "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
+ "producer1, 0, 0x40, 3, ContinueCommit, DoFlush",
+ "producer2, 0, 0x50, 3, ContinueAbort, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 7,
+ "Consumed unexpected number of messages. "
+ "Expected 7, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 13,
+ "Consumed unexpected number of messages. "
+ "Expected 13, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 4.1\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-4.1", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer3, 0, 0x10, 1, None, DoFlush",
+ "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
+ "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
+ "producer1, 0, 0x40, 3, ContinueAbort, DoFlush",
+ "producer2, 0, 0x50, 3, ContinueCommit, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 7,
+ "Consumed unexpected number of messages. "
+ "Expected 7, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 13,
+ "Consumed unexpected number of messages. "
+ "Expected 13, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 4.2\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-4.2", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer3, 0, 0x10, 1, None, DoFlush",
+ "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
+ "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
+ "producer1, 0, 0x40, 3, ContinueCommit, DoFlush",
+ "producer2, 0, 0x50, 3, ContinueCommit, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 13,
+ "Consumed unexpected number of messages. "
+ "Expected 7, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 13,
+ "Consumed unexpected number of messages. "
+ "Expected 13, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 4.3\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-4.3", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer3, 0, 0x10, 1, None, DoFlush",
+ "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
+ "producer2, 0, 0x30, 3, BeginOpen, DoFlush",
+ "producer1, 0, 0x40, 3, ContinueAbort, DoFlush",
+ "producer2, 0, 0x50, 3, ContinueAbort, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 1,
+ "Consumed unexpected number of messages. "
+ "Expected 7, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+ c->close();
+ delete c;
+
+ c = create_consumer(topic_name, "READ_UNCOMMITTED");
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 13,
+ "Consumed unexpected number of messages. "
+ "Expected 13, got: %d",
+ (int)msgs.size());
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+
+ Test::Say(_C_BLU "Test 5 - split transaction across message sets.\n" _C_CLR);
+
+test5:
+ topic_name = Test::mk_topic_name("0098-consumer_txn-5", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+
+ run_producer("producer1, 0, 0x10, 2, BeginOpen, DontFlush", "sleep,200",
+ "producer1, 0, 0x20, 2, ContinueAbort, DontFlush",
+ "producer1, 0, 0x30, 2, BeginOpen, DontFlush", "sleep,200",
+ "producer1, 0, 0x40, 2, ContinueCommit, DontFlush",
+ "producer1, 0, 0x50, 2, BeginOpen, DontFlush", "sleep,200",
+ "producer1, 0, 0x60, 2, ContinueAbort, DontFlush",
+ "producer1, 0, 0xa0, 2, BeginOpen, DontFlush", "sleep,200",
+ "producer1, 0, 0xb0, 2, ContinueCommit, DontFlush",
+ "producer3, 0, 0x70, 1, None, DoFlush");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 9,
+ "Consumed unexpected number of messages. "
+ "Expected 9, got: %d",
+ (int)msgs.size());
+ TEST_ASSERT(msgs[0]->key_len() >= 1 &&
+ 0x30 == (unsigned char)msgs[0]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[1]->key_len() >= 1 &&
+ 0x31 == (unsigned char)msgs[1]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[2]->key_len() >= 1 &&
+ 0x40 == (unsigned char)msgs[2]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[3]->key_len() >= 1 &&
+ 0x41 == (unsigned char)msgs[3]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[4]->key_len() >= 1 &&
+ 0xa0 == (unsigned char)msgs[4]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[5]->key_len() >= 1 &&
+ 0xa1 == (unsigned char)msgs[5]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[6]->key_len() >= 1 &&
+ 0xb0 == (unsigned char)msgs[6]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[7]->key_len() >= 1 &&
+ 0xb1 == (unsigned char)msgs[7]->key()->c_str()[0],
+ "Unexpected key");
+ TEST_ASSERT(msgs[8]->key_len() >= 1 &&
+ 0x70 == (unsigned char)msgs[8]->key()->c_str()[0],
+ "Unexpected key");
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+
+
+ Test::Say(_C_BLU "Test 6 - transaction left open\n" _C_CLR);
+
+ topic_name = Test::mk_topic_name("0098-consumer_txn-0", 1);
+ c = create_consumer(topic_name, "READ_COMMITTED");
+ Test::create_topic(c, topic_name.c_str(), 1, 3);
+ TestEventCb::topic = topic_name;
+
+ run_producer("producer3, 0, 0x10, 1, None, DoFlush",
+ "producer1, 0, 0x20, 3, BeginOpen, DoFlush",
+ // prevent abort control message from being written.
+ "exit,0");
+
+ msgs = consume_messages(c, topic_name, 0);
+ TEST_ASSERT(msgs.size() == 1,
+ "Consumed unexpected number of messages. "
+ "Expected 1, got: %d",
+ (int)msgs.size());
+
+ TEST_ASSERT(TestEventCb::partition_0_ls_offset + 3 ==
+ TestEventCb::partition_0_hi_offset,
+ "Expected hi_offset to be 3 greater than ls_offset "
+ "but got hi_offset: %" PRId64 ", ls_offset: %" PRId64,
+ TestEventCb::partition_0_hi_offset,
+ TestEventCb::partition_0_ls_offset);
+
+ delete_messages(msgs);
+
+ Test::delete_topic(c, topic_name.c_str());
+
+ c->close();
+ delete c;
+}
+#endif
+
+
+extern "C" {
+int main_0098_consumer_txn(int argc, char **argv) {
+ if (test_needs_auth()) {
+ Test::Skip(
+ "Authentication or security configuration "
+ "required on client: not supported in "
+ "Java transactional producer: skipping tests\n");
+ return 0;
+ }
+#if WITH_RAPIDJSON
+ do_test_consumer_txn_test(true /* with java producer */);
+ do_test_consumer_txn_test(false /* with librdkafka producer */);
+#else
+ Test::Skip("RapidJSON >=1.1.0 not available\n");
+#endif
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c
new file mode 100644
index 000000000..902849fb2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0099-commit_metadata.c
@@ -0,0 +1,189 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+static RD_UNUSED void
+print_toppar_list(const rd_kafka_topic_partition_list_t *list) {
+ int i;
+
+ TEST_SAY("List count: %d\n", list->cnt);
+
+ for (i = 0; i < list->cnt; i++) {
+ const rd_kafka_topic_partition_t *a = &list->elems[i];
+
+ TEST_SAY(
+ " #%d/%d: "
+ "%s [%" PRId32 "] @ %" PRId64
+ ": "
+ "(%" PRIusz ") \"%*s\"\n",
+ i, list->cnt, a->topic, a->partition, a->offset,
+ a->metadata_size, (int)a->metadata_size,
+ (const char *)a->metadata);
+ }
+}
+
+
+static void compare_toppar_lists(const rd_kafka_topic_partition_list_t *lista,
+ const rd_kafka_topic_partition_list_t *listb) {
+ int i;
+
+ TEST_ASSERT(lista->cnt == listb->cnt,
+ "different list lengths: %d != %d", lista->cnt, listb->cnt);
+
+ for (i = 0; i < lista->cnt; i++) {
+ const rd_kafka_topic_partition_t *a = &lista->elems[i];
+ const rd_kafka_topic_partition_t *b = &listb->elems[i];
+
+ if (a->offset != b->offset ||
+ a->metadata_size != b->metadata_size ||
+ memcmp(a->metadata, b->metadata, a->metadata_size))
+ TEST_FAIL_LATER(
+ "Lists did not match at element %d/%d:\n"
+ " a: %s [%" PRId32 "] @ %" PRId64
+ ": "
+ "(%" PRIusz
+ ") \"%*s\"\n"
+ " b: %s [%" PRId32 "] @ %" PRId64
+ ": "
+ "(%" PRIusz ") \"%*s\"",
+ i, lista->cnt, a->topic, a->partition, a->offset,
+ a->metadata_size, (int)a->metadata_size,
+ (const char *)a->metadata, b->topic, b->partition,
+ b->offset, b->metadata_size, (int)b->metadata_size,
+ (const char *)b->metadata);
+ }
+
+ TEST_LATER_CHECK();
+}
+
+
+static int commit_cb_cnt = 0;
+
+static void offset_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *list,
+ void *opaque) {
+ commit_cb_cnt++;
+ TEST_ASSERT(!err, "offset_commit_cb failure: %s",
+ rd_kafka_err2str(err));
+}
+
+
+static void
+commit_metadata(const char *group_id,
+ const rd_kafka_topic_partition_list_t *toppar_to_commit) {
+ rd_kafka_resp_err_t err;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+
+ test_conf_init(&conf, NULL, 20 /*timeout*/);
+
+ test_conf_set(conf, "group.id", group_id);
+
+ rd_kafka_conf_set_offset_commit_cb(conf, offset_commit_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ TEST_SAY("Committing:\n");
+ print_toppar_list(toppar_to_commit);
+
+ err = rd_kafka_commit(rk, toppar_to_commit, 0);
+ TEST_ASSERT(!err, "rd_kafka_commit failed: %s", rd_kafka_err2str(err));
+
+ while (commit_cb_cnt == 0)
+ rd_kafka_poll(rk, 1000);
+
+ rd_kafka_destroy(rk);
+}
+
+
+static void
+get_committed_metadata(const char *group_id,
+ const rd_kafka_topic_partition_list_t *toppar_to_check,
+ const rd_kafka_topic_partition_list_t *expected_toppar) {
+ rd_kafka_resp_err_t err;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_partition_list_t *committed_toppar;
+
+ test_conf_init(&conf, NULL, 20 /*timeout*/);
+
+ test_conf_set(conf, "group.id", group_id);
+
+ committed_toppar = rd_kafka_topic_partition_list_copy(toppar_to_check);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ err = rd_kafka_committed(rk, committed_toppar, tmout_multip(5000));
+ TEST_ASSERT(!err, "rd_kafka_committed failed: %s",
+ rd_kafka_err2str(err));
+
+ compare_toppar_lists(committed_toppar, expected_toppar);
+
+ rd_kafka_topic_partition_list_destroy(committed_toppar);
+
+ rd_kafka_destroy(rk);
+}
+
+int main_0099_commit_metadata(int argc, char **argv) {
+ rd_kafka_topic_partition_list_t *origin_toppar;
+ rd_kafka_topic_partition_list_t *expected_toppar;
+ const char *topic = test_mk_topic_name("0099-commit_metadata", 0);
+ char group_id[16];
+
+ test_conf_init(NULL, NULL, 20 /*timeout*/);
+
+ test_str_id_generate(group_id, sizeof(group_id));
+
+ test_create_topic(NULL, topic, 1, 1);
+
+ origin_toppar = rd_kafka_topic_partition_list_new(1);
+
+ rd_kafka_topic_partition_list_add(origin_toppar, topic, 0);
+
+ expected_toppar = rd_kafka_topic_partition_list_copy(origin_toppar);
+
+ expected_toppar->elems[0].offset = 42;
+ expected_toppar->elems[0].metadata = rd_strdup("Hello world!");
+ expected_toppar->elems[0].metadata_size =
+ strlen(expected_toppar->elems[0].metadata);
+
+ get_committed_metadata(group_id, origin_toppar, origin_toppar);
+
+ commit_metadata(group_id, expected_toppar);
+
+ get_committed_metadata(group_id, origin_toppar, expected_toppar);
+
+ rd_kafka_topic_partition_list_destroy(origin_toppar);
+ rd_kafka_topic_partition_list_destroy(expected_toppar);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp
new file mode 100644
index 000000000..a34ccac98
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0100-thread_interceptors.cpp
@@ -0,0 +1,195 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include "testcpp.h"
+
+extern "C" {
+#include "rdkafka.h" /* For interceptor interface */
+#include "../src/tinycthread.h" /* For mutexes */
+}
+
+class myThreadCb {
+ public:
+ myThreadCb() : startCnt_(0), exitCnt_(0) {
+ mtx_init(&lock_, mtx_plain);
+ }
+ ~myThreadCb() {
+ mtx_destroy(&lock_);
+ }
+ int startCount() {
+ int cnt;
+ mtx_lock(&lock_);
+ cnt = startCnt_;
+ mtx_unlock(&lock_);
+ return cnt;
+ }
+ int exitCount() {
+ int cnt;
+ mtx_lock(&lock_);
+ cnt = exitCnt_;
+ mtx_unlock(&lock_);
+ return cnt;
+ }
+ virtual void thread_start_cb(const char *threadname) {
+ Test::Say(tostr() << "Started thread: " << threadname << "\n");
+ mtx_lock(&lock_);
+ startCnt_++;
+ mtx_unlock(&lock_);
+ }
+ virtual void thread_exit_cb(const char *threadname) {
+ Test::Say(tostr() << "Exiting from thread: " << threadname << "\n");
+ mtx_lock(&lock_);
+ exitCnt_++;
+ mtx_unlock(&lock_);
+ }
+
+ private:
+ int startCnt_;
+ int exitCnt_;
+ mtx_t lock_;
+};
+
+
+/**
+ * @brief C to C++ callback trampoline.
+ */
+static rd_kafka_resp_err_t on_thread_start_trampoline(
+ rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type,
+ const char *threadname,
+ void *ic_opaque) {
+ myThreadCb *threadcb = (myThreadCb *)ic_opaque;
+
+ Test::Say(tostr() << "on_thread_start(" << thread_type << ", " << threadname
+ << ") called\n");
+
+ threadcb->thread_start_cb(threadname);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief C to C++ callback trampoline.
+ */
+static rd_kafka_resp_err_t on_thread_exit_trampoline(
+ rd_kafka_t *rk,
+ rd_kafka_thread_type_t thread_type,
+ const char *threadname,
+ void *ic_opaque) {
+ myThreadCb *threadcb = (myThreadCb *)ic_opaque;
+
+ Test::Say(tostr() << "on_thread_exit(" << thread_type << ", " << threadname
+ << ") called\n");
+
+ threadcb->thread_exit_cb(threadname);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief This interceptor is called when a new client instance is created
+ * prior to any threads being created.
+ * We use it to set up the instance's thread interceptors.
+ */
+static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ Test::Say("on_new() interceptor called\n");
+ rd_kafka_interceptor_add_on_thread_start(
+ rk, "test:0100", on_thread_start_trampoline, ic_opaque);
+ rd_kafka_interceptor_add_on_thread_exit(rk, "test:0100",
+ on_thread_exit_trampoline, ic_opaque);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief The on_conf_dup() interceptor let's use add the on_new interceptor
+ * in case the config object is copied, since interceptors are not
+ * automatically copied.
+ */
+static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf,
+ const rd_kafka_conf_t *old_conf,
+ size_t filter_cnt,
+ const char **filter,
+ void *ic_opaque) {
+ Test::Say("on_conf_dup() interceptor called\n");
+ return rd_kafka_conf_interceptor_add_on_new(new_conf, "test:0100", on_new,
+ ic_opaque);
+}
+
+
+
+static void test_thread_cbs() {
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ std::string errstr;
+ rd_kafka_conf_t *c_conf;
+ myThreadCb my_threads;
+
+ Test::conf_set(conf, "bootstrap.servers", "127.0.0.1:1");
+
+ /* Interceptors are not supported in the C++ API, instead use the C API:
+ * 1. Extract the C conf_t object
+ * 2. Set up an on_new() interceptor
+ * 3. Set up an on_conf_dup() interceptor to add interceptors in the
+ * case the config object is copied (which the C++ Conf always does).
+ * 4. In the on_new() interceptor, add the thread interceptors. */
+ c_conf = conf->c_ptr_global();
+ rd_kafka_conf_interceptor_add_on_new(c_conf, "test:0100", on_new,
+ &my_threads);
+ rd_kafka_conf_interceptor_add_on_conf_dup(c_conf, "test:0100", on_conf_dup,
+ &my_threads);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ p->poll(500);
+ delete conf;
+ delete p;
+
+ Test::Say(tostr() << my_threads.startCount() << " thread start calls, "
+ << my_threads.exitCount() << " thread exit calls seen\n");
+
+ /* 3 = rdkafka main thread + internal broker + bootstrap broker */
+ if (my_threads.startCount() < 3)
+ Test::Fail("Did not catch enough thread start callback calls");
+ if (my_threads.exitCount() < 3)
+ Test::Fail("Did not catch enough thread exit callback calls");
+ if (my_threads.startCount() != my_threads.exitCount())
+ Test::Fail("Did not catch same number of start and exit callback calls");
+}
+
+
+extern "C" {
+int main_0100_thread_interceptors(int argc, char **argv) {
+ test_thread_cbs();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp
new file mode 100644
index 000000000..342ec4f8f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0101-fetch-from-follower.cpp
@@ -0,0 +1,446 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "testcpp.h"
+
+#if WITH_RAPIDJSON
+
+#include <iostream>
+#include <cstring>
+#include <cstdlib>
+#include <assert.h>
+#include <sstream>
+#include <string>
+#include <map>
+#include <set>
+#include "rdkafka.h"
+
+#include <rapidjson/document.h>
+#include <rapidjson/schema.h>
+#include <rapidjson/filereadstream.h>
+#include <rapidjson/stringbuffer.h>
+#include <rapidjson/error/en.h>
+#include <rapidjson/prettywriter.h>
+
+
+/**
+ * @brief A basic test of fetch from follower funtionality
+ * - produces a bunch of messages to a replicated topic.
+ * - configure the consumer such that `client.rack` is different from the
+ * broker's `broker.rack` (and use
+ * org.apache.kafka.common.replica.RackAwareReplicaSelector).
+ * - consume the messages, and check they are as expected.
+ * - use rxbytes from the statistics event to confirm that
+ * the messages were retrieved from the replica broker (not the
+ * leader).
+ */
+
+
+#define test_assert(cond, msg) \
+ do { \
+ if (!(cond)) \
+ Test::Say(msg); \
+ } while (0)
+
+
+class TestEvent2Cb : public RdKafka::EventCb {
+ public:
+ static bool should_capture_stats;
+ static bool has_captured_stats;
+ static std::map<int32_t, int64_t> rxbytes;
+
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_LOG:
+ Test::Say(event.str() + "\n");
+ break;
+ case RdKafka::Event::EVENT_STATS:
+ if (should_capture_stats) {
+ rapidjson::Document d;
+ if (d.Parse(event.str().c_str()).HasParseError())
+ Test::Fail(tostr() << "Failed to parse stats JSON: "
+ << rapidjson::GetParseError_En(d.GetParseError())
+ << " at " << d.GetErrorOffset());
+
+ /* iterate over brokers. */
+ rapidjson::Pointer jpath((const char *)"/brokers");
+ rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath);
+ if (pp == NULL)
+ return;
+
+ for (rapidjson::Value::ConstMemberIterator itr = pp->MemberBegin();
+ itr != pp->MemberEnd(); ++itr) {
+ std::string broker_name = itr->name.GetString();
+ size_t broker_id_idx = broker_name.rfind('/');
+ if (broker_id_idx == (size_t)-1)
+ continue;
+ std::string broker_id = broker_name.substr(
+ broker_id_idx + 1, broker_name.size() - broker_id_idx - 1);
+
+ int64_t broker_rxbytes =
+ itr->value.FindMember("rxbytes")->value.GetInt64();
+ rxbytes[atoi(broker_id.c_str())] = broker_rxbytes;
+ }
+
+ has_captured_stats = true;
+ break;
+ }
+ default:
+ break;
+ }
+ }
+};
+
+bool TestEvent2Cb::should_capture_stats;
+bool TestEvent2Cb::has_captured_stats;
+std::map<int32_t, int64_t> TestEvent2Cb::rxbytes;
+static TestEvent2Cb ex_event_cb;
+
+
+static void get_brokers_info(std::string &topic_str,
+ int32_t *leader,
+ std::vector<int> &brokers) {
+ std::string errstr;
+ RdKafka::ErrorCode err;
+ class RdKafka::Metadata *metadata;
+
+ /* Determine the ids of the brokers that the partition has replicas
+ * on and which one of those is the leader.
+ */
+ RdKafka::Conf *pConf;
+ Test::conf_init(&pConf, NULL, 10);
+ RdKafka::Producer *p = RdKafka::Producer::create(pConf, errstr);
+ delete pConf;
+ test_assert(p, tostr() << "Failed to create producer: " << errstr);
+
+ RdKafka::Topic *topic = RdKafka::Topic::create(p, topic_str, NULL, errstr);
+ test_assert(topic, tostr() << "Failed to create topic: " << errstr);
+
+ err = p->metadata(0, topic, &metadata, tmout_multip(5000));
+ test_assert(
+ err == RdKafka::ERR_NO_ERROR,
+ tostr() << "%% Failed to acquire metadata: " << RdKafka::err2str(err));
+
+ test_assert(metadata->topics()->size() == 1,
+ tostr() << "expecting metadata for exactly one topic. "
+ << "have metadata for " << metadata->topics()->size()
+ << "topics");
+
+ RdKafka::Metadata::TopicMetadataIterator topicMetadata =
+ metadata->topics()->begin();
+ RdKafka::TopicMetadata::PartitionMetadataIterator partitionMetadata =
+ (*topicMetadata)->partitions()->begin();
+
+ *leader = (*partitionMetadata)->leader();
+
+ size_t idx = 0;
+ RdKafka::PartitionMetadata::ReplicasIterator replicasIterator;
+ for (replicasIterator = (*partitionMetadata)->replicas()->begin();
+ replicasIterator != (*partitionMetadata)->replicas()->end();
+ ++replicasIterator) {
+ brokers.push_back(*replicasIterator);
+ idx++;
+ }
+
+ delete metadata;
+ delete topic;
+ delete p;
+}
+
+
+/**
+ * @brief Wait for up to \p tmout for any type of admin result.
+ * @returns the event
+ */
+rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
+ rd_kafka_event_type_t evtype,
+ int tmout) {
+ rd_kafka_event_t *rkev;
+
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout);
+ if (!rkev)
+ Test::Fail(tostr() << "Timed out waiting for admin result (" << evtype
+ << ")\n");
+
+ if (rd_kafka_event_type(rkev) == evtype)
+ return rkev;
+
+ if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) {
+ Test::Say(tostr() << "Received error event while waiting for " << evtype
+ << ": " << rd_kafka_event_error_string(rkev)
+ << ": ignoring");
+ continue;
+ }
+
+ test_assert(rd_kafka_event_type(rkev) == evtype,
+ tostr() << "Expected event type " << evtype << ", got "
+ << rd_kafka_event_type(rkev) << " ("
+ << rd_kafka_event_name(rkev) << ")");
+ }
+
+ return NULL;
+}
+
+
+/**
+ * @returns the number of broker.rack values configured across all brokers.
+ */
+static int get_broker_rack_count(std::vector<int> &replica_ids) {
+ std::string errstr;
+ RdKafka::Conf *pConf;
+ Test::conf_init(&pConf, NULL, 10);
+ RdKafka::Producer *p = RdKafka::Producer::create(pConf, errstr);
+ delete pConf;
+
+ rd_kafka_queue_t *mainq = rd_kafka_queue_get_main(p->c_ptr());
+
+ std::set<std::string> racks;
+ for (size_t i = 0; i < replica_ids.size(); ++i) {
+ std::string name = tostr() << replica_ids[i];
+
+ rd_kafka_ConfigResource_t *config =
+ rd_kafka_ConfigResource_new(RD_KAFKA_RESOURCE_BROKER, &name[0]);
+
+ rd_kafka_AdminOptions_t *options;
+ char cerrstr[128];
+ options = rd_kafka_AdminOptions_new(p->c_ptr(), RD_KAFKA_ADMIN_OP_ANY);
+ rd_kafka_resp_err_t err = rd_kafka_AdminOptions_set_request_timeout(
+ options, 10000, cerrstr, sizeof(cerrstr));
+ test_assert(!err, cerrstr);
+
+ rd_kafka_DescribeConfigs(p->c_ptr(), &config, 1, options, mainq);
+ rd_kafka_ConfigResource_destroy(config);
+ rd_kafka_AdminOptions_destroy(options);
+ rd_kafka_event_t *rkev = test_wait_admin_result(
+ mainq, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, 5000);
+
+ const rd_kafka_DescribeConfigs_result_t *res =
+ rd_kafka_event_DescribeConfigs_result(rkev);
+ test_assert(res, "expecting describe config results to be not NULL");
+
+ err = rd_kafka_event_error(rkev);
+ const char *errstr2 = rd_kafka_event_error_string(rkev);
+ test_assert(!err, tostr() << "Expected success, not "
+ << rd_kafka_err2name(err) << ": " << errstr2);
+
+ size_t rconfig_cnt;
+ const rd_kafka_ConfigResource_t **rconfigs =
+ rd_kafka_DescribeConfigs_result_resources(res, &rconfig_cnt);
+ test_assert(rconfig_cnt == 1,
+ tostr() << "Expecting 1 resource, got " << rconfig_cnt);
+
+ err = rd_kafka_ConfigResource_error(rconfigs[0]);
+ errstr2 = rd_kafka_ConfigResource_error_string(rconfigs[0]);
+
+ size_t entry_cnt;
+ const rd_kafka_ConfigEntry_t **entries =
+ rd_kafka_ConfigResource_configs(rconfigs[0], &entry_cnt);
+
+ for (size_t j = 0; j < entry_cnt; ++j) {
+ const rd_kafka_ConfigEntry_t *e = entries[j];
+ const char *cname = rd_kafka_ConfigEntry_name(e);
+ if (!strcmp(cname, "broker.rack")) {
+ const char *val = rd_kafka_ConfigEntry_value(e)
+ ? rd_kafka_ConfigEntry_value(e)
+ : "(NULL)";
+ racks.insert(std::string(val));
+ }
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+
+ rd_kafka_queue_destroy(mainq);
+ delete p;
+
+ return (int)racks.size();
+}
+
+
+static void do_fff_test(void) {
+ /* Produce some messages to a single partition topic
+ * with 3 replicas.
+ */
+ int msgcnt = 1000;
+ const int msgsize = 100;
+ std::string topic_str = Test::mk_topic_name("0101-fetch-from-follower", 1);
+ test_create_topic(NULL, topic_str.c_str(), 1, 3);
+ test_produce_msgs_easy_size(topic_str.c_str(), 0, 0, msgcnt, msgsize);
+
+ int leader_id;
+ std::vector<int> replica_ids;
+ get_brokers_info(topic_str, &leader_id, replica_ids);
+ test_assert(replica_ids.size() == 3,
+ tostr() << "expecting three replicas, but " << replica_ids.size()
+ << " were reported.");
+ Test::Say(tostr() << topic_str << " leader id: " << leader_id
+ << ", all replica ids: [" << replica_ids[0] << ", "
+ << replica_ids[1] << ", " << replica_ids[2] << "]\n");
+
+ if (get_broker_rack_count(replica_ids) != 3) {
+ Test::Skip("unexpected broker.rack configuration: skipping test.\n");
+ return;
+ }
+
+ /* arrange for the consumer's client.rack to align with a broker that is not
+ * the leader. */
+ int client_rack_id = -1;
+ size_t i;
+ for (i = 0; i < replica_ids.size(); ++i) {
+ if (replica_ids[i] != leader_id) {
+ client_rack_id = replica_ids[i];
+ break;
+ }
+ }
+
+ std::string client_rack = tostr() << "RACK" << client_rack_id;
+ Test::Say("client.rack: " + client_rack + "\n");
+
+ std::string errstr;
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 10);
+ Test::conf_set(conf, "group.id", topic_str);
+ Test::conf_set(conf, "auto.offset.reset", "earliest");
+ Test::conf_set(conf, "enable.auto.commit", "false");
+ Test::conf_set(conf, "statistics.interval.ms", "1000");
+ conf->set("event_cb", &ex_event_cb, errstr);
+ Test::conf_set(conf, "client.rack", client_rack);
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ test_assert(c, "Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /* Subscribe */
+ std::vector<std::string> topics;
+ topics.push_back(topic_str);
+ RdKafka::ErrorCode err;
+ if ((err = c->subscribe(topics)))
+ Test::Fail("subscribe failed: " + RdKafka::err2str(err));
+
+ /* Start consuming */
+ Test::Say("Consuming topic " + topic_str + "\n");
+ int cnt = 0;
+ while (cnt < msgcnt) {
+ RdKafka::Message *msg = c->consume(tmout_multip(1000));
+
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ break;
+
+ case RdKafka::ERR_NO_ERROR: {
+ test_assert(msg->len() == 100, "expecting message value size to be 100");
+ char *cnt_str_start_ptr = strstr((char *)msg->payload(), "msg=") + 4;
+ test_assert(cnt_str_start_ptr, "expecting 'msg=' in message payload");
+ char *cnt_str_end_ptr = strstr(cnt_str_start_ptr, "\n");
+ test_assert(cnt_str_start_ptr,
+ "expecting '\n' following 'msg=' in message payload");
+ *cnt_str_end_ptr = '\0';
+ int msg_cnt = atoi(cnt_str_start_ptr);
+ test_assert(msg_cnt == cnt, "message consumed out of order");
+ cnt++;
+ } break;
+
+ default:
+ Test::Fail("Consume error: " + msg->errstr());
+ break;
+ }
+
+ delete msg;
+ }
+
+ /* rely on the test timeout to prevent an infinite loop in
+ * the (unlikely) event that the statistics callback isn't
+ * called. */
+ Test::Say("Capturing rxbytes statistics\n");
+ TestEvent2Cb::should_capture_stats = true;
+ while (!TestEvent2Cb::has_captured_stats) {
+ RdKafka::Message *msg = c->consume(tmout_multip(500));
+ delete msg;
+ }
+
+ for (i = 0; i < replica_ids.size(); ++i)
+ Test::Say(
+ tostr() << _C_YEL << "rxbytes for replica on broker " << replica_ids[i]
+ << ": " << TestEvent2Cb::rxbytes[replica_ids[i]]
+ << (replica_ids[i] == leader_id ? " (leader)" : "")
+ << (replica_ids[i] == client_rack_id ? " (preferred replica)"
+ : "")
+ << "\n");
+
+ for (i = 0; i < replica_ids.size(); ++i)
+ if (replica_ids[i] != client_rack_id)
+ test_assert(
+ TestEvent2Cb::rxbytes[replica_ids[i]] <
+ TestEvent2Cb::rxbytes[client_rack_id],
+ "rxbytes was not highest on broker corresponding to client.rack.");
+
+ test_assert(
+ TestEvent2Cb::rxbytes[client_rack_id] > msgcnt * msgsize,
+ tostr() << "expecting rxbytes of client.rack broker to be at least "
+ << msgcnt * msgsize << " but it was "
+ << TestEvent2Cb::rxbytes[client_rack_id]);
+
+ Test::Say("Done\n");
+
+ // Manual test 1:
+ // - change the lease period from 5 minutes to 5 seconds (modify
+ // rdkafka_partition.c)
+ // - change the max lease grant period from 1 minute to 10 seconds (modify
+ // rdkafka_broker.c)
+ // - add infinite consume loop to the end of this test.
+ // - observe:
+ // - the partition gets delegated to the preferred replica.
+ // - the messages get consumed.
+ // - the lease expires.
+ // - the partition is reverted to the leader.
+ // - the toppar is backed off, and debug message noting the faster than
+ // expected delegation to a replica.
+
+ // Manual test 2:
+ // - same modifications as above.
+ // - add Test::conf_set(conf, "topic.metadata.refresh.interval.ms", "3000");
+ // - observe:
+ // - that metadata being periodically received and not interfering with
+ // anything.
+
+ c->close();
+ delete c;
+}
+#endif
+
+extern "C" {
+int main_0101_fetch_from_follower(int argc, char **argv) {
+#if WITH_RAPIDJSON
+ do_fff_test();
+#else
+ Test::Skip("RapidJSON >=1.1.0 not available\n");
+#endif
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c
new file mode 100644
index 000000000..231a09065
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0102-static_group_rebalance.c
@@ -0,0 +1,535 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * @name KafkaConsumer static membership tests
+ *
+ * Runs two consumers subscribing to multiple topics simulating various
+ * rebalance scenarios with static group membership enabled.
+ */
+
+#define _CONSUMER_CNT 2
+
+typedef struct _consumer_s {
+ rd_kafka_t *rk;
+ test_msgver_t *mv;
+ int64_t assigned_at;
+ int64_t revoked_at;
+ int partition_cnt;
+ rd_kafka_resp_err_t expected_rb_event;
+ int curr_line;
+} _consumer_t;
+
+
+/**
+ * @brief Call poll until a rebalance has been triggered
+ */
+static int static_member_wait_rebalance0(int line,
+ _consumer_t *c,
+ int64_t start,
+ int64_t *target,
+ int timeout_ms) {
+ int64_t tmout = test_clock() + (timeout_ms * 1000);
+ test_timing_t t_time;
+
+ c->curr_line = line;
+
+ TEST_SAY("line %d: %s awaiting %s event\n", line, rd_kafka_name(c->rk),
+ rd_kafka_err2name(c->expected_rb_event));
+
+ TIMING_START(&t_time, "wait_rebalance");
+ while (timeout_ms < 0 ? 1 : test_clock() <= tmout) {
+ if (*target > start) {
+ c->curr_line = 0;
+ return 1;
+ }
+ test_consumer_poll_once(c->rk, c->mv, 1000);
+ }
+ TIMING_STOP(&t_time);
+
+ c->curr_line = 0;
+
+ TEST_SAY("line %d: %s timed out awaiting %s event\n", line,
+ rd_kafka_name(c->rk), rd_kafka_err2name(c->expected_rb_event));
+
+ return 0;
+}
+
+#define static_member_expect_rebalance(C, START, TARGET, TIMEOUT_MS) \
+ do { \
+ if (!static_member_wait_rebalance0(__LINE__, C, START, TARGET, \
+ TIMEOUT_MS)) \
+ TEST_FAIL("%s: timed out waiting for %s event", \
+ rd_kafka_name((C)->rk), \
+ rd_kafka_err2name((C)->expected_rb_event)); \
+ } while (0)
+
+#define static_member_wait_rebalance(C, START, TARGET, TIMEOUT_MS) \
+ static_member_wait_rebalance0(__LINE__, C, START, TARGET, TIMEOUT_MS)
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ _consumer_t *c = opaque;
+
+ TEST_ASSERT(c->expected_rb_event == err,
+ "line %d: %s: Expected rebalance event %s got %s\n",
+ c->curr_line, rd_kafka_name(rk),
+ rd_kafka_err2name(c->expected_rb_event),
+ rd_kafka_err2name(err));
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ TEST_SAY("line %d: %s Assignment (%d partition(s)):\n",
+ c->curr_line, rd_kafka_name(rk), parts->cnt);
+ test_print_partition_list(parts);
+
+ c->partition_cnt = parts->cnt;
+ c->assigned_at = test_clock();
+ rd_kafka_assign(rk, parts);
+
+ break;
+
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ c->revoked_at = test_clock();
+ rd_kafka_assign(rk, NULL);
+ TEST_SAY("line %d: %s revoked %d partitions\n", c->curr_line,
+ rd_kafka_name(c->rk), parts->cnt);
+
+ break;
+
+ default:
+ TEST_FAIL("rebalance failed: %s", rd_kafka_err2str(err));
+ break;
+ }
+
+ /* Reset error */
+ c->expected_rb_event = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ /* prevent poll from triggering more than one rebalance event */
+ rd_kafka_yield(rk);
+}
+
+
+static void do_test_static_group_rebalance(void) {
+ rd_kafka_conf_t *conf;
+ test_msgver_t mv;
+ int64_t rebalance_start;
+ _consumer_t c[_CONSUMER_CNT] = RD_ZERO_INIT;
+ const int msgcnt = 100;
+ uint64_t testid = test_id_generate();
+ const char *topic =
+ test_mk_topic_name("0102_static_group_rebalance", 1);
+ char *topics = rd_strdup(tsprintf("^%s.*", topic));
+ test_timing_t t_close;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 70);
+ test_msgver_init(&mv, testid);
+ c[0].mv = &mv;
+ c[1].mv = &mv;
+
+ test_create_topic(NULL, topic, 3, 1);
+ test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);
+
+ test_conf_set(conf, "max.poll.interval.ms", "9000");
+ test_conf_set(conf, "session.timeout.ms", "6000");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "500");
+ test_conf_set(conf, "metadata.max.age.ms", "5000");
+ test_conf_set(conf, "enable.partition.eof", "true");
+ test_conf_set(conf, "group.instance.id", "consumer1");
+
+ rd_kafka_conf_set_opaque(conf, &c[0]);
+ c[0].rk = test_create_consumer(topic, rebalance_cb,
+ rd_kafka_conf_dup(conf), NULL);
+
+ rd_kafka_conf_set_opaque(conf, &c[1]);
+ test_conf_set(conf, "group.instance.id", "consumer2");
+ c[1].rk = test_create_consumer(topic, rebalance_cb,
+ rd_kafka_conf_dup(conf), NULL);
+ rd_kafka_conf_destroy(conf);
+
+ test_wait_topic_exists(c[1].rk, topic, 5000);
+
+ test_consumer_subscribe(c[0].rk, topics);
+ test_consumer_subscribe(c[1].rk, topics);
+
+ /*
+ * Static members enforce `max.poll.interval.ms` which may prompt
+ * an unwanted rebalance while the other consumer awaits its assignment.
+ * These members remain in the member list however so we must
+ * interleave calls to poll while awaiting our assignment to avoid
+ * unexpected rebalances being triggered.
+ */
+ rebalance_start = test_clock();
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ while (!static_member_wait_rebalance(&c[0], rebalance_start,
+ &c[0].assigned_at, 1000)) {
+ /* keep consumer 2 alive while consumer 1 awaits
+ * its assignment
+ */
+ c[1].curr_line = __LINE__;
+ test_consumer_poll_once(c[1].rk, &mv, 0);
+ }
+
+ static_member_expect_rebalance(&c[1], rebalance_start,
+ &c[1].assigned_at, -1);
+
+ /*
+ * Consume all the messages so we can watch for duplicates
+ * after rejoin/rebalance operations.
+ */
+ c[0].curr_line = __LINE__;
+ test_consumer_poll("serve.queue", c[0].rk, testid, c[0].partition_cnt,
+ 0, -1, &mv);
+ c[1].curr_line = __LINE__;
+ test_consumer_poll("serve.queue", c[1].rk, testid, c[1].partition_cnt,
+ 0, -1, &mv);
+
+ test_msgver_verify("first.verify", &mv, TEST_MSGVER_ALL, 0, msgcnt);
+
+ TEST_SAY("== Testing consumer restart ==\n");
+ conf = rd_kafka_conf_dup(rd_kafka_conf(c[1].rk));
+
+ /* Only c[1] should exhibit rebalance behavior */
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ TIMING_START(&t_close, "consumer restart");
+ test_consumer_close(c[1].rk);
+ rd_kafka_destroy(c[1].rk);
+
+ c[1].rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+ rd_kafka_poll_set_consumer(c[1].rk);
+
+ test_consumer_subscribe(c[1].rk, topics);
+
+ /* Await assignment */
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ rebalance_start = test_clock();
+ while (!static_member_wait_rebalance(&c[1], rebalance_start,
+ &c[1].assigned_at, 1000)) {
+ c[0].curr_line = __LINE__;
+ test_consumer_poll_once(c[0].rk, &mv, 0);
+ }
+ TIMING_STOP(&t_close);
+
+ /* Should complete before `session.timeout.ms` */
+ TIMING_ASSERT(&t_close, 0, 6000);
+
+
+ TEST_SAY("== Testing subscription expansion ==\n");
+
+ /*
+ * New topics matching the subscription pattern should cause
+ * group rebalance
+ */
+ test_create_topic(c->rk, tsprintf("%snew", topic), 1, 1);
+
+ /* Await revocation */
+ rebalance_start = test_clock();
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ while (!static_member_wait_rebalance(&c[0], rebalance_start,
+ &c[0].revoked_at, 1000)) {
+ c[1].curr_line = __LINE__;
+ test_consumer_poll_once(c[1].rk, &mv, 0);
+ }
+
+ static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
+ -1);
+
+ /* Await assignment */
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ while (!static_member_wait_rebalance(&c[0], rebalance_start,
+ &c[0].assigned_at, 1000)) {
+ c[1].curr_line = __LINE__;
+ test_consumer_poll_once(c[1].rk, &mv, 0);
+ }
+
+ static_member_expect_rebalance(&c[1], rebalance_start,
+ &c[1].assigned_at, -1);
+
+ TEST_SAY("== Testing consumer unsubscribe ==\n");
+
+ /* Unsubscribe should send a LeaveGroupRequest invoking a rebalance */
+
+ /* Send LeaveGroup incrementing generation by 1 */
+ rebalance_start = test_clock();
+ rd_kafka_unsubscribe(c[1].rk);
+
+ /* Await revocation */
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
+ -1);
+ static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at,
+ -1);
+
+ /* New cgrp generation with 1 member, c[0] */
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ static_member_expect_rebalance(&c[0], rebalance_start,
+ &c[0].assigned_at, -1);
+
+ /* Send JoinGroup bumping generation by 1 */
+ rebalance_start = test_clock();
+ test_consumer_subscribe(c[1].rk, topics);
+
+ /* End previous single member generation */
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ static_member_expect_rebalance(&c[0], rebalance_start, &c[0].revoked_at,
+ -1);
+
+ /* Await assignment */
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ while (!static_member_wait_rebalance(&c[1], rebalance_start,
+ &c[1].assigned_at, 1000)) {
+ c[0].curr_line = __LINE__;
+ test_consumer_poll_once(c[0].rk, &mv, 0);
+ }
+
+ static_member_expect_rebalance(&c[0], rebalance_start,
+ &c[0].assigned_at, -1);
+
+ TEST_SAY("== Testing max poll violation ==\n");
+ /* max.poll.interval.ms should still be enforced by the consumer */
+
+ /*
+ * Block long enough for consumer 2 to be evicted from the group
+ * `max.poll.interval.ms` + `session.timeout.ms`
+ */
+ rebalance_start = test_clock();
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ c[0].curr_line = __LINE__;
+ test_consumer_poll_no_msgs("wait.max.poll", c[0].rk, testid,
+ 6000 + 9000);
+ c[1].curr_line = __LINE__;
+ test_consumer_poll_expect_err(c[1].rk, testid, 1000,
+ RD_KAFKA_RESP_ERR__MAX_POLL_EXCEEDED);
+
+ /* Await revocation */
+ while (!static_member_wait_rebalance(&c[0], rebalance_start,
+ &c[0].revoked_at, 1000)) {
+ c[1].curr_line = __LINE__;
+ test_consumer_poll_once(c[1].rk, &mv, 0);
+ }
+
+ static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
+ -1);
+
+ /* Await assignment */
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ while (!static_member_wait_rebalance(&c[1], rebalance_start,
+ &c[1].assigned_at, 1000)) {
+ c[0].curr_line = __LINE__;
+ test_consumer_poll_once(c[0].rk, &mv, 0);
+ }
+
+ static_member_expect_rebalance(&c[0], rebalance_start,
+ &c[0].assigned_at, -1);
+
+ TEST_SAY("== Testing `session.timeout.ms` member eviction ==\n");
+
+ rebalance_start = test_clock();
+ c[0].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ TIMING_START(&t_close, "consumer close");
+ test_consumer_close(c[0].rk);
+ rd_kafka_destroy(c[0].rk);
+
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ static_member_expect_rebalance(&c[1], rebalance_start, &c[1].revoked_at,
+ 2 * 7000);
+
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS;
+ static_member_expect_rebalance(&c[1], rebalance_start,
+ &c[1].assigned_at, 2000);
+
+ /* Should take at least as long as `session.timeout.ms` but less than
+ * `max.poll.interval.ms`, but since we can't really know when
+ * the last Heartbeat or SyncGroup request was sent we need to
+ * allow some leeway on the minimum side (4s), and also some on
+ * the maximum side (1s) for slow runtimes. */
+ TIMING_ASSERT(&t_close, 6000 - 4000, 9000 + 1000);
+
+ c[1].expected_rb_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ test_consumer_close(c[1].rk);
+ rd_kafka_destroy(c[1].rk);
+
+ test_msgver_verify("final.validation", &mv, TEST_MSGVER_ALL, 0, msgcnt);
+ test_msgver_clear(&mv);
+ free(topics);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Await a non-empty assignment for all consumers in \p c
+ */
+static void await_assignment_multi(const char *what, rd_kafka_t **c, int cnt) {
+ rd_kafka_topic_partition_list_t *parts;
+ int assignment_cnt;
+
+ TEST_SAY("%s\n", what);
+
+ do {
+ int i;
+ int timeout_ms = 1000;
+
+ assignment_cnt = 0;
+
+ for (i = 0; i < cnt; i++) {
+ test_consumer_poll_no_msgs("poll", c[i], 0, timeout_ms);
+ timeout_ms = 100;
+
+ if (!rd_kafka_assignment(c[i], &parts) && parts) {
+ TEST_SAY("%s has %d partition(s) assigned\n",
+ rd_kafka_name(c[i]), parts->cnt);
+ if (parts->cnt > 0)
+ assignment_cnt++;
+ rd_kafka_topic_partition_list_destroy(parts);
+ }
+ }
+
+ } while (assignment_cnt < cnt);
+}
+
+
+static const rd_kafka_t *valid_fatal_rk;
+/**
+ * @brief Tells test harness that fatal error should not fail the current test
+ */
+static int
+is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ return rk != valid_fatal_rk;
+}
+
+/**
+ * @brief Test that consumer fencing raises a fatal error
+ */
+static void do_test_fenced_member(void) {
+ rd_kafka_t *c[3]; /* 0: consumer2b, 1: consumer1, 2: consumer2a */
+ rd_kafka_conf_t *conf;
+ const char *topic =
+ test_mk_topic_name("0102_static_group_rebalance", 1);
+ rd_kafka_message_t *rkm;
+ char errstr[512];
+ rd_kafka_resp_err_t err;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_create_topic(NULL, topic, 3, 1);
+
+ test_conf_set(conf, "group.instance.id", "consumer1");
+ c[1] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+
+ test_conf_set(conf, "group.instance.id", "consumer2");
+ c[2] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+
+ test_wait_topic_exists(c[2], topic, 5000);
+
+ test_consumer_subscribe(c[1], topic);
+ test_consumer_subscribe(c[2], topic);
+
+ await_assignment_multi("Awaiting initial assignments", &c[1], 2);
+
+ /* Create conflicting consumer */
+ TEST_SAY("Creating conflicting consumer2 instance\n");
+ test_conf_set(conf, "group.instance.id", "consumer2");
+ c[0] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+ rd_kafka_conf_destroy(conf);
+
+ test_curr->is_fatal_cb = is_fatal_cb;
+ valid_fatal_rk = c[2]; /* consumer2a is the consumer that should fail */
+
+ test_consumer_subscribe(c[0], topic);
+
+ /* consumer1 should not be affected (other than a rebalance which
+ * we ignore here)... */
+ test_consumer_poll_no_msgs("consumer1", c[1], 0, 5000);
+
+ /* .. but consumer2a should now have been fenced off by consumer2b */
+ rkm = rd_kafka_consumer_poll(c[2], 5000);
+ TEST_ASSERT(rkm != NULL, "Expected error, not timeout");
+ TEST_ASSERT(rkm->err == RD_KAFKA_RESP_ERR__FATAL,
+ "Expected ERR__FATAL, not %s: %s",
+ rd_kafka_err2str(rkm->err), rd_kafka_message_errstr(rkm));
+ TEST_SAY("Fenced consumer returned expected: %s: %s\n",
+ rd_kafka_err2name(rkm->err), rd_kafka_message_errstr(rkm));
+ rd_kafka_message_destroy(rkm);
+
+
+ /* Read the actual error */
+ err = rd_kafka_fatal_error(c[2], errstr, sizeof(errstr));
+ TEST_SAY("%s fatal error: %s: %s\n", rd_kafka_name(c[2]),
+ rd_kafka_err2name(err), errstr);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR_FENCED_INSTANCE_ID,
+ "Expected ERR_FENCED_INSTANCE_ID as fatal error, not %s",
+ rd_kafka_err2name(err));
+
+ TEST_SAY("close\n");
+ /* Close consumer2a, should also return a fatal error */
+ err = rd_kafka_consumer_close(c[2]);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL,
+ "Expected close on %s to return ERR__FATAL, not %s",
+ rd_kafka_name(c[2]), rd_kafka_err2name(err));
+
+ rd_kafka_destroy(c[2]);
+
+ /* consumer2b and consumer1 should be fine and get their
+ * assignments */
+ await_assignment_multi("Awaiting post-fencing assignment", c, 2);
+
+ rd_kafka_destroy(c[0]);
+ rd_kafka_destroy(c[1]);
+
+ SUB_TEST_PASS();
+}
+
+
+
+int main_0102_static_group_rebalance(int argc, char **argv) {
+
+ do_test_static_group_rebalance();
+
+ do_test_fenced_member();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c
new file mode 100644
index 000000000..eaab2f217
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0103-transactions.c
@@ -0,0 +1,1297 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+/**
+ * @name Producer transaction tests
+ *
+ */
+
+
+/**
+ * @brief Produce messages using batch interface.
+ */
+void do_produce_batch(rd_kafka_t *rk,
+ const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt) {
+ rd_kafka_message_t *messages;
+ rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, topic, NULL);
+ int i;
+ int ret;
+ int remains = cnt;
+
+ TEST_SAY("Batch-producing %d messages to partition %" PRId32 "\n", cnt,
+ partition);
+
+ messages = rd_calloc(sizeof(*messages), cnt);
+ for (i = 0; i < cnt; i++) {
+ char key[128];
+ char value[128];
+
+ test_prepare_msg(testid, partition, msg_base + i, value,
+ sizeof(value), key, sizeof(key));
+ messages[i].key = rd_strdup(key);
+ messages[i].key_len = strlen(key);
+ messages[i].payload = rd_strdup(value);
+ messages[i].len = strlen(value);
+ messages[i]._private = &remains;
+ }
+
+ ret = rd_kafka_produce_batch(rkt, partition, RD_KAFKA_MSG_F_COPY,
+ messages, cnt);
+
+ rd_kafka_topic_destroy(rkt);
+
+ TEST_ASSERT(ret == cnt,
+ "Failed to batch-produce: %d/%d messages produced", ret,
+ cnt);
+
+ for (i = 0; i < cnt; i++) {
+ TEST_ASSERT(!messages[i].err, "Failed to produce message: %s",
+ rd_kafka_err2str(messages[i].err));
+ rd_free(messages[i].key);
+ rd_free(messages[i].payload);
+ }
+ rd_free(messages);
+
+ /* Wait for deliveries */
+ test_wait_delivery(rk, &remains);
+}
+
+
+
+/**
+ * @brief Basic producer transaction testing without consumed input
+ * (only consumed output for verification).
+ * e.g., no consumer offsets to commit with transaction.
+ */
+static void do_test_basic_producer_txn(rd_bool_t enable_compression) {
+ const char *topic = test_mk_topic_name("0103_transactions", 1);
+ const int partition_cnt = 4;
+#define _TXNCNT 6
+ struct {
+ const char *desc;
+ uint64_t testid;
+ int msgcnt;
+ rd_bool_t abort;
+ rd_bool_t sync;
+ rd_bool_t batch;
+ rd_bool_t batch_any;
+ } txn[_TXNCNT] = {
+ {"Commit transaction, sync producing", 0, 100, rd_false, rd_true},
+ {"Commit transaction, async producing", 0, 1000, rd_false,
+ rd_false},
+ {"Commit transaction, sync batch producing to any partition", 0,
+ 100, rd_false, rd_true, rd_true, rd_true},
+ {"Abort transaction, sync producing", 0, 500, rd_true, rd_true},
+ {"Abort transaction, async producing", 0, 5000, rd_true, rd_false},
+ {"Abort transaction, sync batch producing to one partition", 0, 500,
+ rd_true, rd_true, rd_true, rd_false},
+
+ };
+ rd_kafka_t *p, *c;
+ rd_kafka_conf_t *conf, *p_conf, *c_conf;
+ int i;
+
+ /* Mark one of run modes as quick so we don't run both when
+ * in a hurry.*/
+ SUB_TEST0(enable_compression /* quick */, "with%s compression",
+ enable_compression ? "" : "out");
+
+ test_conf_init(&conf, NULL, 30);
+
+ /* Create producer */
+ p_conf = rd_kafka_conf_dup(conf);
+ rd_kafka_conf_set_dr_msg_cb(p_conf, test_dr_msg_cb);
+ test_conf_set(p_conf, "transactional.id", topic);
+ if (enable_compression)
+ test_conf_set(p_conf, "compression.type", "lz4");
+ p = test_create_handle(RD_KAFKA_PRODUCER, p_conf);
+
+ // FIXME: add testing were the txn id is reused (and thus fails)
+
+ /* Create topic */
+ test_create_topic(p, topic, partition_cnt, 3);
+
+ /* Create consumer */
+ c_conf = conf;
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ /* Make sure default isolation.level is transaction aware */
+ TEST_ASSERT(
+ !strcmp(test_conf_get(c_conf, "isolation.level"), "read_committed"),
+ "expected isolation.level=read_committed, not %s",
+ test_conf_get(c_conf, "isolation.level"));
+
+ c = test_create_consumer(topic, NULL, c_conf, NULL);
+
+ /* Wait for topic to propagate to avoid test flakyness */
+ test_wait_topic_exists(c, topic, tmout_multip(5000));
+
+ /* Subscribe to topic */
+ test_consumer_subscribe(c, topic);
+
+ /* Wait for assignment to make sure consumer is fetching messages
+ * below, so we can use the poll_no_msgs() timeout to
+ * determine that messages were indeed aborted. */
+ test_consumer_wait_assignment(c, rd_true);
+
+ /* Init transactions */
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
+
+ for (i = 0; i < _TXNCNT; i++) {
+ int wait_msgcnt = 0;
+
+ TEST_SAY(_C_BLU "txn[%d]: Begin transaction: %s\n" _C_CLR, i,
+ txn[i].desc);
+
+ /* Begin a transaction */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+
+ /* If the transaction is aborted it is okay if
+ * messages fail producing, since they'll be
+ * purged from queues. */
+ test_curr->ignore_dr_err = txn[i].abort;
+
+ /* Produce messages */
+ txn[i].testid = test_id_generate();
+ TEST_SAY(
+ "txn[%d]: Produce %d messages %ssynchronously "
+ "with testid %" PRIu64 "\n",
+ i, txn[i].msgcnt, txn[i].sync ? "" : "a", txn[i].testid);
+
+ if (!txn[i].batch) {
+ if (txn[i].sync)
+ test_produce_msgs2(p, topic, txn[i].testid,
+ RD_KAFKA_PARTITION_UA, 0,
+ txn[i].msgcnt, NULL, 0);
+ else
+ test_produce_msgs2_nowait(
+ p, topic, txn[i].testid,
+ RD_KAFKA_PARTITION_UA, 0, txn[i].msgcnt,
+ NULL, 0, &wait_msgcnt);
+ } else if (txn[i].batch_any) {
+ /* Batch: use any partition */
+ do_produce_batch(p, topic, txn[i].testid,
+ RD_KAFKA_PARTITION_UA, 0,
+ txn[i].msgcnt);
+ } else {
+ /* Batch: specific partition */
+ do_produce_batch(p, topic, txn[i].testid,
+ 1 /* partition */, 0, txn[i].msgcnt);
+ }
+
+
+ /* Abort or commit transaction */
+ TEST_SAY("txn[%d]: %s" _C_CLR " transaction\n", i,
+ txn[i].abort ? _C_RED "Abort" : _C_GRN "Commit");
+ if (txn[i].abort) {
+ test_curr->ignore_dr_err = rd_true;
+ TEST_CALL_ERROR__(
+ rd_kafka_abort_transaction(p, 30 * 1000));
+ } else {
+ test_curr->ignore_dr_err = rd_false;
+ TEST_CALL_ERROR__(
+ rd_kafka_commit_transaction(p, 30 * 1000));
+ }
+
+ if (!txn[i].sync)
+ /* Wait for delivery reports */
+ test_wait_delivery(p, &wait_msgcnt);
+
+ /* Consume messages */
+ if (txn[i].abort)
+ test_consumer_poll_no_msgs(txn[i].desc, c,
+ txn[i].testid, 3000);
+ else
+ test_consumer_poll(txn[i].desc, c, txn[i].testid,
+ partition_cnt, 0, txn[i].msgcnt,
+ NULL);
+
+ TEST_SAY(_C_GRN "txn[%d]: Finished successfully: %s\n" _C_CLR,
+ i, txn[i].desc);
+ }
+
+ rd_kafka_destroy(p);
+
+ test_consumer_close(c);
+ rd_kafka_destroy(c);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Consumes \p cnt messages and returns them in the provided array
+ * which must be pre-allocated.
+ */
+static void
+consume_messages(rd_kafka_t *c, rd_kafka_message_t **msgs, int msgcnt) {
+ int i = 0;
+ while (i < msgcnt) {
+ msgs[i] = rd_kafka_consumer_poll(c, 1000);
+ if (!msgs[i])
+ continue;
+
+ if (msgs[i]->err) {
+ TEST_SAY("%s consumer error: %s\n", rd_kafka_name(c),
+ rd_kafka_message_errstr(msgs[i]));
+ rd_kafka_message_destroy(msgs[i]);
+ continue;
+ }
+
+ TEST_SAYL(3, "%s: consumed message %s [%d] @ %" PRId64 "\n",
+ rd_kafka_name(c), rd_kafka_topic_name(msgs[i]->rkt),
+ msgs[i]->partition, msgs[i]->offset);
+
+
+ i++;
+ }
+}
+
+static void destroy_messages(rd_kafka_message_t **msgs, int msgcnt) {
+ while (msgcnt-- > 0)
+ rd_kafka_message_destroy(msgs[msgcnt]);
+}
+
+
+/**
+ * @brief Test a transactional consumer + transactional producer combo,
+ * mimicing a streams job.
+ *
+ * One input topic produced to by transactional producer 1,
+ * consumed by transactional consumer 1, which forwards messages
+ * to transactional producer 2 that writes messages to output topic,
+ * which is consumed and verified by transactional consumer 2.
+ *
+ * Every 3rd transaction is aborted.
+ */
+void do_test_consumer_producer_txn(void) {
+ char *input_topic =
+ rd_strdup(test_mk_topic_name("0103-transactions-input", 1));
+ char *output_topic =
+ rd_strdup(test_mk_topic_name("0103-transactions-output", 1));
+ const char *c1_groupid = input_topic;
+ const char *c2_groupid = output_topic;
+ rd_kafka_t *p1, *p2, *c1, *c2;
+ rd_kafka_conf_t *conf, *tmpconf;
+ uint64_t testid;
+#define _MSGCNT (10 * 30)
+ const int txncnt = 10;
+ const int msgcnt = _MSGCNT;
+ int txn;
+ int committed_msgcnt = 0;
+ test_msgver_t expect_mv, actual_mv;
+
+ SUB_TEST_QUICK("transactional test with %d transactions", txncnt);
+
+ test_conf_init(&conf, NULL, 30);
+
+ testid = test_id_generate();
+
+ /*
+ *
+ * Producer 1
+ * |
+ * v
+ * input topic
+ * |
+ * v
+ * Consumer 1 }
+ * | } transactional streams job
+ * v }
+ * Producer 2 }
+ * |
+ * v
+ * output tpic
+ * |
+ * v
+ * Consumer 2
+ */
+
+
+ /* Create Producer 1 and seed input topic */
+ tmpconf = rd_kafka_conf_dup(conf);
+ test_conf_set(tmpconf, "transactional.id", input_topic);
+ rd_kafka_conf_set_dr_msg_cb(tmpconf, test_dr_msg_cb);
+ p1 = test_create_handle(RD_KAFKA_PRODUCER, tmpconf);
+
+ /* Create input and output topics */
+ test_create_topic(p1, input_topic, 4, 3);
+ test_create_topic(p1, output_topic, 4, 3);
+
+ /* Seed input topic with messages */
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1));
+ test_produce_msgs2(p1, input_topic, testid, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt, NULL, 0);
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(p1, 30 * 1000));
+
+ rd_kafka_destroy(p1);
+
+ /* Create Consumer 1: reading msgs from input_topic (Producer 1) */
+ tmpconf = rd_kafka_conf_dup(conf);
+ test_conf_set(tmpconf, "isolation.level", "read_committed");
+ test_conf_set(tmpconf, "auto.offset.reset", "earliest");
+ test_conf_set(tmpconf, "enable.auto.commit", "false");
+ c1 = test_create_consumer(c1_groupid, NULL, tmpconf, NULL);
+ test_consumer_subscribe(c1, input_topic);
+
+ /* Create Producer 2 */
+ tmpconf = rd_kafka_conf_dup(conf);
+ test_conf_set(tmpconf, "transactional.id", output_topic);
+ rd_kafka_conf_set_dr_msg_cb(tmpconf, test_dr_msg_cb);
+ p2 = test_create_handle(RD_KAFKA_PRODUCER, tmpconf);
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000));
+
+ /* Create Consumer 2: reading msgs from output_topic (Producer 2) */
+ tmpconf = rd_kafka_conf_dup(conf);
+ test_conf_set(tmpconf, "isolation.level", "read_committed");
+ test_conf_set(tmpconf, "auto.offset.reset", "earliest");
+ c2 = test_create_consumer(c2_groupid, NULL, tmpconf, NULL);
+ test_consumer_subscribe(c2, output_topic);
+
+ /* Keep track of what messages to expect on the output topic */
+ test_msgver_init(&expect_mv, testid);
+
+ for (txn = 0; txn < txncnt; txn++) {
+ int msgcnt2 = 10 * (1 + (txn % 3));
+ rd_kafka_message_t *msgs[_MSGCNT];
+ int i;
+ rd_bool_t do_abort = !(txn % 3);
+ rd_bool_t recreate_consumer =
+ (do_abort && txn == 3) || (!do_abort && txn == 2);
+ rd_kafka_topic_partition_list_t *offsets,
+ *expected_offsets = NULL;
+ rd_kafka_resp_err_t err;
+ rd_kafka_consumer_group_metadata_t *c1_cgmetadata;
+ int remains = msgcnt2;
+
+ TEST_SAY(_C_BLU
+ "Begin transaction #%d/%d "
+ "(msgcnt=%d, do_abort=%s, recreate_consumer=%s)\n",
+ txn, txncnt, msgcnt2, do_abort ? "true" : "false",
+ recreate_consumer ? "true" : "false");
+
+ consume_messages(c1, msgs, msgcnt2);
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p2));
+
+ for (i = 0; i < msgcnt2; i++) {
+ rd_kafka_message_t *msg = msgs[i];
+
+ if (!do_abort) {
+ /* The expected msgver based on the input topic
+ * will be compared to the actual msgver based
+ * on the output topic, so we need to
+ * override the topic name to match
+ * the actual msgver's output topic. */
+ test_msgver_add_msg0(
+ __FUNCTION__, __LINE__, rd_kafka_name(p2),
+ &expect_mv, msg, output_topic);
+ committed_msgcnt++;
+ }
+
+ err = rd_kafka_producev(
+ p2, RD_KAFKA_V_TOPIC(output_topic),
+ RD_KAFKA_V_KEY(msg->key, msg->key_len),
+ RD_KAFKA_V_VALUE(msg->payload, msg->len),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_OPAQUE(&remains), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s",
+ rd_kafka_err2str(err));
+
+ rd_kafka_poll(p2, 0);
+ }
+
+ destroy_messages(msgs, msgcnt2);
+
+ err = rd_kafka_assignment(c1, &offsets);
+ TEST_ASSERT(!err, "failed to get consumer assignment: %s",
+ rd_kafka_err2str(err));
+
+ err = rd_kafka_position(c1, offsets);
+ TEST_ASSERT(!err, "failed to get consumer position: %s",
+ rd_kafka_err2str(err));
+
+ c1_cgmetadata = rd_kafka_consumer_group_metadata(c1);
+ TEST_ASSERT(c1_cgmetadata != NULL,
+ "failed to get consumer group metadata");
+
+ TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
+ p2, offsets, c1_cgmetadata, -1));
+
+ if (recreate_consumer && !do_abort) {
+ expected_offsets =
+ rd_kafka_topic_partition_list_new(offsets->cnt);
+
+ /* Cannot use rd_kafka_topic_partition_list_copy
+ * as it needs to be destroyed before closing the
+ * consumer, because of the _private field holding
+ * a reference to the internal toppar */
+ for (i = 0; i < offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar =
+ &offsets->elems[i];
+ rd_kafka_topic_partition_t *rktpar_new;
+ rktpar_new = rd_kafka_topic_partition_list_add(
+ expected_offsets, rktpar->topic,
+ rktpar->partition);
+ rktpar_new->offset = rktpar->offset;
+ rd_kafka_topic_partition_set_leader_epoch(
+ rktpar_new,
+ rd_kafka_topic_partition_get_leader_epoch(
+ rktpar));
+ }
+ }
+
+ rd_kafka_consumer_group_metadata_destroy(c1_cgmetadata);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+
+ if (do_abort) {
+ test_curr->ignore_dr_err = rd_true;
+ TEST_CALL_ERROR__(
+ rd_kafka_abort_transaction(p2, 30 * 1000));
+ } else {
+ test_curr->ignore_dr_err = rd_false;
+ TEST_CALL_ERROR__(
+ rd_kafka_commit_transaction(p2, 30 * 1000));
+ }
+
+ TEST_ASSERT(remains == 0,
+ "expected no remaining messages "
+ "in-flight/in-queue, got %d",
+ remains);
+
+
+ if (recreate_consumer) {
+ /* Recreate the consumer to pick up
+ * on the committed offset. */
+ TEST_SAY("Recreating consumer 1\n");
+ rd_kafka_consumer_close(c1);
+ rd_kafka_destroy(c1);
+
+ tmpconf = rd_kafka_conf_dup(conf);
+ test_conf_set(tmpconf, "isolation.level",
+ "read_committed");
+ test_conf_set(tmpconf, "auto.offset.reset", "earliest");
+ test_conf_set(tmpconf, "enable.auto.commit", "false");
+ c1 = test_create_consumer(c1_groupid, NULL, tmpconf,
+ NULL);
+ test_consumer_subscribe(c1, input_topic);
+
+
+ if (expected_offsets) {
+ rd_kafka_topic_partition_list_t
+ *committed_offsets =
+ rd_kafka_topic_partition_list_copy(
+ expected_offsets);
+ /* Set committed offsets and epochs to a
+ * different value before requesting them. */
+ for (i = 0; i < committed_offsets->cnt; i++) {
+ rd_kafka_topic_partition_t *rktpar =
+ &committed_offsets->elems[i];
+ rktpar->offset = -100;
+ rd_kafka_topic_partition_set_leader_epoch(
+ rktpar, -100);
+ }
+
+ TEST_CALL_ERR__(rd_kafka_committed(
+ c1, committed_offsets, -1));
+
+ if (test_partition_list_and_offsets_cmp(
+ expected_offsets, committed_offsets)) {
+ TEST_SAY("expected list:\n");
+ test_print_partition_list(
+ expected_offsets);
+ TEST_SAY("committed() list:\n");
+ test_print_partition_list(
+ committed_offsets);
+ TEST_FAIL(
+ "committed offsets don't match");
+ }
+
+ rd_kafka_topic_partition_list_destroy(
+ committed_offsets);
+
+ rd_kafka_topic_partition_list_destroy(
+ expected_offsets);
+ }
+ }
+ }
+
+ rd_kafka_conf_destroy(conf);
+
+ test_msgver_init(&actual_mv, testid);
+
+ test_consumer_poll("Verify output topic", c2, testid, -1, 0,
+ committed_msgcnt, &actual_mv);
+
+ test_msgver_verify_compare("Verify output topic", &actual_mv,
+ &expect_mv, TEST_MSGVER_ALL);
+
+ test_msgver_clear(&actual_mv);
+ test_msgver_clear(&expect_mv);
+
+ rd_kafka_consumer_close(c1);
+ rd_kafka_consumer_close(c2);
+ rd_kafka_destroy(c1);
+ rd_kafka_destroy(c2);
+ rd_kafka_destroy(p2);
+
+ rd_free(input_topic);
+ rd_free(output_topic);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Testing misuse of the transaction API.
+ */
+static void do_test_misuse_txn(void) {
+ const char *topic = test_mk_topic_name("0103-test_misuse_txn", 1);
+ rd_kafka_t *p;
+ rd_kafka_conf_t *conf;
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t fatal_err;
+ char errstr[512];
+ int i;
+
+ /*
+ * transaction.timeout.ms out of range (from broker's point of view)
+ */
+ SUB_TEST_QUICK();
+
+ test_conf_init(&conf, NULL, 10);
+
+ test_conf_set(conf, "transactional.id", topic);
+ test_conf_set(conf, "transaction.timeout.ms", "2147483647");
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ error = rd_kafka_init_transactions(p, 10 * 1000);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
+ "Expected error ERR_INVALID_TRANSACTION_TIMEOUT, "
+ "not %s: %s",
+ rd_kafka_error_name(error),
+ error ? rd_kafka_error_string(error) : "");
+ TEST_ASSERT(rd_kafka_error_is_fatal(error),
+ "Expected error to have is_fatal() set");
+ rd_kafka_error_destroy(error);
+ /* Check that a fatal error is raised */
+ fatal_err = rd_kafka_fatal_error(p, errstr, sizeof(errstr));
+ TEST_ASSERT(fatal_err == RD_KAFKA_RESP_ERR_INVALID_TRANSACTION_TIMEOUT,
+ "Expected fatal error ERR_INVALID_TRANSACTION_TIMEOUT, "
+ "not %s: %s",
+ rd_kafka_err2name(fatal_err), fatal_err ? errstr : "");
+
+ rd_kafka_destroy(p);
+
+
+ /*
+ * Multiple calls to init_transactions(): finish on first.
+ */
+ TEST_SAY("[ Test multiple init_transactions(): finish on first ]\n");
+ test_conf_init(&conf, NULL, 10);
+
+ test_conf_set(conf, "transactional.id", topic);
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
+
+ error = rd_kafka_init_transactions(p, 1);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE,
+ "Expected ERR__STATE error, not %s",
+ rd_kafka_error_name(error));
+ rd_kafka_error_destroy(error);
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+
+ error = rd_kafka_init_transactions(p, 3 * 1000);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE,
+ "Expected ERR__STATE error, not %s",
+ rd_kafka_error_name(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_destroy(p);
+
+
+ /*
+ * Multiple calls to init_transactions(): timeout on first.
+ */
+ TEST_SAY("[ Test multiple init_transactions(): timeout on first ]\n");
+ test_conf_init(&conf, NULL, 10);
+
+ test_conf_set(conf, "transactional.id", topic);
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ error = rd_kafka_init_transactions(p, 1);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_SAY("error: %s, %d\n", rd_kafka_error_string(error),
+ rd_kafka_error_is_retriable(error));
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected ERR__TIMED_OUT, not %s: %s",
+ rd_kafka_error_name(error), rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_is_retriable(error),
+ "Expected error to be retriable");
+ rd_kafka_error_destroy(error);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
+
+ rd_kafka_destroy(p);
+
+
+ /*
+ * Multiple calls to init_transactions(): hysterical amounts
+ */
+ TEST_SAY("[ Test multiple init_transactions(): hysterical amounts ]\n");
+ test_conf_init(&conf, NULL, 10);
+
+ test_conf_set(conf, "transactional.id", topic);
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ /* Call until init succeeds */
+ for (i = 0; i < 5000; i++) {
+ if (!(error = rd_kafka_init_transactions(p, 1)))
+ break;
+
+ TEST_ASSERT(rd_kafka_error_is_retriable(error),
+ "Expected error to be retriable");
+ rd_kafka_error_destroy(error);
+
+ error = rd_kafka_begin_transaction(p);
+ TEST_ASSERT(error, "Expected begin_transactions() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR__CONFLICT,
+ "Expected begin_transactions() to fail "
+ "with CONFLICT, not %s",
+ rd_kafka_error_name(error));
+
+ rd_kafka_error_destroy(error);
+ }
+
+ TEST_ASSERT(i <= 5000,
+ "init_transactions() did not succeed after %d calls\n", i);
+
+ TEST_SAY("init_transactions() succeeded after %d call(s)\n", i + 1);
+
+ /* Make sure a sub-sequent init call fails. */
+ error = rd_kafka_init_transactions(p, 5 * 1000);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__STATE,
+ "Expected init_transactions() to fail with STATE, not %s",
+ rd_kafka_error_name(error));
+ rd_kafka_error_destroy(error);
+
+ /* But begin.. should work now */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+
+ rd_kafka_destroy(p);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief is_fatal_cb for fenced_txn test.
+ */
+static int fenced_txn_is_fatal_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *reason) {
+ TEST_SAY("is_fatal?: %s: %s\n", rd_kafka_err2str(err), reason);
+ if (err == RD_KAFKA_RESP_ERR__FENCED) {
+ TEST_SAY("Saw the expected fatal error\n");
+ return 0;
+ }
+ return 1;
+}
+
+
+/**
+ * @brief Check that transaction fencing is handled correctly.
+ */
+static void do_test_fenced_txn(rd_bool_t produce_after_fence) {
+ const char *topic = test_mk_topic_name("0103_fenced_txn", 1);
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *p1, *p2;
+ rd_kafka_error_t *error;
+ uint64_t testid;
+
+ SUB_TEST_QUICK("%sproduce after fence",
+ produce_after_fence ? "" : "do not ");
+
+ if (produce_after_fence)
+ test_curr->is_fatal_cb = fenced_txn_is_fatal_cb;
+
+ test_curr->ignore_dr_err = rd_false;
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_conf_set(conf, "transactional.id", topic);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
+ p2 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
+ rd_kafka_conf_destroy(conf);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p1, 30 * 1000));
+
+ /* Begin a transaction */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p1));
+
+ /* Produce some messages */
+ test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0, 10,
+ NULL, 0);
+
+ /* Initialize transactions on producer 2, this should
+ * fence off producer 1. */
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p2, 30 * 1000));
+
+ if (produce_after_fence) {
+ /* This will fail hard since the epoch was bumped. */
+ TEST_SAY("Producing after producing fencing\n");
+ test_curr->ignore_dr_err = rd_true;
+ test_produce_msgs2(p1, topic, testid, RD_KAFKA_PARTITION_UA, 0,
+ 10, NULL, 0);
+ }
+
+
+ error = rd_kafka_commit_transaction(p1, 30 * 1000);
+
+ TEST_ASSERT(error, "Expected commit to fail");
+ TEST_ASSERT(rd_kafka_fatal_error(p1, NULL, 0),
+ "Expected a fatal error to have been raised");
+ TEST_ASSERT(error, "Expected commit_transaction() to fail");
+ TEST_ASSERT(rd_kafka_error_is_fatal(error),
+ "Expected commit_transaction() to return a "
+ "fatal error");
+ TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error),
+ "Expected commit_transaction() not to return an "
+ "abortable error");
+ TEST_ASSERT(!rd_kafka_error_is_retriable(error),
+ "Expected commit_transaction() not to return a "
+ "retriable error");
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__FENCED,
+ "Expected commit_transaction() to return %s, "
+ "not %s: %s",
+ rd_kafka_err2name(RD_KAFKA_RESP_ERR__FENCED),
+ rd_kafka_error_name(error), rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_destroy(p1);
+ rd_kafka_destroy(p2);
+
+ /* Make sure no messages were committed. */
+ test_consume_txn_msgs_easy(
+ topic, topic, testid,
+ test_get_partition_count(NULL, topic, 10 * 1000), 0, NULL);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Check that fatal idempotent producer errors are also fatal
+ * transactional errors when KIP-360 is not supported.
+ */
+static void do_test_fatal_idempo_error_without_kip360(void) {
+ const char *topic = test_mk_topic_name("0103_fatal_idempo", 1);
+ const int32_t partition = 0;
+ rd_kafka_conf_t *conf, *c_conf;
+ rd_kafka_t *p, *c;
+ rd_kafka_error_t *error;
+ uint64_t testid;
+ const int msgcnt[3] = {6, 4, 1};
+ rd_kafka_topic_partition_list_t *records;
+ test_msgver_t expect_mv, actual_mv;
+ /* This test triggers UNKNOWN_PRODUCER_ID on AK <2.4 and >2.4, but
+ * not on AK 2.4.
+ * On AK <2.5 (pre KIP-360) these errors are unrecoverable,
+ * on AK >2.5 (with KIP-360) we can recover.
+ * Since 2.4 is not behaving as the other releases we skip it here. */
+ rd_bool_t expect_fail = test_broker_version < TEST_BRKVER(2, 5, 0, 0);
+
+ SUB_TEST_QUICK(
+ "%s", expect_fail ? "expecting failure since broker is < 2.5"
+ : "not expecting failure since broker is >= 2.5");
+
+ if (test_broker_version >= TEST_BRKVER(2, 4, 0, 0) &&
+ test_broker_version < TEST_BRKVER(2, 5, 0, 0))
+ SUB_TEST_SKIP("can't trigger UNKNOWN_PRODUCER_ID on AK 2.4");
+
+ if (expect_fail)
+ test_curr->is_fatal_cb = test_error_is_not_fatal_cb;
+ test_curr->ignore_dr_err = expect_fail;
+
+ testid = test_id_generate();
+
+ /* Keep track of what messages to expect on the output topic */
+ test_msgver_init(&expect_mv, testid);
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_conf_set(conf, "transactional.id", topic);
+ test_conf_set(conf, "batch.num.messages", "1");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ test_create_topic(p, topic, 1, 3);
+
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p, 30 * 1000));
+
+ /*
+ * 3 transactions:
+ * 1. Produce some messages, commit.
+ * 2. Produce some messages, then delete the messages from txn 1 and
+ * then produce some more messages: UNKNOWN_PRODUCER_ID should be
+ * raised as a fatal error.
+ * 3. Start a new transaction, produce and commit some new messages.
+ * (this step is only performed when expect_fail is false).
+ */
+
+ /*
+ * Transaction 1
+ */
+ TEST_SAY(_C_BLU "Transaction 1: %d msgs\n", msgcnt[0]);
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+ test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[0], NULL, 0);
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
+
+
+ /*
+ * Transaction 2
+ */
+ TEST_SAY(_C_BLU "Transaction 2: %d msgs\n", msgcnt[1]);
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+
+ /* Now delete the messages from txn1 */
+ TEST_SAY("Deleting records < %s [%" PRId32 "] offset %d+1\n", topic,
+ partition, msgcnt[0]);
+ records = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(records, topic, partition)->offset =
+ msgcnt[0]; /* include the control message too */
+
+ TEST_CALL_ERR__(test_DeleteRecords_simple(p, NULL, records, NULL));
+ rd_kafka_topic_partition_list_destroy(records);
+
+ /* Wait for deletes to propagate */
+ rd_sleep(2);
+
+ if (!expect_fail)
+ test_curr->dr_mv = &expect_mv;
+
+ /* Produce more messages, should now fail */
+ test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[1], NULL, 0);
+
+ error = rd_kafka_commit_transaction(p, -1);
+
+ TEST_SAY_ERROR(error, "commit_transaction() returned: ");
+
+ if (expect_fail) {
+ TEST_ASSERT(error != NULL, "Expected transaction to fail");
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+ "Expected abortable error");
+ rd_kafka_error_destroy(error);
+
+ /* Now abort transaction, which should raise the fatal error
+ * since it is the abort that performs the PID reinitialization.
+ */
+ error = rd_kafka_abort_transaction(p, -1);
+ TEST_SAY_ERROR(error, "abort_transaction() returned: ");
+ TEST_ASSERT(error != NULL, "Expected abort to fail");
+ TEST_ASSERT(rd_kafka_error_is_fatal(error),
+ "Expecting fatal error");
+ TEST_ASSERT(!rd_kafka_error_is_retriable(error),
+ "Did not expect retriable error");
+ TEST_ASSERT(!rd_kafka_error_txn_requires_abort(error),
+ "Did not expect abortable error");
+
+ rd_kafka_error_destroy(error);
+
+ } else {
+ TEST_ASSERT(!error, "Did not expect commit to fail: %s",
+ rd_kafka_error_string(error));
+ }
+
+
+ if (!expect_fail) {
+ /*
+ * Transaction 3
+ */
+ TEST_SAY(_C_BLU "Transaction 3: %d msgs\n", msgcnt[2]);
+ test_curr->dr_mv = &expect_mv;
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+ test_produce_msgs2(p, topic, testid, partition, 0, msgcnt[2],
+ NULL, 0);
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
+ }
+
+ rd_kafka_destroy(p);
+
+ /* Consume messages.
+ * On AK<2.5 (expect_fail=true) we do not expect to see any messages
+ * since the producer will have failed with a fatal error.
+ * On AK>=2.5 (expect_fail=false) we should only see messages from
+ * txn 3 which are sent after the producer has recovered.
+ */
+
+ test_conf_init(&c_conf, NULL, 0);
+ test_conf_set(c_conf, "enable.partition.eof", "true");
+ c = test_create_consumer(topic, NULL, c_conf, NULL);
+ test_consumer_assign_partition("consume", c, topic, partition,
+ RD_KAFKA_OFFSET_BEGINNING);
+
+ test_msgver_init(&actual_mv, testid);
+ test_msgver_ignore_eof(&actual_mv);
+
+ test_consumer_poll("Verify output topic", c, testid, 1, 0, -1,
+ &actual_mv);
+
+ test_msgver_verify_compare("Verify output topic", &actual_mv,
+ &expect_mv, TEST_MSGVER_ALL);
+
+ test_msgver_clear(&actual_mv);
+ test_msgver_clear(&expect_mv);
+
+ rd_kafka_destroy(c);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Check that empty transactions, with no messages produced, work
+ * as expected.
+ */
+static void do_test_empty_txn(rd_bool_t send_offsets, rd_bool_t do_commit) {
+ const char *topic = test_mk_topic_name("0103_empty_txn", 1);
+ rd_kafka_conf_t *conf, *c_conf;
+ rd_kafka_t *p, *c;
+ uint64_t testid;
+ const int msgcnt = 10;
+ rd_kafka_topic_partition_list_t *committed;
+ int64_t offset;
+
+ SUB_TEST_QUICK("%ssend offsets, %s", send_offsets ? "" : "don't ",
+ do_commit ? "commit" : "abort");
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 30);
+ c_conf = rd_kafka_conf_dup(conf);
+
+ test_conf_set(conf, "transactional.id", topic);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ test_create_topic(p, topic, 1, 3);
+
+ /* Produce some non-txnn messages for the consumer to read and commit */
+ test_produce_msgs_easy(topic, testid, 0, msgcnt);
+
+ /* Create consumer and subscribe to the topic */
+ test_conf_set(c_conf, "auto.offset.reset", "earliest");
+ test_conf_set(c_conf, "enable.auto.commit", "false");
+ c = test_create_consumer(topic, NULL, c_conf, NULL);
+ test_consumer_subscribe(c, topic);
+ test_consumer_wait_assignment(c, rd_false);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+
+ /* send_offsets? Consume messages and send those offsets to the txn */
+ if (send_offsets) {
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ test_consumer_poll("consume", c, testid, -1, 0, msgcnt, NULL);
+
+ TEST_CALL_ERR__(rd_kafka_assignment(c, &offsets));
+ TEST_CALL_ERR__(rd_kafka_position(c, offsets));
+
+ cgmetadata = rd_kafka_consumer_group_metadata(c);
+ TEST_ASSERT(cgmetadata != NULL,
+ "failed to get consumer group metadata");
+
+ TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
+ p, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+ }
+
+
+ if (do_commit)
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
+ else
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(p, -1));
+
+ /* Wait before checking the committed offsets (Kafka < 2.5.0) */
+ if (test_broker_version < TEST_BRKVER(2, 5, 0, 0))
+ rd_usleep(tmout_multip(5000 * 1000), NULL);
+
+ /* Get the committed offsets */
+ TEST_CALL_ERR__(rd_kafka_assignment(c, &committed));
+ TEST_CALL_ERR__(rd_kafka_committed(c, committed, 10 * 1000));
+
+ TEST_ASSERT(committed->cnt == 1,
+ "expected one committed offset, not %d", committed->cnt);
+ offset = committed->elems[0].offset;
+ TEST_SAY("Committed offset is %" PRId64 "\n", offset);
+
+ if (do_commit && send_offsets)
+ TEST_ASSERT(offset >= msgcnt,
+ "expected committed offset >= %d, got %" PRId64,
+ msgcnt, offset);
+ else
+ TEST_ASSERT(offset < 0,
+ "expected no committed offset, got %" PRId64,
+ offset);
+
+ rd_kafka_topic_partition_list_destroy(committed);
+
+ rd_kafka_destroy(c);
+ rd_kafka_destroy(p);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @returns the high watermark for the given partition.
+ */
+int64_t
+query_hi_wmark0(int line, rd_kafka_t *c, const char *topic, int32_t partition) {
+ rd_kafka_resp_err_t err;
+ int64_t lo = -1, hi = -1;
+
+ err = rd_kafka_query_watermark_offsets(c, topic, partition, &lo, &hi,
+ tmout_multip(5 * 1000));
+ TEST_ASSERT(!err, "%d: query_watermark_offsets(%s) failed: %s", line,
+ topic, rd_kafka_err2str(err));
+
+ return hi;
+}
+#define query_hi_wmark(c, topic, part) query_hi_wmark0(__LINE__, c, topic, part)
+
+/**
+ * @brief Check that isolation.level works as expected for query_watermark..().
+ */
+static void do_test_wmark_isolation_level(void) {
+ const char *topic = test_mk_topic_name("0103_wmark_isol", 1);
+ rd_kafka_conf_t *conf, *c_conf;
+ rd_kafka_t *p, *c1, *c2;
+ uint64_t testid;
+ int64_t hw_uncommitted, hw_committed;
+
+ SUB_TEST_QUICK();
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 30);
+ c_conf = rd_kafka_conf_dup(conf);
+
+ test_conf_set(conf, "transactional.id", topic);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ p = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
+
+ test_create_topic(p, topic, 1, 3);
+
+ /* Produce some non-txn messages to avoid 0 as the committed hwmark */
+ test_produce_msgs_easy(topic, testid, 0, 100);
+
+ /* Create consumer and subscribe to the topic */
+ test_conf_set(c_conf, "isolation.level", "read_committed");
+ c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(c_conf), NULL);
+ test_conf_set(c_conf, "isolation.level", "read_uncommitted");
+ c2 = test_create_consumer(topic, NULL, c_conf, NULL);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(p));
+
+ /* Produce some txn messages */
+ test_produce_msgs2(p, topic, testid, 0, 0, 100, NULL, 0);
+
+ test_flush(p, 10 * 1000);
+
+ hw_committed = query_hi_wmark(c1, topic, 0);
+ hw_uncommitted = query_hi_wmark(c2, topic, 0);
+
+ TEST_SAY("Pre-commit hwmarks: committed %" PRId64
+ ", uncommitted %" PRId64 "\n",
+ hw_committed, hw_uncommitted);
+
+ TEST_ASSERT(hw_committed > 0 && hw_committed < hw_uncommitted,
+ "Committed hwmark %" PRId64
+ " should be lower than "
+ "uncommitted hwmark %" PRId64 " for %s [0]",
+ hw_committed, hw_uncommitted, topic);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(p, -1));
+
+ /* Re-create the producer and re-init transactions to make
+ * sure the transaction is fully committed in the cluster. */
+ rd_kafka_destroy(p);
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(p, -1));
+ rd_kafka_destroy(p);
+
+
+ /* Now query wmarks again */
+ hw_committed = query_hi_wmark(c1, topic, 0);
+ hw_uncommitted = query_hi_wmark(c2, topic, 0);
+
+ TEST_SAY("Post-commit hwmarks: committed %" PRId64
+ ", uncommitted %" PRId64 "\n",
+ hw_committed, hw_uncommitted);
+
+ TEST_ASSERT(hw_committed == hw_uncommitted,
+ "Committed hwmark %" PRId64
+ " should be equal to "
+ "uncommitted hwmark %" PRId64 " for %s [0]",
+ hw_committed, hw_uncommitted, topic);
+
+ rd_kafka_destroy(c1);
+ rd_kafka_destroy(c2);
+
+ SUB_TEST_PASS();
+}
+
+
+
+int main_0103_transactions(int argc, char **argv) {
+
+ do_test_misuse_txn();
+ do_test_basic_producer_txn(rd_false /* without compression */);
+ do_test_basic_producer_txn(rd_true /* with compression */);
+ do_test_consumer_producer_txn();
+ do_test_fenced_txn(rd_false /* no produce after fencing */);
+ do_test_fenced_txn(rd_true /* produce after fencing */);
+ do_test_fatal_idempo_error_without_kip360();
+ do_test_empty_txn(rd_false /*don't send offsets*/, rd_true /*commit*/);
+ do_test_empty_txn(rd_false /*don't send offsets*/, rd_false /*abort*/);
+ do_test_empty_txn(rd_true /*send offsets*/, rd_true /*commit*/);
+ do_test_empty_txn(rd_true /*send offsets*/, rd_false /*abort*/);
+ do_test_wmark_isolation_level();
+ return 0;
+}
+
+
+
+/**
+ * @brief Transaction tests that don't require a broker.
+ */
+static void do_test_txn_local(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *p;
+ rd_kafka_error_t *error;
+ test_timing_t t_init;
+ int timeout_ms = 7 * 1000;
+
+ SUB_TEST_QUICK();
+
+ /*
+ * No transactional.id, init_transactions() should fail.
+ */
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", NULL);
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ error = rd_kafka_init_transactions(p, 10);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_ASSERT(
+ rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__NOT_CONFIGURED,
+ "Expected ERR__NOT_CONFIGURED, not %s", rd_kafka_error_name(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_destroy(p);
+
+
+ /*
+ * No brokers, init_transactions() should time out according
+ * to the timeout.
+ */
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", NULL);
+ test_conf_set(conf, "transactional.id", "test");
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ TEST_SAY("Waiting for init_transactions() timeout %d ms\n", timeout_ms);
+
+ test_timeout_set((timeout_ms + 2000) / 1000);
+
+ TIMING_START(&t_init, "init_transactions()");
+ error = rd_kafka_init_transactions(p, timeout_ms);
+ TIMING_STOP(&t_init);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected RD_KAFKA_RESP_ERR__TIMED_OUT, "
+ "not %s: %s",
+ rd_kafka_error_name(error), rd_kafka_error_string(error));
+
+ TEST_SAY("init_transactions() failed as expected: %s\n",
+ rd_kafka_error_string(error));
+
+ rd_kafka_error_destroy(error);
+
+ TIMING_ASSERT(&t_init, timeout_ms - 2000, timeout_ms + 5000);
+
+ rd_kafka_destroy(p);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0103_transactions_local(int argc, char **argv) {
+
+ do_test_txn_local();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c
new file mode 100644
index 000000000..1ecf99da3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0104-fetch_from_follower_mock.c
@@ -0,0 +1,617 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * @name Fetch from follower tests using the mock broker.
+ */
+
+static int allowed_error;
+
+/**
+ * @brief Decide what error_cb's will cause the test to fail.
+ */
+static int
+error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ if (err == allowed_error ||
+ /* If transport errors are allowed then it is likely
+ * that we'll also see ALL_BROKERS_DOWN. */
+ (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT &&
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
+ TEST_SAY("Ignoring allowed error: %s: %s\n",
+ rd_kafka_err2name(err), reason);
+ return 0;
+ }
+ return 1;
+}
+
+
+/**
+ * @brief Test offset reset when fetching from replica.
+ * Since the highwatermark is in sync with the leader the
+ * ERR_OFFSETS_OUT_OF_RANGE is trusted by the consumer and
+ * a reset is performed. See do_test_offset_reset_lag()
+ * for the case where the replica is lagging and can't be trusted.
+ */
+static void do_test_offset_reset(const char *auto_offset_reset) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 1000;
+ const size_t msgsize = 1000;
+
+ TEST_SAY(_C_MAG "[ Test FFF auto.offset.reset=%s ]\n",
+ auto_offset_reset);
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "10", NULL);
+
+ /* Set partition leader to broker 1, follower to broker 2 */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "client.rack", "myrack");
+ test_conf_set(conf, "auto.offset.reset", auto_offset_reset);
+ /* Make sure we don't consume the entire partition in one Fetch */
+ test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ /* The first fetch will go to the leader which will redirect
+ * the consumer to the follower, the second and sub-sequent fetches
+ * will go to the follower. We want the third fetch, second one on
+ * the follower, to fail and trigger an offset reset. */
+ rd_kafka_mock_push_request_errors(
+ mcluster, 1 /*FetchRequest*/, 3,
+ RD_KAFKA_RESP_ERR_NO_ERROR /*leader*/,
+ RD_KAFKA_RESP_ERR_NO_ERROR /*follower*/,
+ RD_KAFKA_RESP_ERR_OFFSET_OUT_OF_RANGE /*follower: fail*/);
+
+ test_consumer_assign_partition(auto_offset_reset, c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ if (!strcmp(auto_offset_reset, "latest"))
+ test_consumer_poll_no_msgs(auto_offset_reset, c, 0, 5000);
+ else
+ test_consumer_poll(auto_offset_reset, c, 0, 1, 0, msgcnt, NULL);
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_SAY(_C_GRN "[ Test FFF auto.offset.reset=%s PASSED ]\n",
+ auto_offset_reset);
+}
+
+
+/**
+ * @brief Test offset reset when fetching from a lagging replica
+ * who's high-watermark is behind the leader, which means
+ * an offset reset should not be triggered.
+ */
+static void do_test_offset_reset_lag(void) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 10;
+ const int lag = 3;
+ const size_t msgsize = 1000;
+
+ TEST_SAY(_C_MAG "[ Test lagging FFF offset reset ]\n");
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "1", NULL);
+
+ /* Set broker rack */
+ /* Set partition leader to broker 1, follower to broker 2 */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+ /* Make follower lag by some messages
+ * ( .. -1 because offsets start at 0) */
+ rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, -1,
+ msgcnt - lag - 1);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "client.rack", "myrack");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ /* Make sure we don't consume the entire partition in one Fetch */
+ test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ test_consumer_assign_partition("lag", c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ /* Should receive all messages up to the followers hwmark */
+ test_consumer_poll("up to wmark", c, 0, 0, 0, msgcnt - lag, NULL);
+
+ /* And then nothing.. as the consumer waits for the replica to
+ * catch up. */
+ test_consumer_poll_no_msgs("no msgs", c, 0, 3000);
+
+ /* Catch up the replica, consumer should now get the
+ * remaining messages */
+ rd_kafka_mock_partition_set_follower_wmarks(mcluster, topic, 0, -1, -1);
+ test_consumer_poll("remaining", c, 0, 1, msgcnt - lag, lag, NULL);
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_SAY(_C_GRN "[ Test lagging FFF offset reset PASSED ]\n");
+}
+
+
+/**
+ * @brief Test delegating consumer to a follower that does not exist,
+ * the consumer should not be able to consume any messages (which
+ * is questionable but for a later PR). Then change to a valid
+ * replica and verify messages can be consumed.
+ */
+static void do_test_unknown_follower(void) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 1000;
+ const size_t msgsize = 1000;
+ test_msgver_t mv;
+
+ TEST_SAY(_C_MAG "[ Test unknown follower ]\n");
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "10", NULL);
+
+ /* Set partition leader to broker 1, follower
+ * to non-existent broker 19 */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 19);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "client.rack", "myrack");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ /* Make sure we don't consume the entire partition in one Fetch */
+ test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ test_consumer_assign_partition("unknown follower", c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ test_consumer_poll_no_msgs("unknown follower", c, 0, 5000);
+
+ /* Set a valid follower (broker 3) */
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 3);
+ test_msgver_init(&mv, 0);
+ test_consumer_poll("proper follower", c, 0, 1, 0, msgcnt, &mv);
+ /* Verify messages were indeed received from broker 3 */
+ test_msgver_verify0(
+ __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
+ (struct test_mv_vs) {
+ .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 3});
+ test_msgver_clear(&mv);
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_SAY(_C_GRN "[ Test unknown follower PASSED ]\n");
+}
+
+
+/**
+ * @brief Issue #2955: Verify that fetch does not stall until next
+ * periodic metadata timeout when leader broker is no longer
+ * a replica.
+ */
+static void do_test_replica_not_available(void) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 1000;
+
+ TEST_SAY(_C_MAG "[ Test REPLICA_NOT_AVAILABLE ]\n");
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "10", NULL);
+
+ /* Set partition leader to broker 1. */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "client.rack", "myrack");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+ test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, 1 /*Broker 1*/, 1 /*FetchRequest*/, 10,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0,
+ RD_KAFKA_RESP_ERR_REPLICA_NOT_AVAILABLE, 0);
+
+
+ test_consumer_assign_partition("REPLICA_NOT_AVAILABLE", c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ test_consumer_poll_no_msgs("Wait initial metadata", c, 0, 2000);
+
+ /* Switch leader to broker 2 so that metadata is updated,
+ * causing the consumer to start fetching from the new leader. */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
+
+ test_consumer_poll("Consume", c, 0, 1, 0, msgcnt, NULL);
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_SAY(_C_GRN "[ Test REPLICA_NOT_AVAILABLE PASSED ]\n");
+}
+
+/**
+ * @brief With an error \p err on a Fetch request should query for the new
+ * leader or preferred replica and refresh metadata.
+ */
+static void do_test_delegate_to_leader_on_error(rd_kafka_resp_err_t err) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 1000;
+ const char *errstr = rd_kafka_err2name(err);
+
+ TEST_SAY(_C_MAG "[ Test %s ]\n", errstr);
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "10", NULL);
+
+ /* Set partition leader to broker 1. */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "client.rack", "myrack");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+ test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, 1 /*Broker 1*/, 1 /*FetchRequest*/, 10, err, 0, err, 0,
+ err, 0, err, 0, err, 0, err, 0, err, 0, err, 0, err, 0, err, 0);
+
+
+ test_consumer_assign_partition(errstr, c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ test_consumer_poll_no_msgs("Wait initial metadata", c, 0, 2000);
+
+ /* Switch leader to broker 2 so that metadata is updated,
+ * causing the consumer to start fetching from the new leader. */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
+
+ test_consumer_poll_timeout("Consume", c, 0, 1, 0, msgcnt, NULL, 2000);
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_SAY(_C_GRN "[ Test %s ]\n", errstr);
+}
+
+/**
+ * @brief Test when the preferred replica is no longer a follower of the
+ * partition leader. We should try fetch from the leader instead.
+ */
+static void do_test_not_leader_or_follower(void) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 10;
+
+ TEST_SAY(_C_MAG "[ Test NOT_LEADER_OR_FOLLOWER ]\n");
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+ /* Set partition leader to broker 1. */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "client.rack", "myrack");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+ test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+ test_conf_set(conf, "fetch.message.max.bytes", "10");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ test_consumer_assign_partition("NOT_LEADER_OR_FOLLOWER", c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ /* Since there are no messages, this poll only waits for metadata, and
+ * then sets the preferred replica after the first fetch request. */
+ test_consumer_poll_no_msgs("Initial metadata and preferred replica set",
+ c, 0, 2000);
+
+ /* Change the follower, so that the preferred replica is no longer the
+ * leader or follower. */
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, -1);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "10", NULL);
+
+ /* On getting a NOT_LEADER_OR_FOLLOWER error, we should change to the
+ * leader and fetch from there without timing out. */
+ test_msgver_t mv;
+ test_msgver_init(&mv, 0);
+ test_consumer_poll_timeout("from leader", c, 0, 1, 0, msgcnt, &mv,
+ 2000);
+ test_msgver_verify0(
+ __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
+ (struct test_mv_vs) {
+ .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 1});
+ test_msgver_clear(&mv);
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_SAY(_C_GRN "[ Test NOT_LEADER_OR_FOLLOWER PASSED ]\n");
+}
+
+
+/**
+ * @brief Test when the preferred replica broker goes down. When a broker is
+ * going down, we should delegate all its partitions to their leaders.
+ */
+static void do_test_follower_down(void) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 10;
+
+ TEST_SAY(_C_MAG "[ Test with follower down ]\n");
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+ /* Set partition leader to broker 1. */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 2);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "client.rack", "myrack");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "topic.metadata.refresh.interval.ms", "60000");
+ test_conf_set(conf, "fetch.error.backoff.ms", "1000");
+ test_conf_set(conf, "fetch.message.max.bytes", "10");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ test_consumer_assign_partition("follower down", c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ /* Since there are no messages, this poll only waits for metadata, and
+ * then sets the preferred replica after the first fetch request. */
+ test_consumer_poll_no_msgs("Initial metadata and preferred replica set",
+ c, 0, 2000);
+
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 1000,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "10", NULL);
+
+ /* Set follower down. When follower is set as DOWN, we also expect
+ * that the cluster itself knows and does not ask us to change our
+ * preferred replica to the broker which is down. To facilitate this,
+ * we just set the follower to 3 instead of 2. */
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ rd_kafka_mock_broker_set_down(mcluster, 2);
+ rd_kafka_mock_partition_set_follower(mcluster, topic, 0, 3);
+
+ /* Wee should change to the new follower when the old one goes down,
+ * and fetch from there without timing out. */
+ test_msgver_t mv;
+ test_msgver_init(&mv, 0);
+ test_consumer_poll_timeout("from other follower", c, 0, 1, 0, msgcnt,
+ &mv, 2000);
+ test_msgver_verify0(
+ __FUNCTION__, __LINE__, "broker_id", &mv, TEST_MSGVER_BY_BROKER_ID,
+ (struct test_mv_vs) {
+ .msg_base = 0, .exp_cnt = msgcnt, .broker_id = 3});
+ test_msgver_clear(&mv);
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_SAY(_C_GRN "[ Test with follower down PASSED ]\n");
+}
+
+
+/**
+ * @brief When a seek is done with a leader epoch,
+ * the expected behavior is to validate it and
+ * start fetching from the end offset of that epoch if
+ * less than current offset.
+ * This is possible in case of external group offsets storage,
+ * associated with an unclean leader election.
+ */
+static void do_test_seek_to_offset_with_previous_epoch(void) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *topic = "test";
+ const int msgcnt = 10;
+ const size_t msgsize = 1000;
+ rd_kafka_topic_partition_list_t *rktpars;
+ rd_kafka_topic_partition_t *rktpar;
+
+ SUB_TEST_QUICK();
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+ "bootstrap.servers", bootstraps, NULL);
+
+ test_conf_init(&conf, NULL, 0);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ c = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ test_consumer_assign_partition("zero", c, topic, 0,
+ RD_KAFKA_OFFSET_INVALID);
+
+ test_consumer_poll("first", c, 0, 0, msgcnt, msgcnt, NULL);
+
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 2);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, msgsize,
+ "bootstrap.servers", bootstraps, NULL);
+
+ test_consumer_poll("second", c, 0, 0, msgcnt, msgcnt, NULL);
+
+ rktpars = rd_kafka_topic_partition_list_new(1);
+ rktpar = rd_kafka_topic_partition_list_add(rktpars, topic, 0);
+ rktpar->offset = msgcnt * 2;
+ /* Will validate the offset at start fetching again
+ * from offset 'msgcnt'. */
+ rd_kafka_topic_partition_set_leader_epoch(rktpar, 0);
+ rd_kafka_seek_partitions(c, rktpars, -1);
+
+ test_consumer_poll("third", c, 0, 0, msgcnt, msgcnt, NULL);
+
+ test_consumer_close(c);
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0104_fetch_from_follower_mock(int argc, char **argv) {
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ do_test_offset_reset("earliest");
+ do_test_offset_reset("latest");
+
+ do_test_offset_reset_lag();
+
+ do_test_unknown_follower();
+
+ do_test_replica_not_available();
+
+ do_test_delegate_to_leader_on_error(
+ RD_KAFKA_RESP_ERR_OFFSET_NOT_AVAILABLE);
+
+ do_test_not_leader_or_follower();
+
+ do_test_follower_down();
+
+ do_test_seek_to_offset_with_previous_epoch();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c
new file mode 100644
index 000000000..014642df1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0105-transactions_mock.c
@@ -0,0 +1,3926 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2019, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdstring.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Producer transaction tests using the mock cluster
+ *
+ */
+
+
+static int allowed_error;
+
+/**
+ * @brief Decide what error_cb's will cause the test to fail.
+ */
+static int
+error_is_fatal_cb(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ if (err == allowed_error ||
+ /* If transport errors are allowed then it is likely
+ * that we'll also see ALL_BROKERS_DOWN. */
+ (allowed_error == RD_KAFKA_RESP_ERR__TRANSPORT &&
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN)) {
+ TEST_SAY("Ignoring allowed error: %s: %s\n",
+ rd_kafka_err2name(err), reason);
+ return 0;
+ }
+ return 1;
+}
+
+
+static rd_kafka_resp_err_t (*on_response_received_cb)(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque);
+
+/**
+ * @brief Simple on_response_received interceptor that simply calls the
+ * sub-test's on_response_received_cb function, if set.
+ */
+static rd_kafka_resp_err_t
+on_response_received_trampoline(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque) {
+ TEST_ASSERT(on_response_received_cb != NULL, "");
+ return on_response_received_cb(rk, sockfd, brokername, brokerid, ApiKey,
+ ApiVersion, CorrId, size, rtt, err,
+ ic_opaque);
+}
+
+
+/**
+ * @brief on_new interceptor to add an on_response_received interceptor.
+ */
+static rd_kafka_resp_err_t on_new_producer(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ rd_kafka_resp_err_t err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ if (on_response_received_cb)
+ err = rd_kafka_interceptor_add_on_response_received(
+ rk, "on_response_received", on_response_received_trampoline,
+ ic_opaque);
+
+ return err;
+}
+
+
+/**
+ * @brief Create a transactional producer and a mock cluster.
+ *
+ * The var-arg list is a NULL-terminated list of
+ * (const char *key, const char *value) config properties.
+ *
+ * Special keys:
+ * "on_response_received", "" - enable the on_response_received_cb
+ * interceptor,
+ * which must be assigned prior to
+ * calling create_tnx_producer().
+ */
+static RD_SENTINEL rd_kafka_t *
+create_txn_producer(rd_kafka_mock_cluster_t **mclusterp,
+ const char *transactional_id,
+ int broker_cnt,
+ ...) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ char numstr[8];
+ va_list ap;
+ const char *key;
+ rd_bool_t add_interceptors = rd_false;
+
+ rd_snprintf(numstr, sizeof(numstr), "%d", broker_cnt);
+
+ test_conf_init(&conf, NULL, 60);
+
+ test_conf_set(conf, "transactional.id", transactional_id);
+ /* When mock brokers are set to down state they're still binding
+ * the port, just not listening to it, which makes connection attempts
+ * stall until socket.connection.setup.timeout.ms expires.
+ * To speed up detection of brokers being down we reduce this timeout
+ * to just a couple of seconds. */
+ test_conf_set(conf, "socket.connection.setup.timeout.ms", "5000");
+ /* Speed up reconnects */
+ test_conf_set(conf, "reconnect.backoff.max.ms", "2000");
+ test_conf_set(conf, "test.mock.num.brokers", numstr);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ test_curr->ignore_dr_err = rd_false;
+
+ va_start(ap, broker_cnt);
+ while ((key = va_arg(ap, const char *))) {
+ if (!strcmp(key, "on_response_received")) {
+ add_interceptors = rd_true;
+ (void)va_arg(ap, const char *);
+ } else {
+ test_conf_set(conf, key, va_arg(ap, const char *));
+ }
+ }
+ va_end(ap);
+
+ /* Add an on_.. interceptors */
+ if (add_interceptors)
+ rd_kafka_conf_interceptor_add_on_new(conf, "on_new_producer",
+ on_new_producer, NULL);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ if (mclusterp) {
+ *mclusterp = rd_kafka_handle_mock_cluster(rk);
+ TEST_ASSERT(*mclusterp, "failed to create mock cluster");
+
+ /* Create some of the common consumer "input" topics
+ * that we must be able to commit to with
+ * send_offsets_to_transaction().
+ * The number depicts the number of partitions in the topic. */
+ TEST_CALL_ERR__(
+ rd_kafka_mock_topic_create(*mclusterp, "srctopic4", 4, 1));
+ TEST_CALL_ERR__(rd_kafka_mock_topic_create(
+ *mclusterp, "srctopic64", 64, 1));
+ }
+
+ return rk;
+}
+
+
+/**
+ * @brief Test recoverable errors using mock broker error injections
+ * and code coverage checks.
+ */
+static void do_test_txn_recoverable_errors(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ const char *groupid = "myGroupId";
+ const char *txnid = "myTxnId";
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+ NULL);
+
+ /* Make sure transaction and group coordinators are different.
+ * This verifies that AddOffsetsToTxnRequest isn't sent to the
+ * transaction coordinator but the group coordinator. */
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 2);
+
+ /*
+ * Inject som InitProducerId errors that causes retries
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_InitProducerId, 3,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ (void)RD_UT_COVERAGE_CHECK(0); /* idemp_request_pid_failed(retry) */
+ (void)RD_UT_COVERAGE_CHECK(1); /* txn_idemp_state_change(READY) */
+
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+ /* Produce a message without error first */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ rd_kafka_flush(rk, -1);
+
+ /*
+ * Produce a message, let it fail with a non-idempo/non-txn
+ * retryable error
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_NOT_ENOUGH_REPLICAS);
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ /* Make sure messages are produced */
+ rd_kafka_flush(rk, -1);
+
+ /*
+ * Send some arbitrary offsets, first with some failures, then
+ * succeed.
+ */
+ offsets = rd_kafka_topic_partition_list_new(4);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 39)->offset =
+ 999999111;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset =
+ 999;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 19)->offset =
+ 123456789;
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_AddPartitionsToTxn, 1,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ /*
+ * Commit transaction, first with som failures, then succeed.
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_EndTxn, 3,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief KIP-360: Test that fatal idempotence errors triggers abortable
+ * transaction errors and that the producer can recover.
+ */
+static void do_test_txn_fatal_idempo_errors(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ const char *txnid = "myTxnId";
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+ NULL);
+
+ test_curr->ignore_dr_err = rd_true;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ allowed_error = RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+ /* Produce a message without error first */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ /* Produce a message, let it fail with a fatal idempo error. */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ /* Commit the transaction, should fail */
+ error = rd_kafka_commit_transaction(rk, -1);
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+ rd_kafka_error_string(error));
+
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+ "Did not expect fatal error");
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+ "Expected abortable error");
+ rd_kafka_error_destroy(error);
+
+ /* Abort the transaction */
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ /* Run a new transaction without errors to verify that the
+ * producer can recover. */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief KIP-360: Test that fatal idempotence errors triggers abortable
+ * transaction errors, but let the broker-side bumping of the
+ * producer PID take longer than the remaining transaction timeout
+ * which should raise a retriable error from abort_transaction().
+ *
+ * @param with_sleep After the first abort sleep longer than it takes to
+ * re-init the pid so that the internal state automatically
+ * transitions.
+ */
+static void do_test_txn_slow_reinit(rd_bool_t with_sleep) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ int32_t txn_coord = 2;
+ const char *txnid = "myTxnId";
+ test_timing_t timing;
+
+ SUB_TEST("%s sleep", with_sleep ? "with" : "without");
+
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+ NULL);
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+ txn_coord);
+
+ test_curr->ignore_dr_err = rd_true;
+ test_curr->is_fatal_cb = NULL;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+ /* Produce a message without error first */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ test_flush(rk, -1);
+
+ /* Set transaction coordinator latency higher than
+ * the abort_transaction() call timeout so that the automatic
+ * re-initpid takes longer than abort_transaction(). */
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1,
+ RD_KAFKA_RESP_ERR_NO_ERROR, 10000 /*10s*/);
+
+ /* Produce a message, let it fail with a fatal idempo error. */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+
+ /* Commit the transaction, should fail */
+ TIMING_START(&timing, "commit_transaction(-1)");
+ error = rd_kafka_commit_transaction(rk, -1);
+ TIMING_STOP(&timing);
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+ rd_kafka_error_string(error));
+
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+ "Did not expect fatal error");
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+ "Expected abortable error");
+ rd_kafka_error_destroy(error);
+
+ /* Abort the transaction, should fail with retriable (timeout) error */
+ TIMING_START(&timing, "abort_transaction(100)");
+ error = rd_kafka_abort_transaction(rk, 100);
+ TIMING_STOP(&timing);
+ TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
+
+ TEST_SAY("First abort_transaction() failed: %s\n",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+ "Did not expect fatal error");
+ TEST_ASSERT(rd_kafka_error_is_retriable(error),
+ "Expected retriable error");
+ rd_kafka_error_destroy(error);
+
+ if (with_sleep)
+ rd_sleep(12);
+
+ /* Retry abort, should now finish. */
+ TEST_SAY("Retrying abort\n");
+ TIMING_START(&timing, "abort_transaction(-1)");
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+ TIMING_STOP(&timing);
+
+ /* Run a new transaction without errors to verify that the
+ * producer can recover. */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief KIP-360: Test that fatal idempotence errors triggers abortable
+ * transaction errors, but let the broker-side bumping of the
+ * producer PID fail with a fencing error.
+ * Should raise a fatal error.
+ *
+ * @param error_code Which error code InitProducerIdRequest should fail with.
+ * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
+ * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
+ */
+static void do_test_txn_fenced_reinit(rd_kafka_resp_err_t error_code) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ int32_t txn_coord = 2;
+ const char *txnid = "myTxnId";
+ char errstr[512];
+ rd_kafka_resp_err_t fatal_err;
+
+ SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code));
+
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+ NULL);
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+ txn_coord);
+
+ test_curr->ignore_dr_err = rd_true;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ allowed_error = RD_KAFKA_RESP_ERR__FENCED;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+ /* Produce a message without error first */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ test_flush(rk, -1);
+
+ /* Fail the PID reinit */
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0);
+
+ /* Produce a message, let it fail with a fatal idempo error. */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ test_flush(rk, -1);
+
+ /* Abort the transaction, should fail with a fatal error */
+ error = rd_kafka_abort_transaction(rk, -1);
+ TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
+
+ TEST_SAY("abort_transaction() failed: %s\n",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error");
+ rd_kafka_error_destroy(error);
+
+ fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised");
+ TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr);
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test EndTxn errors.
+ */
+static void do_test_txn_endtxn_errors(void) {
+ rd_kafka_t *rk = NULL;
+ rd_kafka_mock_cluster_t *mcluster = NULL;
+ rd_kafka_resp_err_t err;
+ struct {
+ size_t error_cnt;
+ rd_kafka_resp_err_t errors[4];
+ rd_kafka_resp_err_t exp_err;
+ rd_bool_t exp_retriable;
+ rd_bool_t exp_abortable;
+ rd_bool_t exp_fatal;
+ rd_bool_t exp_successful_abort;
+ } scenario[] = {
+ /* This list of errors is from the EndTxnResponse handler in
+ * AK clients/.../TransactionManager.java */
+ {
+ /* #0 */
+ 2,
+ {RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE},
+ /* Should auto-recover */
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ },
+ {
+ /* #1 */
+ 2,
+ {RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR},
+ /* Should auto-recover */
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ },
+ {
+ /* #2 */
+ 1,
+ {RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS},
+ /* Should auto-recover */
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ },
+ {
+ /* #3 */
+ 3,
+ {RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS},
+ /* Should auto-recover */
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ },
+ {
+ /* #4: the abort is auto-recovering thru epoch bump */
+ 1,
+ {RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID},
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID,
+ rd_false /* !retriable */,
+ rd_true /* abortable */,
+ rd_false /* !fatal */,
+ rd_true /* successful abort */
+ },
+ {
+ /* #5: the abort is auto-recovering thru epoch bump */
+ 1,
+ {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING},
+ RD_KAFKA_RESP_ERR_INVALID_PRODUCER_ID_MAPPING,
+ rd_false /* !retriable */,
+ rd_true /* abortable */,
+ rd_false /* !fatal */,
+ rd_true /* successful abort */
+ },
+ {
+ /* #6 */
+ 1,
+ {RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH},
+ /* This error is normalized */
+ RD_KAFKA_RESP_ERR__FENCED,
+ rd_false /* !retriable */,
+ rd_false /* !abortable */,
+ rd_true /* fatal */
+ },
+ {
+ /* #7 */
+ 1,
+ {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
+ /* This error is normalized */
+ RD_KAFKA_RESP_ERR__FENCED,
+ rd_false /* !retriable */,
+ rd_false /* !abortable */,
+ rd_true /* fatal */
+ },
+ {
+ /* #8 */
+ 1,
+ {RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED},
+ RD_KAFKA_RESP_ERR_TRANSACTIONAL_ID_AUTHORIZATION_FAILED,
+ rd_false /* !retriable */,
+ rd_false /* !abortable */,
+ rd_true /* fatal */
+ },
+ {
+ /* #9 */
+ 1,
+ {RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED},
+ RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+ rd_false /* !retriable */,
+ rd_true /* abortable */,
+ rd_false /* !fatal */
+ },
+ {
+ /* #10 */
+ /* Any other error should raise a fatal error */
+ 1,
+ {RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE},
+ RD_KAFKA_RESP_ERR_INVALID_MSG_SIZE,
+ rd_false /* !retriable */,
+ rd_true /* abortable */,
+ rd_false /* !fatal */,
+ },
+ {
+ /* #11 */
+ 1,
+ {RD_KAFKA_RESP_ERR_PRODUCER_FENCED},
+ /* This error is normalized */
+ RD_KAFKA_RESP_ERR__FENCED,
+ rd_false /* !retriable */,
+ rd_false /* !abortable */,
+ rd_true /* fatal */
+ },
+ {0},
+ };
+ int i;
+
+ SUB_TEST_QUICK();
+
+ for (i = 0; scenario[i].error_cnt > 0; i++) {
+ int j;
+ /* For each scenario, test:
+ * commit_transaction()
+ * flush() + commit_transaction()
+ * abort_transaction()
+ * flush() + abort_transaction()
+ */
+ for (j = 0; j < (2 + 2); j++) {
+ rd_bool_t commit = j < 2;
+ rd_bool_t with_flush = j & 1;
+ rd_bool_t exp_successful_abort =
+ !commit && scenario[i].exp_successful_abort;
+ const char *commit_str =
+ commit ? (with_flush ? "commit&flush" : "commit")
+ : (with_flush ? "abort&flush" : "abort");
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ rd_kafka_error_t *error;
+ test_timing_t t_call;
+
+ TEST_SAY("Testing scenario #%d %s with %" PRIusz
+ " injected erorrs, expecting %s\n",
+ i, commit_str, scenario[i].error_cnt,
+ exp_successful_abort
+ ? "successful abort"
+ : rd_kafka_err2name(scenario[i].exp_err));
+
+ if (!rk) {
+ const char *txnid = "myTxnId";
+ rk = create_txn_producer(&mcluster, txnid, 3,
+ NULL);
+ TEST_CALL_ERROR__(
+ rd_kafka_init_transactions(rk, 5000));
+ }
+
+ /*
+ * Start transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /* Transaction aborts will cause DR errors:
+ * ignore them. */
+ test_curr->ignore_dr_err = !commit;
+
+ /*
+ * Produce a message.
+ */
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2),
+ RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s",
+ rd_kafka_err2str(err));
+
+ if (with_flush)
+ test_flush(rk, -1);
+
+ /*
+ * Send some arbitrary offsets.
+ */
+ offsets = rd_kafka_topic_partition_list_new(4);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4",
+ 3)
+ ->offset = 12;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64",
+ 60)
+ ->offset = 99999;
+
+ cgmetadata =
+ rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
+ rk, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ /*
+ * Commit transaction, first with som failures,
+ * then succeed.
+ */
+ rd_kafka_mock_push_request_errors_array(
+ mcluster, RD_KAFKAP_EndTxn, scenario[i].error_cnt,
+ scenario[i].errors);
+
+ TIMING_START(&t_call, "%s", commit_str);
+ if (commit)
+ error = rd_kafka_commit_transaction(
+ rk, tmout_multip(5000));
+ else
+ error = rd_kafka_abort_transaction(
+ rk, tmout_multip(5000));
+ TIMING_STOP(&t_call);
+
+ if (error)
+ TEST_SAY(
+ "Scenario #%d %s failed: %s: %s "
+ "(retriable=%s, req_abort=%s, "
+ "fatal=%s)\n",
+ i, commit_str, rd_kafka_error_name(error),
+ rd_kafka_error_string(error),
+ RD_STR_ToF(
+ rd_kafka_error_is_retriable(error)),
+ RD_STR_ToF(
+ rd_kafka_error_txn_requires_abort(
+ error)),
+ RD_STR_ToF(rd_kafka_error_is_fatal(error)));
+ else
+ TEST_SAY("Scenario #%d %s succeeded\n", i,
+ commit_str);
+
+ if (!scenario[i].exp_err || exp_successful_abort) {
+ TEST_ASSERT(!error,
+ "Expected #%d %s to succeed, "
+ "got %s",
+ i, commit_str,
+ rd_kafka_error_string(error));
+ continue;
+ }
+
+
+ TEST_ASSERT(error != NULL, "Expected #%d %s to fail", i,
+ commit_str);
+ TEST_ASSERT(scenario[i].exp_err ==
+ rd_kafka_error_code(error),
+ "Scenario #%d: expected %s, not %s", i,
+ rd_kafka_err2name(scenario[i].exp_err),
+ rd_kafka_error_name(error));
+ TEST_ASSERT(
+ scenario[i].exp_retriable ==
+ (rd_bool_t)rd_kafka_error_is_retriable(error),
+ "Scenario #%d: retriable mismatch", i);
+ TEST_ASSERT(
+ scenario[i].exp_abortable ==
+ (rd_bool_t)rd_kafka_error_txn_requires_abort(
+ error),
+ "Scenario #%d: abortable mismatch", i);
+ TEST_ASSERT(
+ scenario[i].exp_fatal ==
+ (rd_bool_t)rd_kafka_error_is_fatal(error),
+ "Scenario #%d: fatal mismatch", i);
+
+ /* Handle errors according to the error flags */
+ if (rd_kafka_error_is_fatal(error)) {
+ TEST_SAY("Fatal error, destroying producer\n");
+ rd_kafka_error_destroy(error);
+ rd_kafka_destroy(rk);
+ rk = NULL; /* Will be re-created on the next
+ * loop iteration. */
+
+ } else if (rd_kafka_error_txn_requires_abort(error)) {
+ rd_kafka_error_destroy(error);
+ TEST_SAY(
+ "Abortable error, "
+ "aborting transaction\n");
+ TEST_CALL_ERROR__(
+ rd_kafka_abort_transaction(rk, -1));
+
+ } else if (rd_kafka_error_is_retriable(error)) {
+ rd_kafka_error_destroy(error);
+ TEST_SAY("Retriable error, retrying %s once\n",
+ commit_str);
+ if (commit)
+ TEST_CALL_ERROR__(
+ rd_kafka_commit_transaction(rk,
+ 5000));
+ else
+ TEST_CALL_ERROR__(
+ rd_kafka_abort_transaction(rk,
+ 5000));
+ } else {
+ TEST_FAIL(
+ "Scenario #%d %s: "
+ "Permanent error without enough "
+ "hints to proceed: %s\n",
+ i, commit_str,
+ rd_kafka_error_string(error));
+ }
+ }
+ }
+
+ /* All done */
+ if (rk)
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test that the commit/abort works properly with infinite timeout.
+ */
+static void do_test_txn_endtxn_infinite(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster = NULL;
+ const char *txnid = "myTxnId";
+ int i;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ for (i = 0; i < 2; i++) {
+ rd_bool_t commit = i == 0;
+ const char *commit_str = commit ? "commit" : "abort";
+ rd_kafka_error_t *error;
+ test_timing_t t_call;
+
+ /* Messages will fail on as the transaction fails,
+ * ignore the DR error */
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
+ RD_KAFKA_V_END));
+
+ /*
+ * Commit/abort transaction, first with som retriable failures,
+ * then success.
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_EndTxn, 10,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
+
+ rd_sleep(1);
+
+ TIMING_START(&t_call, "%s_transaction()", commit_str);
+ if (commit)
+ error = rd_kafka_commit_transaction(rk, -1);
+ else
+ error = rd_kafka_abort_transaction(rk, -1);
+ TIMING_STOP(&t_call);
+
+ TEST_SAY("%s returned %s\n", commit_str,
+ error ? rd_kafka_error_string(error) : "success");
+
+ TEST_ASSERT(!error, "Expected %s to succeed, got %s",
+ commit_str, rd_kafka_error_string(error));
+ }
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test that the commit/abort user timeout is honoured.
+ */
+static void do_test_txn_endtxn_timeout(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster = NULL;
+ const char *txnid = "myTxnId";
+ int i;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ for (i = 0; i < 2; i++) {
+ rd_bool_t commit = i == 0;
+ const char *commit_str = commit ? "commit" : "abort";
+ rd_kafka_error_t *error;
+ test_timing_t t_call;
+
+ /* Messages will fail as the transaction fails,
+ * ignore the DR error */
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
+ RD_KAFKA_V_END));
+
+ /*
+ * Commit/abort transaction, first with some retriable failures
+ * whos retries exceed the user timeout.
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_EndTxn, 10,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
+
+ rd_sleep(1);
+
+ TIMING_START(&t_call, "%s_transaction()", commit_str);
+ if (commit)
+ error = rd_kafka_commit_transaction(rk, 100);
+ else
+ error = rd_kafka_abort_transaction(rk, 100);
+ TIMING_STOP(&t_call);
+
+ TEST_SAY_ERROR(error, "%s returned: ", commit_str);
+ TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
+ TEST_ASSERT(
+ rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected %s to fail with timeout, not %s: %s", commit_str,
+ rd_kafka_error_name(error), rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_is_retriable(error),
+ "%s failure should raise a retriable error",
+ commit_str);
+ rd_kafka_error_destroy(error);
+
+ /* Now call it again with an infinite timeout, should work. */
+ TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
+ if (commit)
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ else
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+ TIMING_STOP(&t_call);
+ }
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test commit/abort inflight timeout behaviour, which should result
+ * in a retriable error.
+ */
+static void do_test_txn_endtxn_timeout_inflight(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster = NULL;
+ const char *txnid = "myTxnId";
+ int32_t coord_id = 1;
+ int i;
+
+ SUB_TEST();
+
+ allowed_error = RD_KAFKA_RESP_ERR__TIMED_OUT;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+
+ rk = create_txn_producer(&mcluster, txnid, 1, "transaction.timeout.ms",
+ "5000", NULL);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ for (i = 0; i < 2; i++) {
+ rd_bool_t commit = i == 0;
+ const char *commit_str = commit ? "commit" : "abort";
+ rd_kafka_error_t *error;
+ test_timing_t t_call;
+
+ /* Messages will fail as the transaction fails,
+ * ignore the DR error */
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_VALUE("hi", 2),
+ RD_KAFKA_V_END));
+
+ /* Let EndTxn & EndTxn retry timeout */
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_EndTxn, 2,
+ RD_KAFKA_RESP_ERR_NO_ERROR, 10000,
+ RD_KAFKA_RESP_ERR_NO_ERROR, 10000);
+
+ rd_sleep(1);
+
+ TIMING_START(&t_call, "%s_transaction()", commit_str);
+ if (commit)
+ error = rd_kafka_commit_transaction(rk, 4000);
+ else
+ error = rd_kafka_abort_transaction(rk, 4000);
+ TIMING_STOP(&t_call);
+
+ TEST_SAY_ERROR(error, "%s returned: ", commit_str);
+ TEST_ASSERT(error != NULL, "Expected %s to fail", commit_str);
+ TEST_ASSERT(
+ rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected %s to fail with timeout, not %s: %s", commit_str,
+ rd_kafka_error_name(error), rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_is_retriable(error),
+ "%s failure should raise a retriable error",
+ commit_str);
+ rd_kafka_error_destroy(error);
+
+ /* Now call it again with an infinite timeout, should work. */
+ TIMING_START(&t_call, "%s_transaction() nr 2", commit_str);
+ if (commit)
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ else
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+ TIMING_STOP(&t_call);
+ }
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+ test_curr->is_fatal_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test that EndTxn is properly sent for aborted transactions
+ * even if AddOffsetsToTxnRequest was retried.
+ * This is a check for a txn_req_cnt bug.
+ */
+static void do_test_txn_req_cnt(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ const char *txnid = "myTxnId";
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+ /* Messages will fail on abort(), ignore the DR error */
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /*
+ * Send some arbitrary offsets, first with some failures, then
+ * succeed.
+ */
+ offsets = rd_kafka_topic_partition_list_new(2);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 40)->offset =
+ 999999111;
+
+ rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_AddOffsetsToTxn,
+ 2,
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_TxnOffsetCommit, 2,
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test abortable errors using mock broker error injections
+ * and code coverage checks.
+ */
+static void do_test_txn_requires_abort_errors(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ int r;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /*
+ * 1. Fail on produce
+ */
+ TEST_SAY("1. Fail on produce\n");
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ /* Wait for messages to fail */
+ test_flush(rk, 5000);
+
+ /* Any other transactional API should now raise an error */
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ error =
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+ TEST_ASSERT(error, "expected error");
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+ "expected abortable error, not %s",
+ rd_kafka_error_string(error));
+ TEST_SAY("Error %s: %s\n", rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ /*
+ * 2. Restart transaction and fail on AddPartitionsToTxn
+ */
+ TEST_SAY("2. Fail on AddPartitionsToTxn\n");
+
+ /* First refresh proper Metadata to clear the topic's auth error,
+ * otherwise the produce() below will fail immediately. */
+ r = test_get_partition_count(rk, "mytopic", 5000);
+ TEST_ASSERT(r > 0, "Expected topic %s to exist", "mytopic");
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_AddPartitionsToTxn, 1,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ error = rd_kafka_commit_transaction(rk, 5000);
+ TEST_ASSERT(error, "commit_transaction should have failed");
+ TEST_SAY("commit_transaction() error %s: %s\n",
+ rd_kafka_error_name(error), rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ /*
+ * 3. Restart transaction and fail on AddOffsetsToTxn
+ */
+ TEST_SAY("3. Fail on AddOffsetsToTxn\n");
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_AddOffsetsToTxn, 1,
+ RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED);
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ error =
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+ TEST_ASSERT(error, "Expected send_offsets..() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+ "expected send_offsets_to_transaction() to fail with "
+ "group auth error: not %s",
+ rd_kafka_error_name(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+
+ error = rd_kafka_commit_transaction(rk, 5000);
+ TEST_ASSERT(error, "commit_transaction should have failed");
+ rd_kafka_error_destroy(error);
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test error handling and recover for when broker goes down during
+ * an ongoing transaction.
+ */
+static void do_test_txn_broker_down_in_txn(rd_bool_t down_coord) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ int32_t coord_id, leader_id, down_id;
+ const char *down_what;
+ rd_kafka_resp_err_t err;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ int msgcnt = 1000;
+ int remains = 0;
+
+ /* Assign coordinator and leader to two different brokers */
+ coord_id = 1;
+ leader_id = 2;
+ if (down_coord) {
+ down_id = coord_id;
+ down_what = "coordinator";
+ } else {
+ down_id = leader_id;
+ down_what = "leader";
+ }
+
+ SUB_TEST_QUICK("Test %s down", down_what);
+
+ rk = create_txn_producer(&mcluster, transactional_id, 3, NULL);
+
+ /* Broker down is not a test-failing error */
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ coord_id);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);
+
+ /* Start transactioning */
+ TEST_SAY("Starting transaction\n");
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt / 2, NULL, 0, &remains);
+
+ TEST_SAY("Bringing down %s %" PRId32 "\n", down_what, down_id);
+ rd_kafka_mock_broker_set_down(mcluster, down_id);
+
+ rd_kafka_flush(rk, 3000);
+
+ /* Produce remaining messages */
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA,
+ msgcnt / 2, msgcnt / 2, NULL, 0, &remains);
+
+ rd_sleep(2);
+
+ TEST_SAY("Bringing up %s %" PRId32 "\n", down_what, down_id);
+ rd_kafka_mock_broker_set_up(mcluster, down_id);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+ TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
+
+ rd_kafka_destroy(rk);
+
+ test_curr->is_fatal_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Advance the coord_id to the next broker.
+ */
+static void set_next_coord(rd_kafka_mock_cluster_t *mcluster,
+ const char *transactional_id,
+ int broker_cnt,
+ int32_t *coord_idp) {
+ int32_t new_coord_id;
+
+ new_coord_id = 1 + ((*coord_idp) % (broker_cnt));
+ TEST_SAY("Changing transaction coordinator from %" PRId32 " to %" PRId32
+ "\n",
+ *coord_idp, new_coord_id);
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ new_coord_id);
+
+ *coord_idp = new_coord_id;
+}
+
+/**
+ * @brief Switch coordinator during a transaction.
+ *
+ */
+static void do_test_txn_switch_coordinator(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ int32_t coord_id;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ const int broker_cnt = 5;
+ const int iterations = 20;
+ int i;
+
+ test_timeout_set(iterations * 10);
+
+ SUB_TEST("Test switching coordinators");
+
+ rk = create_txn_producer(&mcluster, transactional_id, broker_cnt, NULL);
+
+ coord_id = 1;
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ coord_id);
+
+ /* Start transactioning */
+ TEST_SAY("Starting transaction\n");
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ for (i = 0; i < iterations; i++) {
+ const int msgcnt = 100;
+ int remains = 0;
+
+ set_next_coord(mcluster, transactional_id, broker_cnt,
+ &coord_id);
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt / 2, NULL, 0);
+
+ if (!(i % 3))
+ set_next_coord(mcluster, transactional_id, broker_cnt,
+ &coord_id);
+
+ /* Produce remaining messages */
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA,
+ msgcnt / 2, msgcnt / 2, NULL, 0,
+ &remains);
+
+ if ((i & 1) || !(i % 8))
+ set_next_coord(mcluster, transactional_id, broker_cnt,
+ &coord_id);
+
+
+ if (!(i % 5)) {
+ test_curr->ignore_dr_err = rd_false;
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+ } else {
+ test_curr->ignore_dr_err = rd_true;
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+ }
+ }
+
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Switch coordinator during a transaction when AddOffsetsToTxn
+ * are sent. #3571.
+ */
+static void do_test_txn_switch_coordinator_refresh(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ SUB_TEST("Test switching coordinators (refresh)");
+
+ rk = create_txn_producer(&mcluster, transactional_id, 3, NULL);
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ 1);
+
+ /* Start transactioning */
+ TEST_SAY("Starting transaction\n");
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /* Switch the coordinator so that AddOffsetsToTxnRequest
+ * will respond with NOT_COORDINATOR. */
+ TEST_SAY("Switching to coordinator 2\n");
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ 2);
+
+ /*
+ * Send some arbitrary offsets.
+ */
+ offsets = rd_kafka_topic_partition_list_new(4);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 29)->offset =
+ 99999;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(rd_kafka_send_offsets_to_transaction(
+ rk, offsets, cgmetadata, 20 * 1000));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+
+ /* Produce some messages */
+ test_produce_msgs2(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10, NULL, 0);
+
+ /* And commit the transaction */
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test fatal error handling when transactions are not supported
+ * by the broker.
+ */
+static void do_test_txns_not_supported(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+
+ SUB_TEST_QUICK();
+
+ test_conf_init(&conf, NULL, 10);
+
+ test_conf_set(conf, "transactional.id", "myxnid");
+ test_conf_set(conf, "bootstrap.servers", ",");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ /* Create mock cluster */
+ mcluster = rd_kafka_mock_cluster_new(rk, 3);
+
+ /* Disable InitProducerId */
+ rd_kafka_mock_set_apiversion(mcluster, 22 /*InitProducerId*/, -1, -1);
+
+
+ rd_kafka_brokers_add(rk, rd_kafka_mock_cluster_bootstraps(mcluster));
+
+
+
+ error = rd_kafka_init_transactions(rk, 5 * 1000);
+ TEST_SAY("init_transactions() returned %s: %s\n",
+ error ? rd_kafka_error_name(error) : "success",
+ error ? rd_kafka_error_string(error) : "success");
+
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE,
+ "Expected init_transactions() to fail with %s, not %s: %s",
+ rd_kafka_err2name(RD_KAFKA_RESP_ERR__UNSUPPORTED_FEATURE),
+ rd_kafka_error_name(error), rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("test"),
+ RD_KAFKA_V_KEY("test", 4), RD_KAFKA_V_END);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__FATAL,
+ "Expected producev() to fail with %s, not %s",
+ rd_kafka_err2name(RD_KAFKA_RESP_ERR__FATAL),
+ rd_kafka_err2name(err));
+
+ rd_kafka_mock_cluster_destroy(mcluster);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief CONCURRENT_TRANSACTION on AddOffsets.. should be retried.
+ */
+static void do_test_txns_send_offsets_concurrent_is_retried(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ /* Wait for messages to be delivered */
+ test_flush(rk, 5000);
+
+
+ /*
+ * Have AddOffsetsToTxn fail but eventually succeed due to
+ * infinite retries.
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_AddOffsetsToTxn,
+ 1 + 5, /* first request + some retries */
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify that send_offsets_to_transaction() with no eligible offsets
+ * is handled properly - the call should succeed immediately and be
+ * repeatable.
+ */
+static void do_test_txns_send_offsets_non_eligible(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ /* Wait for messages to be delivered */
+ test_flush(rk, 5000);
+
+ /* Empty offsets list */
+ offsets = rd_kafka_topic_partition_list_new(0);
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+ /* Now call it again, should also succeed. */
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify that request timeouts don't cause crash (#2913).
+ */
+static void do_test_txns_no_timeout_crash(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ SUB_TEST_QUICK();
+
+ rk =
+ create_txn_producer(&mcluster, "txnid", 3, "socket.timeout.ms",
+ "1000", "transaction.timeout.ms", "5000", NULL);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ test_flush(rk, -1);
+
+ /* Delay all broker connections */
+ if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 2000)) ||
+ (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 2000)) ||
+ (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 2000)))
+ TEST_FAIL("Failed to set broker RTT: %s",
+ rd_kafka_err2str(err));
+
+ /* send_offsets..() should now time out */
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ error =
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+ TEST_ASSERT(error, "Expected send_offsets..() to fail");
+ TEST_SAY("send_offsets..() failed with %serror: %s\n",
+ rd_kafka_error_is_retriable(error) ? "retriable " : "",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "expected send_offsets_to_transaction() to fail with "
+ "timeout, not %s",
+ rd_kafka_error_name(error));
+ TEST_ASSERT(rd_kafka_error_is_retriable(error),
+ "expected send_offsets_to_transaction() to fail with "
+ "a retriable error");
+ rd_kafka_error_destroy(error);
+
+ /* Reset delay and try again */
+ if ((err = rd_kafka_mock_broker_set_rtt(mcluster, 1, 0)) ||
+ (err = rd_kafka_mock_broker_set_rtt(mcluster, 2, 0)) ||
+ (err = rd_kafka_mock_broker_set_rtt(mcluster, 3, 0)))
+ TEST_FAIL("Failed to reset broker RTT: %s",
+ rd_kafka_err2str(err));
+
+ TEST_SAY("Retrying send_offsets..()\n");
+ error =
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+ TEST_ASSERT(!error, "Expected send_offsets..() to succeed, got: %s",
+ rd_kafka_error_string(error));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ /* All done */
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test auth failure handling.
+ */
+static void do_test_txn_auth_failure(int16_t ApiKey,
+ rd_kafka_resp_err_t ErrorCode) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+
+ SUB_TEST_QUICK("ApiKey=%s ErrorCode=%s", rd_kafka_ApiKey2str(ApiKey),
+ rd_kafka_err2name(ErrorCode));
+
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+ rd_kafka_mock_push_request_errors(mcluster, ApiKey, 1, ErrorCode);
+
+ error = rd_kafka_init_transactions(rk, 5000);
+ TEST_ASSERT(error, "Expected init_transactions() to fail");
+
+ TEST_SAY("init_transactions() failed: %s: %s\n",
+ rd_kafka_err2name(rd_kafka_error_code(error)),
+ rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_code(error) == ErrorCode,
+ "Expected error %s, not %s", rd_kafka_err2name(ErrorCode),
+ rd_kafka_err2name(rd_kafka_error_code(error)));
+ TEST_ASSERT(rd_kafka_error_is_fatal(error),
+ "Expected error to be fatal");
+ TEST_ASSERT(!rd_kafka_error_is_retriable(error),
+ "Expected error to not be retriable");
+ rd_kafka_error_destroy(error);
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Issue #3041: Commit fails due to message flush() taking too long,
+ * eventually resulting in an unabortable error and failure to
+ * re-init the transactional producer.
+ */
+static void do_test_txn_flush_timeout(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ rd_kafka_error_t *error;
+ const char *txnid = "myTxnId";
+ const char *topic = "myTopic";
+ const int32_t coord_id = 2;
+ int msgcounter = 0;
+ rd_bool_t is_retry = rd_false;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, txnid, 3, "message.timeout.ms",
+ "10000", "transaction.timeout.ms", "10000",
+ /* Speed up coordinator reconnect */
+ "reconnect.backoff.max.ms", "1000", NULL);
+
+
+ /* Broker down is not a test-failing error */
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+
+ rd_kafka_mock_topic_create(mcluster, topic, 2, 3);
+
+ /* Set coordinator so we can disconnect it later */
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, coord_id);
+
+ /*
+ * Init transactions
+ */
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+retry:
+ if (!is_retry) {
+ /* First attempt should fail. */
+
+ test_curr->ignore_dr_err = rd_true;
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+ /* Assign invalid partition leaders for some partitions so
+ * that messages will not be delivered. */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, -1);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 1, -1);
+
+ } else {
+ /* The retry should succeed */
+ test_curr->ignore_dr_err = rd_false;
+ test_curr->exp_dr_err = is_retry
+ ? RD_KAFKA_RESP_ERR_NO_ERROR
+ : RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
+ }
+
+
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /*
+ * Produce some messages to specific partitions and random.
+ */
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 100, NULL, 10,
+ &msgcounter);
+ test_produce_msgs2_nowait(rk, topic, 1, 0, 0, 100, NULL, 10,
+ &msgcounter);
+ test_produce_msgs2_nowait(rk, topic, RD_KAFKA_PARTITION_UA, 0, 0, 100,
+ NULL, 10, &msgcounter);
+
+
+ /*
+ * Send some arbitrary offsets.
+ */
+ offsets = rd_kafka_topic_partition_list_new(4);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 49)->offset =
+ 999999111;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset =
+ 999;
+ rd_kafka_topic_partition_list_add(offsets, "srctopic64", 34)->offset =
+ 123456789;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ rd_sleep(2);
+
+ if (!is_retry) {
+ /* Now disconnect the coordinator. */
+ TEST_SAY("Disconnecting transaction coordinator %" PRId32 "\n",
+ coord_id);
+ rd_kafka_mock_broker_set_down(mcluster, coord_id);
+ }
+
+ /*
+ * Start committing.
+ */
+ error = rd_kafka_commit_transaction(rk, -1);
+
+ if (!is_retry) {
+ TEST_ASSERT(error != NULL, "Expected commit to fail");
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ } else {
+ TEST_ASSERT(!error, "Expected commit to succeed, not: %s",
+ rd_kafka_error_string(error));
+ }
+
+ if (!is_retry) {
+ /*
+ * Bring the coordinator back up.
+ */
+ rd_kafka_mock_broker_set_up(mcluster, coord_id);
+ rd_sleep(2);
+
+ /*
+ * Abort, and try again, this time without error.
+ */
+ TEST_SAY("Aborting and retrying\n");
+ is_retry = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 60000));
+ goto retry;
+ }
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief ESC-4424: rko is reused in response handler after destroy in coord_req
+ * sender due to bad state.
+ *
+ * This is somewhat of a race condition so we need to perform a couple of
+ * iterations before it hits, usually 2 or 3, so we try at least 15 times.
+ */
+static void do_test_txn_coord_req_destroy(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ int i;
+ int errcnt = 0;
+
+ SUB_TEST();
+
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ for (i = 0; i < 15; i++) {
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ test_timeout_set(10);
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /*
+ * Inject errors to trigger retries
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_AddPartitionsToTxn,
+ 2, /* first request + number of internal retries */
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS,
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_AddOffsetsToTxn,
+ 1, /* first request + number of internal retries */
+ RD_KAFKA_RESP_ERR_CONCURRENT_TRANSACTIONS);
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2),
+ RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 4,
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED);
+ /* FIXME: When KIP-360 is supported, add this error:
+ * RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER */
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2),
+ RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+
+ /*
+ * Send offsets to transaction
+ */
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)
+ ->offset = 12;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ error = rd_kafka_send_offsets_to_transaction(rk, offsets,
+ cgmetadata, -1);
+
+ TEST_SAY("send_offsets_to_transaction() #%d: %s\n", i,
+ rd_kafka_error_string(error));
+
+ /* As we can't control the exact timing and sequence
+ * of requests this sometimes fails and sometimes succeeds,
+ * but we run the test enough times to trigger at least
+ * one failure. */
+ if (error) {
+ TEST_SAY(
+ "send_offsets_to_transaction() #%d "
+ "failed (expectedly): %s\n",
+ i, rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+ "Expected abortable error for #%d", i);
+ rd_kafka_error_destroy(error);
+ errcnt++;
+ }
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ /* Allow time for internal retries */
+ rd_sleep(2);
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, 5000));
+ }
+
+ TEST_ASSERT(errcnt > 0,
+ "Expected at least one send_offets_to_transaction() "
+ "failure");
+
+ /* All done */
+
+ rd_kafka_destroy(rk);
+}
+
+
+static rd_atomic32_t multi_find_req_cnt;
+
+static rd_kafka_resp_err_t
+multi_find_on_response_received_cb(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque) {
+ rd_kafka_mock_cluster_t *mcluster = rd_kafka_handle_mock_cluster(rk);
+ rd_bool_t done = rd_atomic32_get(&multi_find_req_cnt) > 10000;
+
+ if (ApiKey != RD_KAFKAP_AddOffsetsToTxn || done)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
+ ", ApiKey %hd, CorrId %d, rtt %.2fms, %s: %s\n",
+ rd_kafka_name(rk), brokername, brokerid, ApiKey, CorrId,
+ rtt != -1 ? (float)rtt / 1000.0 : 0.0,
+ done ? "already done" : "not done yet",
+ rd_kafka_err2name(err));
+
+
+ if (rd_atomic32_add(&multi_find_req_cnt, 1) == 1) {
+ /* Trigger a broker down/up event, which in turns
+ * triggers the coord_req_fsm(). */
+ rd_kafka_mock_broker_set_down(mcluster, 2);
+ rd_kafka_mock_broker_set_up(mcluster, 2);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ /* Trigger a broker down/up event, which in turns
+ * triggers the coord_req_fsm(). */
+ rd_kafka_mock_broker_set_down(mcluster, 3);
+ rd_kafka_mock_broker_set_up(mcluster, 3);
+
+ /* Clear the downed broker's latency so that it reconnects
+ * quickly, otherwise the ApiVersionRequest will be delayed and
+ * this will in turn delay the -> UP transition that we need to
+ * trigger the coord_reqs. */
+ rd_kafka_mock_broker_set_rtt(mcluster, 3, 0);
+
+ /* Only do this down/up once */
+ rd_atomic32_add(&multi_find_req_cnt, 10000);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief ESC-4444: multiple FindCoordinatorRequests are sent referencing
+ * the same coord_req_t, but the first one received will destroy
+ * the coord_req_t object and make the subsequent FindCoordingResponses
+ * reference a freed object.
+ *
+ * What we want to achieve is this sequence:
+ * 1. AddOffsetsToTxnRequest + Response which..
+ * 2. Triggers TxnOffsetCommitRequest, but the coordinator is not known, so..
+ * 3. Triggers a FindCoordinatorRequest
+ * 4. FindCoordinatorResponse from 3 is received ..
+ * 5. A TxnOffsetCommitRequest is sent from coord_req_fsm().
+ * 6. Another broker changing state to Up triggers coord reqs again, which..
+ * 7. Triggers a second TxnOffsetCommitRequest from coord_req_fsm().
+ * 7. FindCoordinatorResponse from 5 is received, references the destroyed rko
+ * and crashes.
+ */
+static void do_test_txn_coord_req_multi_find(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ const char *txnid = "txnid", *groupid = "mygroupid", *topic = "mytopic";
+ int i;
+
+ SUB_TEST();
+
+ rd_atomic32_init(&multi_find_req_cnt, 0);
+
+ on_response_received_cb = multi_find_on_response_received_cb;
+ rk = create_txn_producer(&mcluster, txnid, 3,
+ /* Need connections to all brokers so we
+ * can trigger coord_req_fsm events
+ * by toggling connections. */
+ "enable.sparse.connections", "false",
+ /* Set up on_response_received interceptor */
+ "on_response_received", "", NULL);
+
+ /* Let broker 1 be both txn and group coordinator
+ * so that the group coordinator connection is up when it is time
+ * send the TxnOffsetCommitRequest. */
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+
+ /* Set broker 1, 2, and 3 as leaders for a partition each and
+ * later produce to both partitions so we know there's a connection
+ * to all brokers. */
+ rd_kafka_mock_topic_create(mcluster, topic, 3, 1);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 2);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 3);
+
+ /* Broker down is not a test-failing error */
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ for (i = 0; i < 3; i++) {
+ err = rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(i),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+ }
+
+ test_flush(rk, 5000);
+
+ /*
+ * send_offsets_to_transaction() will query for the group coordinator,
+ * we need to make those requests slow so that multiple requests are
+ * sent.
+ */
+ for (i = 1; i <= 3; i++)
+ rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 4000);
+
+ /*
+ * Send offsets to transaction
+ */
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 12;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new(groupid);
+
+ error =
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1);
+
+ TEST_SAY("send_offsets_to_transaction() %s\n",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(!error, "send_offsets_to_transaction() failed: %s",
+ rd_kafka_error_string(error));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ /* Clear delay */
+ for (i = 1; i <= 3; i++)
+ rd_kafka_mock_broker_set_rtt(mcluster, (int32_t)i, 0);
+
+ rd_sleep(5);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+ /* All done */
+
+ TEST_ASSERT(rd_atomic32_get(&multi_find_req_cnt) > 10000,
+ "on_request_sent interceptor did not trigger properly");
+
+ rd_kafka_destroy(rk);
+
+ on_response_received_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief ESC-4410: adding producer partitions gradually will trigger multiple
+ * AddPartitionsToTxn requests. Due to a bug the third partition to be
+ * registered would hang in PEND_TXN state.
+ *
+ * Trigger this behaviour by having two outstanding AddPartitionsToTxn requests
+ * at the same time, followed by a need for a third:
+ *
+ * 1. Set coordinator broker rtt high (to give us time to produce).
+ * 2. Produce to partition 0, will trigger first AddPartitionsToTxn.
+ * 3. Produce to partition 1, will trigger second AddPartitionsToTxn.
+ * 4. Wait for second AddPartitionsToTxn response.
+ * 5. Produce to partition 2, should trigger AddPartitionsToTxn, but bug
+ * causes it to be stale in pending state.
+ */
+
+static rd_atomic32_t multi_addparts_resp_cnt;
+static rd_kafka_resp_err_t
+multi_addparts_response_received_cb(rd_kafka_t *rk,
+ int sockfd,
+ const char *brokername,
+ int32_t brokerid,
+ int16_t ApiKey,
+ int16_t ApiVersion,
+ int32_t CorrId,
+ size_t size,
+ int64_t rtt,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque) {
+
+ if (ApiKey == RD_KAFKAP_AddPartitionsToTxn) {
+ TEST_SAY("on_response_received_cb: %s: %s: brokerid %" PRId32
+ ", ApiKey %hd, CorrId %d, rtt %.2fms, count %" PRId32
+ ": %s\n",
+ rd_kafka_name(rk), brokername, brokerid, ApiKey,
+ CorrId, rtt != -1 ? (float)rtt / 1000.0 : 0.0,
+ rd_atomic32_get(&multi_addparts_resp_cnt),
+ rd_kafka_err2name(err));
+
+ rd_atomic32_add(&multi_addparts_resp_cnt, 1);
+ }
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static void do_test_txn_addparts_req_multi(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *txnid = "txnid", *topic = "mytopic";
+ int32_t txn_coord = 2;
+
+ SUB_TEST();
+
+ rd_atomic32_init(&multi_addparts_resp_cnt, 0);
+
+ on_response_received_cb = multi_addparts_response_received_cb;
+ rk = create_txn_producer(&mcluster, txnid, 3, "linger.ms", "0",
+ "message.timeout.ms", "9000",
+ /* Set up on_response_received interceptor */
+ "on_response_received", "", NULL);
+
+ /* Let broker 1 be txn coordinator. */
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+ txn_coord);
+
+ rd_kafka_mock_topic_create(mcluster, topic, 3, 1);
+
+ /* Set partition leaders to non-txn-coord broker so they wont
+ * be affected by rtt delay */
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 1);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 1, 1);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 2, 1);
+
+
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ /*
+ * Run one transaction first to let the client familiarize with
+ * the topic, this avoids metadata lookups, etc, when the real
+ * test is run.
+ */
+ TEST_SAY("Running seed transaction\n");
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+ TEST_CALL_ERR__(rd_kafka_producev(rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_VALUE("seed", 4),
+ RD_KAFKA_V_END));
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 5000));
+
+
+ /*
+ * Now perform test transaction with rtt delays
+ */
+ TEST_SAY("Running test transaction\n");
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /* Reset counter */
+ rd_atomic32_set(&multi_addparts_resp_cnt, 0);
+
+ /* Add latency to txn coordinator so we can pace our produce() calls */
+ rd_kafka_mock_broker_set_rtt(mcluster, txn_coord, 1000);
+
+ /* Produce to partition 0 */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ rd_usleep(500 * 1000, NULL);
+
+ /* Produce to partition 1 */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(1),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ TEST_SAY("Waiting for two AddPartitionsToTxnResponse\n");
+ while (rd_atomic32_get(&multi_addparts_resp_cnt) < 2)
+ rd_usleep(10 * 1000, NULL);
+
+ TEST_SAY("%" PRId32 " AddPartitionsToTxnResponses seen\n",
+ rd_atomic32_get(&multi_addparts_resp_cnt));
+
+ /* Produce to partition 2, this message will hang in
+ * queue if the bug is not fixed. */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(2),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ /* Allow some extra time for things to settle before committing
+ * transaction. */
+ rd_usleep(1000 * 1000, NULL);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, 10 * 1000));
+
+ /* All done */
+ rd_kafka_destroy(rk);
+
+ on_response_received_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Test handling of OffsetFetchRequest returning UNSTABLE_OFFSET_COMMIT.
+ *
+ * There are two things to test;
+ * - OffsetFetch triggered by committed() (and similar code paths)
+ * - OffsetFetch triggered by assign()
+ */
+static void do_test_unstable_offset_commit(void) {
+ rd_kafka_t *rk, *c;
+ rd_kafka_conf_t *c_conf;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_topic_partition_list_t *offsets;
+ const char *topic = "srctopic4";
+ const int msgcnt = 100;
+ const int64_t offset_to_commit = msgcnt / 2;
+ int i;
+ int remains = 0;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+ test_conf_init(&c_conf, NULL, 0);
+ test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(c_conf, "bootstrap.servers",
+ rd_kafka_mock_cluster_bootstraps(mcluster));
+ test_conf_set(c_conf, "enable.partition.eof", "true");
+ test_conf_set(c_conf, "auto.offset.reset", "error");
+ c = test_create_consumer("mygroup", NULL, c_conf, NULL);
+
+ rd_kafka_mock_topic_create(mcluster, topic, 2, 3);
+
+ /* Produce some messages to the topic so that the consumer has
+ * something to read. */
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt, NULL, 0,
+ &remains);
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+
+ /* Commit offset */
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
+ offset_to_commit;
+ TEST_CALL_ERR__(rd_kafka_commit(c, offsets, 0 /*sync*/));
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ /* Retrieve offsets by calling committed().
+ *
+ * Have OffsetFetch fail and retry, on the first iteration
+ * the API timeout is higher than the amount of time the retries will
+ * take and thus succeed, and on the second iteration the timeout
+ * will be lower and thus fail. */
+ for (i = 0; i < 2; i++) {
+ rd_kafka_resp_err_t err;
+ rd_kafka_resp_err_t exp_err =
+ i == 0 ? RD_KAFKA_RESP_ERR_NO_ERROR
+ : RD_KAFKA_RESP_ERR__TIMED_OUT;
+ int timeout_ms = exp_err ? 200 : 5 * 1000;
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_OffsetFetch,
+ 1 + 5, /* first request + some retries */
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, topic, 0);
+
+ err = rd_kafka_committed(c, offsets, timeout_ms);
+
+ TEST_SAY("#%d: committed() returned %s (expected %s)\n", i,
+ rd_kafka_err2name(err), rd_kafka_err2name(exp_err));
+
+ TEST_ASSERT(err == exp_err,
+ "#%d: Expected committed() to return %s, not %s", i,
+ rd_kafka_err2name(exp_err), rd_kafka_err2name(err));
+ TEST_ASSERT(offsets->cnt == 1,
+ "Expected 1 committed offset, not %d",
+ offsets->cnt);
+ if (!exp_err)
+ TEST_ASSERT(offsets->elems[0].offset ==
+ offset_to_commit,
+ "Expected committed offset %" PRId64
+ ", "
+ "not %" PRId64,
+ offset_to_commit, offsets->elems[0].offset);
+ else
+ TEST_ASSERT(offsets->elems[0].offset < 0,
+ "Expected no committed offset, "
+ "not %" PRId64,
+ offsets->elems[0].offset);
+
+ rd_kafka_topic_partition_list_destroy(offsets);
+ }
+
+ TEST_SAY("Phase 2: OffsetFetch lookup through assignment\n");
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, topic, 0)->offset =
+ RD_KAFKA_OFFSET_STORED;
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_OffsetFetch,
+ 1 + 5, /* first request + some retries */
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT,
+ RD_KAFKA_RESP_ERR_UNSTABLE_OFFSET_COMMIT);
+
+ test_consumer_incremental_assign("assign", c, offsets);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ test_consumer_poll_exact("consume", c, 0, 1 /*eof*/, 0, msgcnt / 2,
+ rd_true /*exact counts*/, NULL);
+
+ /* All done */
+ rd_kafka_destroy(c);
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief If a message times out locally before being attempted to send
+ * and commit_transaction() is called, the transaction must not succeed.
+ * https://github.com/confluentinc/confluent-kafka-dotnet/issues/1568
+ */
+static void do_test_commit_after_msg_timeout(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ int32_t coord_id, leader_id;
+ rd_kafka_resp_err_t err;
+ rd_kafka_error_t *error;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ int remains = 0;
+
+ SUB_TEST_QUICK();
+
+ /* Assign coordinator and leader to two different brokers */
+ coord_id = 1;
+ leader_id = 2;
+
+ rk = create_txn_producer(&mcluster, transactional_id, 3,
+ "message.timeout.ms", "5000",
+ "transaction.timeout.ms", "10000", NULL);
+
+ /* Broker down is not a test-failing error */
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 3);
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ coord_id);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, leader_id);
+
+ /* Start transactioning */
+ TEST_SAY("Starting transaction\n");
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ TEST_SAY("Bringing down %" PRId32 "\n", leader_id);
+ rd_kafka_mock_broker_set_down(mcluster, leader_id);
+ rd_kafka_mock_broker_set_down(mcluster, coord_id);
+
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
+
+ error = rd_kafka_commit_transaction(rk, -1);
+ TEST_ASSERT(error != NULL, "expected commit_transaciton() to fail");
+ TEST_SAY_ERROR(error, "commit_transaction() failed (as expected): ");
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+ "Expected txn_requires_abort error");
+ rd_kafka_error_destroy(error);
+
+ /* Bring the brokers up so the abort can complete */
+ rd_kafka_mock_broker_set_up(mcluster, coord_id);
+ rd_kafka_mock_broker_set_up(mcluster, leader_id);
+
+ TEST_SAY("Aborting transaction\n");
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ TEST_ASSERT(remains == 0, "%d message(s) were not flushed\n", remains);
+
+ TEST_SAY("Attempting second transaction, which should succeed\n");
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, 1, NULL, 0, &remains);
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+ TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
+
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+ test_curr->is_fatal_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief #3575: Verify that OUT_OF_ORDER_SEQ does not trigger an epoch bump
+ * during an ongoing transaction.
+ * The transaction should instead enter the abortable state.
+ */
+static void do_test_out_of_order_seq(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ int32_t txn_coord = 1, leader = 2;
+ const char *txnid = "myTxnId";
+ test_timing_t timing;
+ rd_kafka_resp_err_t err;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+ NULL);
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+ txn_coord);
+
+ rd_kafka_mock_partition_set_leader(mcluster, "mytopic", 0, leader);
+
+ test_curr->ignore_dr_err = rd_true;
+ test_curr->is_fatal_cb = NULL;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+
+ /* Produce one seeding message first to get the leader up and running */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+ test_flush(rk, -1);
+
+ /* Let partition leader have a latency of 2 seconds
+ * so that we can have multiple messages in-flight. */
+ rd_kafka_mock_broker_set_rtt(mcluster, leader, 2 * 1000);
+
+ /* Produce a message, let it fail with with different errors,
+ * ending with OUT_OF_ORDER which previously triggered an
+ * Epoch bump. */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 3,
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
+ RD_KAFKA_RESP_ERR_NOT_LEADER_FOR_PARTITION,
+ RD_KAFKA_RESP_ERR_OUT_OF_ORDER_SEQUENCE_NUMBER);
+
+ /* Produce three messages that will be delayed
+ * and have errors injected.*/
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ /* Now sleep a short while so that the messages are processed
+ * by the broker and errors are returned. */
+ TEST_SAY("Sleeping..\n");
+ rd_sleep(5);
+
+ rd_kafka_mock_broker_set_rtt(mcluster, leader, 0);
+
+ /* Produce a fifth message, should fail with ERR__STATE since
+ * the transaction should have entered the abortable state. */
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__STATE,
+ "Expected produce() to fail with ERR__STATE, not %s",
+ rd_kafka_err2name(err));
+ TEST_SAY("produce() failed as expected: %s\n", rd_kafka_err2str(err));
+
+ /* Commit the transaction, should fail with abortable error. */
+ TIMING_START(&timing, "commit_transaction(-1)");
+ error = rd_kafka_commit_transaction(rk, -1);
+ TIMING_STOP(&timing);
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+ rd_kafka_error_string(error));
+
+ TEST_ASSERT(!rd_kafka_error_is_fatal(error),
+ "Did not expect fatal error");
+ TEST_ASSERT(rd_kafka_error_txn_requires_abort(error),
+ "Expected abortable error");
+ rd_kafka_error_destroy(error);
+
+ /* Abort the transaction */
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ /* Run a new transaction without errors to verify that the
+ * producer can recover. */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify lossless delivery if topic disappears from Metadata for awhile.
+ *
+ * If a topic is removed from metadata inbetween transactions, the producer
+ * will remove its partition state for the topic's partitions.
+ * If later the same topic comes back (same topic instance, not a new creation)
+ * then the producer must restore the previously used msgid/BaseSequence
+ * in case the same Epoch is still used, or messages will be silently lost
+ * as they would seem like legit duplicates to the broker.
+ *
+ * Reproduction:
+ * 1. produce msgs to topic, commit transaction.
+ * 2. remove topic from metadata
+ * 3. make sure client updates its metadata, which removes the partition
+ * objects.
+ * 4. restore the topic in metadata
+ * 5. produce new msgs to topic, commit transaction.
+ * 6. consume topic. All messages should be accounted for.
+ */
+static void do_test_topic_disappears_for_awhile(void) {
+ rd_kafka_t *rk, *c;
+ rd_kafka_conf_t *c_conf;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *topic = "mytopic";
+ const char *txnid = "myTxnId";
+ test_timing_t timing;
+ int i;
+ int msgcnt = 0;
+ const int partition_cnt = 10;
+
+ SUB_TEST_QUICK();
+
+ rk = create_txn_producer(
+ &mcluster, txnid, 1, "batch.num.messages", "3", "linger.ms", "100",
+ "topic.metadata.refresh.interval.ms", "2000", NULL);
+
+ rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ for (i = 0; i < 2; i++) {
+ int cnt = 3 * 2 * partition_cnt;
+ rd_bool_t remove_topic = (i % 2) == 0;
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+ while (cnt-- >= 0) {
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_PARTITION(cnt % partition_cnt),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+ msgcnt++;
+ }
+
+ /* Commit the transaction */
+ TIMING_START(&timing, "commit_transaction(-1)");
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ TIMING_STOP(&timing);
+
+
+
+ if (remove_topic) {
+ /* Make it seem the topic is removed, refresh metadata,
+ * and then make the topic available again. */
+ const rd_kafka_metadata_t *md;
+
+ TEST_SAY("Marking topic as non-existent\n");
+
+ rd_kafka_mock_topic_set_error(
+ mcluster, topic,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART);
+
+ TEST_CALL_ERR__(rd_kafka_metadata(rk, 0, NULL, &md,
+ tmout_multip(5000)));
+
+ rd_kafka_metadata_destroy(md);
+
+ rd_sleep(2);
+
+ TEST_SAY("Bringing topic back to life\n");
+ rd_kafka_mock_topic_set_error(
+ mcluster, topic, RD_KAFKA_RESP_ERR_NO_ERROR);
+ }
+ }
+
+ TEST_SAY("Verifying messages by consumtion\n");
+ test_conf_init(&c_conf, NULL, 0);
+ test_conf_set(c_conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(c_conf, "bootstrap.servers",
+ rd_kafka_mock_cluster_bootstraps(mcluster));
+ test_conf_set(c_conf, "enable.partition.eof", "true");
+ test_conf_set(c_conf, "auto.offset.reset", "earliest");
+ c = test_create_consumer("mygroup", NULL, c_conf, NULL);
+
+ test_consumer_subscribe(c, topic);
+ test_consumer_poll_exact("consume", c, 0, partition_cnt, 0, msgcnt,
+ rd_true /*exact*/, NULL);
+ rd_kafka_destroy(c);
+
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test that group coordinator requests can handle an
+ * untimely disconnect.
+ *
+ * The transaction manager makes use of librdkafka coord_req to commit
+ * transaction offsets to the group coordinator.
+ * If the connection to the given group coordinator is not up the
+ * coord_req code will request a connection once, but if this connection fails
+ * there will be no new attempts and the coord_req will idle until either
+ * destroyed or the connection is retried for other reasons.
+ * This in turn stalls the send_offsets_to_transaction() call until the
+ * transaction times out.
+ *
+ * There are two variants to this test based on switch_coord:
+ * - True - Switches the coordinator during the downtime.
+ * The client should detect this and send the request to the
+ * new coordinator.
+ * - False - The coordinator remains on the down broker. Client will reconnect
+ * when down broker comes up again.
+ */
+struct some_state {
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_bool_t switch_coord;
+ int32_t broker_id;
+ const char *grpid;
+};
+
+static int delayed_up_cb(void *arg) {
+ struct some_state *state = arg;
+ rd_sleep(3);
+ if (state->switch_coord) {
+ TEST_SAY("Switching group coordinator to %" PRId32 "\n",
+ state->broker_id);
+ rd_kafka_mock_coordinator_set(state->mcluster, "group",
+ state->grpid, state->broker_id);
+ } else {
+ TEST_SAY("Bringing up group coordinator %" PRId32 "..\n",
+ state->broker_id);
+ rd_kafka_mock_broker_set_up(state->mcluster, state->broker_id);
+ }
+ return 0;
+}
+
+static void do_test_disconnected_group_coord(rd_bool_t switch_coord) {
+ const char *topic = "mytopic";
+ const char *txnid = "myTxnId";
+ const char *grpid = "myGrpId";
+ const int partition_cnt = 1;
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ struct some_state state = RD_ZERO_INIT;
+ test_timing_t timing;
+ thrd_t thrd;
+ int ret;
+
+ SUB_TEST_QUICK("switch_coord=%s", RD_STR_ToF(switch_coord));
+
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+
+ rk = create_txn_producer(&mcluster, txnid, 3, NULL);
+
+ rd_kafka_mock_topic_create(mcluster, topic, partition_cnt, 1);
+
+ /* Broker 1: txn coordinator
+ * Broker 2: group coordinator
+ * Broker 3: partition leader & backup coord if switch_coord=true */
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid, 1);
+ rd_kafka_mock_coordinator_set(mcluster, "group", grpid, 2);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, 3);
+
+ /* Bring down group coordinator so there are no undesired
+ * connections to it. */
+ rd_kafka_mock_broker_set_down(mcluster, 2);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC(topic), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+ test_flush(rk, -1);
+
+ rd_sleep(1);
+
+ /* Run a background thread that after 3s, which should be enough
+ * to perform the first failed connection attempt, makes the
+ * group coordinator available again. */
+ state.switch_coord = switch_coord;
+ state.mcluster = mcluster;
+ state.grpid = grpid;
+ state.broker_id = switch_coord ? 3 : 2;
+ if (thrd_create(&thrd, delayed_up_cb, &state) != thrd_success)
+ TEST_FAIL("Failed to create thread");
+
+ TEST_SAY("Calling send_offsets_to_transaction()\n");
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 1;
+ cgmetadata = rd_kafka_consumer_group_metadata_new(grpid);
+
+ TIMING_START(&timing, "send_offsets_to_transaction(-1)");
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+ TIMING_STOP(&timing);
+ TIMING_ASSERT(&timing, 0, 10 * 1000 /*10s*/);
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+ thrd_join(thrd, &ret);
+
+ /* Commit the transaction */
+ TIMING_START(&timing, "commit_transaction(-1)");
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ TIMING_STOP(&timing);
+
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+ test_curr->is_fatal_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test that a NULL coordinator is not fatal when
+ * the transactional producer reconnects to the txn coordinator
+ * and the first thing it does is a FindCoordinatorRequest that
+ * fails with COORDINATOR_NOT_AVAILABLE, setting coordinator to NULL.
+ */
+static void do_test_txn_coordinator_null_not_fatal(void) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ rd_kafka_resp_err_t err;
+ int32_t coord_id = 1;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ int msgcnt = 1;
+ int remains = 0;
+
+ SUB_TEST_QUICK();
+
+ /* Broker down is not a test-failing error */
+ allowed_error = RD_KAFKA_RESP_ERR__TRANSPORT;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR__MSG_TIMED_OUT;
+
+ /* One second is the minimum transaction timeout */
+ rk = create_txn_producer(&mcluster, transactional_id, 1,
+ "transaction.timeout.ms", "1000", NULL);
+
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ coord_id);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
+
+ /* Start transactioning */
+ TEST_SAY("Starting transaction\n");
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /* Makes the produce request timeout. */
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_NO_ERROR, 3000);
+
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt, NULL, 0, &remains);
+
+ /* This value is linked to transaction.timeout.ms, needs enough time
+ * so the message times out and a DrainBump sequence is started. */
+ rd_kafka_flush(rk, 1000);
+
+ /* To trigger the error the COORDINATOR_NOT_AVAILABLE response
+ * must come AFTER idempotent state has changed to WaitTransport
+ * but BEFORE it changes to WaitPID. To make it more likely
+ * rd_kafka_txn_coord_timer_start timeout can be changed to 5 ms
+ * in rd_kafka_txn_coord_query, when unable to query for
+ * transaction coordinator.
+ */
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_FindCoordinator, 1,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE, 10);
+
+ /* Coordinator down starts the FindCoordinatorRequest loop. */
+ TEST_SAY("Bringing down coordinator %" PRId32 "\n", coord_id);
+ rd_kafka_mock_broker_set_down(mcluster, coord_id);
+
+ /* Coordinator down for some time. */
+ rd_usleep(100 * 1000, NULL);
+
+ /* When it comes up, the error is triggered, if the preconditions
+ * happen. */
+ TEST_SAY("Bringing up coordinator %" PRId32 "\n", coord_id);
+ rd_kafka_mock_broker_set_up(mcluster, coord_id);
+
+ /* Make sure DRs are received */
+ rd_kafka_flush(rk, 1000);
+
+ error = rd_kafka_commit_transaction(rk, -1);
+
+ TEST_ASSERT(remains == 0, "%d message(s) were not produced\n", remains);
+ TEST_ASSERT(error != NULL, "Expected commit_transaction() to fail");
+ TEST_SAY("commit_transaction() failed (expectedly): %s\n",
+ rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+
+ /* Needs to wait some time before closing to make sure it doesn't go
+ * into TERMINATING state before error is triggered. */
+ rd_usleep(1000 * 1000, NULL);
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ test_curr->is_fatal_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Simple test to make sure the init_transactions() timeout is honoured
+ * and also not infinite.
+ */
+static void do_test_txn_resumable_init(void) {
+ rd_kafka_t *rk;
+ const char *transactional_id = "txnid";
+ rd_kafka_error_t *error;
+ test_timing_t duration;
+
+ SUB_TEST();
+
+ rd_kafka_conf_t *conf;
+
+ test_conf_init(&conf, NULL, 20);
+ test_conf_set(conf, "bootstrap.servers", "");
+ test_conf_set(conf, "transactional.id", transactional_id);
+ test_conf_set(conf, "transaction.timeout.ms", "4000");
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ /* First make sure a lower timeout is honoured. */
+ TIMING_START(&duration, "init_transactions(1000)");
+ error = rd_kafka_init_transactions(rk, 1000);
+ TIMING_STOP(&duration);
+
+ if (error)
+ TEST_SAY("First init_transactions failed (as expected): %s\n",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected _TIMED_OUT, not %s",
+ error ? rd_kafka_error_string(error) : "success");
+ rd_kafka_error_destroy(error);
+
+ TIMING_ASSERT(&duration, 900, 1500);
+
+ TEST_SAY(
+ "Performing second init_transactions() call now with an "
+ "infinite timeout: "
+ "should time out in 2 x transaction.timeout.ms\n");
+
+ TIMING_START(&duration, "init_transactions(infinite)");
+ error = rd_kafka_init_transactions(rk, -1);
+ TIMING_STOP(&duration);
+
+ if (error)
+ TEST_SAY("Second init_transactions failed (as expected): %s\n",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_code(error) == RD_KAFKA_RESP_ERR__TIMED_OUT,
+ "Expected _TIMED_OUT, not %s",
+ error ? rd_kafka_error_string(error) : "success");
+ rd_kafka_error_destroy(error);
+
+ TIMING_ASSERT(&duration, 2 * 4000 - 500, 2 * 4000 + 500);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Retries a transaction call until it succeeds or returns a
+ * non-retriable error - which will cause the test to fail.
+ *
+ * @param intermed_calls Is a block of code that will be called after each
+ * retriable failure of \p call.
+ */
+#define RETRY_TXN_CALL__(call, intermed_calls) \
+ do { \
+ rd_kafka_error_t *_error = call; \
+ if (!_error) \
+ break; \
+ TEST_SAY_ERROR(_error, "%s: ", "" #call); \
+ TEST_ASSERT(rd_kafka_error_is_retriable(_error), \
+ "Expected retriable error"); \
+ TEST_SAY("%s failed, retrying in 1 second\n", "" #call); \
+ rd_kafka_error_destroy(_error); \
+ intermed_calls; \
+ rd_sleep(1); \
+ } while (1)
+
+/**
+ * @brief Call \p call and expect it to fail with \p exp_err_code.
+ */
+#define TXN_CALL_EXPECT_ERROR__(call, exp_err_code) \
+ do { \
+ rd_kafka_error_t *_error = call; \
+ TEST_ASSERT(_error != NULL, \
+ "%s: Expected %s error, got success", "" #call, \
+ rd_kafka_err2name(exp_err_code)); \
+ TEST_SAY_ERROR(_error, "%s: ", "" #call); \
+ TEST_ASSERT(rd_kafka_error_code(_error) == exp_err_code, \
+ "%s: Expected %s error, got %s", "" #call, \
+ rd_kafka_err2name(exp_err_code), \
+ rd_kafka_error_name(_error)); \
+ rd_kafka_error_destroy(_error); \
+ } while (0)
+
+
+/**
+ * @brief Simple test to make sure short API timeouts can be safely resumed
+ * by calling the same API again.
+ *
+ * @param do_commit Commit transaction if true, else abort transaction.
+ */
+static void do_test_txn_resumable_calls_timeout(rd_bool_t do_commit) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ int32_t coord_id = 1;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ int msgcnt = 1;
+ int remains = 0;
+
+ SUB_TEST("%s_transaction", do_commit ? "commit" : "abort");
+
+ rk = create_txn_producer(&mcluster, transactional_id, 1, NULL);
+
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ coord_id);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
+
+ TEST_SAY("Starting transaction\n");
+ TEST_SAY("Delaying first two InitProducerIdRequests by 500ms\n");
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_InitProducerId, 2,
+ RD_KAFKA_RESP_ERR_NO_ERROR, 500, RD_KAFKA_RESP_ERR_NO_ERROR, 500);
+
+ RETRY_TXN_CALL__(
+ rd_kafka_init_transactions(rk, 100),
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
+ RD_KAFKA_RESP_ERR__CONFLICT));
+
+ RETRY_TXN_CALL__(rd_kafka_begin_transaction(rk), /*none*/);
+
+
+ TEST_SAY("Delaying ProduceRequests by 3000ms\n");
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_NO_ERROR, 3000);
+
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt, NULL, 0, &remains);
+
+
+ TEST_SAY("Delaying SendOffsetsToTransaction by 400ms\n");
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_AddOffsetsToTxn, 1,
+ RD_KAFKA_RESP_ERR_NO_ERROR, 400);
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12;
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ /* This is not a resumable call on timeout */
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+
+ TEST_SAY("Delaying EndTxnRequests by 1200ms\n");
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_EndTxn, 1, RD_KAFKA_RESP_ERR_NO_ERROR,
+ 1200);
+
+ /* Committing/aborting the transaction will also be delayed by the
+ * previous accumulated remaining delays. */
+
+ if (do_commit) {
+ TEST_SAY("Committing transaction\n");
+
+ RETRY_TXN_CALL__(
+ rd_kafka_commit_transaction(rk, 100),
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_abort_transaction(rk, -1),
+ RD_KAFKA_RESP_ERR__CONFLICT));
+ } else {
+ TEST_SAY("Aborting transaction\n");
+
+ RETRY_TXN_CALL__(
+ rd_kafka_abort_transaction(rk, 100),
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, -1),
+ RD_KAFKA_RESP_ERR__CONFLICT));
+ }
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify that resuming timed out calls that after the timeout, but
+ * before the resuming call, would error out.
+ */
+static void do_test_txn_resumable_calls_timeout_error(rd_bool_t do_commit) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_resp_err_t err;
+ int32_t coord_id = 1;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ int msgcnt = 1;
+ int remains = 0;
+ rd_kafka_error_t *error;
+
+ SUB_TEST_QUICK("%s_transaction", do_commit ? "commit" : "abort");
+
+ rk = create_txn_producer(&mcluster, transactional_id, 1, NULL);
+
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", transactional_id,
+ coord_id);
+ rd_kafka_mock_partition_set_leader(mcluster, topic, 0, coord_id);
+
+ TEST_SAY("Starting transaction\n");
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0,
+ msgcnt, NULL, 0, &remains);
+
+
+ TEST_SAY("Fail EndTxn fatally after 2000ms\n");
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, coord_id, RD_KAFKAP_EndTxn, 1,
+ RD_KAFKA_RESP_ERR_INVALID_TXN_STATE, 2000);
+
+ if (do_commit) {
+ TEST_SAY("Committing transaction\n");
+
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500),
+ RD_KAFKA_RESP_ERR__TIMED_OUT);
+
+ /* Sleep so that the background EndTxn fails locally and sets
+ * an error result. */
+ rd_sleep(3);
+
+ error = rd_kafka_commit_transaction(rk, -1);
+
+ } else {
+ TEST_SAY("Aborting transaction\n");
+
+ TXN_CALL_EXPECT_ERROR__(rd_kafka_commit_transaction(rk, 500),
+ RD_KAFKA_RESP_ERR__TIMED_OUT);
+
+ /* Sleep so that the background EndTxn fails locally and sets
+ * an error result. */
+ rd_sleep(3);
+
+ error = rd_kafka_commit_transaction(rk, -1);
+ }
+
+ TEST_ASSERT(error != NULL && rd_kafka_error_is_fatal(error),
+ "Expected fatal error, not %s",
+ rd_kafka_error_string(error));
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR_INVALID_TXN_STATE,
+ "Expected error INVALID_TXN_STATE, got %s",
+ rd_kafka_error_name(error));
+ rd_kafka_error_destroy(error);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Concurrent transaction API calls are not permitted.
+ * This test makes sure they're properly enforced.
+ *
+ * For each transactional API, call it with a 5s timeout, and during that time
+ * from another thread call transactional APIs, one by one, and verify that
+ * we get an ERR__CONFLICT error back in the second thread.
+ *
+ * We use a mutex for synchronization, the main thread will hold the lock
+ * when not calling an API but release it just prior to calling.
+ * The other thread will acquire the lock, sleep, and hold the lock while
+ * calling the concurrent API that should fail immediately, releasing the lock
+ * when done.
+ *
+ */
+
+struct _txn_concurrent_state {
+ const char *api;
+ mtx_t lock;
+ rd_kafka_t *rk;
+ struct test *test;
+};
+
+static int txn_concurrent_thread_main(void *arg) {
+ struct _txn_concurrent_state *state = arg;
+ static const char *apis[] = {
+ "init_transactions", "begin_transaction",
+ "send_offsets_to_transaction", "commit_transaction",
+ "abort_transaction", NULL};
+ rd_kafka_t *rk = state->rk;
+ const char *main_api = NULL;
+ int i;
+
+ /* Update TLS variable so TEST_..() macros work */
+ test_curr = state->test;
+
+ while (1) {
+ const char *api = NULL;
+ const int timeout_ms = 10000;
+ rd_kafka_error_t *error = NULL;
+ rd_kafka_resp_err_t exp_err;
+ test_timing_t duration;
+
+ /* Wait for other thread's txn call to start, then sleep a bit
+ * to increase the chance of that call has really begun. */
+ mtx_lock(&state->lock);
+
+ if (state->api && state->api == main_api) {
+ /* Main thread is still blocking on the last API call */
+ TEST_SAY("Waiting for main thread to finish %s()\n",
+ main_api);
+ mtx_unlock(&state->lock);
+ rd_sleep(1);
+ continue;
+ } else if (!(main_api = state->api)) {
+ mtx_unlock(&state->lock);
+ break;
+ }
+
+ rd_sleep(1);
+
+ for (i = 0; (api = apis[i]) != NULL; i++) {
+ TEST_SAY(
+ "Triggering concurrent %s() call while "
+ "main is in %s() call\n",
+ api, main_api);
+ TIMING_START(&duration, "%s", api);
+
+ if (!strcmp(api, "init_transactions"))
+ error =
+ rd_kafka_init_transactions(rk, timeout_ms);
+ else if (!strcmp(api, "begin_transaction"))
+ error = rd_kafka_begin_transaction(rk);
+ else if (!strcmp(api, "send_offsets_to_transaction")) {
+ rd_kafka_topic_partition_list_t *offsets =
+ rd_kafka_topic_partition_list_new(1);
+ rd_kafka_consumer_group_metadata_t *cgmetadata =
+ rd_kafka_consumer_group_metadata_new(
+ "mygroupid");
+ rd_kafka_topic_partition_list_add(
+ offsets, "srctopic4", 0)
+ ->offset = 12;
+
+ error = rd_kafka_send_offsets_to_transaction(
+ rk, offsets, cgmetadata, -1);
+ rd_kafka_consumer_group_metadata_destroy(
+ cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+ } else if (!strcmp(api, "commit_transaction"))
+ error =
+ rd_kafka_commit_transaction(rk, timeout_ms);
+ else if (!strcmp(api, "abort_transaction"))
+ error =
+ rd_kafka_abort_transaction(rk, timeout_ms);
+ else
+ TEST_FAIL("Unknown API: %s", api);
+
+ TIMING_STOP(&duration);
+
+ TEST_SAY_ERROR(error, "Conflicting %s() call: ", api);
+ TEST_ASSERT(error,
+ "Expected conflicting %s() call to fail",
+ api);
+
+ exp_err = !strcmp(api, main_api)
+ ? RD_KAFKA_RESP_ERR__PREV_IN_PROGRESS
+ : RD_KAFKA_RESP_ERR__CONFLICT;
+
+ TEST_ASSERT(rd_kafka_error_code(error) == exp_err,
+
+ "Conflicting %s(): Expected %s, not %s",
+ api, rd_kafka_err2str(exp_err),
+ rd_kafka_error_name(error));
+ TEST_ASSERT(
+ rd_kafka_error_is_retriable(error),
+ "Conflicting %s(): Expected retriable error", api);
+ rd_kafka_error_destroy(error);
+ /* These calls should fail immediately */
+ TIMING_ASSERT(&duration, 0, 100);
+ }
+
+ mtx_unlock(&state->lock);
+ }
+
+ return 0;
+}
+
+static void do_test_txn_concurrent_operations(rd_bool_t do_commit) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ int32_t coord_id = 1;
+ rd_kafka_resp_err_t err;
+ const char *topic = "test";
+ const char *transactional_id = "txnid";
+ int remains = 0;
+ thrd_t thrd;
+ struct _txn_concurrent_state state = RD_ZERO_INIT;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+
+ SUB_TEST("%s", do_commit ? "commit" : "abort");
+
+ test_timeout_set(90);
+
+ /* We need to override the value of socket.connection.setup.timeout.ms
+ * to be at least 2*RTT of the mock broker. This is because the first
+ * ApiVersion request will fail, since we make the request with v3, and
+ * the mock broker's MaxVersion is 2, so the request is retried with v0.
+ * We use the value 3*RTT to add some buffer.
+ */
+ rk = create_txn_producer(&mcluster, transactional_id, 1,
+ "socket.connection.setup.timeout.ms", "15000",
+ NULL);
+
+ /* Set broker RTT to 3.5s so that the background thread has ample
+ * time to call its conflicting APIs.
+ * This value must be less than socket.connection.setup.timeout.ms/2. */
+ rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 3500);
+
+ err = rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+ TEST_ASSERT(!err, "Failed to create topic: %s", rd_kafka_err2str(err));
+
+ /* Set up shared state between us and the concurrent thread */
+ mtx_init(&state.lock, mtx_plain);
+ state.test = test_curr;
+ state.rk = rk;
+
+ /* We release the lock only while calling the TXN API */
+ mtx_lock(&state.lock);
+
+ /* Spin up concurrent thread */
+ if (thrd_create(&thrd, txn_concurrent_thread_main, (void *)&state) !=
+ thrd_success)
+ TEST_FAIL("Failed to create thread");
+
+#define _start_call(callname) \
+ do { \
+ state.api = callname; \
+ mtx_unlock(&state.lock); \
+ } while (0)
+#define _end_call() mtx_lock(&state.lock)
+
+ _start_call("init_transactions");
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+ _end_call();
+
+ /* This call doesn't block, so can't really be tested concurrently. */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ test_produce_msgs2_nowait(rk, topic, 0, RD_KAFKA_PARTITION_UA, 0, 10,
+ NULL, 0, &remains);
+
+ _start_call("send_offsets_to_transaction");
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 0)->offset = 12;
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ TEST_CALL_ERROR__(
+ rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata, -1));
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+ _end_call();
+
+ if (do_commit) {
+ _start_call("commit_transaction");
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(rk, -1));
+ _end_call();
+ } else {
+ _start_call("abort_transaction");
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+ _end_call();
+ }
+
+ /* Signal completion to background thread */
+ state.api = NULL;
+
+ mtx_unlock(&state.lock);
+
+ thrd_join(thrd, NULL);
+
+ rd_kafka_destroy(rk);
+
+ mtx_destroy(&state.lock);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief KIP-360: Test that fatal idempotence errors triggers abortable
+ * transaction errors, but let the broker-side abort of the
+ * transaction fail with a fencing error.
+ * Should raise a fatal error.
+ *
+ * @param error_code Which error code EndTxn should fail with.
+ * Either RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH (older)
+ * or RD_KAFKA_RESP_ERR_PRODUCER_FENCED (newer).
+ */
+static void do_test_txn_fenced_abort(rd_kafka_resp_err_t error_code) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_error_t *error;
+ int32_t txn_coord = 2;
+ const char *txnid = "myTxnId";
+ char errstr[512];
+ rd_kafka_resp_err_t fatal_err;
+ size_t errors_cnt;
+
+ SUB_TEST_QUICK("With error %s", rd_kafka_err2name(error_code));
+
+ rk = create_txn_producer(&mcluster, txnid, 3, "batch.num.messages", "1",
+ NULL);
+
+ rd_kafka_mock_coordinator_set(mcluster, "transaction", txnid,
+ txn_coord);
+
+ test_curr->ignore_dr_err = rd_true;
+ test_curr->is_fatal_cb = error_is_fatal_cb;
+ allowed_error = RD_KAFKA_RESP_ERR__FENCED;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+
+ /*
+ * Start a transaction
+ */
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+
+ /* Produce a message without error first */
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ test_flush(rk, -1);
+
+ /* Fail abort transaction */
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, txn_coord, RD_KAFKAP_EndTxn, 1, error_code, 0);
+
+ /* Fail the PID reinit */
+ rd_kafka_mock_broker_push_request_error_rtts(
+ mcluster, txn_coord, RD_KAFKAP_InitProducerId, 1, error_code, 0);
+
+ /* Produce a message, let it fail with a fatal idempo error. */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 1,
+ RD_KAFKA_RESP_ERR_UNKNOWN_PRODUCER_ID);
+
+ TEST_CALL_ERR__(rd_kafka_producev(
+ rk, RD_KAFKA_V_TOPIC("mytopic"), RD_KAFKA_V_PARTITION(0),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END));
+
+ test_flush(rk, -1);
+
+ /* Abort the transaction, should fail with a fatal error */
+ error = rd_kafka_abort_transaction(rk, -1);
+ TEST_ASSERT(error != NULL, "Expected abort_transaction() to fail");
+
+ TEST_SAY_ERROR(error, "abort_transaction() failed: ");
+ TEST_ASSERT(rd_kafka_error_is_fatal(error), "Expected a fatal error");
+ rd_kafka_error_destroy(error);
+
+ fatal_err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+ TEST_ASSERT(fatal_err, "Expected a fatal error to have been raised");
+ TEST_SAY("Fatal error: %s: %s\n", rd_kafka_err2name(fatal_err), errstr);
+
+ /* Verify that the producer sent the expected number of EndTxn requests
+ * by inspecting the mock broker error stack,
+ * which should now be empty. */
+ if (rd_kafka_mock_broker_error_stack_cnt(
+ mcluster, txn_coord, RD_KAFKAP_EndTxn, &errors_cnt)) {
+ TEST_FAIL(
+ "Broker error count should succeed for API %s"
+ " on broker %" PRId32,
+ rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), txn_coord);
+ }
+ /* Checks all the RD_KAFKAP_EndTxn responses have been consumed */
+ TEST_ASSERT(errors_cnt == 0,
+ "Expected error count 0 for API %s, found %zu",
+ rd_kafka_ApiKey2str(RD_KAFKAP_EndTxn), errors_cnt);
+
+ if (rd_kafka_mock_broker_error_stack_cnt(
+ mcluster, txn_coord, RD_KAFKAP_InitProducerId, &errors_cnt)) {
+ TEST_FAIL(
+ "Broker error count should succeed for API %s"
+ " on broker %" PRId32,
+ rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), txn_coord);
+ }
+ /* Checks none of the RD_KAFKAP_InitProducerId responses have been
+ * consumed
+ */
+ TEST_ASSERT(errors_cnt == 1,
+ "Expected error count 1 for API %s, found %zu",
+ rd_kafka_ApiKey2str(RD_KAFKAP_InitProducerId), errors_cnt);
+
+ /* All done */
+ rd_kafka_destroy(rk);
+
+ allowed_error = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Test that the TxnOffsetCommit op doesn't retry without waiting
+ * if the coordinator is found but not available, causing too frequent retries.
+ */
+static void
+do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_bool_t times_out) {
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *offsets;
+ rd_kafka_consumer_group_metadata_t *cgmetadata;
+ rd_kafka_error_t *error;
+ int timeout;
+
+ SUB_TEST_QUICK("times_out=%s", RD_STR_ToF(times_out));
+
+ rk = create_txn_producer(&mcluster, "txnid", 3, NULL);
+
+ test_curr->ignore_dr_err = rd_true;
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, 5000));
+
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ /* Wait for messages to be delivered */
+ test_flush(rk, 5000);
+
+ /*
+ * Fail TxnOffsetCommit with COORDINATOR_NOT_AVAILABLE
+ * repeatedly.
+ */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_TxnOffsetCommit, 4,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE);
+
+ offsets = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(offsets, "srctopic4", 3)->offset = 1;
+
+ cgmetadata = rd_kafka_consumer_group_metadata_new("mygroupid");
+
+ /* The retry delay is 500ms, with 4 retries it should take at least
+ * 2000ms for this call to succeed. */
+ timeout = times_out ? 500 : 4000;
+ error = rd_kafka_send_offsets_to_transaction(rk, offsets, cgmetadata,
+ timeout);
+ rd_kafka_consumer_group_metadata_destroy(cgmetadata);
+ rd_kafka_topic_partition_list_destroy(offsets);
+
+ if (times_out) {
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ "expected %s, got: %s",
+ rd_kafka_err2name(
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE),
+ rd_kafka_err2str(rd_kafka_error_code(error)));
+ } else {
+ TEST_ASSERT(rd_kafka_error_code(error) ==
+ RD_KAFKA_RESP_ERR_NO_ERROR,
+ "expected \"Success\", found: %s",
+ rd_kafka_err2str(rd_kafka_error_code(error)));
+ }
+ rd_kafka_error_destroy(error);
+
+ /* All done */
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0105_transactions_mock(int argc, char **argv) {
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ do_test_txn_recoverable_errors();
+
+ do_test_txn_fatal_idempo_errors();
+
+ do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH);
+ do_test_txn_fenced_reinit(RD_KAFKA_RESP_ERR_PRODUCER_FENCED);
+
+ do_test_txn_req_cnt();
+
+ do_test_txn_requires_abort_errors();
+
+ do_test_txn_slow_reinit(rd_false);
+ do_test_txn_slow_reinit(rd_true);
+
+ /* Just do a subset of tests in quick mode */
+ if (test_quick)
+ return 0;
+
+ do_test_txn_endtxn_errors();
+
+ do_test_txn_endtxn_infinite();
+
+ do_test_txn_endtxn_timeout();
+
+ do_test_txn_endtxn_timeout_inflight();
+
+ /* Bring down the coordinator */
+ do_test_txn_broker_down_in_txn(rd_true);
+
+ /* Bring down partition leader */
+ do_test_txn_broker_down_in_txn(rd_false);
+
+ do_test_txns_not_supported();
+
+ do_test_txns_send_offsets_concurrent_is_retried();
+
+ do_test_txns_send_offsets_non_eligible();
+
+ do_test_txn_coord_req_destroy();
+
+ do_test_txn_coord_req_multi_find();
+
+ do_test_txn_addparts_req_multi();
+
+ do_test_txns_no_timeout_crash();
+
+ do_test_txn_auth_failure(
+ RD_KAFKAP_InitProducerId,
+ RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED);
+
+ do_test_txn_auth_failure(
+ RD_KAFKAP_FindCoordinator,
+ RD_KAFKA_RESP_ERR_CLUSTER_AUTHORIZATION_FAILED);
+
+ do_test_txn_flush_timeout();
+
+ do_test_unstable_offset_commit();
+
+ do_test_commit_after_msg_timeout();
+
+ do_test_txn_switch_coordinator();
+
+ do_test_txn_switch_coordinator_refresh();
+
+ do_test_out_of_order_seq();
+
+ do_test_topic_disappears_for_awhile();
+
+ do_test_disconnected_group_coord(rd_false);
+
+ do_test_disconnected_group_coord(rd_true);
+
+ do_test_txn_coordinator_null_not_fatal();
+
+ do_test_txn_resumable_calls_timeout(rd_true);
+
+ do_test_txn_resumable_calls_timeout(rd_false);
+
+ do_test_txn_resumable_calls_timeout_error(rd_true);
+
+ do_test_txn_resumable_calls_timeout_error(rd_false);
+ do_test_txn_resumable_init();
+
+ do_test_txn_concurrent_operations(rd_true /*commit*/);
+
+ do_test_txn_concurrent_operations(rd_false /*abort*/);
+
+ do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_INVALID_PRODUCER_EPOCH);
+
+ do_test_txn_fenced_abort(RD_KAFKA_RESP_ERR_PRODUCER_FENCED);
+
+ do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_true);
+
+ do_test_txn_offset_commit_doesnt_retry_too_quickly(rd_false);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c
new file mode 100644
index 000000000..0451e4a00
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0106-cgrp_sess_timeout.c
@@ -0,0 +1,300 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "../src/rdkafka_proto.h"
+
+
+/**
+ * @name Verify that the high-level consumer times out itself if
+ * heartbeats are not successful (issue #2631).
+ */
+
+static const char *commit_type;
+static int rebalance_cnt;
+static rd_kafka_resp_err_t rebalance_exp_event;
+static rd_kafka_resp_err_t commit_exp_err;
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+
+ rebalance_cnt++;
+ TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt,
+ rd_kafka_err2name(err), parts->cnt);
+
+ TEST_ASSERT(
+ err == rebalance_exp_event, "Expected rebalance event %s, not %s",
+ rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err));
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
+ test_consumer_assign("assign", rk, parts);
+ } else {
+ rd_kafka_resp_err_t commit_err;
+
+ if (strcmp(commit_type, "auto")) {
+ rd_kafka_resp_err_t perr;
+
+ TEST_SAY("Performing %s commit\n", commit_type);
+
+ perr = rd_kafka_position(rk, parts);
+ TEST_ASSERT(!perr, "Failed to acquire position: %s",
+ rd_kafka_err2str(perr));
+
+ /* Sleep a short while so the broker times out the
+ * member too. */
+ rd_sleep(1);
+
+ commit_err = rd_kafka_commit(
+ rk, parts, !strcmp(commit_type, "async"));
+
+ if (!strcmp(commit_type, "async"))
+ TEST_ASSERT(!commit_err,
+ "Async commit should not fail, "
+ "but it returned %s",
+ rd_kafka_err2name(commit_err));
+ else
+ TEST_ASSERT(
+ commit_err == commit_exp_err ||
+ (!commit_exp_err &&
+ commit_err ==
+ RD_KAFKA_RESP_ERR__NO_OFFSET),
+ "Expected %s commit to return %s, "
+ "not %s",
+ commit_type,
+ rd_kafka_err2name(commit_exp_err),
+ rd_kafka_err2name(commit_err));
+ }
+
+ test_consumer_unassign("unassign", rk);
+ }
+
+ /* Make sure only one rebalance callback is served per poll()
+ * so that expect_rebalance() returns to the test logic on each
+ * rebalance. */
+ rd_kafka_yield(rk);
+}
+
+
+/**
+ * @brief Wait for an expected rebalance event, or fail.
+ */
+static void expect_rebalance(const char *what,
+ rd_kafka_t *c,
+ rd_kafka_resp_err_t exp_event,
+ int timeout_s) {
+ int64_t tmout = test_clock() + (timeout_s * 1000000);
+ int start_cnt = rebalance_cnt;
+
+ TEST_SAY("Waiting for %s (%s) for %ds\n", what,
+ rd_kafka_err2name(exp_event), timeout_s);
+
+ rebalance_exp_event = exp_event;
+
+ while (tmout > test_clock() && rebalance_cnt == start_cnt) {
+ if (test_consumer_poll_once(c, NULL, 1000))
+ rd_sleep(1);
+ }
+
+ if (rebalance_cnt == start_cnt + 1) {
+ rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR;
+ return;
+ }
+
+ TEST_FAIL("Timed out waiting for %s (%s)\n", what,
+ rd_kafka_err2name(exp_event));
+}
+
+
+/**
+ * @brief Verify that session timeouts are handled by the consumer itself.
+ *
+ * @param use_commit_type "auto", "sync" (manual), "async" (manual)
+ */
+static void do_test_session_timeout(const char *use_commit_type) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *groupid = "mygroup";
+ const char *topic = "test";
+
+ rebalance_cnt = 0;
+ commit_type = use_commit_type;
+
+ SUB_TEST0(!strcmp(use_commit_type, "sync") /*quick*/,
+ "Test session timeout with %s commit", use_commit_type);
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
+ bootstraps, "batch.num.messages", "10", NULL);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(conf, "group.id", groupid);
+ test_conf_set(conf, "session.timeout.ms", "5000");
+ test_conf_set(conf, "heartbeat.interval.ms", "1000");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit",
+ !strcmp(commit_type, "auto") ? "true" : "false");
+
+ c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(c, topic);
+
+ /* Let Heartbeats fail after a couple of successful ones */
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Heartbeat, 9, RD_KAFKA_RESP_ERR_NO_ERROR,
+ RD_KAFKA_RESP_ERR_NO_ERROR, RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR);
+
+ expect_rebalance("initial assignment", c,
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2);
+
+ /* Consume a couple of messages so that we have something to commit */
+ test_consumer_poll("consume", c, 0, -1, 0, 10, NULL);
+
+ /* The commit in the rebalance callback should fail when the
+ * member has timed out from the group. */
+ commit_exp_err = RD_KAFKA_RESP_ERR_UNKNOWN_MEMBER_ID;
+
+ expect_rebalance("session timeout revoke", c,
+ RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS, 2 + 5 + 2);
+
+ expect_rebalance("second assignment", c,
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS, 5 + 2);
+
+ /* Final rebalance in close().
+ * Its commit will work. */
+ rebalance_exp_event = RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS;
+ commit_exp_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Attempt manual commit when assignment has been lost (#3217)
+ */
+static void do_test_commit_on_lost(void) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ const char *groupid = "mygroup";
+ const char *topic = "test";
+ rd_kafka_resp_err_t err;
+
+ SUB_TEST();
+
+ test_curr->is_fatal_cb = test_error_is_not_fatal_cb;
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
+ bootstraps, "batch.num.messages", "10", NULL);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(conf, "group.id", groupid);
+ test_conf_set(conf, "session.timeout.ms", "5000");
+ test_conf_set(conf, "heartbeat.interval.ms", "1000");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit", "false");
+
+ c = test_create_consumer(groupid, test_rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(c, topic);
+
+ /* Consume a couple of messages so that we have something to commit */
+ test_consumer_poll("consume", c, 0, -1, 0, 10, NULL);
+
+ /* Make the coordinator unreachable, this will cause a local session
+ * timeout followed by a revoke and assignment lost. */
+ rd_kafka_mock_broker_set_down(mcluster, 1);
+
+ /* Wait until the assignment is lost */
+ TEST_SAY("Waiting for assignment to be lost...\n");
+ while (!rd_kafka_assignment_lost(c))
+ rd_sleep(1);
+
+ TEST_SAY("Assignment is lost, committing\n");
+ /* Perform manual commit */
+ err = rd_kafka_commit(c, NULL, 0 /*sync*/);
+ TEST_SAY("commit() returned: %s\n", rd_kafka_err2name(err));
+ TEST_ASSERT(err, "expected commit to fail");
+
+ test_consumer_close(c);
+
+ rd_kafka_destroy(c);
+
+ test_mock_cluster_destroy(mcluster);
+
+ test_curr->is_fatal_cb = NULL;
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0106_cgrp_sess_timeout(int argc, char **argv) {
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ do_test_session_timeout("sync");
+ do_test_session_timeout("async");
+ do_test_session_timeout("auto");
+
+ do_test_commit_on_lost();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c
new file mode 100644
index 000000000..1f91e2a84
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0107-topic_recreate.c
@@ -0,0 +1,259 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "../src/rdkafka_proto.h"
+
+
+/**
+ * @name Verify that producer and consumer resumes operation after
+ * a topic has been deleted and recreated.
+ */
+
+/**
+ * The message value to produce, one of:
+ * "before" - before topic deletion
+ * "during" - during topic deletion
+ * "after" - after topic has been re-created
+ * "end" - stop producing
+ */
+static mtx_t value_mtx;
+static char *value;
+
+static const int msg_rate = 10; /**< Messages produced per second */
+
+static struct test *this_test; /**< Exposes current test struct (in TLS) to
+ * producer thread. */
+
+
+/**
+ * @brief Treat all error_cb as non-test-fatal.
+ */
+static int
+is_error_fatal(rd_kafka_t *rk, rd_kafka_resp_err_t err, const char *reason) {
+ return rd_false;
+}
+
+/**
+ * @brief Producing thread
+ */
+static int run_producer(void *arg) {
+ const char *topic = arg;
+ rd_kafka_t *producer = test_create_producer();
+ int ret = 0;
+
+ test_curr = this_test;
+
+ /* Don't check message status */
+ test_curr->exp_dr_status = (rd_kafka_msg_status_t)-1;
+
+ while (1) {
+ rd_kafka_resp_err_t err;
+
+ mtx_lock(&value_mtx);
+ if (!strcmp(value, "end")) {
+ mtx_unlock(&value_mtx);
+ break;
+ } else if (strcmp(value, "before")) {
+ /* Ignore Delivery report errors after topic
+ * has been deleted and eventually re-created,
+ * we rely on the consumer to verify that
+ * messages are produced. */
+ test_curr->ignore_dr_err = rd_true;
+ }
+
+ err = rd_kafka_producev(
+ producer, RD_KAFKA_V_TOPIC(topic),
+ RD_KAFKA_V_MSGFLAGS(RD_KAFKA_MSG_F_COPY),
+ RD_KAFKA_V_VALUE(value, strlen(value)), RD_KAFKA_V_END);
+
+ if (err == RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART ||
+ err == RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ TEST_SAY("Produce failed (expectedly): %s\n",
+ rd_kafka_err2name(err));
+ else
+ TEST_ASSERT(!err, "producev() failed: %s",
+ rd_kafka_err2name(err));
+
+ mtx_unlock(&value_mtx);
+
+ rd_usleep(1000000 / msg_rate, NULL);
+
+ rd_kafka_poll(producer, 0);
+ }
+
+ if (rd_kafka_flush(producer, 5000)) {
+ TEST_WARN("Failed to flush all message(s), %d remain\n",
+ rd_kafka_outq_len(producer));
+ /* Purge the messages to see which partition they were for */
+ rd_kafka_purge(producer, RD_KAFKA_PURGE_F_QUEUE |
+ RD_KAFKA_PURGE_F_INFLIGHT);
+ rd_kafka_flush(producer, 5000);
+ TEST_SAY("%d message(s) in queue after purge\n",
+ rd_kafka_outq_len(producer));
+
+ ret = 1; /* Fail test from main thread */
+ }
+
+ rd_kafka_destroy(producer);
+
+ return ret;
+}
+
+
+/**
+ * @brief Expect at least \p cnt messages with value matching \p exp_value,
+ * else fail the current test.
+ */
+static void
+expect_messages(rd_kafka_t *consumer, int cnt, const char *exp_value) {
+ int match_cnt = 0, other_cnt = 0, err_cnt = 0;
+ size_t exp_len = strlen(exp_value);
+
+ TEST_SAY("Expecting >= %d messages with value \"%s\"...\n", cnt,
+ exp_value);
+
+ while (match_cnt < cnt) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consumer_poll(consumer, 1000);
+ if (!rkmessage)
+ continue;
+
+ if (rkmessage->err) {
+ TEST_SAY("Consume error: %s\n",
+ rd_kafka_message_errstr(rkmessage));
+ err_cnt++;
+ } else if (rkmessage->len == exp_len &&
+ !memcmp(rkmessage->payload, exp_value, exp_len)) {
+ match_cnt++;
+ } else {
+ TEST_SAYL(3,
+ "Received \"%.*s\", expected \"%s\": "
+ "ignored\n",
+ (int)rkmessage->len,
+ (const char *)rkmessage->payload, exp_value);
+ other_cnt++;
+ }
+
+ rd_kafka_message_destroy(rkmessage);
+ }
+
+ TEST_SAY(
+ "Consumed %d messages matching \"%s\", "
+ "ignored %d others, saw %d error(s)\n",
+ match_cnt, exp_value, other_cnt, err_cnt);
+}
+
+
+/**
+ * @brief Test topic create + delete + create with first topic having
+ * \p part_cnt_1 partitions and second topic having \p part_cnt_2 .
+ */
+static void do_test_create_delete_create(int part_cnt_1, int part_cnt_2) {
+ rd_kafka_t *consumer;
+ thrd_t producer_thread;
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ int ret = 0;
+
+ TEST_SAY(_C_MAG
+ "[ Test topic create(%d parts)+delete+create(%d parts) ]\n",
+ part_cnt_1, part_cnt_2);
+
+ consumer = test_create_consumer(topic, NULL, NULL, NULL);
+
+ /* Create topic */
+ test_create_topic(consumer, topic, part_cnt_1, 3);
+
+ /* Start consumer */
+ test_consumer_subscribe(consumer, topic);
+ test_consumer_wait_assignment(consumer, rd_true);
+
+ mtx_lock(&value_mtx);
+ value = "before";
+ mtx_unlock(&value_mtx);
+
+ /* Create producer thread */
+ if (thrd_create(&producer_thread, run_producer, (void *)topic) !=
+ thrd_success)
+ TEST_FAIL("thrd_create failed");
+
+ /* Consume messages for 5s */
+ expect_messages(consumer, msg_rate * 5, value);
+
+ /* Delete topic */
+ mtx_lock(&value_mtx);
+ value = "during";
+ mtx_unlock(&value_mtx);
+
+ test_delete_topic(consumer, topic);
+ rd_sleep(5);
+
+ /* Re-create topic */
+ test_create_topic(consumer, topic, part_cnt_2, 3);
+
+ mtx_lock(&value_mtx);
+ value = "after";
+ mtx_unlock(&value_mtx);
+
+ /* Consume for 5 more seconds, should see new messages */
+ expect_messages(consumer, msg_rate * 5, value);
+
+ rd_kafka_destroy(consumer);
+
+ /* Wait for producer to exit */
+ mtx_lock(&value_mtx);
+ value = "end";
+ mtx_unlock(&value_mtx);
+
+ if (thrd_join(producer_thread, &ret) != thrd_success || ret != 0)
+ TEST_FAIL("Producer failed: see previous errors");
+
+ TEST_SAY(_C_GRN
+ "[ Test topic create(%d parts)+delete+create(%d parts): "
+ "PASS ]\n",
+ part_cnt_1, part_cnt_2);
+}
+
+
+int main_0107_topic_recreate(int argc, char **argv) {
+ this_test = test_curr; /* Need to expose current test struct (in TLS)
+ * to producer thread. */
+
+ this_test->is_fatal_cb = is_error_fatal;
+
+ mtx_init(&value_mtx, mtx_plain);
+
+ test_conf_init(NULL, NULL, 60);
+
+ do_test_create_delete_create(10, 3);
+ do_test_create_delete_create(3, 6);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp
new file mode 100644
index 000000000..cabee6704
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0109-auto_create_topics.cpp
@@ -0,0 +1,218 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <map>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+/**
+ * Test consumer allow.auto.create.topics by subscribing to a mix
+ * of available, unauthorized and non-existent topics.
+ *
+ * The same test is run with and without allow.auto.create.topics
+ * and with and without wildcard subscribes.
+ *
+ */
+
+
+static void do_test_consumer(bool allow_auto_create_topics,
+ bool with_wildcards) {
+ Test::Say(tostr() << _C_MAG << "[ Test allow.auto.create.topics="
+ << (allow_auto_create_topics ? "true" : "false")
+ << " with_wildcards=" << (with_wildcards ? "true" : "false")
+ << " ]\n");
+
+ bool has_acl_cli = test_broker_version >= TEST_BRKVER(2, 1, 0, 0) &&
+ !test_needs_auth(); /* We can't bother passing Java
+ * security config to kafka-acls.sh */
+
+ bool supports_allow = test_broker_version >= TEST_BRKVER(0, 11, 0, 0);
+
+ std::string topic_exists = Test::mk_topic_name("0109-exists", 1);
+ std::string topic_notexists = Test::mk_topic_name("0109-notexists", 1);
+ std::string topic_unauth = Test::mk_topic_name("0109-unauthorized", 1);
+
+ /* Create consumer */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 20);
+ Test::conf_set(conf, "group.id", topic_exists);
+ Test::conf_set(conf, "enable.partition.eof", "true");
+ /* Quickly refresh metadata on topic auto-creation since the first
+ * metadata after auto-create hides the topic due to 0 partition count. */
+ Test::conf_set(conf, "topic.metadata.refresh.interval.ms", "1000");
+ if (allow_auto_create_topics)
+ Test::conf_set(conf, "allow.auto.create.topics", "true");
+
+ std::string bootstraps;
+ if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to retrieve bootstrap.servers");
+
+ std::string errstr;
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /* Create topics */
+ Test::create_topic(c, topic_exists.c_str(), 1, 1);
+
+ if (has_acl_cli) {
+ Test::create_topic(c, topic_unauth.c_str(), 1, 1);
+
+ /* Add denying ACL for unauth topic */
+ test_kafka_cmd(
+ "kafka-acls.sh --bootstrap-server %s "
+ "--add --deny-principal 'User:*' "
+ "--operation All --deny-host '*' "
+ "--topic '%s'",
+ bootstraps.c_str(), topic_unauth.c_str());
+ }
+
+
+ /* Wait for topic to be fully created */
+ test_wait_topic_exists(NULL, topic_exists.c_str(), 10 * 1000);
+
+
+ /*
+ * Subscribe
+ */
+ std::vector<std::string> topics;
+ std::map<std::string, RdKafka::ErrorCode> exp_errors;
+
+ topics.push_back(topic_notexists);
+ if (has_acl_cli)
+ topics.push_back(topic_unauth);
+
+ if (with_wildcards) {
+ topics.push_back("^" + topic_exists);
+ topics.push_back("^" + topic_notexists);
+ /* If the subscription contains at least one wildcard/regex
+ * then no auto topic creation will take place (since the consumer
+ * requests all topics in metadata, and not specific ones, thus
+ * not triggering topic auto creation).
+ * We need to handle the expected error cases accordingly. */
+ exp_errors["^" + topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
+ exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
+
+ if (has_acl_cli) {
+ /* Unauthorized topics are not included in list-all-topics Metadata,
+ * which we use for wildcards, so in this case the error code for
+ * unauthorixed topics show up as unknown topic. */
+ exp_errors[topic_unauth] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
+ }
+ } else {
+ topics.push_back(topic_exists);
+
+ if (has_acl_cli)
+ exp_errors[topic_unauth] = RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED;
+ }
+
+ if (supports_allow && !allow_auto_create_topics)
+ exp_errors[topic_notexists] = RdKafka::ERR_UNKNOWN_TOPIC_OR_PART;
+
+ RdKafka::ErrorCode err;
+ if ((err = c->subscribe(topics)))
+ Test::Fail("subscribe failed: " + RdKafka::err2str(err));
+
+ /* Start consuming until EOF is reached, which indicates that we have an
+ * assignment and any errors should have been reported. */
+ bool run = true;
+ while (run) {
+ RdKafka::Message *msg = c->consume(tmout_multip(1000));
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ case RdKafka::ERR_NO_ERROR:
+ break;
+
+ case RdKafka::ERR__PARTITION_EOF:
+ run = false;
+ break;
+
+ default:
+ Test::Say("Consume error on " + msg->topic_name() + ": " + msg->errstr() +
+ "\n");
+
+ std::map<std::string, RdKafka::ErrorCode>::iterator it =
+ exp_errors.find(msg->topic_name());
+
+ /* Temporary unknown-topic errors are okay for auto-created topics. */
+ bool unknown_is_ok = allow_auto_create_topics && !with_wildcards &&
+ msg->err() == RdKafka::ERR_UNKNOWN_TOPIC_OR_PART &&
+ msg->topic_name() == topic_notexists;
+
+ if (it == exp_errors.end()) {
+ if (unknown_is_ok)
+ Test::Say("Ignoring temporary auto-create error for topic " +
+ msg->topic_name() + ": " + RdKafka::err2str(msg->err()) +
+ "\n");
+ else
+ Test::Fail("Did not expect error for " + msg->topic_name() +
+ ": got: " + RdKafka::err2str(msg->err()));
+ } else if (msg->err() != it->second) {
+ if (unknown_is_ok)
+ Test::Say("Ignoring temporary auto-create error for topic " +
+ msg->topic_name() + ": " + RdKafka::err2str(msg->err()) +
+ "\n");
+ else
+ Test::Fail("Expected '" + RdKafka::err2str(it->second) + "' for " +
+ msg->topic_name() + ", got " +
+ RdKafka::err2str(msg->err()));
+ } else {
+ exp_errors.erase(msg->topic_name());
+ }
+
+ break;
+ }
+
+ delete msg;
+ }
+
+
+ /* Fail if not all expected errors were seen. */
+ if (!exp_errors.empty())
+ Test::Fail(tostr() << "Expecting " << exp_errors.size() << " more errors");
+
+ c->close();
+
+ delete c;
+}
+
+extern "C" {
+int main_0109_auto_create_topics(int argc, char **argv) {
+ /* Parameters:
+ * allow auto create, with wildcards */
+ do_test_consumer(true, true);
+ do_test_consumer(true, false);
+ do_test_consumer(false, true);
+ do_test_consumer(false, false);
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp
new file mode 100644
index 000000000..1f36b3a76
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0110-batch_size.cpp
@@ -0,0 +1,183 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Test batch.size producer property.
+ *
+ */
+
+#include <iostream>
+#include <fstream>
+#include <iterator>
+#include <string>
+#include "testcpp.h"
+
+#if WITH_RAPIDJSON
+#include <rapidjson/document.h>
+#include <rapidjson/pointer.h>
+#include <rapidjson/error/en.h>
+
+
+class myAvgStatsCb : public RdKafka::EventCb {
+ public:
+ myAvgStatsCb(std::string topic) :
+ avg_batchsize(0), min_batchsize(0), max_batchsize(0), topic_(topic) {
+ }
+
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_LOG:
+ Test::Say(event.str() + "\n");
+ break;
+ case RdKafka::Event::EVENT_STATS:
+ read_batch_stats(event.str());
+ break;
+ default:
+ break;
+ }
+ }
+
+ int avg_batchsize;
+ int min_batchsize;
+ int max_batchsize;
+
+ private:
+ void read_val(rapidjson::Document &d, const std::string &path, int &val) {
+ rapidjson::Pointer jpath(path.c_str());
+
+ if (!jpath.IsValid())
+ Test::Fail(tostr() << "json pointer parse " << path << " failed at "
+ << jpath.GetParseErrorOffset() << " with error code "
+ << jpath.GetParseErrorCode());
+
+ rapidjson::Value *pp = rapidjson::GetValueByPointer(d, jpath);
+ if (!pp) {
+ Test::Say(tostr() << "Could not find " << path << " in stats\n");
+ return;
+ }
+
+ val = pp->GetInt();
+ }
+
+ void read_batch_stats(const std::string &stats) {
+ rapidjson::Document d;
+
+ if (d.Parse(stats.c_str()).HasParseError())
+ Test::Fail(tostr() << "Failed to parse stats JSON: "
+ << rapidjson::GetParseError_En(d.GetParseError())
+ << " at " << d.GetErrorOffset());
+
+ read_val(d, "/topics/" + topic_ + "/batchsize/avg", avg_batchsize);
+ read_val(d, "/topics/" + topic_ + "/batchsize/min", min_batchsize);
+ read_val(d, "/topics/" + topic_ + "/batchsize/max", max_batchsize);
+ }
+
+ std::string topic_;
+};
+
+
+/**
+ * @brief Specify batch.size and parse stats to verify it takes effect.
+ *
+ */
+static void do_test_batch_size() {
+ std::string topic = Test::mk_topic_name(__FILE__, 0);
+
+ myAvgStatsCb event_cb(topic);
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 0);
+
+ const int msgcnt = 1000;
+ const int msgsize = 1000;
+ int batchsize = 5000;
+ int exp_min_batchsize = batchsize - msgsize - 100 /*~framing overhead*/;
+
+ Test::conf_set(conf, "batch.size", "5000");
+
+ /* Make sure batch.size takes precedence by setting the following high */
+ Test::conf_set(conf, "batch.num.messages", "100000");
+ Test::conf_set(conf, "linger.ms", "2000");
+
+ Test::conf_set(conf, "statistics.interval.ms", "7000");
+ std::string errstr;
+ if (conf->set("event_cb", &event_cb, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+ delete conf;
+
+ /* Produce messages */
+ char val[msgsize];
+ memset(val, 'a', msgsize);
+
+ for (int i = 0; i < msgcnt; i++) {
+ RdKafka::ErrorCode err =
+ p->produce(topic, 0, RdKafka::Producer::RK_MSG_COPY, val, msgsize, NULL,
+ 0, -1, NULL);
+ if (err)
+ Test::Fail("Produce failed: " + RdKafka::err2str(err));
+ }
+
+ Test::Say(tostr() << "Produced " << msgcnt << " messages\n");
+ p->flush(5 * 1000);
+
+ Test::Say("Waiting for stats\n");
+ while (event_cb.avg_batchsize == 0)
+ p->poll(1000);
+
+ Test::Say(tostr() << "Batchsize: "
+ << "configured " << batchsize << ", min "
+ << event_cb.min_batchsize << ", max "
+ << event_cb.max_batchsize << ", avg "
+ << event_cb.avg_batchsize << "\n");
+
+ /* The average batchsize should within a message size from batch.size. */
+ if (event_cb.avg_batchsize < exp_min_batchsize ||
+ event_cb.avg_batchsize > batchsize)
+ Test::Fail(tostr() << "Expected avg batchsize to be within "
+ << exp_min_batchsize << ".." << batchsize << " but got "
+ << event_cb.avg_batchsize);
+
+ delete p;
+}
+#endif
+
+extern "C" {
+int main_0110_batch_size(int argc, char **argv) {
+#if WITH_RAPIDJSON
+ do_test_batch_size();
+#else
+ Test::Skip("RapidJSON >=1.1.0 not available\n");
+#endif
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp
new file mode 100644
index 000000000..4b6683add
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0111-delay_create_topics.cpp
@@ -0,0 +1,127 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <map>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+/**
+ * Verify that the producer waits topic.metadata.propagation.max.ms
+ * before flagging a topic as non-existent, allowing asynchronous
+ * CreateTopics() to be used in non-auto-create scenarios.
+ *
+ * This tests the producer. The consumer behaviour is implicitly tested
+ * in 0109.
+ */
+
+
+namespace {
+class DrCb : public RdKafka::DeliveryReportCb {
+ public:
+ DrCb(RdKafka::ErrorCode exp_err) : ok(false), _exp_err(exp_err) {
+ }
+
+ void dr_cb(RdKafka::Message &msg) {
+ Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n");
+ if (msg.err() != _exp_err)
+ Test::Fail("Delivery report: Expected " + RdKafka::err2str(_exp_err) +
+ " but got " + RdKafka::err2str(msg.err()));
+ else if (ok)
+ Test::Fail("Too many delivery reports");
+ else
+ ok = true;
+ }
+
+ bool ok;
+
+ private:
+ RdKafka::ErrorCode _exp_err;
+};
+}; // namespace
+
+static void do_test_producer(bool timeout_too_short) {
+ Test::Say(tostr() << _C_MAG << "[ Test with timeout_too_short="
+ << (timeout_too_short ? "true" : "false") << " ]\n");
+
+ std::string topic = Test::mk_topic_name("0110-delay_create_topics", 1);
+
+ /* Create Producer */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 20);
+
+ std::string errstr;
+
+ if (timeout_too_short) {
+ if (conf->set("topic.metadata.propagation.max.ms", "3", errstr))
+ Test::Fail(errstr);
+ }
+
+ DrCb dr_cb(timeout_too_short ? RdKafka::ERR_UNKNOWN_TOPIC_OR_PART
+ : RdKafka::ERR_NO_ERROR);
+ conf->set("dr_cb", &dr_cb, errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ /* Produce a message to the yet non-existent topic. */
+ RdKafka::ErrorCode err = p->produce(
+ topic, RdKafka::Topic::PARTITION_UA, RdKafka::Producer::RK_MSG_COPY,
+ (void *)"hello", 5, "hi", 2, 0, NULL, NULL);
+ if (err)
+ Test::Fail(tostr() << "produce failed: " << RdKafka::err2str(err));
+
+ int delay = 5;
+ int64_t end_wait = test_clock() + (delay * 1000000);
+
+ while (test_clock() < end_wait)
+ p->poll(1000);
+
+ Test::create_topic(NULL, topic.c_str(), 1, 3);
+
+ p->flush(10 * 1000);
+
+ if (!dr_cb.ok)
+ Test::Fail("Did not get delivery report for message");
+
+ delete p;
+
+ Test::Say(tostr() << _C_GRN << "[ Test with timeout_too_short="
+ << (timeout_too_short ? "true" : "false") << ": PASS ]\n");
+}
+
+extern "C" {
+int main_0111_delay_create_topics(int argc, char **argv) {
+ do_test_producer(false);
+ do_test_producer(true);
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c
new file mode 100644
index 000000000..d945a2c32
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0112-assign_unknown_part.c
@@ -0,0 +1,98 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdkafka.h"
+
+/**
+ * Assign consumer to single partition topic and consume a message.
+ * Then add a new partition to the topic (i.e., one that will not
+ * be in the consumer's metadata) and assign the consumer to it.
+ * Verify that partition 0 is not incorrectly reported as missing.
+ * See #2915.
+ */
+
+int main_0112_assign_unknown_part(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__ + 5, 1);
+ int64_t offset = RD_KAFKA_OFFSET_BEGINNING;
+ uint64_t testid = test_id_generate();
+ rd_kafka_t *c;
+ rd_kafka_topic_partition_list_t *tpl;
+ int r;
+
+ test_conf_init(NULL, NULL, 60);
+
+ TEST_SAY("Creating consumer\n");
+ c = test_create_consumer(topic, NULL, NULL, NULL);
+
+ TEST_SAY("Creating topic %s with 1 partition\n", topic);
+ test_create_topic(c, topic, 1, 1);
+ test_wait_topic_exists(c, topic, 10 * 1000);
+
+ TEST_SAY("Producing message to partition 0\n");
+ test_produce_msgs_easy(topic, testid, 0, 1);
+
+ TEST_SAY("Assigning partition 0\n");
+ tpl = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(tpl, topic, 0)->offset = offset;
+ test_consumer_assign("ASSIGN", c, tpl);
+
+ TEST_SAY("Waiting for message\n");
+ test_consumer_poll("CONSUME 0", c, testid, -1, 0, 1, NULL);
+
+ TEST_SAY("Changing partition count for topic %s\n", topic);
+ test_create_partitions(NULL, topic, 2);
+
+ /* FIXME: The new partition might not have propagated through the
+ * cluster by the time the producer tries to produce to it
+ * which causes the produce to fail.
+ * Loop until the partition count is correct. */
+ while ((r = test_get_partition_count(c, topic, 5000)) != 2) {
+ TEST_SAY(
+ "Waiting for %s partition count to reach 2, "
+ "currently %d\n",
+ topic, r);
+ rd_sleep(1);
+ }
+
+ TEST_SAY("Producing message to partition 1\n");
+ test_produce_msgs_easy(topic, testid, 1, 1);
+
+ TEST_SAY("Assigning partitions 1\n");
+ rd_kafka_topic_partition_list_add(tpl, topic, 1)->offset = offset;
+ test_consumer_assign("ASSIGN", c, tpl);
+
+ TEST_SAY("Waiting for messages\n");
+ test_consumer_poll("CONSUME", c, testid, -1, 0, 2, NULL);
+
+ rd_kafka_topic_partition_list_destroy(tpl);
+ test_consumer_close(c);
+ rd_kafka_destroy(c);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp
new file mode 100644
index 000000000..430798d7f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0113-cooperative_rebalance.cpp
@@ -0,0 +1,3170 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+extern "C" {
+#include "../src/rdkafka_protocol.h"
+#include "test.h"
+}
+#include <iostream>
+#include <map>
+#include <set>
+#include <algorithm>
+#include <cstring>
+#include <cstdlib>
+#include <assert.h>
+#include "testcpp.h"
+#include <fstream>
+
+using namespace std;
+
+/** Topic+Partition helper class */
+class Toppar {
+ public:
+ Toppar(const string &topic, int32_t partition) :
+ topic(topic), partition(partition) {
+ }
+
+ Toppar(const RdKafka::TopicPartition *tp) :
+ topic(tp->topic()), partition(tp->partition()) {
+ }
+
+ friend bool operator==(const Toppar &a, const Toppar &b) {
+ return a.partition == b.partition && a.topic == b.topic;
+ }
+
+ friend bool operator<(const Toppar &a, const Toppar &b) {
+ if (a.partition < b.partition)
+ return true;
+ return a.topic < b.topic;
+ }
+
+ string str() const {
+ return tostr() << topic << "[" << partition << "]";
+ }
+
+ std::string topic;
+ int32_t partition;
+};
+
+
+
+static std::string get_bootstrap_servers() {
+ RdKafka::Conf *conf;
+ std::string bootstrap_servers;
+ Test::conf_init(&conf, NULL, 0);
+ conf->get("bootstrap.servers", bootstrap_servers);
+ delete conf;
+ return bootstrap_servers;
+}
+
+
+class DrCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &msg) {
+ if (msg.err())
+ Test::Fail("Delivery failed: " + RdKafka::err2str(msg.err()));
+ }
+};
+
+
+/**
+ * @brief Produce messages to partitions.
+ *
+ * The pair is Toppar,msg_cnt_per_partition.
+ * The Toppar is topic,partition_cnt.
+ */
+static void produce_msgs(vector<pair<Toppar, int> > partitions) {
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 0);
+
+ string errstr;
+ DrCb dr;
+ conf->set("dr_cb", &dr, errstr);
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create producer: " + errstr);
+ delete conf;
+
+ for (vector<pair<Toppar, int> >::iterator it = partitions.begin();
+ it != partitions.end(); it++) {
+ for (int part = 0; part < it->first.partition; part++) {
+ for (int i = 0; i < it->second; i++) {
+ RdKafka::ErrorCode err =
+ p->produce(it->first.topic, part, RdKafka::Producer::RK_MSG_COPY,
+ (void *)"Hello there", 11, NULL, 0, 0, NULL);
+ TEST_ASSERT(!err, "produce(%s, %d) failed: %s", it->first.topic.c_str(),
+ part, RdKafka::err2str(err).c_str());
+
+ p->poll(0);
+ }
+ }
+ }
+
+ p->flush(10000);
+
+ delete p;
+}
+
+
+
+static RdKafka::KafkaConsumer *make_consumer(
+ string client_id,
+ string group_id,
+ string assignment_strategy,
+ vector<pair<string, string> > *additional_conf,
+ RdKafka::RebalanceCb *rebalance_cb,
+ int timeout_s) {
+ std::string bootstraps;
+ std::string errstr;
+ std::vector<std::pair<std::string, std::string> >::iterator itr;
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, timeout_s);
+ Test::conf_set(conf, "client.id", client_id);
+ Test::conf_set(conf, "group.id", group_id);
+ Test::conf_set(conf, "auto.offset.reset", "earliest");
+ Test::conf_set(conf, "enable.auto.commit", "false");
+ Test::conf_set(conf, "partition.assignment.strategy", assignment_strategy);
+ if (additional_conf != NULL) {
+ for (itr = (*additional_conf).begin(); itr != (*additional_conf).end();
+ itr++)
+ Test::conf_set(conf, itr->first, itr->second);
+ }
+
+ if (rebalance_cb) {
+ if (conf->set("rebalance_cb", rebalance_cb, errstr))
+ Test::Fail("Failed to set rebalance_cb: " + errstr);
+ }
+ RdKafka::KafkaConsumer *consumer =
+ RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ return consumer;
+}
+
+/**
+ * @returns a CSV string of the vector
+ */
+static string string_vec_to_str(const vector<string> &v) {
+ ostringstream ss;
+ for (vector<string>::const_iterator it = v.begin(); it != v.end(); it++)
+ ss << (it == v.begin() ? "" : ", ") << *it;
+ return ss.str();
+}
+
+void expect_assignment(RdKafka::KafkaConsumer *consumer, size_t count) {
+ std::vector<RdKafka::TopicPartition *> partitions;
+ RdKafka::ErrorCode err;
+ err = consumer->assignment(partitions);
+ if (err)
+ Test::Fail(consumer->name() +
+ " assignment() failed: " + RdKafka::err2str(err));
+ if (partitions.size() != count)
+ Test::Fail(tostr() << "Expecting consumer " << consumer->name()
+ << " to have " << count
+ << " assigned partition(s), not: " << partitions.size());
+ RdKafka::TopicPartition::destroy(partitions);
+}
+
+
+static bool TopicPartition_cmp(const RdKafka::TopicPartition *a,
+ const RdKafka::TopicPartition *b) {
+ if (a->topic() < b->topic())
+ return true;
+ else if (a->topic() > b->topic())
+ return false;
+ return a->partition() < b->partition();
+}
+
+
+void expect_assignment(RdKafka::KafkaConsumer *consumer,
+ vector<RdKafka::TopicPartition *> &expected) {
+ vector<RdKafka::TopicPartition *> partitions;
+ RdKafka::ErrorCode err;
+ err = consumer->assignment(partitions);
+ if (err)
+ Test::Fail(consumer->name() +
+ " assignment() failed: " + RdKafka::err2str(err));
+
+ if (partitions.size() != expected.size())
+ Test::Fail(tostr() << "Expecting consumer " << consumer->name()
+ << " to have " << expected.size()
+ << " assigned partition(s), not " << partitions.size());
+
+ sort(partitions.begin(), partitions.end(), TopicPartition_cmp);
+ sort(expected.begin(), expected.end(), TopicPartition_cmp);
+
+ int fails = 0;
+ for (int i = 0; i < (int)partitions.size(); i++) {
+ if (!TopicPartition_cmp(partitions[i], expected[i]))
+ continue;
+
+ Test::Say(tostr() << _C_RED << consumer->name() << ": expected assignment #"
+ << i << " " << expected[i]->topic() << " ["
+ << expected[i]->partition() << "], not "
+ << partitions[i]->topic() << " ["
+ << partitions[i]->partition() << "]\n");
+ fails++;
+ }
+
+ if (fails)
+ Test::Fail(consumer->name() + ": Expected assignment mismatch, see above");
+
+ RdKafka::TopicPartition::destroy(partitions);
+}
+
+
+class DefaultRebalanceCb : public RdKafka::RebalanceCb {
+ private:
+ static string part_list_print(
+ const vector<RdKafka::TopicPartition *> &partitions) {
+ ostringstream ss;
+ for (unsigned int i = 0; i < partitions.size(); i++)
+ ss << (i == 0 ? "" : ", ") << partitions[i]->topic() << " ["
+ << partitions[i]->partition() << "]";
+ return ss.str();
+ }
+
+ public:
+ int assign_call_cnt;
+ int revoke_call_cnt;
+ int nonempty_assign_call_cnt; /**< ASSIGN_PARTITIONS with partitions */
+ int lost_call_cnt;
+ int partitions_assigned_net;
+ bool wait_rebalance;
+ int64_t ts_last_assign; /**< Timestamp of last rebalance assignment */
+ map<Toppar, int> msg_cnt; /**< Number of consumed messages per partition. */
+
+ ~DefaultRebalanceCb() {
+ reset_msg_cnt();
+ }
+
+ DefaultRebalanceCb() :
+ assign_call_cnt(0),
+ revoke_call_cnt(0),
+ nonempty_assign_call_cnt(0),
+ lost_call_cnt(0),
+ partitions_assigned_net(0),
+ wait_rebalance(false),
+ ts_last_assign(0) {
+ }
+
+
+ void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ wait_rebalance = false;
+
+ std::string protocol = consumer->rebalance_protocol();
+
+ TEST_ASSERT(protocol == "COOPERATIVE",
+ "%s: Expected rebalance_protocol \"COOPERATIVE\", not %s",
+ consumer->name().c_str(), protocol.c_str());
+
+ const char *lost_str = consumer->assignment_lost() ? " (LOST)" : "";
+ Test::Say(tostr() << _C_YEL "RebalanceCb " << protocol << ": "
+ << consumer->name() << " " << RdKafka::err2str(err)
+ << lost_str << ": " << part_list_print(partitions)
+ << "\n");
+
+ if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+ if (consumer->assignment_lost())
+ Test::Fail("unexpected lost assignment during ASSIGN rebalance");
+ RdKafka::Error *error = consumer->incremental_assign(partitions);
+ if (error)
+ Test::Fail(tostr() << "consumer->incremental_assign() failed: "
+ << error->str());
+ if (partitions.size() > 0)
+ nonempty_assign_call_cnt++;
+ assign_call_cnt += 1;
+ partitions_assigned_net += (int)partitions.size();
+ ts_last_assign = test_clock();
+
+ } else {
+ if (consumer->assignment_lost())
+ lost_call_cnt += 1;
+ RdKafka::Error *error = consumer->incremental_unassign(partitions);
+ if (error)
+ Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
+ << error->str());
+ if (partitions.size() == 0)
+ Test::Fail("revoked partitions size should never be 0");
+ revoke_call_cnt += 1;
+ partitions_assigned_net -= (int)partitions.size();
+ }
+
+ /* Reset message counters for the given partitions. */
+ Test::Say(consumer->name() + ": resetting message counters:\n");
+ reset_msg_cnt(partitions);
+ }
+
+ bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) {
+ RdKafka::Message *msg = c->consume(timeout_ms);
+ bool ret = msg->err() != RdKafka::ERR__TIMED_OUT;
+ if (!msg->err())
+ msg_cnt[Toppar(msg->topic_name(), msg->partition())]++;
+ delete msg;
+ return ret;
+ }
+
+ void reset_msg_cnt() {
+ msg_cnt.clear();
+ }
+
+ void reset_msg_cnt(Toppar &tp) {
+ int msgcnt = get_msg_cnt(tp);
+ Test::Say(tostr() << " RESET " << tp.topic << " [" << tp.partition << "]"
+ << " with " << msgcnt << " messages\n");
+ if (!msg_cnt.erase(tp) && msgcnt)
+ Test::Fail("erase failed!");
+ }
+
+ void reset_msg_cnt(const vector<RdKafka::TopicPartition *> &partitions) {
+ for (unsigned int i = 0; i < partitions.size(); i++) {
+ Toppar tp(partitions[i]->topic(), partitions[i]->partition());
+ reset_msg_cnt(tp);
+ }
+ }
+
+ int get_msg_cnt(const Toppar &tp) {
+ map<Toppar, int>::iterator it = msg_cnt.find(tp);
+ if (it == msg_cnt.end())
+ return 0;
+ return it->second;
+ }
+};
+
+
+
+/**
+ * @brief Verify that the consumer's assignment is a subset of the
+ * subscribed topics.
+ *
+ * @param allow_mismatch Allow assignment of not subscribed topics.
+ * This can happen when the subscription is updated
+ * but a rebalance callback hasn't been seen yet.
+ * @param all_assignments Accumulated assignments for all consumers.
+ * If an assigned partition already exists it means
+ * the partition is assigned to multiple consumers and
+ * the test will fail.
+ * @param exp_msg_cnt Expected message count per assigned partition, or -1
+ * if not to check.
+ *
+ * @returns the number of assigned partitions, or fails if the
+ * assignment is empty or there is an assignment for
+ * topic that is not subscribed.
+ */
+static int verify_consumer_assignment(
+ RdKafka::KafkaConsumer *consumer,
+ DefaultRebalanceCb &rebalance_cb,
+ const vector<string> &topics,
+ bool allow_empty,
+ bool allow_mismatch,
+ map<Toppar, RdKafka::KafkaConsumer *> *all_assignments,
+ int exp_msg_cnt) {
+ vector<RdKafka::TopicPartition *> partitions;
+ RdKafka::ErrorCode err;
+ int fails = 0;
+ int count;
+ ostringstream ss;
+
+ err = consumer->assignment(partitions);
+ TEST_ASSERT(!err, "Failed to get assignment for consumer %s: %s",
+ consumer->name().c_str(), RdKafka::err2str(err).c_str());
+
+ count = (int)partitions.size();
+
+ for (vector<RdKafka::TopicPartition *>::iterator it = partitions.begin();
+ it != partitions.end(); it++) {
+ RdKafka::TopicPartition *p = *it;
+
+ if (find(topics.begin(), topics.end(), p->topic()) == topics.end()) {
+ Test::Say(tostr() << (allow_mismatch ? _C_YEL "Warning (allowed)"
+ : _C_RED "Error")
+ << ": " << consumer->name() << " is assigned "
+ << p->topic() << " [" << p->partition() << "] which is "
+ << "not in the list of subscribed topics: "
+ << string_vec_to_str(topics) << "\n");
+ if (!allow_mismatch)
+ fails++;
+ }
+
+ Toppar tp(p);
+ pair<map<Toppar, RdKafka::KafkaConsumer *>::iterator, bool> ret;
+ ret = all_assignments->insert(
+ pair<Toppar, RdKafka::KafkaConsumer *>(tp, consumer));
+ if (!ret.second) {
+ Test::Say(tostr() << _C_RED << "Error: " << consumer->name()
+ << " is assigned " << p->topic() << " ["
+ << p->partition()
+ << "] which is "
+ "already assigned to consumer "
+ << ret.first->second->name() << "\n");
+ fails++;
+ }
+
+
+ int msg_cnt = rebalance_cb.get_msg_cnt(tp);
+
+ if (exp_msg_cnt != -1 && msg_cnt != exp_msg_cnt) {
+ Test::Say(tostr() << _C_RED << "Error: " << consumer->name()
+ << " expected " << exp_msg_cnt << " messages on "
+ << p->topic() << " [" << p->partition() << "], not "
+ << msg_cnt << "\n");
+ fails++;
+ }
+
+ ss << (it == partitions.begin() ? "" : ", ") << p->topic() << " ["
+ << p->partition() << "] (" << msg_cnt << "msgs)";
+ }
+
+ RdKafka::TopicPartition::destroy(partitions);
+
+ Test::Say(tostr() << "Consumer " << consumer->name() << " assignment ("
+ << count << "): " << ss.str() << "\n");
+
+ if (count == 0 && !allow_empty)
+ Test::Fail("Consumer " + consumer->name() +
+ " has unexpected empty assignment");
+
+ if (fails)
+ Test::Fail(
+ tostr() << "Consumer " + consumer->name()
+ << " assignment verification failed (see previous error)");
+
+ return count;
+}
+
+
+
+/* -------- a_assign_tests
+ *
+ * check behavior incremental assign / unassign outside the context of a
+ * rebalance.
+ */
+
+
+/** Incremental assign, then assign(NULL).
+ */
+static void assign_test_1(RdKafka::KafkaConsumer *consumer,
+ std::vector<RdKafka::TopicPartition *> toppars1,
+ std::vector<RdKafka::TopicPartition *> toppars2) {
+ RdKafka::ErrorCode err;
+ RdKafka::Error *error;
+
+ Test::Say("Incremental assign, then assign(NULL)\n");
+
+ if ((error = consumer->incremental_assign(toppars1)))
+ Test::Fail(tostr() << "Incremental assign failed: " << error->str());
+ Test::check_assignment(consumer, 1, &toppars1[0]->topic());
+
+ if ((err = consumer->unassign()))
+ Test::Fail("Unassign failed: " + RdKafka::err2str(err));
+ Test::check_assignment(consumer, 0, NULL);
+}
+
+
+/** Assign, then incremental unassign.
+ */
+static void assign_test_2(RdKafka::KafkaConsumer *consumer,
+ std::vector<RdKafka::TopicPartition *> toppars1,
+ std::vector<RdKafka::TopicPartition *> toppars2) {
+ RdKafka::ErrorCode err;
+ RdKafka::Error *error;
+
+ Test::Say("Assign, then incremental unassign\n");
+
+ if ((err = consumer->assign(toppars1)))
+ Test::Fail("Assign failed: " + RdKafka::err2str(err));
+ Test::check_assignment(consumer, 1, &toppars1[0]->topic());
+
+ if ((error = consumer->incremental_unassign(toppars1)))
+ Test::Fail("Incremental unassign failed: " + error->str());
+ Test::check_assignment(consumer, 0, NULL);
+}
+
+
+/** Incremental assign, then incremental unassign.
+ */
+static void assign_test_3(RdKafka::KafkaConsumer *consumer,
+ std::vector<RdKafka::TopicPartition *> toppars1,
+ std::vector<RdKafka::TopicPartition *> toppars2) {
+ RdKafka::Error *error;
+
+ Test::Say("Incremental assign, then incremental unassign\n");
+
+ if ((error = consumer->incremental_assign(toppars1)))
+ Test::Fail("Incremental assign failed: " + error->str());
+ Test::check_assignment(consumer, 1, &toppars1[0]->topic());
+
+ if ((error = consumer->incremental_unassign(toppars1)))
+ Test::Fail("Incremental unassign failed: " + error->str());
+ Test::check_assignment(consumer, 0, NULL);
+}
+
+
+/** Multi-topic incremental assign and unassign + message consumption.
+ */
+static void assign_test_4(RdKafka::KafkaConsumer *consumer,
+ std::vector<RdKafka::TopicPartition *> toppars1,
+ std::vector<RdKafka::TopicPartition *> toppars2) {
+ RdKafka::Error *error;
+
+ Test::Say(
+ "Multi-topic incremental assign and unassign + message consumption\n");
+
+ if ((error = consumer->incremental_assign(toppars1)))
+ Test::Fail("Incremental assign failed: " + error->str());
+ Test::check_assignment(consumer, 1, &toppars1[0]->topic());
+
+ RdKafka::Message *m = consumer->consume(5000);
+ if (m->err() != RdKafka::ERR_NO_ERROR)
+ Test::Fail("Expecting a consumed message.");
+ if (m->len() != 100)
+ Test::Fail(tostr() << "Expecting msg len to be 100, not: "
+ << m->len()); /* implies read from topic 1. */
+ delete m;
+
+ if ((error = consumer->incremental_unassign(toppars1)))
+ Test::Fail("Incremental unassign failed: " + error->str());
+ Test::check_assignment(consumer, 0, NULL);
+
+ m = consumer->consume(100);
+ if (m->err() != RdKafka::ERR__TIMED_OUT)
+ Test::Fail("Not expecting a consumed message.");
+ delete m;
+
+ if ((error = consumer->incremental_assign(toppars2)))
+ Test::Fail("Incremental assign failed: " + error->str());
+ Test::check_assignment(consumer, 1, &toppars2[0]->topic());
+
+ m = consumer->consume(5000);
+ if (m->err() != RdKafka::ERR_NO_ERROR)
+ Test::Fail("Expecting a consumed message.");
+ if (m->len() != 200)
+ Test::Fail(tostr() << "Expecting msg len to be 200, not: "
+ << m->len()); /* implies read from topic 2. */
+ delete m;
+
+ if ((error = consumer->incremental_assign(toppars1)))
+ Test::Fail("Incremental assign failed: " + error->str());
+ if (Test::assignment_partition_count(consumer, NULL) != 2)
+ Test::Fail(tostr() << "Expecting current assignment to have size 2, not: "
+ << Test::assignment_partition_count(consumer, NULL));
+
+ m = consumer->consume(5000);
+ if (m->err() != RdKafka::ERR_NO_ERROR)
+ Test::Fail("Expecting a consumed message.");
+ delete m;
+
+ if ((error = consumer->incremental_unassign(toppars2)))
+ Test::Fail("Incremental unassign failed: " + error->str());
+ if ((error = consumer->incremental_unassign(toppars1)))
+ Test::Fail("Incremental unassign failed: " + error->str());
+ Test::check_assignment(consumer, 0, NULL);
+}
+
+
+/** Incremental assign and unassign of empty collection.
+ */
+static void assign_test_5(RdKafka::KafkaConsumer *consumer,
+ std::vector<RdKafka::TopicPartition *> toppars1,
+ std::vector<RdKafka::TopicPartition *> toppars2) {
+ RdKafka::Error *error;
+ std::vector<RdKafka::TopicPartition *> toppars3;
+
+ Test::Say("Incremental assign and unassign of empty collection\n");
+
+ if ((error = consumer->incremental_assign(toppars3)))
+ Test::Fail("Incremental assign failed: " + error->str());
+ Test::check_assignment(consumer, 0, NULL);
+
+ if ((error = consumer->incremental_unassign(toppars3)))
+ Test::Fail("Incremental unassign failed: " + error->str());
+ Test::check_assignment(consumer, 0, NULL);
+}
+
+
+
+static void run_test(
+ const std::string &t1,
+ const std::string &t2,
+ void (*test)(RdKafka::KafkaConsumer *consumer,
+ std::vector<RdKafka::TopicPartition *> toppars1,
+ std::vector<RdKafka::TopicPartition *> toppars2)) {
+ std::vector<RdKafka::TopicPartition *> toppars1;
+ toppars1.push_back(RdKafka::TopicPartition::create(t1, 0));
+ std::vector<RdKafka::TopicPartition *> toppars2;
+ toppars2.push_back(RdKafka::TopicPartition::create(t2, 0));
+
+ RdKafka::KafkaConsumer *consumer =
+ make_consumer("C_1", t1, "cooperative-sticky", NULL, NULL, 10);
+
+ test(consumer, toppars1, toppars2);
+
+ RdKafka::TopicPartition::destroy(toppars1);
+ RdKafka::TopicPartition::destroy(toppars2);
+
+ consumer->close();
+ delete consumer;
+}
+
+
+static void a_assign_tests() {
+ SUB_TEST_QUICK();
+
+ int msgcnt = 1000;
+ const int msgsize1 = 100;
+ const int msgsize2 = 200;
+
+ std::string topic1_str = Test::mk_topic_name("0113-a1", 1);
+ test_create_topic(NULL, topic1_str.c_str(), 1, 1);
+ std::string topic2_str = Test::mk_topic_name("0113-a2", 1);
+ test_create_topic(NULL, topic2_str.c_str(), 1, 1);
+
+ test_produce_msgs_easy_size(topic1_str.c_str(), 0, 0, msgcnt, msgsize1);
+ test_produce_msgs_easy_size(topic2_str.c_str(), 0, 0, msgcnt, msgsize2);
+
+ run_test(topic1_str, topic2_str, assign_test_1);
+ run_test(topic1_str, topic2_str, assign_test_2);
+ run_test(topic1_str, topic2_str, assign_test_3);
+ run_test(topic1_str, topic2_str, assign_test_4);
+ run_test(topic1_str, topic2_str, assign_test_5);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Quick Assign 1,2, Assign 2,3, Assign 1,2,3 test to verify
+ * that the correct OffsetFetch response is used.
+ * See note in rdkafka_assignment.c for details.
+ *
+ * Makes use of the mock cluster to induce latency.
+ */
+static void a_assign_rapid() {
+ SUB_TEST_QUICK();
+
+ std::string group_id = __FUNCTION__;
+
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *bootstraps;
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+ int32_t coord_id = 1;
+ rd_kafka_mock_coordinator_set(mcluster, "group", group_id.c_str(), coord_id);
+
+ rd_kafka_mock_topic_create(mcluster, "topic1", 1, 1);
+ rd_kafka_mock_topic_create(mcluster, "topic2", 1, 1);
+ rd_kafka_mock_topic_create(mcluster, "topic3", 1, 1);
+
+ /*
+ * Produce messages to topics
+ */
+ const int msgs_per_partition = 1000;
+
+ RdKafka::Conf *pconf;
+ Test::conf_init(&pconf, NULL, 10);
+ Test::conf_set(pconf, "bootstrap.servers", bootstraps);
+ Test::conf_set(pconf, "security.protocol", "plaintext");
+ std::string errstr;
+ RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr);
+ if (!p)
+ Test::Fail(tostr() << __FUNCTION__
+ << ": Failed to create producer: " << errstr);
+ delete pconf;
+
+ Test::produce_msgs(p, "topic1", 0, msgs_per_partition, 10,
+ false /*no flush*/);
+ Test::produce_msgs(p, "topic2", 0, msgs_per_partition, 10,
+ false /*no flush*/);
+ Test::produce_msgs(p, "topic3", 0, msgs_per_partition, 10,
+ false /*no flush*/);
+ p->flush(10 * 1000);
+
+ delete p;
+
+ vector<RdKafka::TopicPartition *> toppars1;
+ toppars1.push_back(RdKafka::TopicPartition::create("topic1", 0));
+ vector<RdKafka::TopicPartition *> toppars2;
+ toppars2.push_back(RdKafka::TopicPartition::create("topic2", 0));
+ vector<RdKafka::TopicPartition *> toppars3;
+ toppars3.push_back(RdKafka::TopicPartition::create("topic3", 0));
+
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 20);
+ Test::conf_set(conf, "bootstrap.servers", bootstraps);
+ Test::conf_set(conf, "security.protocol", "plaintext");
+ Test::conf_set(conf, "client.id", __FUNCTION__);
+ Test::conf_set(conf, "group.id", group_id);
+ Test::conf_set(conf, "auto.offset.reset", "earliest");
+ Test::conf_set(conf, "enable.auto.commit", "false");
+
+ RdKafka::KafkaConsumer *consumer;
+ consumer = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!consumer)
+ Test::Fail(tostr() << __FUNCTION__
+ << ": Failed to create consumer: " << errstr);
+ delete conf;
+
+ vector<RdKafka::TopicPartition *> toppars;
+ vector<RdKafka::TopicPartition *> expected;
+
+ map<Toppar, int64_t> pos; /* Expected consume position per partition */
+ pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 0;
+ pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 0;
+ pos[Toppar(toppars3[0]->topic(), toppars3[0]->partition())] = 0;
+
+ /* To make sure offset commits are fetched in proper assign sequence
+ * we commit an offset that should not be used in the final consume loop.
+ * This commit will be overwritten below with another commit. */
+ vector<RdKafka::TopicPartition *> offsets;
+ offsets.push_back(RdKafka::TopicPartition::create(
+ toppars1[0]->topic(), toppars1[0]->partition(), 11));
+ /* This partition should start at this position even though
+ * there will be a sub-sequent commit to overwrite it, that should not
+ * be used since this partition is never unassigned. */
+ offsets.push_back(RdKafka::TopicPartition::create(
+ toppars2[0]->topic(), toppars2[0]->partition(), 22));
+ pos[Toppar(toppars2[0]->topic(), toppars2[0]->partition())] = 22;
+
+ Test::print_TopicPartitions("pre-commit", offsets);
+
+ RdKafka::ErrorCode err;
+ err = consumer->commitSync(offsets);
+ if (err)
+ Test::Fail(tostr() << __FUNCTION__ << ": pre-commit failed: "
+ << RdKafka::err2str(err) << "\n");
+
+ /* Add coordinator delay so that the OffsetFetchRequest originating
+ * from the coming incremental_assign() will not finish before
+ * we call incremental_unassign() and incremental_assign() again, resulting
+ * in a situation where the initial OffsetFetchResponse will contain
+ * an older offset for a previous assignment of one partition. */
+ rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 5000);
+
+
+ /* Assign 1,2 == 1,2 */
+ toppars.push_back(toppars1[0]);
+ toppars.push_back(toppars2[0]);
+ expected.push_back(toppars1[0]);
+ expected.push_back(toppars2[0]);
+ Test::incremental_assign(consumer, toppars);
+ expect_assignment(consumer, expected);
+
+ /* Unassign -1 == 2 */
+ toppars.clear();
+ toppars.push_back(toppars1[0]);
+ vector<RdKafka::TopicPartition *>::iterator it =
+ find(expected.begin(), expected.end(), toppars1[0]);
+ expected.erase(it);
+
+ Test::incremental_unassign(consumer, toppars);
+ expect_assignment(consumer, expected);
+
+
+ /* Commit offset for the removed partition and the partition that is
+ * unchanged in the assignment. */
+ RdKafka::TopicPartition::destroy(offsets);
+ offsets.push_back(RdKafka::TopicPartition::create(
+ toppars1[0]->topic(), toppars1[0]->partition(), 55));
+ offsets.push_back(RdKafka::TopicPartition::create(
+ toppars2[0]->topic(), toppars2[0]->partition(), 33)); /* should not be
+ * used. */
+ pos[Toppar(toppars1[0]->topic(), toppars1[0]->partition())] = 55;
+ Test::print_TopicPartitions("commit", offsets);
+
+ err = consumer->commitAsync(offsets);
+ if (err)
+ Test::Fail(tostr() << __FUNCTION__
+ << ": commit failed: " << RdKafka::err2str(err) << "\n");
+
+ /* Assign +3 == 2,3 */
+ toppars.clear();
+ toppars.push_back(toppars3[0]);
+ expected.push_back(toppars3[0]);
+ Test::incremental_assign(consumer, toppars);
+ expect_assignment(consumer, expected);
+
+ /* Now remove the latency */
+ Test::Say(_C_MAG "Clearing rtt\n");
+ rd_kafka_mock_broker_set_rtt(mcluster, coord_id, 0);
+
+ /* Assign +1 == 1,2,3 */
+ toppars.clear();
+ toppars.push_back(toppars1[0]);
+ expected.push_back(toppars1[0]);
+ Test::incremental_assign(consumer, toppars);
+ expect_assignment(consumer, expected);
+
+ /*
+ * Verify consumed messages
+ */
+ int wait_end = (int)expected.size();
+ while (wait_end > 0) {
+ RdKafka::Message *msg = consumer->consume(10 * 1000);
+ if (msg->err() == RdKafka::ERR__TIMED_OUT)
+ Test::Fail(tostr() << __FUNCTION__
+ << ": Consume timed out waiting "
+ "for "
+ << wait_end << " more partitions");
+
+ Toppar tp = Toppar(msg->topic_name(), msg->partition());
+ int64_t *exp_pos = &pos[tp];
+
+ Test::Say(3, tostr() << __FUNCTION__ << ": Received " << tp.topic << " ["
+ << tp.partition << "] at offset " << msg->offset()
+ << " (expected offset " << *exp_pos << ")\n");
+
+ if (*exp_pos != msg->offset())
+ Test::Fail(tostr() << __FUNCTION__ << ": expected message offset "
+ << *exp_pos << " for " << msg->topic_name() << " ["
+ << msg->partition() << "], not " << msg->offset()
+ << "\n");
+ (*exp_pos)++;
+ if (*exp_pos == msgs_per_partition) {
+ TEST_ASSERT(wait_end > 0, "");
+ wait_end--;
+ } else if (msg->offset() > msgs_per_partition)
+ Test::Fail(tostr() << __FUNCTION__ << ": unexpected message with "
+ << "offset " << msg->offset() << " on " << tp.topic
+ << " [" << tp.partition << "]\n");
+
+ delete msg;
+ }
+
+ RdKafka::TopicPartition::destroy(offsets);
+ RdKafka::TopicPartition::destroy(toppars1);
+ RdKafka::TopicPartition::destroy(toppars2);
+ RdKafka::TopicPartition::destroy(toppars3);
+
+ delete consumer;
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+
+/* Check behavior when:
+ * 1. single topic with 2 partitions.
+ * 2. consumer 1 (with rebalance_cb) subscribes to it.
+ * 3. consumer 2 (with rebalance_cb) subscribes to it.
+ * 4. close.
+ */
+
+static void b_subscribe_with_cb_test(rd_bool_t close_consumer) {
+ SUB_TEST();
+
+ std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+ test_create_topic(NULL, topic_name.c_str(), 2, 1);
+
+ DefaultRebalanceCb rebalance_cb1;
+ RdKafka::KafkaConsumer *c1 = make_consumer(
+ "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 25);
+ DefaultRebalanceCb rebalance_cb2;
+ RdKafka::KafkaConsumer *c2 = make_consumer(
+ "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 25);
+ test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000);
+
+ Test::subscribe(c1, topic_name);
+
+ bool c2_subscribed = false;
+ while (true) {
+ Test::poll_once(c1, 500);
+ Test::poll_once(c2, 500);
+
+ /* Start c2 after c1 has received initial assignment */
+ if (!c2_subscribed && rebalance_cb1.assign_call_cnt > 0) {
+ Test::subscribe(c2, topic_name);
+ c2_subscribed = true;
+ }
+
+ /* Failure case: test will time out. */
+ if (rebalance_cb1.assign_call_cnt == 3 &&
+ rebalance_cb2.assign_call_cnt == 2) {
+ break;
+ }
+ }
+
+ /* Sequence of events:
+ *
+ * 1. c1 joins group.
+ * 2. c1 gets assigned 2 partitions.
+ * - there isn't a follow-on rebalance because there aren't any revoked
+ * partitions.
+ * 3. c2 joins group.
+ * 4. This results in a rebalance with one partition being revoked from c1,
+ * and no partitions assigned to either c1 or c2 (however the rebalance
+ * callback will be called in each case with an empty set).
+ * 5. c1 then re-joins the group since it had a partition revoked.
+ * 6. c2 is now assigned a single partition, and c1's incremental assignment
+ * is empty.
+ * 7. Since there were no revoked partitions, no further rebalance is
+ * triggered.
+ */
+
+ /* The rebalance cb is always called on assign, even if empty. */
+ if (rebalance_cb1.assign_call_cnt != 3)
+ Test::Fail(tostr() << "Expecting 3 assign calls on consumer 1, not "
+ << rebalance_cb1.assign_call_cnt);
+ if (rebalance_cb2.assign_call_cnt != 2)
+ Test::Fail(tostr() << "Expecting 2 assign calls on consumer 2, not: "
+ << rebalance_cb2.assign_call_cnt);
+
+ /* The rebalance cb is not called on and empty revoke (unless partitions lost,
+ * which is not the case here) */
+ if (rebalance_cb1.revoke_call_cnt != 1)
+ Test::Fail(tostr() << "Expecting 1 revoke call on consumer 1, not: "
+ << rebalance_cb1.revoke_call_cnt);
+ if (rebalance_cb2.revoke_call_cnt != 0)
+ Test::Fail(tostr() << "Expecting 0 revoke calls on consumer 2, not: "
+ << rebalance_cb2.revoke_call_cnt);
+
+ /* Final state */
+
+ /* Expect both consumers to have 1 assigned partition (via net calculation in
+ * rebalance_cb) */
+ if (rebalance_cb1.partitions_assigned_net != 1)
+ Test::Fail(tostr()
+ << "Expecting consumer 1 to have net 1 assigned partition, not: "
+ << rebalance_cb1.partitions_assigned_net);
+ if (rebalance_cb2.partitions_assigned_net != 1)
+ Test::Fail(tostr()
+ << "Expecting consumer 2 to have net 1 assigned partition, not: "
+ << rebalance_cb2.partitions_assigned_net);
+
+ /* Expect both consumers to have 1 assigned partition (via ->assignment()
+ * query) */
+ expect_assignment(c1, 1);
+ expect_assignment(c2, 1);
+
+ /* Make sure the fetchers are running */
+ int msgcnt = 100;
+ const int msgsize1 = 100;
+ test_produce_msgs_easy_size(topic_name.c_str(), 0, 0, msgcnt, msgsize1);
+ test_produce_msgs_easy_size(topic_name.c_str(), 0, 1, msgcnt, msgsize1);
+
+ bool consumed_from_c1 = false;
+ bool consumed_from_c2 = false;
+ while (true) {
+ RdKafka::Message *msg1 = c1->consume(100);
+ RdKafka::Message *msg2 = c2->consume(100);
+
+ if (msg1->err() == RdKafka::ERR_NO_ERROR)
+ consumed_from_c1 = true;
+ if (msg1->err() == RdKafka::ERR_NO_ERROR)
+ consumed_from_c2 = true;
+
+ delete msg1;
+ delete msg2;
+
+ /* Failure case: test will timeout. */
+ if (consumed_from_c1 && consumed_from_c2)
+ break;
+ }
+
+ if (!close_consumer) {
+ delete c1;
+ delete c2;
+ return;
+ }
+
+ c1->close();
+ c2->close();
+
+ /* Closing the consumer should trigger rebalance_cb (revoke): */
+ if (rebalance_cb1.revoke_call_cnt != 2)
+ Test::Fail(tostr() << "Expecting 2 revoke calls on consumer 1, not: "
+ << rebalance_cb1.revoke_call_cnt);
+ if (rebalance_cb2.revoke_call_cnt != 1)
+ Test::Fail(tostr() << "Expecting 1 revoke call on consumer 2, not: "
+ << rebalance_cb2.revoke_call_cnt);
+
+ /* ..and net assigned partitions should drop to 0 in both cases: */
+ if (rebalance_cb1.partitions_assigned_net != 0)
+ Test::Fail(
+ tostr()
+ << "Expecting consumer 1 to have net 0 assigned partitions, not: "
+ << rebalance_cb1.partitions_assigned_net);
+ if (rebalance_cb2.partitions_assigned_net != 0)
+ Test::Fail(
+ tostr()
+ << "Expecting consumer 2 to have net 0 assigned partitions, not: "
+ << rebalance_cb2.partitions_assigned_net);
+
+ /* Nothing in this test should result in lost partitions */
+ if (rebalance_cb1.lost_call_cnt > 0)
+ Test::Fail(
+ tostr() << "Expecting consumer 1 to have 0 lost partition events, not: "
+ << rebalance_cb1.lost_call_cnt);
+ if (rebalance_cb2.lost_call_cnt > 0)
+ Test::Fail(
+ tostr() << "Expecting consumer 2 to have 0 lost partition events, not: "
+ << rebalance_cb2.lost_call_cnt);
+
+ delete c1;
+ delete c2;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Single topic with 2 partitions.
+ * 2. Consumer 1 (no rebalance_cb) subscribes to it.
+ * 3. Consumer 2 (no rebalance_cb) subscribes to it.
+ * 4. Close.
+ */
+
+static void c_subscribe_no_cb_test(rd_bool_t close_consumer) {
+ SUB_TEST();
+
+ std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+ test_create_topic(NULL, topic_name.c_str(), 2, 1);
+
+ RdKafka::KafkaConsumer *c1 =
+ make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 20);
+ RdKafka::KafkaConsumer *c2 =
+ make_consumer("C_2", group_name, "cooperative-sticky", NULL, NULL, 20);
+ test_wait_topic_exists(c1->c_ptr(), topic_name.c_str(), 10 * 1000);
+
+ Test::subscribe(c1, topic_name);
+
+ bool c2_subscribed = false;
+ bool done = false;
+ while (!done) {
+ Test::poll_once(c1, 500);
+ Test::poll_once(c2, 500);
+
+ if (Test::assignment_partition_count(c1, NULL) == 2 && !c2_subscribed) {
+ Test::subscribe(c2, topic_name);
+ c2_subscribed = true;
+ }
+
+ if (Test::assignment_partition_count(c1, NULL) == 1 &&
+ Test::assignment_partition_count(c2, NULL) == 1) {
+ Test::Say("Consumer 1 and 2 are both assigned to single partition.\n");
+ done = true;
+ }
+ }
+
+ if (close_consumer) {
+ Test::Say("Closing consumer 1\n");
+ c1->close();
+ Test::Say("Closing consumer 2\n");
+ c2->close();
+ } else {
+ Test::Say("Skipping close() of consumer 1 and 2.\n");
+ }
+
+ delete c1;
+ delete c2;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Single consumer (no rebalance_cb) subscribes to topic.
+ * 2. Subscription is changed (topic added).
+ * 3. Consumer is closed.
+ */
+
+static void d_change_subscription_add_topic(rd_bool_t close_consumer) {
+ SUB_TEST();
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
+ std::string topic_name_2 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
+
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ RdKafka::KafkaConsumer *c =
+ make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+ test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name_1);
+
+ bool subscribed_to_one_topic = false;
+ bool done = false;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ if (Test::assignment_partition_count(c, NULL) == 2 &&
+ !subscribed_to_one_topic) {
+ subscribed_to_one_topic = true;
+ Test::subscribe(c, topic_name_1, topic_name_2);
+ }
+
+ if (Test::assignment_partition_count(c, NULL) == 4) {
+ Test::Say("Consumer is assigned to two topics.\n");
+ done = true;
+ }
+ }
+
+ if (close_consumer) {
+ Test::Say("Closing consumer\n");
+ c->close();
+ } else
+ Test::Say("Skipping close() of consumer\n");
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Single consumer (no rebalance_cb) subscribes to topic.
+ * 2. Subscription is changed (topic added).
+ * 3. Consumer is closed.
+ */
+
+static void e_change_subscription_remove_topic(rd_bool_t close_consumer) {
+ SUB_TEST();
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
+ std::string topic_name_2 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
+
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ RdKafka::KafkaConsumer *c =
+ make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+ test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name_1, topic_name_2);
+
+ bool subscribed_to_two_topics = false;
+ bool done = false;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ if (Test::assignment_partition_count(c, NULL) == 4 &&
+ !subscribed_to_two_topics) {
+ subscribed_to_two_topics = true;
+ Test::subscribe(c, topic_name_1);
+ }
+
+ if (Test::assignment_partition_count(c, NULL) == 2) {
+ Test::Say("Consumer is assigned to one topic\n");
+ done = true;
+ }
+ }
+
+ if (!close_consumer) {
+ Test::Say("Closing consumer\n");
+ c->close();
+ } else
+ Test::Say("Skipping close() of consumer\n");
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check that use of consumer->assign() and consumer->unassign() is disallowed
+ * when a COOPERATIVE assignor is in use.
+ *
+ * Except when the consumer is closing, where all forms of unassign are
+ * allowed and treated as a full unassign.
+ */
+
+class FTestRebalanceCb : public RdKafka::RebalanceCb {
+ public:
+ bool assigned;
+ bool closing;
+
+ FTestRebalanceCb() : assigned(false), closing(false) {
+ }
+
+ void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " "
+ << RdKafka::err2str(err) << (closing ? " (closing)" : "")
+ << "\n");
+
+ if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+ RdKafka::ErrorCode err_resp = consumer->assign(partitions);
+ Test::Say(tostr() << "consumer->assign() response code: " << err_resp
+ << "\n");
+ if (err_resp != RdKafka::ERR__STATE)
+ Test::Fail(tostr() << "Expected assign to fail with error code: "
+ << RdKafka::ERR__STATE << "(ERR__STATE)");
+
+ RdKafka::Error *error = consumer->incremental_assign(partitions);
+ if (error)
+ Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
+ << error->str());
+
+ assigned = true;
+
+ } else {
+ RdKafka::ErrorCode err_resp = consumer->unassign();
+ Test::Say(tostr() << "consumer->unassign() response code: " << err_resp
+ << "\n");
+
+ if (!closing) {
+ if (err_resp != RdKafka::ERR__STATE)
+ Test::Fail(tostr() << "Expected assign to fail with error code: "
+ << RdKafka::ERR__STATE << "(ERR__STATE)");
+
+ RdKafka::Error *error = consumer->incremental_unassign(partitions);
+ if (error)
+ Test::Fail(tostr() << "consumer->incremental_unassign() failed: "
+ << error->str());
+
+ } else {
+ /* During termination (close()) any type of unassign*() is allowed. */
+ if (err_resp)
+ Test::Fail(tostr() << "Expected unassign to succeed during close, "
+ "but got: "
+ << RdKafka::ERR__STATE << "(ERR__STATE)");
+ }
+ }
+ }
+};
+
+
+static void f_assign_call_cooperative() {
+ SUB_TEST();
+
+ std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name.c_str(), 1, 1);
+
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
+ FTestRebalanceCb rebalance_cb;
+ RdKafka::KafkaConsumer *c =
+ make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name);
+
+ while (!rebalance_cb.assigned)
+ Test::poll_once(c, 500);
+
+ rebalance_cb.closing = true;
+ c->close();
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check that use of consumer->incremental_assign() and
+ * consumer->incremental_unassign() is disallowed when an EAGER assignor is in
+ * use.
+ */
+class GTestRebalanceCb : public RdKafka::RebalanceCb {
+ public:
+ bool assigned;
+ bool closing;
+
+ GTestRebalanceCb() : assigned(false), closing(false) {
+ }
+
+ void rebalance_cb(RdKafka::KafkaConsumer *consumer,
+ RdKafka::ErrorCode err,
+ std::vector<RdKafka::TopicPartition *> &partitions) {
+ Test::Say(tostr() << "RebalanceCb: " << consumer->name() << " "
+ << RdKafka::err2str(err) << "\n");
+
+ if (err == RdKafka::ERR__ASSIGN_PARTITIONS) {
+ RdKafka::Error *error = consumer->incremental_assign(partitions);
+ Test::Say(tostr() << "consumer->incremental_assign() response: "
+ << (!error ? "NULL" : error->str()) << "\n");
+ if (!error)
+ Test::Fail("Expected consumer->incremental_assign() to fail");
+ if (error->code() != RdKafka::ERR__STATE)
+ Test::Fail(tostr() << "Expected consumer->incremental_assign() to fail "
+ "with error code "
+ << RdKafka::ERR__STATE);
+ delete error;
+
+ RdKafka::ErrorCode err_resp = consumer->assign(partitions);
+ if (err_resp)
+ Test::Fail(tostr() << "consumer->assign() failed: " << err_resp);
+
+ assigned = true;
+
+ } else {
+ RdKafka::Error *error = consumer->incremental_unassign(partitions);
+ Test::Say(tostr() << "consumer->incremental_unassign() response: "
+ << (!error ? "NULL" : error->str()) << "\n");
+
+ if (!closing) {
+ if (!error)
+ Test::Fail("Expected consumer->incremental_unassign() to fail");
+ if (error->code() != RdKafka::ERR__STATE)
+ Test::Fail(tostr() << "Expected consumer->incremental_unassign() to "
+ "fail with error code "
+ << RdKafka::ERR__STATE);
+ delete error;
+
+ RdKafka::ErrorCode err_resp = consumer->unassign();
+ if (err_resp)
+ Test::Fail(tostr() << "consumer->unassign() failed: " << err_resp);
+
+ } else {
+ /* During termination (close()) any type of unassign*() is allowed. */
+ if (error)
+ Test::Fail(
+ tostr()
+ << "Expected incremental_unassign to succeed during close, "
+ "but got: "
+ << RdKafka::ERR__STATE << "(ERR__STATE)");
+ }
+ }
+ }
+};
+
+static void g_incremental_assign_call_eager() {
+ SUB_TEST();
+
+ std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name.c_str(), 1, 1);
+
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
+ GTestRebalanceCb rebalance_cb;
+ RdKafka::KafkaConsumer *c = make_consumer(
+ "C_1", group_name, "roundrobin", &additional_conf, &rebalance_cb, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name);
+
+ while (!rebalance_cb.assigned)
+ Test::poll_once(c, 500);
+
+ rebalance_cb.closing = true;
+ c->close();
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Single consumer (rebalance_cb) subscribes to two topics.
+ * 2. One of the topics is deleted.
+ * 3. Consumer is closed.
+ */
+
+static void h_delete_topic() {
+ SUB_TEST();
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
+ std::string topic_name_2 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_2.c_str(), 1, 1);
+
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
+ DefaultRebalanceCb rebalance_cb;
+ RdKafka::KafkaConsumer *c =
+ make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+ test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name_1, topic_name_2);
+
+ bool deleted = false;
+ bool done = false;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ std::vector<RdKafka::TopicPartition *> partitions;
+ c->assignment(partitions);
+
+ if (partitions.size() == 2 && !deleted) {
+ if (rebalance_cb.assign_call_cnt != 1)
+ Test::Fail(tostr() << "Expected 1 assign call, saw "
+ << rebalance_cb.assign_call_cnt << "\n");
+ Test::delete_topic(c, topic_name_2.c_str());
+ deleted = true;
+ }
+
+ if (partitions.size() == 1 && deleted) {
+ if (partitions[0]->topic() != topic_name_1)
+ Test::Fail(tostr() << "Expecting subscribed topic to be '"
+ << topic_name_1 << "' not '"
+ << partitions[0]->topic() << "'");
+ Test::Say(tostr() << "Assignment no longer includes deleted topic '"
+ << topic_name_2 << "'\n");
+ done = true;
+ }
+
+ RdKafka::TopicPartition::destroy(partitions);
+ }
+
+ Test::Say("Closing consumer\n");
+ c->close();
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Single consumer (rebalance_cb) subscribes to a single topic.
+ * 2. That topic is deleted leaving no topics.
+ * 3. Consumer is closed.
+ */
+
+static void i_delete_topic_2() {
+ SUB_TEST();
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
+ DefaultRebalanceCb rebalance_cb;
+ RdKafka::KafkaConsumer *c =
+ make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name_1);
+
+ bool deleted = false;
+ bool done = false;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) {
+ if (rebalance_cb.assign_call_cnt != 1)
+ Test::Fail(tostr() << "Expected one assign call, saw "
+ << rebalance_cb.assign_call_cnt << "\n");
+ Test::delete_topic(c, topic_name_1.c_str());
+ deleted = true;
+ }
+
+ if (Test::assignment_partition_count(c, NULL) == 0 && deleted) {
+ Test::Say(tostr() << "Assignment is empty following deletion of topic\n");
+ done = true;
+ }
+ }
+
+ Test::Say("Closing consumer\n");
+ c->close();
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. single consumer (without rebalance_cb) subscribes to a single topic.
+ * 2. that topic is deleted leaving no topics.
+ * 3. consumer is closed.
+ */
+
+static void j_delete_topic_no_rb_callback() {
+ SUB_TEST();
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
+
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
+ RdKafka::KafkaConsumer *c = make_consumer(
+ "C_1", group_name, "cooperative-sticky", &additional_conf, NULL, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name_1);
+
+ bool deleted = false;
+ bool done = false;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ if (Test::assignment_partition_count(c, NULL) == 1 && !deleted) {
+ Test::delete_topic(c, topic_name_1.c_str());
+ deleted = true;
+ }
+
+ if (Test::assignment_partition_count(c, NULL) == 0 && deleted) {
+ Test::Say(tostr() << "Assignment is empty following deletion of topic\n");
+ done = true;
+ }
+ }
+
+ Test::Say("Closing consumer\n");
+ c->close();
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Single consumer (rebalance_cb) subscribes to a 1 partition topic.
+ * 2. Number of partitions is increased to 2.
+ * 3. Consumer is closed.
+ */
+
+static void k_add_partition() {
+ SUB_TEST();
+
+ std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ test_create_topic(NULL, topic_name.c_str(), 1, 1);
+
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
+ DefaultRebalanceCb rebalance_cb;
+ RdKafka::KafkaConsumer *c =
+ make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name);
+
+ bool subscribed = false;
+ bool done = false;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ if (Test::assignment_partition_count(c, NULL) == 1 && !subscribed) {
+ if (rebalance_cb.assign_call_cnt != 1)
+ Test::Fail(tostr() << "Expected 1 assign call, saw "
+ << rebalance_cb.assign_call_cnt);
+ if (rebalance_cb.revoke_call_cnt != 0)
+ Test::Fail(tostr() << "Expected 0 revoke calls, saw "
+ << rebalance_cb.revoke_call_cnt);
+ Test::create_partitions(c, topic_name.c_str(), 2);
+ subscribed = true;
+ }
+
+ if (Test::assignment_partition_count(c, NULL) == 2 && subscribed) {
+ if (rebalance_cb.assign_call_cnt != 2)
+ Test::Fail(tostr() << "Expected 2 assign calls, saw "
+ << rebalance_cb.assign_call_cnt);
+ if (rebalance_cb.revoke_call_cnt != 0)
+ Test::Fail(tostr() << "Expected 0 revoke calls, saw "
+ << rebalance_cb.revoke_call_cnt);
+ done = true;
+ }
+ }
+
+ Test::Say("Closing consumer\n");
+ c->close();
+ delete c;
+
+ if (rebalance_cb.assign_call_cnt != 2)
+ Test::Fail(tostr() << "Expected 2 assign calls, saw "
+ << rebalance_cb.assign_call_cnt);
+ if (rebalance_cb.revoke_call_cnt != 1)
+ Test::Fail(tostr() << "Expected 1 revoke call, saw "
+ << rebalance_cb.revoke_call_cnt);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. two consumers (with rebalance_cb's) subscribe to two topics.
+ * 2. one of the consumers calls unsubscribe.
+ * 3. consumers closed.
+ */
+
+static void l_unsubscribe() {
+ SUB_TEST();
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string topic_name_2 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+ test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
+ test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
+
+ DefaultRebalanceCb rebalance_cb1;
+ RdKafka::KafkaConsumer *c1 = make_consumer(
+ "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb1, 30);
+ test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+ test_wait_topic_exists(c1->c_ptr(), topic_name_2.c_str(), 10 * 1000);
+
+ Test::subscribe(c1, topic_name_1, topic_name_2);
+
+ DefaultRebalanceCb rebalance_cb2;
+ RdKafka::KafkaConsumer *c2 = make_consumer(
+ "C_2", group_name, "cooperative-sticky", NULL, &rebalance_cb2, 30);
+ Test::subscribe(c2, topic_name_1, topic_name_2);
+
+ bool done = false;
+ bool unsubscribed = false;
+ while (!done) {
+ Test::poll_once(c1, 500);
+ Test::poll_once(c2, 500);
+
+ if (Test::assignment_partition_count(c1, NULL) == 2 &&
+ Test::assignment_partition_count(c2, NULL) == 2) {
+ if (rebalance_cb1.assign_call_cnt != 1)
+ Test::Fail(
+ tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: "
+ << rebalance_cb1.assign_call_cnt);
+ if (rebalance_cb2.assign_call_cnt != 1)
+ Test::Fail(
+ tostr() << "Expecting consumer 2's assign_call_cnt to be 1 not: "
+ << rebalance_cb2.assign_call_cnt);
+ Test::Say("Unsubscribing consumer 1 from both topics\n");
+ c1->unsubscribe();
+ unsubscribed = true;
+ }
+
+ if (unsubscribed && Test::assignment_partition_count(c1, NULL) == 0 &&
+ Test::assignment_partition_count(c2, NULL) == 4) {
+ if (rebalance_cb1.assign_call_cnt !=
+ 1) /* is now unsubscribed, so rebalance_cb will no longer be called.
+ */
+ Test::Fail(
+ tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: "
+ << rebalance_cb1.assign_call_cnt);
+ if (rebalance_cb2.assign_call_cnt != 2)
+ Test::Fail(
+ tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: "
+ << rebalance_cb2.assign_call_cnt);
+ if (rebalance_cb1.revoke_call_cnt != 1)
+ Test::Fail(
+ tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: "
+ << rebalance_cb1.revoke_call_cnt);
+ if (rebalance_cb2.revoke_call_cnt !=
+ 0) /* the rebalance_cb should not be called if the revoked partition
+ list is empty */
+ Test::Fail(
+ tostr() << "Expecting consumer 2's revoke_call_cnt to be 0 not: "
+ << rebalance_cb2.revoke_call_cnt);
+ Test::Say("Unsubscribe completed");
+ done = true;
+ }
+ }
+
+ Test::Say("Closing consumer 1\n");
+ c1->close();
+ Test::Say("Closing consumer 2\n");
+ c2->close();
+
+ /* there should be no assign rebalance_cb calls on close */
+ if (rebalance_cb1.assign_call_cnt != 1)
+ Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 1 not: "
+ << rebalance_cb1.assign_call_cnt);
+ if (rebalance_cb2.assign_call_cnt != 2)
+ Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 2 not: "
+ << rebalance_cb2.assign_call_cnt);
+
+ if (rebalance_cb1.revoke_call_cnt !=
+ 1) /* should not be called a second revoke rebalance_cb */
+ Test::Fail(tostr() << "Expecting consumer 1's revoke_call_cnt to be 1 not: "
+ << rebalance_cb1.revoke_call_cnt);
+ if (rebalance_cb2.revoke_call_cnt != 1)
+ Test::Fail(tostr() << "Expecting consumer 2's revoke_call_cnt to be 1 not: "
+ << rebalance_cb2.revoke_call_cnt);
+
+ if (rebalance_cb1.lost_call_cnt != 0)
+ Test::Fail(tostr() << "Expecting consumer 1's lost_call_cnt to be 0, not: "
+ << rebalance_cb1.lost_call_cnt);
+ if (rebalance_cb2.lost_call_cnt != 0)
+ Test::Fail(tostr() << "Expecting consumer 2's lost_call_cnt to be 0, not: "
+ << rebalance_cb2.lost_call_cnt);
+
+ delete c1;
+ delete c2;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. A consumers (with no rebalance_cb) subscribes to a topic.
+ * 2. The consumer calls unsubscribe.
+ * 3. Consumers closed.
+ */
+
+static void m_unsubscribe_2() {
+ SUB_TEST();
+
+ std::string topic_name = Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+ test_create_topic(NULL, topic_name.c_str(), 2, 1);
+
+ RdKafka::KafkaConsumer *c =
+ make_consumer("C_1", group_name, "cooperative-sticky", NULL, NULL, 15);
+ test_wait_topic_exists(c->c_ptr(), topic_name.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name);
+
+ bool done = false;
+ bool unsubscribed = false;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ if (Test::assignment_partition_count(c, NULL) == 2) {
+ Test::unsubscribe(c);
+ unsubscribed = true;
+ }
+
+ if (unsubscribed && Test::assignment_partition_count(c, NULL) == 0) {
+ Test::Say("Unsubscribe completed");
+ done = true;
+ }
+ }
+
+ Test::Say("Closing consumer\n");
+ c->close();
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Two consumers (with rebalance_cb) subscribe to a regex (no matching
+ * topics exist)
+ * 2. Create two topics.
+ * 3. Remove one of the topics.
+ * 3. Consumers closed.
+ */
+
+static void n_wildcard() {
+ SUB_TEST();
+
+ const string topic_base_name = Test::mk_topic_name("0113-n_wildcard", 1);
+ const string topic_name_1 = topic_base_name + "_1";
+ const string topic_name_2 = topic_base_name + "_2";
+ const string topic_regex = "^" + topic_base_name + "_.";
+ const string group_name = Test::mk_unique_group_name("0113-n_wildcard");
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("topic.metadata.refresh.interval.ms"), std::string("3000")));
+
+ DefaultRebalanceCb rebalance_cb1;
+ RdKafka::KafkaConsumer *c1 =
+ make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb1, 30);
+ Test::subscribe(c1, topic_regex);
+
+ DefaultRebalanceCb rebalance_cb2;
+ RdKafka::KafkaConsumer *c2 =
+ make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb2, 30);
+ Test::subscribe(c2, topic_regex);
+
+ /* There are no matching topics, so the consumers should not join the group
+ * initially */
+ Test::poll_once(c1, 500);
+ Test::poll_once(c2, 500);
+
+ if (rebalance_cb1.assign_call_cnt != 0)
+ Test::Fail(tostr() << "Expecting consumer 1's assign_call_cnt to be 0 not: "
+ << rebalance_cb1.assign_call_cnt);
+ if (rebalance_cb2.assign_call_cnt != 0)
+ Test::Fail(tostr() << "Expecting consumer 2's assign_call_cnt to be 0 not: "
+ << rebalance_cb2.assign_call_cnt);
+
+ bool done = false;
+ bool created_topics = false;
+ bool deleted_topic = false;
+ int last_cb1_assign_call_cnt = 0;
+ int last_cb2_assign_call_cnt = 0;
+ while (!done) {
+ Test::poll_once(c1, 500);
+ Test::poll_once(c2, 500);
+
+ if (Test::assignment_partition_count(c1, NULL) == 0 &&
+ Test::assignment_partition_count(c2, NULL) == 0 && !created_topics) {
+ Test::Say(
+ "Creating two topics with 2 partitions each that match regex\n");
+ test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
+ test_create_topic(NULL, topic_name_2.c_str(), 2, 1);
+ /* The consumers should autonomously discover these topics and start
+ * consuming from them. This happens in the background - is not
+ * influenced by whether we wait for the topics to be created before
+ * continuing the main loop. It is possible that both topics are
+ * discovered simultaneously, requiring a single rebalance OR that
+ * topic 1 is discovered first (it was created first), a rebalance
+ * initiated, then topic 2 discovered, then another rebalance
+ * initiated to include it.
+ */
+ created_topics = true;
+ }
+
+ if (Test::assignment_partition_count(c1, NULL) == 2 &&
+ Test::assignment_partition_count(c2, NULL) == 2 && !deleted_topic) {
+ if (rebalance_cb1.nonempty_assign_call_cnt == 1) {
+ /* just one rebalance was required */
+ TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 1,
+ "Expecting C_1's nonempty_assign_call_cnt to be 1 not %d ",
+ rebalance_cb1.nonempty_assign_call_cnt);
+ TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 1,
+ "Expecting C_2's nonempty_assign_call_cnt to be 1 not %d ",
+ rebalance_cb2.nonempty_assign_call_cnt);
+ } else {
+ /* two rebalances were required (occurs infrequently) */
+ TEST_ASSERT(rebalance_cb1.nonempty_assign_call_cnt == 2,
+ "Expecting C_1's nonempty_assign_call_cnt to be 2 not %d ",
+ rebalance_cb1.nonempty_assign_call_cnt);
+ TEST_ASSERT(rebalance_cb2.nonempty_assign_call_cnt == 2,
+ "Expecting C_2's nonempty_assign_call_cnt to be 2 not %d ",
+ rebalance_cb2.nonempty_assign_call_cnt);
+ }
+
+ TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 0,
+ "Expecting C_1's revoke_call_cnt to be 0 not %d ",
+ rebalance_cb1.revoke_call_cnt);
+ TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 0,
+ "Expecting C_2's revoke_call_cnt to be 0 not %d ",
+ rebalance_cb2.revoke_call_cnt);
+
+ last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt;
+ last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt;
+
+ Test::Say("Deleting topic 1\n");
+ Test::delete_topic(c1, topic_name_1.c_str());
+ deleted_topic = true;
+ }
+
+ if (Test::assignment_partition_count(c1, NULL) == 1 &&
+ Test::assignment_partition_count(c2, NULL) == 1 && deleted_topic) {
+ /* accumulated in lost case as well */
+ TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 1,
+ "Expecting C_1's revoke_call_cnt to be 1 not %d",
+ rebalance_cb1.revoke_call_cnt);
+ TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 1,
+ "Expecting C_2's revoke_call_cnt to be 1 not %d",
+ rebalance_cb2.revoke_call_cnt);
+ TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1,
+ "Expecting C_1's lost_call_cnt to be 1 not %d",
+ rebalance_cb1.lost_call_cnt);
+ TEST_ASSERT(rebalance_cb2.lost_call_cnt == 1,
+ "Expecting C_2's lost_call_cnt to be 1 not %d",
+ rebalance_cb2.lost_call_cnt);
+
+ /* Consumers will rejoin group after revoking the lost partitions.
+ * this will result in an rebalance_cb assign (empty partitions).
+ * it follows the revoke, which has already been confirmed to have
+ * happened. */
+ Test::Say("Waiting for rebalance_cb assigns\n");
+ while (rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt ||
+ rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt) {
+ Test::poll_once(c1, 500);
+ Test::poll_once(c2, 500);
+ }
+
+ Test::Say("Consumers are subscribed to one partition each\n");
+ done = true;
+ }
+ }
+
+ Test::Say("Closing consumer 1\n");
+ last_cb1_assign_call_cnt = rebalance_cb1.assign_call_cnt;
+ c1->close();
+
+ /* There should be no assign rebalance_cb calls on close */
+ TEST_ASSERT(rebalance_cb1.assign_call_cnt == last_cb1_assign_call_cnt,
+ "Expecting C_1's assign_call_cnt to be %d not %d",
+ last_cb1_assign_call_cnt, rebalance_cb1.assign_call_cnt);
+
+ /* Let C_2 catch up on the rebalance and get assigned C_1's partitions. */
+ last_cb2_assign_call_cnt = rebalance_cb2.nonempty_assign_call_cnt;
+ while (rebalance_cb2.nonempty_assign_call_cnt == last_cb2_assign_call_cnt)
+ Test::poll_once(c2, 500);
+
+ Test::Say("Closing consumer 2\n");
+ last_cb2_assign_call_cnt = rebalance_cb2.assign_call_cnt;
+ c2->close();
+
+ /* There should be no assign rebalance_cb calls on close */
+ TEST_ASSERT(rebalance_cb2.assign_call_cnt == last_cb2_assign_call_cnt,
+ "Expecting C_2's assign_call_cnt to be %d not %d",
+ last_cb2_assign_call_cnt, rebalance_cb2.assign_call_cnt);
+
+ TEST_ASSERT(rebalance_cb1.revoke_call_cnt == 2,
+ "Expecting C_1's revoke_call_cnt to be 2 not %d",
+ rebalance_cb1.revoke_call_cnt);
+ TEST_ASSERT(rebalance_cb2.revoke_call_cnt == 2,
+ "Expecting C_2's revoke_call_cnt to be 2 not %d",
+ rebalance_cb2.revoke_call_cnt);
+
+ TEST_ASSERT(rebalance_cb1.lost_call_cnt == 1,
+ "Expecting C_1's lost_call_cnt to be 1, not %d",
+ rebalance_cb1.lost_call_cnt);
+ TEST_ASSERT(rebalance_cb2.lost_call_cnt == 1,
+ "Expecting C_2's lost_call_cnt to be 1, not %d",
+ rebalance_cb2.lost_call_cnt);
+
+ delete c1;
+ delete c2;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * 1. Consumer (librdkafka) subscribes to two topics (2 and 6 partitions).
+ * 2. Consumer (java) subscribes to the same two topics.
+ * 3. Consumer (librdkafka) unsubscribes from the two partition topic.
+ * 4. Consumer (java) process closes upon detecting the above unsubscribe.
+ * 5. Consumer (librdkafka) will now be subscribed to 6 partitions.
+ * 6. Close librdkafka consumer.
+ */
+
+static void o_java_interop() {
+ SUB_TEST();
+
+ if (*test_conf_get(NULL, "sasl.mechanism") != '\0')
+ SUB_TEST_SKIP(
+ "Cluster is set up for SASL: we won't bother with that "
+ "for the Java client\n");
+
+ std::string topic_name_1 = Test::mk_topic_name("0113_o_2", 1);
+ std::string topic_name_2 = Test::mk_topic_name("0113_o_6", 1);
+ std::string group_name = Test::mk_unique_group_name("0113_o");
+ test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
+ test_create_topic(NULL, topic_name_2.c_str(), 6, 1);
+
+ DefaultRebalanceCb rebalance_cb;
+ RdKafka::KafkaConsumer *c = make_consumer(
+ "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25);
+ test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+ test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
+
+ Test::subscribe(c, topic_name_1, topic_name_2);
+
+ bool done = false;
+ bool changed_subscription = false;
+ bool changed_subscription_done = false;
+ int java_pid = 0;
+ while (!done) {
+ Test::poll_once(c, 500);
+
+ if (1) // FIXME: Remove after debugging
+ Test::Say(tostr() << "Assignment partition count: "
+ << Test::assignment_partition_count(c, NULL)
+ << ", changed_sub " << changed_subscription
+ << ", changed_sub_done " << changed_subscription_done
+ << ", assign_call_cnt " << rebalance_cb.assign_call_cnt
+ << "\n");
+ if (Test::assignment_partition_count(c, NULL) == 8 && !java_pid) {
+ Test::Say(_C_GRN "librdkafka consumer assigned to 8 partitions\n");
+ string bootstrapServers = get_bootstrap_servers();
+ const char *argv[1 + 1 + 1 + 1 + 1 + 1];
+ size_t i = 0;
+ argv[i++] = "test1";
+ argv[i++] = bootstrapServers.c_str();
+ argv[i++] = topic_name_1.c_str();
+ argv[i++] = topic_name_2.c_str();
+ argv[i++] = group_name.c_str();
+ argv[i] = NULL;
+ java_pid = test_run_java("IncrementalRebalanceCli", argv);
+ if (java_pid <= 0)
+ Test::Fail(tostr() << "Unexpected pid: " << java_pid);
+ }
+
+ if (Test::assignment_partition_count(c, NULL) == 4 && java_pid != 0 &&
+ !changed_subscription) {
+ if (rebalance_cb.assign_call_cnt != 2)
+ Test::Fail(tostr() << "Expecting consumer's assign_call_cnt to be 2, "
+ "not "
+ << rebalance_cb.assign_call_cnt);
+ Test::Say(_C_GRN "Java consumer is now part of the group\n");
+ Test::subscribe(c, topic_name_1);
+ changed_subscription = true;
+ }
+
+ /* Depending on the timing of resubscribe rebalancing and the
+ * Java consumer terminating we might have one or two rebalances,
+ * hence the fuzzy <=5 and >=5 checks. */
+ if (Test::assignment_partition_count(c, NULL) == 2 &&
+ changed_subscription && rebalance_cb.assign_call_cnt <= 5 &&
+ !changed_subscription_done) {
+ /* All topic 1 partitions will be allocated to this consumer whether or
+ * not the Java consumer has unsubscribed yet because the sticky algorithm
+ * attempts to ensure partition counts are even. */
+ Test::Say(_C_GRN "Consumer 1 has unsubscribed from topic 2\n");
+ changed_subscription_done = true;
+ }
+
+ if (Test::assignment_partition_count(c, NULL) == 2 &&
+ changed_subscription && rebalance_cb.assign_call_cnt >= 5 &&
+ changed_subscription_done) {
+ /* When the java consumer closes, this will cause an empty assign
+ * rebalance_cb event, allowing detection of when this has happened. */
+ Test::Say(_C_GRN "Java consumer has left the group\n");
+ done = true;
+ }
+ }
+
+ Test::Say("Closing consumer\n");
+ c->close();
+
+ /* Expected behavior is IncrementalRebalanceCli will exit cleanly, timeout
+ * otherwise. */
+ test_waitpid(java_pid);
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * - Single consumer subscribes to topic.
+ * - Soon after (timing such that rebalance is probably in progress) it
+ * subscribes to a different topic.
+ */
+
+static void s_subscribe_when_rebalancing(int variation) {
+ SUB_TEST("variation %d", variation);
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string topic_name_2 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string topic_name_3 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+ test_create_topic(NULL, topic_name_1.c_str(), 1, 1);
+ test_create_topic(NULL, topic_name_2.c_str(), 1, 1);
+ test_create_topic(NULL, topic_name_3.c_str(), 1, 1);
+
+ DefaultRebalanceCb rebalance_cb;
+ RdKafka::KafkaConsumer *c = make_consumer(
+ "C_1", group_name, "cooperative-sticky", NULL, &rebalance_cb, 25);
+ test_wait_topic_exists(c->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+ test_wait_topic_exists(c->c_ptr(), topic_name_2.c_str(), 10 * 1000);
+ test_wait_topic_exists(c->c_ptr(), topic_name_3.c_str(), 10 * 1000);
+
+ if (variation == 2 || variation == 4 || variation == 6) {
+ /* Pre-cache metadata for all topics. */
+ class RdKafka::Metadata *metadata;
+ c->metadata(true, NULL, &metadata, 5000);
+ delete metadata;
+ }
+
+ Test::subscribe(c, topic_name_1);
+ Test::wait_for_assignment(c, 1, &topic_name_1);
+
+ Test::subscribe(c, topic_name_2);
+
+ if (variation == 3 || variation == 5)
+ Test::poll_once(c, 500);
+
+ if (variation < 5) {
+ // Very quickly after subscribing to topic 2, subscribe to topic 3.
+ Test::subscribe(c, topic_name_3);
+ Test::wait_for_assignment(c, 1, &topic_name_3);
+ } else {
+ // ..or unsubscribe.
+ Test::unsubscribe(c);
+ Test::wait_for_assignment(c, 0, NULL);
+ }
+
+ delete c;
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check behavior when:
+ * - Two consumer subscribe to a topic.
+ * - Max poll interval is exceeded on the first consumer.
+ */
+
+static void t_max_poll_interval_exceeded(int variation) {
+ SUB_TEST("variation %d", variation);
+
+ std::string topic_name_1 =
+ Test::mk_topic_name("0113-cooperative_rebalance", 1);
+ std::string group_name =
+ Test::mk_unique_group_name("0113-cooperative_rebalance");
+ test_create_topic(NULL, topic_name_1.c_str(), 2, 1);
+
+ std::vector<std::pair<std::string, std::string> > additional_conf;
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("session.timeout.ms"), std::string("6000")));
+ additional_conf.push_back(std::pair<std::string, std::string>(
+ std::string("max.poll.interval.ms"), std::string("7000")));
+
+ DefaultRebalanceCb rebalance_cb1;
+ RdKafka::KafkaConsumer *c1 =
+ make_consumer("C_1", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb1, 30);
+ DefaultRebalanceCb rebalance_cb2;
+ RdKafka::KafkaConsumer *c2 =
+ make_consumer("C_2", group_name, "cooperative-sticky", &additional_conf,
+ &rebalance_cb2, 30);
+
+ test_wait_topic_exists(c1->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+ test_wait_topic_exists(c2->c_ptr(), topic_name_1.c_str(), 10 * 1000);
+
+ Test::subscribe(c1, topic_name_1);
+ Test::subscribe(c2, topic_name_1);
+
+ bool done = false;
+ bool both_have_been_assigned = false;
+ while (!done) {
+ if (!both_have_been_assigned)
+ Test::poll_once(c1, 500);
+ Test::poll_once(c2, 500);
+
+ if (Test::assignment_partition_count(c1, NULL) == 1 &&
+ Test::assignment_partition_count(c2, NULL) == 1 &&
+ !both_have_been_assigned) {
+ Test::Say(
+ tostr()
+ << "Both consumers are assigned to topic " << topic_name_1
+ << ". WAITING 7 seconds for max.poll.interval.ms to be exceeded\n");
+ both_have_been_assigned = true;
+ }
+
+ if (Test::assignment_partition_count(c2, NULL) == 2 &&
+ both_have_been_assigned) {
+ Test::Say("Consumer 1 is no longer assigned any partitions, done\n");
+ done = true;
+ }
+ }
+
+ if (variation == 1) {
+ if (rebalance_cb1.lost_call_cnt != 0)
+ Test::Fail(
+ tostr() << "Expected consumer 1 lost revoke count to be 0, not: "
+ << rebalance_cb1.lost_call_cnt);
+ Test::poll_once(c1,
+ 500); /* Eat the max poll interval exceeded error message */
+ Test::poll_once(c1,
+ 500); /* Trigger the rebalance_cb with lost partitions */
+ if (rebalance_cb1.lost_call_cnt != 1)
+ Test::Fail(
+ tostr() << "Expected consumer 1 lost revoke count to be 1, not: "
+ << rebalance_cb1.lost_call_cnt);
+ }
+
+ c1->close();
+ c2->close();
+
+ if (rebalance_cb1.lost_call_cnt != 1)
+ Test::Fail(tostr() << "Expected consumer 1 lost revoke count to be 1, not: "
+ << rebalance_cb1.lost_call_cnt);
+
+ if (rebalance_cb1.assign_call_cnt != 1)
+ Test::Fail(tostr() << "Expected consumer 1 assign count to be 1, not: "
+ << rebalance_cb1.assign_call_cnt);
+ if (rebalance_cb2.assign_call_cnt != 2)
+ Test::Fail(tostr() << "Expected consumer 1 assign count to be 2, not: "
+ << rebalance_cb1.assign_call_cnt);
+
+ if (rebalance_cb1.revoke_call_cnt != 1)
+ Test::Fail(tostr() << "Expected consumer 1 revoke count to be 1, not: "
+ << rebalance_cb1.revoke_call_cnt);
+ if (rebalance_cb2.revoke_call_cnt != 1)
+ Test::Fail(tostr() << "Expected consumer 2 revoke count to be 1, not: "
+ << rebalance_cb1.revoke_call_cnt);
+
+ delete c1;
+ delete c2;
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Poll all consumers until there are no more events or messages
+ * and the timeout has expired.
+ */
+static void poll_all_consumers(RdKafka::KafkaConsumer **consumers,
+ DefaultRebalanceCb *rebalance_cbs,
+ size_t num,
+ int timeout_ms) {
+ int64_t ts_end = test_clock() + (timeout_ms * 1000);
+
+ /* Poll all consumers until no more events are seen,
+ * this makes sure we exhaust the current state events before returning. */
+ bool evented;
+ do {
+ evented = false;
+ for (size_t i = 0; i < num; i++) {
+ int block_ms = min(10, (int)((ts_end - test_clock()) / 1000));
+ while (rebalance_cbs[i].poll_once(consumers[i], max(block_ms, 0)))
+ evented = true;
+ }
+ } while (evented || test_clock() < ts_end);
+}
+
+
+/**
+ * @brief Stress test with 8 consumers subscribing, fetching and committing.
+ *
+ * @param subscription_variation 0..2
+ *
+ * TODO: incorporate committing offsets.
+ */
+
+static void u_multiple_subscription_changes(bool use_rebalance_cb,
+ int subscription_variation) {
+ const int N_CONSUMERS = 8;
+ const int N_TOPICS = 2;
+ const int N_PARTS_PER_TOPIC = N_CONSUMERS * N_TOPICS;
+ const int N_PARTITIONS = N_PARTS_PER_TOPIC * N_TOPICS;
+ const int N_MSGS_PER_PARTITION = 1000;
+
+ SUB_TEST("use_rebalance_cb: %d, subscription_variation: %d",
+ (int)use_rebalance_cb, subscription_variation);
+
+ string topic_name_1 = Test::mk_topic_name("0113u_1", 1);
+ string topic_name_2 = Test::mk_topic_name("0113u_2", 1);
+ string group_name = Test::mk_unique_group_name("0113u");
+
+ test_create_topic(NULL, topic_name_1.c_str(), N_PARTS_PER_TOPIC, 1);
+ test_create_topic(NULL, topic_name_2.c_str(), N_PARTS_PER_TOPIC, 1);
+
+ Test::Say("Creating consumers\n");
+ DefaultRebalanceCb rebalance_cbs[N_CONSUMERS];
+ RdKafka::KafkaConsumer *consumers[N_CONSUMERS];
+
+ for (int i = 0; i < N_CONSUMERS; i++) {
+ std::string name = tostr() << "C_" << i;
+ consumers[i] =
+ make_consumer(name.c_str(), group_name, "cooperative-sticky", NULL,
+ use_rebalance_cb ? &rebalance_cbs[i] : NULL, 120);
+ }
+
+ test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_1.c_str(),
+ 10 * 1000);
+ test_wait_topic_exists(consumers[0]->c_ptr(), topic_name_2.c_str(),
+ 10 * 1000);
+
+
+ /*
+ * Seed all partitions with the same number of messages so we later can
+ * verify that consumption is working.
+ */
+ vector<pair<Toppar, int> > ptopics;
+ ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_1, N_PARTS_PER_TOPIC),
+ N_MSGS_PER_PARTITION));
+ ptopics.push_back(pair<Toppar, int>(Toppar(topic_name_2, N_PARTS_PER_TOPIC),
+ N_MSGS_PER_PARTITION));
+ produce_msgs(ptopics);
+
+
+ /*
+ * Track what topics a consumer should be subscribed to and use this to
+ * verify both its subscription and assignment throughout the test.
+ */
+
+ /* consumer -> currently subscribed topics */
+ map<int, vector<string> > consumer_topics;
+
+ /* topic -> consumers subscribed to topic */
+ map<string, set<int> > topic_consumers;
+
+ /* The subscription alternatives that consumers
+ * alter between in the playbook. */
+ vector<string> SUBSCRIPTION_1;
+ vector<string> SUBSCRIPTION_2;
+
+ SUBSCRIPTION_1.push_back(topic_name_1);
+
+ switch (subscription_variation) {
+ case 0:
+ SUBSCRIPTION_2.push_back(topic_name_1);
+ SUBSCRIPTION_2.push_back(topic_name_2);
+ break;
+
+ case 1:
+ SUBSCRIPTION_2.push_back(topic_name_2);
+ break;
+
+ case 2:
+ /* No subscription */
+ break;
+ }
+
+ sort(SUBSCRIPTION_1.begin(), SUBSCRIPTION_1.end());
+ sort(SUBSCRIPTION_2.begin(), SUBSCRIPTION_2.end());
+
+
+ /*
+ * Define playbook
+ */
+ const struct {
+ int timestamp_ms;
+ int consumer;
+ const vector<string> *topics;
+ } playbook[] = {/* timestamp_ms, consumer_number, subscribe-to-topics */
+ {0, 0, &SUBSCRIPTION_1}, /* Cmd 0 */
+ {4000, 1, &SUBSCRIPTION_1}, {4000, 1, &SUBSCRIPTION_1},
+ {4000, 1, &SUBSCRIPTION_1}, {4000, 2, &SUBSCRIPTION_1},
+ {6000, 3, &SUBSCRIPTION_1}, /* Cmd 5 */
+ {6000, 4, &SUBSCRIPTION_1}, {6000, 5, &SUBSCRIPTION_1},
+ {6000, 6, &SUBSCRIPTION_1}, {6000, 7, &SUBSCRIPTION_2},
+ {6000, 1, &SUBSCRIPTION_1}, /* Cmd 10 */
+ {6000, 1, &SUBSCRIPTION_2}, {6000, 1, &SUBSCRIPTION_1},
+ {6000, 2, &SUBSCRIPTION_2}, {7000, 2, &SUBSCRIPTION_1},
+ {7000, 1, &SUBSCRIPTION_2}, /* Cmd 15 */
+ {8000, 0, &SUBSCRIPTION_2}, {8000, 1, &SUBSCRIPTION_1},
+ {8000, 0, &SUBSCRIPTION_1}, {13000, 2, &SUBSCRIPTION_1},
+ {13000, 1, &SUBSCRIPTION_2}, /* Cmd 20 */
+ {13000, 5, &SUBSCRIPTION_2}, {14000, 6, &SUBSCRIPTION_2},
+ {15000, 7, &SUBSCRIPTION_1}, {15000, 1, &SUBSCRIPTION_1},
+ {15000, 5, &SUBSCRIPTION_1}, /* Cmd 25 */
+ {15000, 6, &SUBSCRIPTION_1}, {INT_MAX, 0, 0}};
+
+ /*
+ * Run the playbook
+ */
+ int cmd_number = 0;
+ uint64_t ts_start = test_clock();
+
+ while (playbook[cmd_number].timestamp_ms != INT_MAX) {
+ TEST_ASSERT(playbook[cmd_number].consumer < N_CONSUMERS);
+
+ Test::Say(tostr() << "Cmd #" << cmd_number << ": wait "
+ << playbook[cmd_number].timestamp_ms << "ms\n");
+
+ poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS,
+ playbook[cmd_number].timestamp_ms -
+ (int)((test_clock() - ts_start) / 1000));
+
+ /* Verify consumer assignments match subscribed topics */
+ map<Toppar, RdKafka::KafkaConsumer *> all_assignments;
+ for (int i = 0; i < N_CONSUMERS; i++)
+ verify_consumer_assignment(
+ consumers[i], rebalance_cbs[i], consumer_topics[i],
+ /* Allow empty assignment */
+ true,
+ /* Allow mismatch between subscribed topics
+ * and actual assignment since we can't
+ * synchronize the last subscription
+ * to the current assignment due to
+ * an unknown number of rebalances required
+ * for the final assignment to settle.
+ * This is instead checked at the end of
+ * this test case. */
+ true, &all_assignments, -1 /* no msgcnt check*/);
+
+ int cid = playbook[cmd_number].consumer;
+ RdKafka::KafkaConsumer *consumer = consumers[playbook[cmd_number].consumer];
+ const vector<string> *topics = playbook[cmd_number].topics;
+
+ /*
+ * Update our view of the consumer's subscribed topics and vice versa.
+ */
+ for (vector<string>::const_iterator it = consumer_topics[cid].begin();
+ it != consumer_topics[cid].end(); it++) {
+ topic_consumers[*it].erase(cid);
+ }
+
+ consumer_topics[cid].clear();
+
+ for (vector<string>::const_iterator it = topics->begin();
+ it != topics->end(); it++) {
+ consumer_topics[cid].push_back(*it);
+ topic_consumers[*it].insert(cid);
+ }
+
+ RdKafka::ErrorCode err;
+
+ /*
+ * Change subscription
+ */
+ if (!topics->empty()) {
+ Test::Say(tostr() << "Consumer: " << consumer->name()
+ << " is subscribing to topics "
+ << string_vec_to_str(*topics) << " after "
+ << ((test_clock() - ts_start) / 1000) << "ms\n");
+ err = consumer->subscribe(*topics);
+ TEST_ASSERT(!err, "Expected subscribe() to succeed, got %s",
+ RdKafka::err2str(err).c_str());
+ } else {
+ Test::Say(tostr() << "Consumer: " << consumer->name()
+ << " is unsubscribing after "
+ << ((test_clock() - ts_start) / 1000) << "ms\n");
+ Test::unsubscribe(consumer);
+ }
+
+ /* Mark this consumer as waiting for rebalance so that
+ * verify_consumer_assignment() allows assigned partitions that
+ * (no longer) match the subscription. */
+ rebalance_cbs[cid].wait_rebalance = true;
+
+
+ /*
+ * Verify subscription matches what we think it should be.
+ */
+ vector<string> subscription;
+ err = consumer->subscription(subscription);
+ TEST_ASSERT(!err, "consumer %s subscription() failed: %s",
+ consumer->name().c_str(), RdKafka::err2str(err).c_str());
+
+ sort(subscription.begin(), subscription.end());
+
+ Test::Say(tostr() << "Consumer " << consumer->name()
+ << " subscription is now "
+ << string_vec_to_str(subscription) << "\n");
+
+ if (subscription != *topics)
+ Test::Fail(tostr() << "Expected consumer " << consumer->name()
+ << " subscription: " << string_vec_to_str(*topics)
+ << " but got: " << string_vec_to_str(subscription));
+
+ cmd_number++;
+ }
+
+
+ /*
+ * Wait for final rebalances and all consumers to settle,
+ * then verify assignments and received message counts.
+ */
+ Test::Say(_C_YEL "Waiting for final assignment state\n");
+ int done_count = 0;
+ /* Allow at least 20 seconds for group to stabilize. */
+ int64_t stabilize_until = test_clock() + (20 * 1000 * 1000); /* 20s */
+
+ while (done_count < 2) {
+ bool stabilized = test_clock() > stabilize_until;
+
+ poll_all_consumers(consumers, rebalance_cbs, N_CONSUMERS, 5000);
+
+ /* Verify consumer assignments */
+ int counts[N_CONSUMERS];
+ map<Toppar, RdKafka::KafkaConsumer *> all_assignments;
+ Test::Say(tostr() << "Consumer assignments "
+ << "(subscription_variation " << subscription_variation
+ << ")" << (stabilized ? " (stabilized)" : "")
+ << (use_rebalance_cb ? " (use_rebalance_cb)"
+ : " (no rebalance cb)")
+ << ":\n");
+ for (int i = 0; i < N_CONSUMERS; i++) {
+ bool last_rebalance_stabilized =
+ stabilized &&
+ (!use_rebalance_cb ||
+ /* session.timeout.ms * 2 + 1 */
+ test_clock() > rebalance_cbs[i].ts_last_assign + (13 * 1000 * 1000));
+
+ counts[i] = verify_consumer_assignment(
+ consumers[i], rebalance_cbs[i], consumer_topics[i],
+ /* allow empty */
+ true,
+ /* if we're waiting for a
+ * rebalance it is okay for the
+ * current assignment to contain
+ * topics that this consumer
+ * (no longer) subscribes to. */
+ !last_rebalance_stabilized || !use_rebalance_cb ||
+ rebalance_cbs[i].wait_rebalance,
+ /* do not allow assignments for
+ * topics that are not subscribed*/
+ &all_assignments,
+ /* Verify received message counts
+ * once the assignments have
+ * stabilized.
+ * Requires the rebalance cb.*/
+ done_count > 0 && use_rebalance_cb ? N_MSGS_PER_PARTITION : -1);
+ }
+
+ Test::Say(tostr() << all_assignments.size() << "/" << N_PARTITIONS
+ << " partitions assigned\n");
+
+ bool done = true;
+ for (int i = 0; i < N_CONSUMERS; i++) {
+ /* For each topic the consumer subscribes to it should
+ * be assigned its share of partitions. */
+ int exp_parts = 0;
+ for (vector<string>::const_iterator it = consumer_topics[i].begin();
+ it != consumer_topics[i].end(); it++)
+ exp_parts += N_PARTS_PER_TOPIC / (int)topic_consumers[*it].size();
+
+ Test::Say(tostr() << (counts[i] == exp_parts ? "" : _C_YEL) << "Consumer "
+ << consumers[i]->name() << " has " << counts[i]
+ << " assigned partitions (" << consumer_topics[i].size()
+ << " subscribed topic(s))"
+ << ", expecting " << exp_parts
+ << " assigned partitions\n");
+
+ if (counts[i] != exp_parts)
+ done = false;
+ }
+
+ if (done && stabilized) {
+ done_count++;
+ Test::Say(tostr() << "All assignments verified, done count is "
+ << done_count << "\n");
+ }
+ }
+
+ Test::Say("Disposing consumers\n");
+ for (int i = 0; i < N_CONSUMERS; i++) {
+ TEST_ASSERT(!use_rebalance_cb || !rebalance_cbs[i].wait_rebalance,
+ "Consumer %d still waiting for rebalance", i);
+ if (i & 1)
+ consumers[i]->close();
+ delete consumers[i];
+ }
+
+ SUB_TEST_PASS();
+}
+
+
+
+extern "C" {
+
+static int rebalance_cnt;
+static rd_kafka_resp_err_t rebalance_exp_event;
+static rd_bool_t rebalance_exp_lost;
+
+extern void test_print_partition_list(
+ const rd_kafka_topic_partition_list_t *partitions);
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ rebalance_cnt++;
+ TEST_SAY("Rebalance #%d: %s: %d partition(s)\n", rebalance_cnt,
+ rd_kafka_err2name(err), parts->cnt);
+
+ test_print_partition_list(parts);
+
+ TEST_ASSERT(err == rebalance_exp_event ||
+ rebalance_exp_event == RD_KAFKA_RESP_ERR_NO_ERROR,
+ "Expected rebalance event %s, not %s",
+ rd_kafka_err2name(rebalance_exp_event), rd_kafka_err2name(err));
+
+ if (rebalance_exp_lost) {
+ TEST_ASSERT(rd_kafka_assignment_lost(rk), "Expected partitions lost");
+ TEST_SAY("Partitions were lost\n");
+ }
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
+ test_consumer_incremental_assign("assign", rk, parts);
+ } else {
+ test_consumer_incremental_unassign("unassign", rk, parts);
+ }
+}
+
+/**
+ * @brief Wait for an expected rebalance event, or fail.
+ */
+static void expect_rebalance0(const char *func,
+ int line,
+ const char *what,
+ rd_kafka_t *c,
+ rd_kafka_resp_err_t exp_event,
+ rd_bool_t exp_lost,
+ int timeout_s) {
+ int64_t tmout = test_clock() + (timeout_s * 1000000);
+ int start_cnt = rebalance_cnt;
+
+ TEST_SAY("%s:%d: Waiting for %s (%s) for %ds\n", func, line, what,
+ rd_kafka_err2name(exp_event), timeout_s);
+
+ rebalance_exp_lost = exp_lost;
+ rebalance_exp_event = exp_event;
+
+ while (tmout > test_clock() && rebalance_cnt == start_cnt) {
+ test_consumer_poll_once(c, NULL, 1000);
+ }
+
+ if (rebalance_cnt == start_cnt + 1) {
+ rebalance_exp_event = RD_KAFKA_RESP_ERR_NO_ERROR;
+ rebalance_exp_lost = exp_lost = rd_false;
+ return;
+ }
+
+ TEST_FAIL("%s:%d: Timed out waiting for %s (%s)", func, line, what,
+ rd_kafka_err2name(exp_event));
+}
+
+#define expect_rebalance(WHAT, C, EXP_EVENT, EXP_LOST, TIMEOUT_S) \
+ expect_rebalance0(__FUNCTION__, __LINE__, WHAT, C, EXP_EVENT, EXP_LOST, \
+ TIMEOUT_S)
+
+
+/* Check lost partitions revoke occurs on ILLEGAL_GENERATION heartbeat error.
+ */
+
+static void p_lost_partitions_heartbeat_illegal_generation_test() {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *groupid = "mygroup";
+ const char *topic = "test";
+ rd_kafka_t *c;
+ rd_kafka_conf_t *conf;
+
+ SUB_TEST_QUICK();
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, 100, 10, "bootstrap.servers",
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(conf, "group.id", groupid);
+ test_conf_set(conf, "session.timeout.ms", "5000");
+ test_conf_set(conf, "heartbeat.interval.ms", "1000");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
+
+ c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(c, topic);
+
+ expect_rebalance("initial assignment", c,
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ rd_false /*don't expect lost*/, 5 + 2);
+
+ /* Fail heartbeats */
+ rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_Heartbeat, 5,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
+
+ expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ rd_true /*expect lost*/, 10 + 2);
+
+ rd_kafka_mock_clear_request_errors(mcluster, RD_KAFKAP_Heartbeat);
+
+ expect_rebalance("rejoin after lost", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ rd_false /*don't expect lost*/, 10 + 2);
+
+ TEST_SAY("Closing consumer\n");
+ test_consumer_close(c);
+
+ TEST_SAY("Destroying consumer\n");
+ rd_kafka_destroy(c);
+
+ TEST_SAY("Destroying mock cluster\n");
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check lost partitions revoke occurs on ILLEGAL_GENERATION JoinGroup
+ * or SyncGroup error.
+ */
+
+static void q_lost_partitions_illegal_generation_test(
+ rd_bool_t test_joingroup_fail) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *groupid = "mygroup";
+ const char *topic1 = "test1";
+ const char *topic2 = "test2";
+ rd_kafka_t *c;
+ rd_kafka_conf_t *conf;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *topics;
+
+ SUB_TEST0(!test_joingroup_fail /*quick*/, "test_joingroup_fail=%d",
+ test_joingroup_fail);
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+
+ /* Seed the topic1 with messages */
+ test_produce_msgs_easy_v(topic1, 0, 0, 0, 100, 10, "bootstrap.servers",
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
+
+ /* Seed the topic2 with messages */
+ test_produce_msgs_easy_v(topic2, 0, 0, 0, 100, 10, "bootstrap.servers",
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(conf, "group.id", groupid);
+ test_conf_set(conf, "session.timeout.ms", "5000");
+ test_conf_set(conf, "heartbeat.interval.ms", "1000");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
+
+ c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(c, topic1);
+
+ expect_rebalance("initial assignment", c,
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ rd_false /*don't expect lost*/, 5 + 2);
+
+ /* Fail JoinGroups or SyncGroups */
+ rd_kafka_mock_push_request_errors(
+ mcluster, test_joingroup_fail ? RD_KAFKAP_JoinGroup : RD_KAFKAP_SyncGroup,
+ 5, RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
+
+ topics = rd_kafka_topic_partition_list_new(2);
+ rd_kafka_topic_partition_list_add(topics, topic1, RD_KAFKA_PARTITION_UA);
+ rd_kafka_topic_partition_list_add(topics, topic2, RD_KAFKA_PARTITION_UA);
+ err = rd_kafka_subscribe(c, topics);
+ if (err)
+ TEST_FAIL("%s: Failed to subscribe to topics: %s\n", rd_kafka_name(c),
+ rd_kafka_err2str(err));
+ rd_kafka_topic_partition_list_destroy(topics);
+
+ expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ rd_true /*expect lost*/, 10 + 2);
+
+ rd_kafka_mock_clear_request_errors(mcluster, test_joingroup_fail
+ ? RD_KAFKAP_JoinGroup
+ : RD_KAFKAP_SyncGroup);
+
+ expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ rd_false /*expect lost*/, 10 + 2);
+
+ TEST_SAY("Closing consumer\n");
+ test_consumer_close(c);
+
+ TEST_SAY("Destroying consumer\n");
+ rd_kafka_destroy(c);
+
+ TEST_SAY("Destroying mock cluster\n");
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/* Check lost partitions revoke occurs on ILLEGAL_GENERATION Commit
+ * error.
+ */
+
+static void r_lost_partitions_commit_illegal_generation_test_local() {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *groupid = "mygroup";
+ const char *topic = "test";
+ const int msgcnt = 100;
+ rd_kafka_t *c;
+ rd_kafka_conf_t *conf;
+
+ SUB_TEST();
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ rd_kafka_mock_coordinator_set(mcluster, "group", groupid, 1);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, 0, 0, msgcnt, 10, "bootstrap.servers",
+ bootstraps, "batch.num.messages", "10",
+ "security.protocol", "plaintext", NULL);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "security.protocol", "PLAINTEXT");
+ test_conf_set(conf, "group.id", groupid);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
+
+ c = test_create_consumer(groupid, rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(c, topic);
+
+ expect_rebalance("initial assignment", c,
+ RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ rd_false /*don't expect lost*/, 5 + 2);
+
+
+ /* Consume some messages so that the commit has something to commit. */
+ test_consumer_poll("consume", c, -1, -1, -1, msgcnt / 2, NULL);
+
+ /* Fail Commit */
+ rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 5,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION,
+ RD_KAFKA_RESP_ERR_ILLEGAL_GENERATION);
+
+ rd_kafka_commit(c, NULL, rd_false);
+
+ expect_rebalance("lost partitions", c, RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS,
+ rd_true /*expect lost*/, 10 + 2);
+
+ expect_rebalance("rejoin group", c, RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS,
+ rd_false /*expect lost*/, 20 + 2);
+
+ TEST_SAY("Closing consumer\n");
+ test_consumer_close(c);
+
+ TEST_SAY("Destroying consumer\n");
+ rd_kafka_destroy(c);
+
+ TEST_SAY("Destroying mock cluster\n");
+ test_mock_cluster_destroy(mcluster);
+}
+
+
+/**
+ * @brief Rebalance callback for the v_.. test below.
+ */
+static void v_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ bool *auto_commitp = (bool *)opaque;
+
+ TEST_SAY("%s: %s: %d partition(s)%s\n", rd_kafka_name(rk),
+ rd_kafka_err2name(err), parts->cnt,
+ rd_kafka_assignment_lost(rk) ? " - assignment lost" : "");
+
+ test_print_partition_list(parts);
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
+ test_consumer_incremental_assign("assign", rk, parts);
+ } else {
+ test_consumer_incremental_unassign("unassign", rk, parts);
+
+ if (!*auto_commitp) {
+ rd_kafka_resp_err_t commit_err;
+
+ TEST_SAY("Attempting manual commit after unassign, in 2 seconds..\n");
+ /* Sleep enough to have the generation-id bumped by rejoin. */
+ rd_sleep(2);
+ commit_err = rd_kafka_commit(rk, NULL, 0 /*sync*/);
+ TEST_ASSERT(!commit_err || commit_err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
+ commit_err == RD_KAFKA_RESP_ERR__DESTROY,
+ "%s: manual commit failed: %s", rd_kafka_name(rk),
+ rd_kafka_err2str(commit_err));
+ }
+ }
+}
+
+/**
+ * @brief Commit callback for the v_.. test.
+ */
+static void v_commit_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ TEST_SAY("%s offset commit for %d offsets: %s\n", rd_kafka_name(rk),
+ offsets ? offsets->cnt : -1, rd_kafka_err2name(err));
+ TEST_ASSERT(!err || err == RD_KAFKA_RESP_ERR__NO_OFFSET ||
+ err == RD_KAFKA_RESP_ERR__DESTROY /* consumer was closed */,
+ "%s offset commit failed: %s", rd_kafka_name(rk),
+ rd_kafka_err2str(err));
+}
+
+
+static void v_commit_during_rebalance(bool with_rebalance_cb,
+ bool auto_commit) {
+ rd_kafka_t *p, *c1, *c2;
+ rd_kafka_conf_t *conf;
+ const char *topic = test_mk_topic_name("0113_v", 1);
+ const int partition_cnt = 6;
+ const int msgcnt_per_partition = 100;
+ const int msgcnt = partition_cnt * msgcnt_per_partition;
+ uint64_t testid;
+ int i;
+
+
+ SUB_TEST("With%s rebalance callback and %s-commit",
+ with_rebalance_cb ? "" : "out", auto_commit ? "auto" : "manual");
+
+ test_conf_init(&conf, NULL, 30);
+ testid = test_id_generate();
+
+ /*
+ * Produce messages to topic
+ */
+ p = test_create_producer();
+
+ test_create_topic(p, topic, partition_cnt, 1);
+
+ for (i = 0; i < partition_cnt; i++) {
+ test_produce_msgs2(p, topic, testid, i, i * msgcnt_per_partition,
+ msgcnt_per_partition, NULL, 0);
+ }
+
+ test_flush(p, -1);
+
+ rd_kafka_destroy(p);
+
+
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit", auto_commit ? "true" : "false");
+ test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
+ rd_kafka_conf_set_offset_commit_cb(conf, v_commit_cb);
+ rd_kafka_conf_set_opaque(conf, (void *)&auto_commit);
+
+ TEST_SAY("Create and subscribe first consumer\n");
+ c1 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL,
+ rd_kafka_conf_dup(conf), NULL);
+ TEST_ASSERT(rd_kafka_opaque(c1) == (void *)&auto_commit,
+ "c1 opaque mismatch");
+ test_consumer_subscribe(c1, topic);
+
+ /* Consume some messages so that we know we have an assignment
+ * and something to commit. */
+ test_consumer_poll("C1.PRECONSUME", c1, testid, -1, 0,
+ msgcnt / partition_cnt / 2, NULL);
+
+ TEST_SAY("Create and subscribe second consumer\n");
+ c2 = test_create_consumer(topic, with_rebalance_cb ? v_rebalance_cb : NULL,
+ conf, NULL);
+ TEST_ASSERT(rd_kafka_opaque(c2) == (void *)&auto_commit,
+ "c2 opaque mismatch");
+ test_consumer_subscribe(c2, topic);
+
+ /* Poll both consumers */
+ for (i = 0; i < 10; i++) {
+ test_consumer_poll_once(c1, NULL, 1000);
+ test_consumer_poll_once(c2, NULL, 1000);
+ }
+
+ TEST_SAY("Closing consumers\n");
+ test_consumer_close(c1);
+ test_consumer_close(c2);
+
+ rd_kafka_destroy(c1);
+ rd_kafka_destroy(c2);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Verify that incremental rebalances retain stickyness.
+ */
+static void x_incremental_rebalances(void) {
+#define _NUM_CONS 3
+ rd_kafka_t *c[_NUM_CONS];
+ rd_kafka_conf_t *conf;
+ const char *topic = test_mk_topic_name("0113_x", 1);
+ int i;
+
+ SUB_TEST();
+ test_conf_init(&conf, NULL, 60);
+
+ test_create_topic(NULL, topic, 6, 1);
+
+ test_conf_set(conf, "partition.assignment.strategy", "cooperative-sticky");
+ for (i = 0; i < _NUM_CONS; i++) {
+ char clientid[32];
+ rd_snprintf(clientid, sizeof(clientid), "consumer%d", i);
+ test_conf_set(conf, "client.id", clientid);
+
+ c[i] = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+ }
+ rd_kafka_conf_destroy(conf);
+
+ /* First consumer joins group */
+ TEST_SAY("%s: joining\n", rd_kafka_name(c[0]));
+ test_consumer_subscribe(c[0], topic);
+ test_consumer_wait_assignment(c[0], rd_true /*poll*/);
+ test_consumer_verify_assignment(c[0], rd_true /*fail immediately*/, topic, 0,
+ topic, 1, topic, 2, topic, 3, topic, 4, topic,
+ 5, NULL);
+
+
+ /* Second consumer joins group */
+ TEST_SAY("%s: joining\n", rd_kafka_name(c[1]));
+ test_consumer_subscribe(c[1], topic);
+ test_consumer_wait_assignment(c[1], rd_true /*poll*/);
+ rd_sleep(3);
+ test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 3,
+ topic, 4, topic, 5, NULL);
+ test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 0,
+ topic, 1, topic, 2, NULL);
+
+ /* Third consumer joins group */
+ TEST_SAY("%s: joining\n", rd_kafka_name(c[2]));
+ test_consumer_subscribe(c[2], topic);
+ test_consumer_wait_assignment(c[2], rd_true /*poll*/);
+ rd_sleep(3);
+ test_consumer_verify_assignment(c[0], rd_false /*fail later*/, topic, 4,
+ topic, 5, NULL);
+ test_consumer_verify_assignment(c[1], rd_false /*fail later*/, topic, 1,
+ topic, 2, NULL);
+ test_consumer_verify_assignment(c[2], rd_false /*fail later*/, topic, 3,
+ topic, 0, NULL);
+
+ /* Raise any previously failed verify_assignment calls and fail the test */
+ TEST_LATER_CHECK();
+
+ for (i = 0; i < _NUM_CONS; i++)
+ rd_kafka_destroy(c[i]);
+
+ SUB_TEST_PASS();
+
+#undef _NUM_CONS
+}
+
+/* Local tests not needing a cluster */
+int main_0113_cooperative_rebalance_local(int argc, char **argv) {
+ a_assign_rapid();
+ p_lost_partitions_heartbeat_illegal_generation_test();
+ q_lost_partitions_illegal_generation_test(rd_false /*joingroup*/);
+ q_lost_partitions_illegal_generation_test(rd_true /*syncgroup*/);
+ r_lost_partitions_commit_illegal_generation_test_local();
+ return 0;
+}
+
+int main_0113_cooperative_rebalance(int argc, char **argv) {
+ int i;
+
+ a_assign_tests();
+ b_subscribe_with_cb_test(true /*close consumer*/);
+ b_subscribe_with_cb_test(false /*don't close consumer*/);
+ c_subscribe_no_cb_test(true /*close consumer*/);
+
+ if (test_quick) {
+ Test::Say("Skipping tests >= c_ .. due to quick mode\n");
+ return 0;
+ }
+
+ c_subscribe_no_cb_test(false /*don't close consumer*/);
+ d_change_subscription_add_topic(true /*close consumer*/);
+ d_change_subscription_add_topic(false /*don't close consumer*/);
+ e_change_subscription_remove_topic(true /*close consumer*/);
+ e_change_subscription_remove_topic(false /*don't close consumer*/);
+ f_assign_call_cooperative();
+ g_incremental_assign_call_eager();
+ h_delete_topic();
+ i_delete_topic_2();
+ j_delete_topic_no_rb_callback();
+ k_add_partition();
+ l_unsubscribe();
+ m_unsubscribe_2();
+ n_wildcard();
+ o_java_interop();
+ for (i = 1; i <= 6; i++) /* iterate over 6 different test variations */
+ s_subscribe_when_rebalancing(i);
+ for (i = 1; i <= 2; i++)
+ t_max_poll_interval_exceeded(i);
+ /* Run all 2*3 variations of the u_.. test */
+ for (i = 0; i < 3; i++) {
+ u_multiple_subscription_changes(true /*with rebalance_cb*/, i);
+ u_multiple_subscription_changes(false /*without rebalance_cb*/, i);
+ }
+ v_commit_during_rebalance(true /*with rebalance callback*/,
+ true /*auto commit*/);
+ v_commit_during_rebalance(false /*without rebalance callback*/,
+ true /*auto commit*/);
+ v_commit_during_rebalance(true /*with rebalance callback*/,
+ false /*manual commit*/);
+ x_incremental_rebalances();
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp
new file mode 100644
index 000000000..8ef88e7df
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0114-sticky_partitioning.cpp
@@ -0,0 +1,176 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Test sticky.partitioning.linger.ms producer property.
+ *
+ */
+
+#include <iostream>
+#include <fstream>
+#include <iterator>
+#include <string>
+#include "testcpp.h"
+#include "test.h"
+
+/**
+ * @brief Specify sticky.partitioning.linger.ms and check consumed
+ * messages to verify it takes effect.
+ */
+static void do_test_sticky_partitioning(int sticky_delay) {
+ std::string topic = Test::mk_topic_name(__FILE__, 1);
+ Test::create_topic(NULL, topic.c_str(), 3, 1);
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 0);
+
+ Test::conf_set(conf, "sticky.partitioning.linger.ms",
+ tostr() << sticky_delay);
+
+ std::string errstr;
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+
+ RdKafka::Consumer *c = RdKafka::Consumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create Consumer: " + errstr);
+ delete conf;
+
+ RdKafka::Topic *t = RdKafka::Topic::create(c, topic, NULL, errstr);
+ if (!t)
+ Test::Fail("Failed to create Topic: " + errstr);
+
+ c->start(t, 0, RdKafka::Topic::OFFSET_BEGINNING);
+ c->start(t, 1, RdKafka::Topic::OFFSET_BEGINNING);
+ c->start(t, 2, RdKafka::Topic::OFFSET_BEGINNING);
+
+ const int msgrate = 100;
+ const int msgsize = 10;
+
+ /* Produce messages */
+ char val[msgsize];
+ memset(val, 'a', msgsize);
+
+ /* produce for for seconds at 100 msgs/sec */
+ for (int s = 0; s < 4; s++) {
+ int64_t end_wait = test_clock() + (1 * 1000000);
+
+ for (int i = 0; i < msgrate; i++) {
+ RdKafka::ErrorCode err = p->produce(topic, RdKafka::Topic::PARTITION_UA,
+ RdKafka::Producer::RK_MSG_COPY, val,
+ msgsize, NULL, 0, -1, NULL);
+ if (err)
+ Test::Fail("Produce failed: " + RdKafka::err2str(err));
+ }
+
+ while (test_clock() < end_wait)
+ p->poll(100);
+ }
+
+ Test::Say(tostr() << "Produced " << 4 * msgrate << " messages\n");
+ p->flush(5 * 1000);
+
+ /* Consume messages */
+ int partition_msgcnt[3] = {0, 0, 0};
+ int num_partitions_active = 0;
+ int i = 0;
+
+ int64_t end_wait = test_clock() + (5 * 1000000);
+ while (test_clock() < end_wait) {
+ RdKafka::Message *msg = c->consume(t, i, 5);
+
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ i++;
+ if (i > 2)
+ i = 0;
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ partition_msgcnt[msg->partition()]++;
+ break;
+
+ default:
+ Test::Fail("Consume error: " + msg->errstr());
+ break;
+ }
+
+ delete msg;
+ }
+
+ c->stop(t, 0);
+ c->stop(t, 1);
+ c->stop(t, 2);
+
+ for (int i = 0; i < 3; i++) {
+ /* Partitions must receive 100+ messages to be deemed 'active'. This
+ * is because while topics are being updated, it is possible for some
+ * number of messages to be partitioned to joining partitions before
+ * they become available. This can cause some initial turnover in
+ * selecting a sticky partition. This behavior is acceptable, and is
+ * not important for the purpose of this segment of the test. */
+
+ if (partition_msgcnt[i] > (msgrate - 1))
+ num_partitions_active++;
+ }
+
+ Test::Say("Partition Message Count: \n");
+ for (int i = 0; i < 3; i++) {
+ Test::Say(tostr() << " " << i << ": " << partition_msgcnt[i] << "\n");
+ }
+
+ /* When sticky.partitioning.linger.ms is long (greater than expected
+ * length of run), one partition should be sticky and receive messages. */
+ if (sticky_delay == 5000 && num_partitions_active > 1)
+ Test::Fail(tostr() << "Expected only 1 partition to receive msgs"
+ << " but " << num_partitions_active
+ << " partitions received msgs.");
+
+ /* When sticky.partitioning.linger.ms is short (sufficiently smaller than
+ * length of run), it is extremely likely that all partitions are sticky
+ * at least once and receive messages. */
+ if (sticky_delay == 1000 && num_partitions_active <= 1)
+ Test::Fail(tostr() << "Expected more than one partition to receive msgs"
+ << " but only " << num_partitions_active
+ << " partition received msgs.");
+
+ delete t;
+ delete p;
+ delete c;
+}
+
+extern "C" {
+int main_0114_sticky_partitioning(int argc, char **argv) {
+ /* long delay (5 secs) */
+ do_test_sticky_partitioning(5000);
+ /* short delay (0.001 secs) */
+ do_test_sticky_partitioning(1);
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp
new file mode 100644
index 000000000..c4d1a96aa
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0115-producer_auth.cpp
@@ -0,0 +1,179 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <map>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+
+namespace {
+class DrCb : public RdKafka::DeliveryReportCb {
+ public:
+ DrCb(RdKafka::ErrorCode exp_err) : cnt(0), exp_err(exp_err) {
+ }
+
+ void dr_cb(RdKafka::Message &msg) {
+ Test::Say("Delivery report: " + RdKafka::err2str(msg.err()) + "\n");
+ if (msg.err() != exp_err)
+ Test::Fail("Delivery report: Expected " + RdKafka::err2str(exp_err) +
+ " but got " + RdKafka::err2str(msg.err()));
+ cnt++;
+ }
+
+ int cnt;
+ RdKafka::ErrorCode exp_err;
+};
+}; // namespace
+
+/**
+ * @brief Test producer auth failures.
+ *
+ * @param topic_known If true we make sure the producer knows about the topic
+ * before restricting access to it and producing,
+ * this should result in the ProduceRequest failing,
+ * if false we restrict access prior to this which should
+ * result in MetadataRequest failing.
+ */
+
+
+static void do_test_producer(bool topic_known) {
+ Test::Say(tostr() << _C_MAG << "[ Test producer auth with topic "
+ << (topic_known ? "" : "not ") << "known ]\n");
+
+ /* Create producer */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 20);
+
+ std::string errstr;
+ DrCb dr(RdKafka::ERR_NO_ERROR);
+ conf->set("dr_cb", &dr, errstr);
+
+ std::string bootstraps;
+ if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to retrieve bootstrap.servers");
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ /* Create topic */
+ std::string topic_unauth = Test::mk_topic_name("0115-unauthorized", 1);
+ Test::create_topic(NULL, topic_unauth.c_str(), 3, 1);
+
+ int exp_dr_cnt = 0;
+
+ RdKafka::ErrorCode err;
+
+ if (topic_known) {
+ /* Produce a single message to make sure metadata is known. */
+ Test::Say("Producing seeding message 0\n");
+ err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA,
+ RdKafka::Producer::RK_MSG_COPY, (void *)"0", 1, NULL, 0, 0,
+ NULL);
+ TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
+
+ p->flush(-1);
+ exp_dr_cnt++;
+ }
+
+ /* Add denying ACL for unauth topic */
+ test_kafka_cmd(
+ "kafka-acls.sh --bootstrap-server %s "
+ "--add --deny-principal 'User:*' "
+ "--operation All --deny-host '*' "
+ "--topic '%s'",
+ bootstraps.c_str(), topic_unauth.c_str());
+
+ /* Produce message to any partition. */
+ Test::Say("Producing message 1 to any partition\n");
+ err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA,
+ RdKafka::Producer::RK_MSG_COPY, (void *)"1", 1, NULL, 0, 0,
+ NULL);
+ TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
+ exp_dr_cnt++;
+
+ /* Produce message to specific partition. */
+ Test::Say("Producing message 2 to partition 0\n");
+ err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"3",
+ 1, NULL, 0, 0, NULL);
+ TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
+ exp_dr_cnt++;
+
+ /* Wait for DRs */
+ dr.exp_err = RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED;
+ p->flush(-1);
+
+
+ /* Produce message to any and specific partition, should fail immediately. */
+ Test::Say("Producing message 3 to any partition\n");
+ err = p->produce(topic_unauth, RdKafka::Topic::PARTITION_UA,
+ RdKafka::Producer::RK_MSG_COPY, (void *)"3", 1, NULL, 0, 0,
+ NULL);
+ TEST_ASSERT(err == dr.exp_err,
+ "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, "
+ "not %s",
+ RdKafka::err2str(err).c_str());
+
+ /* Specific partition */
+ Test::Say("Producing message 4 to partition 0\n");
+ err = p->produce(topic_unauth, 0, RdKafka::Producer::RK_MSG_COPY, (void *)"4",
+ 1, NULL, 0, 0, NULL);
+ TEST_ASSERT(err == dr.exp_err,
+ "Expected produce() to fail with ERR_TOPIC_AUTHORIZATION_FAILED, "
+ "not %s",
+ RdKafka::err2str(err).c_str());
+
+ /* Final flush just to make sure */
+ p->flush(-1);
+
+ TEST_ASSERT(exp_dr_cnt == dr.cnt, "Expected %d deliveries, not %d",
+ exp_dr_cnt, dr.cnt);
+
+ Test::Say(tostr() << _C_GRN << "[ Test producer auth with topic "
+ << (topic_known ? "" : "not ") << "known: PASS ]\n");
+
+ delete p;
+}
+
+extern "C" {
+int main_0115_producer_auth(int argc, char **argv) {
+ /* We can't bother passing Java security config to kafka-acls.sh */
+ if (test_needs_auth()) {
+ Test::Skip("Cluster authentication required\n");
+ return 0;
+ }
+
+ do_test_producer(true);
+ do_test_producer(false);
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp
new file mode 100644
index 000000000..c674d4443
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0116-kafkaconsumer_close.cpp
@@ -0,0 +1,214 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <map>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+extern "C" {
+#include "test.h"
+#include "tinycthread.h"
+#include "rdatomic.h"
+}
+
+/**
+ * Test KafkaConsumer close and destructor behaviour.
+ */
+
+
+struct args {
+ RdKafka::Queue *queue;
+ RdKafka::KafkaConsumer *c;
+};
+
+static int run_polling_thread(void *p) {
+ struct args *args = (struct args *)p;
+
+ while (!args->c->closed()) {
+ RdKafka::Message *msg;
+
+ /* We use a long timeout to also verify that the
+ * consume() call is yielded/woken by librdkafka
+ * when consumer_close_queue() finishes. */
+ msg = args->queue->consume(60 * 1000 /*60s*/);
+ if (msg)
+ delete msg;
+ }
+
+ return 0;
+}
+
+
+static void start_polling_thread(thrd_t *thrd, struct args *args) {
+ if (thrd_create(thrd, run_polling_thread, (void *)args) != thrd_success)
+ Test::Fail("Failed to create thread");
+}
+
+static void stop_polling_thread(thrd_t thrd, struct args *args) {
+ int ret;
+ if (thrd_join(thrd, &ret) != thrd_success)
+ Test::Fail("Thread join failed");
+}
+
+
+static void do_test_consumer_close(bool do_subscribe,
+ bool do_unsubscribe,
+ bool do_close,
+ bool with_queue) {
+ std::string testname = tostr()
+ << "Test C++ KafkaConsumer close "
+ << "subscribe=" << do_subscribe
+ << ", unsubscribe=" << do_unsubscribe
+ << ", close=" << do_close << ", queue=" << with_queue;
+ SUB_TEST("%s", testname.c_str());
+
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *bootstraps;
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ std::string errstr;
+
+ /*
+ * Produce messages to topics
+ */
+ const int msgs_per_partition = 10;
+ RdKafka::Conf *pconf;
+ Test::conf_init(&pconf, NULL, 10);
+ Test::conf_set(pconf, "bootstrap.servers", bootstraps);
+ RdKafka::Producer *p = RdKafka::Producer::create(pconf, errstr);
+ if (!p)
+ Test::Fail(tostr() << __FUNCTION__
+ << ": Failed to create producer: " << errstr);
+ delete pconf;
+ Test::produce_msgs(p, "some_topic", 0, msgs_per_partition, 10,
+ true /*flush*/);
+ delete p;
+
+ /* Create consumer */
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 0);
+ Test::conf_set(conf, "bootstrap.servers", bootstraps);
+ Test::conf_set(conf, "group.id", "mygroup");
+ Test::conf_set(conf, "auto.offset.reset", "beginning");
+
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ if (do_subscribe) {
+ std::vector<std::string> topics;
+ topics.push_back("some_topic");
+ RdKafka::ErrorCode err;
+ if ((err = c->subscribe(topics)))
+ Test::Fail("subscribe failed: " + RdKafka::err2str(err));
+ }
+
+ int received = 0;
+ while (received < msgs_per_partition) {
+ RdKafka::Message *msg = c->consume(500);
+ if (msg) {
+ ++received;
+ delete msg;
+ }
+ }
+
+ RdKafka::ErrorCode err;
+ if (do_unsubscribe)
+ if ((err = c->unsubscribe()))
+ Test::Fail("unsubscribe failed: " + RdKafka::err2str(err));
+
+ if (do_close) {
+ if (with_queue) {
+ RdKafka::Queue *queue = RdKafka::Queue::create(c);
+ struct args args = {queue, c};
+ thrd_t thrd;
+
+ /* Serve queue in background thread until close() is done */
+ start_polling_thread(&thrd, &args);
+
+ RdKafka::Error *error;
+
+ Test::Say("Closing with queue\n");
+ if ((error = c->close(queue)))
+ Test::Fail("close(queue) failed: " + error->str());
+
+ stop_polling_thread(thrd, &args);
+
+ Test::Say("Attempting second close\n");
+ /* A second call should fail */
+ if (!(error = c->close(queue)))
+ Test::Fail("Expected second close(queue) to fail");
+ if (error->code() != RdKafka::ERR__DESTROY)
+ Test::Fail("Expected second close(queue) to fail with DESTROY, not " +
+ error->str());
+ delete error;
+
+ delete queue;
+
+ } else {
+ if ((err = c->close()))
+ Test::Fail("close failed: " + RdKafka::err2str(err));
+
+ /* A second call should fail */
+ if ((err = c->close()) != RdKafka::ERR__DESTROY)
+ Test::Fail("Expected second close to fail with DESTROY, not " +
+ RdKafka::err2str(err));
+ }
+ }
+
+ /* Call an async method that will do nothing but verify that we're not
+ * crashing due to use-after-free. */
+ if ((err = c->commitAsync()))
+ Test::Fail("Expected commitAsync close to succeed, got " +
+ RdKafka::err2str(err));
+
+ delete c;
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+extern "C" {
+int main_0116_kafkaconsumer_close(int argc, char **argv) {
+ /* Parameters:
+ * subscribe, unsubscribe, close, with_queue */
+ for (int i = 0; i < 1 << 4; i++) {
+ bool subscribe = i & (1 << 0);
+ bool unsubscribe = i & (1 << 1);
+ bool do_close = i & (1 << 2);
+ bool with_queue = i & (1 << 3);
+ do_test_consumer_close(subscribe, unsubscribe, do_close, with_queue);
+ }
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c
new file mode 100644
index 000000000..7a82f713e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0117-mock_errors.c
@@ -0,0 +1,324 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Misc mock-injected errors.
+ *
+ */
+
+/**
+ * @brief Test producer handling (retry) of ERR_KAFKA_STORAGE_ERROR.
+ */
+static void do_test_producer_storage_error(rd_bool_t too_few_retries) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_resp_err_t err;
+
+ SUB_TEST_QUICK("%s", too_few_retries ? "with too few retries" : "");
+
+ test_conf_init(&conf, NULL, 10);
+
+ test_conf_set(conf, "test.mock.num.brokers", "3");
+ test_conf_set(conf, "retries", too_few_retries ? "1" : "10");
+ test_conf_set(conf, "retry.backoff.ms", "500");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ test_curr->ignore_dr_err = rd_false;
+ if (too_few_retries) {
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR;
+ test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_NOT_PERSISTED;
+ } else {
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ test_curr->exp_dr_status = RD_KAFKA_MSG_STATUS_PERSISTED;
+ }
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ mcluster = rd_kafka_handle_mock_cluster(rk);
+ TEST_ASSERT(mcluster, "missing mock cluster");
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_Produce, 3,
+ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR,
+ RD_KAFKA_RESP_ERR_KAFKA_STORAGE_ERROR);
+
+ err = rd_kafka_producev(rk, RD_KAFKA_V_TOPIC("mytopic"),
+ RD_KAFKA_V_VALUE("hi", 2), RD_KAFKA_V_END);
+ TEST_ASSERT(!err, "produce failed: %s", rd_kafka_err2str(err));
+
+ /* Wait for delivery report. */
+ test_flush(rk, 5000);
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief Issue #2933. Offset commit being retried when failing due to
+ * RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS and then causing fetchers
+ * to not start.
+ */
+static void do_test_offset_commit_error_during_rebalance(void) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c1, *c2;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *bootstraps;
+ const char *topic = "test";
+ const int msgcnt = 100;
+ rd_kafka_resp_err_t err;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+ rd_kafka_mock_topic_create(mcluster, topic, 4, 3);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "1", NULL);
+
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit", "false");
+
+ /* Make sure we don't consume the entire partition in one Fetch */
+ test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+ c1 = test_create_consumer("mygroup", test_rebalance_cb,
+ rd_kafka_conf_dup(conf), NULL);
+
+ c2 = test_create_consumer("mygroup", test_rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(c1, topic);
+ test_consumer_subscribe(c2, topic);
+
+
+ /* Wait for assignment and one message */
+ test_consumer_poll("C1.PRE", c1, 0, -1, -1, 1, NULL);
+ test_consumer_poll("C2.PRE", c2, 0, -1, -1, 1, NULL);
+
+ /* Trigger rebalance */
+ test_consumer_close(c2);
+ rd_kafka_destroy(c2);
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_OffsetCommit, 6,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS);
+
+ /* This commit should fail (async) */
+ TEST_SAY("Committing (should fail)\n");
+ err = rd_kafka_commit(c1, NULL, 0 /*sync*/);
+ TEST_SAY("Commit returned %s\n", rd_kafka_err2name(err));
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ "Expected commit to fail with ERR_REBALANCE_IN_PROGRESS, "
+ "not %s",
+ rd_kafka_err2name(err));
+
+ /* Wait for new assignment and able to read all messages */
+ test_consumer_poll("C1.PRE", c1, 0, -1, -1, msgcnt, NULL);
+
+ rd_kafka_destroy(c1);
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+
+
+/**
+ * @brief Issue #2933. Offset commit being retried when failing due to
+ * RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS and then causing fetchers
+ * to not start.
+ */
+static void do_test_offset_commit_request_timed_out(rd_bool_t auto_commit) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c1, *c2;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *bootstraps;
+ const char *topic = "test";
+ const int msgcnt = 1;
+ rd_kafka_topic_partition_list_t *partitions;
+
+ SUB_TEST_QUICK("enable.auto.commit=%s", auto_commit ? "true" : "false");
+
+ test_conf_init(&conf, NULL, 60);
+
+ mcluster = test_mock_cluster_new(1, &bootstraps);
+
+ rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+
+ /* Seed the topic with messages */
+ test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "1", NULL);
+
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.commit",
+ auto_commit ? "true" : "false");
+ /* Too high to be done by interval in this test */
+ test_conf_set(conf, "auto.commit.interval.ms", "90000");
+
+ /* Make sure we don't consume the entire partition in one Fetch */
+ test_conf_set(conf, "fetch.message.max.bytes", "100");
+
+ c1 = test_create_consumer("mygroup", NULL, rd_kafka_conf_dup(conf),
+ NULL);
+
+
+ test_consumer_subscribe(c1, topic);
+
+ /* Wait for assignment and one message */
+ test_consumer_poll("C1.PRE", c1, 0, -1, -1, 1, NULL);
+
+ rd_kafka_mock_push_request_errors(mcluster, RD_KAFKAP_OffsetCommit, 2,
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT,
+ RD_KAFKA_RESP_ERR_REQUEST_TIMED_OUT);
+
+ if (!auto_commit)
+ TEST_CALL_ERR__(rd_kafka_commit(c1, NULL, 0 /*sync*/));
+
+ /* Rely on consumer_close() doing final commit
+ * when auto commit is enabled */
+
+ test_consumer_close(c1);
+
+ rd_kafka_destroy(c1);
+
+ /* Create a new consumer and retrieve the committed offsets to verify
+ * they were properly committed */
+ c2 = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ partitions = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(partitions, topic, 0)->offset =
+ RD_KAFKA_OFFSET_INVALID;
+
+ TEST_CALL_ERR__(rd_kafka_committed(c2, partitions, 10 * 1000));
+ TEST_ASSERT(partitions->elems[0].offset == 1,
+ "Expected committed offset to be 1, not %" PRId64,
+ partitions->elems[0].offset);
+
+ rd_kafka_topic_partition_list_destroy(partitions);
+
+ rd_kafka_destroy(c2);
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+/**
+ * @brief Verify that a cluster roll does not cause consumer_poll() to return
+ * the temporary and retriable COORDINATOR_LOAD_IN_PROGRESS error. We should
+ * backoff and retry in that case.
+ */
+static void do_test_joingroup_coordinator_load_in_progress() {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *consumer;
+ rd_kafka_mock_cluster_t *mcluster;
+ const char *bootstraps;
+ const char *topic = "test";
+ const int msgcnt = 1;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+
+ mcluster = test_mock_cluster_new(1, &bootstraps);
+
+ rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+
+ test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
+ "bootstrap.servers", bootstraps,
+ "batch.num.messages", "1", NULL);
+
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_FindCoordinator, 1,
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS);
+
+ consumer = test_create_consumer("mygroup", NULL, conf, NULL);
+
+ test_consumer_subscribe(consumer, topic);
+
+ /* Wait for assignment and one message */
+ test_consumer_poll("consumer", consumer, 0, -1, -1, msgcnt, NULL);
+
+ test_consumer_close(consumer);
+
+ rd_kafka_destroy(consumer);
+
+ test_mock_cluster_destroy(mcluster);
+
+ SUB_TEST_PASS();
+}
+
+int main_0117_mock_errors(int argc, char **argv) {
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ do_test_producer_storage_error(rd_false);
+ do_test_producer_storage_error(rd_true);
+
+ do_test_offset_commit_error_during_rebalance();
+
+ do_test_offset_commit_request_timed_out(rd_true);
+ do_test_offset_commit_request_timed_out(rd_false);
+
+ do_test_joingroup_coordinator_load_in_progress();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c
new file mode 100644
index 000000000..1cdcda462
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0118-commit_rebalance.c
@@ -0,0 +1,121 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/**
+ * Issue #2933: Offset commit on revoke would cause hang.
+ */
+
+static rd_kafka_t *c1, *c2;
+
+
+static void rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+
+ TEST_SAY("Rebalance for %s: %s: %d partition(s)\n", rd_kafka_name(rk),
+ rd_kafka_err2name(err), parts->cnt);
+
+ if (err == RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS) {
+ TEST_CALL_ERR__(rd_kafka_assign(rk, parts));
+
+ } else if (err == RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS) {
+ rd_kafka_resp_err_t commit_err;
+
+ TEST_CALL_ERR__(rd_kafka_position(rk, parts));
+
+ TEST_CALL_ERR__(rd_kafka_assign(rk, NULL));
+
+ if (rk == c1)
+ return;
+
+ /* Give the closing consumer some time to handle the
+ * unassignment and leave so that the coming commit fails. */
+ rd_sleep(5);
+
+ /* Committing after unassign will trigger an
+ * Illegal generation error from the broker, which would
+ * previously cause the cgrp to not properly transition
+ * the next assigned state to fetching.
+ * The closing consumer's commit is denied by the consumer
+ * since it will have started to shut down after the assign
+ * call. */
+ TEST_SAY("%s: Committing\n", rd_kafka_name(rk));
+ commit_err = rd_kafka_commit(rk, parts, 0 /*sync*/);
+ TEST_SAY("%s: Commit result: %s\n", rd_kafka_name(rk),
+ rd_kafka_err2name(commit_err));
+
+ TEST_ASSERT(commit_err,
+ "Expected closing consumer %s's commit to "
+ "fail, but got %s",
+ rd_kafka_name(rk), rd_kafka_err2name(commit_err));
+
+ } else {
+ TEST_FAIL("Unhandled event: %s", rd_kafka_err2name(err));
+ }
+}
+
+
+int main_0118_commit_rebalance(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ rd_kafka_conf_t *conf;
+ const int msgcnt = 1000;
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+
+ test_produce_msgs_easy_v(topic, 0, RD_KAFKA_PARTITION_UA, 0, msgcnt, 10,
+ NULL);
+
+ c1 = test_create_consumer(topic, rebalance_cb, rd_kafka_conf_dup(conf),
+ NULL);
+ c2 = test_create_consumer(topic, rebalance_cb, conf, NULL);
+
+ test_consumer_subscribe(c1, topic);
+ test_consumer_subscribe(c2, topic);
+
+
+ test_consumer_poll("C1.PRE", c1, 0, -1, -1, 10, NULL);
+ test_consumer_poll("C2.PRE", c2, 0, -1, -1, 10, NULL);
+
+ /* Trigger rebalance */
+ test_consumer_close(c2);
+ rd_kafka_destroy(c2);
+
+ /* Since no offsets were successfully committed the remaining consumer
+ * should be able to receive all messages. */
+ test_consumer_poll("C1.POST", c1, 0, -1, -1, msgcnt, NULL);
+
+ rd_kafka_destroy(c1);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp
new file mode 100644
index 000000000..507b67302
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0119-consumer_auth.cpp
@@ -0,0 +1,148 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <iostream>
+#include <map>
+#include <cstring>
+#include <cstdlib>
+#include "testcpp.h"
+
+
+/**
+ * @brief Let FetchRequests fail with authorization failure.
+ *
+ */
+
+
+static void do_test_fetch_unauth() {
+ Test::Say(tostr() << _C_MAG << "[ Test unauthorized Fetch ]\n");
+
+ std::string topic = Test::mk_topic_name("0119-fetch_unauth", 1);
+
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 20);
+
+ Test::conf_set(conf, "group.id", topic);
+
+ std::string bootstraps;
+ if (conf->get("bootstrap.servers", bootstraps) != RdKafka::Conf::CONF_OK)
+ Test::Fail("Failed to retrieve bootstrap.servers");
+
+ std::string errstr;
+ RdKafka::KafkaConsumer *c = RdKafka::KafkaConsumer::create(conf, errstr);
+ if (!c)
+ Test::Fail("Failed to create KafkaConsumer: " + errstr);
+ delete conf;
+
+ /* Create topic */
+ const int partition_cnt = 3;
+ Test::create_topic(NULL, topic.c_str(), partition_cnt, 1);
+
+ /* Produce messages */
+ test_produce_msgs_easy(topic.c_str(), 0, RdKafka::Topic::PARTITION_UA, 1000);
+
+ /* Add ACLs:
+ * Allow Describe (Metadata)
+ * Deny Read (Fetch)
+ */
+
+ test_kafka_cmd(
+ "kafka-acls.sh --bootstrap-server %s "
+ "--add --allow-principal 'User:*' "
+ "--operation Describe --allow-host '*' "
+ "--topic '%s'",
+ bootstraps.c_str(), topic.c_str());
+
+ test_kafka_cmd(
+ "kafka-acls.sh --bootstrap-server %s "
+ "--add --deny-principal 'User:*' "
+ "--operation Read --deny-host '*' "
+ "--topic '%s'",
+ bootstraps.c_str(), topic.c_str());
+
+ Test::subscribe(c, topic);
+
+ int auth_err_cnt = 0;
+
+ /* Consume for 15s (30*0.5), counting the number of auth errors,
+ * should only see one error per consumed partition, and no messages. */
+ for (int i = 0; i < 30; i++) {
+ RdKafka::Message *msg;
+
+ msg = c->consume(500);
+ TEST_ASSERT(msg, "Expected msg");
+
+ switch (msg->err()) {
+ case RdKafka::ERR__TIMED_OUT:
+ break;
+
+ case RdKafka::ERR_NO_ERROR:
+ Test::Fail("Did not expect a valid message");
+ break;
+
+ case RdKafka::ERR_TOPIC_AUTHORIZATION_FAILED:
+ Test::Say(tostr() << "Consumer error on " << msg->topic_name() << " ["
+ << msg->partition() << "]: " << msg->errstr() << "\n");
+
+ if (auth_err_cnt++ > partition_cnt)
+ Test::Fail(
+ "Too many auth errors received, "
+ "expected same as number of partitions");
+ break;
+
+ default:
+ Test::Fail(tostr() << "Unexpected consumer error on " << msg->topic_name()
+ << " [" << msg->partition() << "]: " << msg->errstr());
+ break;
+ }
+
+ delete msg;
+ }
+
+ TEST_ASSERT(auth_err_cnt == partition_cnt,
+ "Expected exactly %d auth errors, saw %d", partition_cnt,
+ auth_err_cnt);
+
+ delete c;
+
+ Test::Say(tostr() << _C_GRN << "[ Test unauthorized Fetch PASS ]\n");
+}
+
+extern "C" {
+int main_0119_consumer_auth(int argc, char **argv) {
+ /* We can't bother passing Java security config to kafka-acls.sh */
+ if (test_needs_auth()) {
+ Test::Skip("Cluster authentication required\n");
+ return 0;
+ }
+
+ do_test_fetch_unauth();
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c
new file mode 100644
index 000000000..2031dcba1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0120-asymmetric_subscription.c
@@ -0,0 +1,183 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+#define _PART_CNT 4
+
+
+/**
+ * @brief Verify proper assignment for asymmetrical subscriptions.
+ */
+static void do_test_asymmetric(const char *assignor, const char *bootstraps) {
+ rd_kafka_conf_t *conf;
+#define _C_CNT 3
+ rd_kafka_t *c[_C_CNT];
+#define _S_CNT 2 /* max subscription count per consumer */
+ const char *topics[_C_CNT][_S_CNT] = {
+ /* c0 */ {"t1", "t2"},
+ /* c1 */ {"t2", "t3"},
+ /* c2 */ {"t4"},
+ };
+ struct {
+ const char *topic;
+ const int cnt;
+ int seen;
+ } expect[_C_CNT][_S_CNT] = {
+ /* c0 */
+ {
+ {"t1", _PART_CNT},
+ {"t2", _PART_CNT / 2},
+ },
+ /* c1 */
+ {
+ {"t2", _PART_CNT / 2},
+ {"t3", _PART_CNT},
+ },
+ /* c2 */
+ {
+ {"t4", _PART_CNT},
+ },
+ };
+ const char *groupid = assignor;
+ int i;
+
+ SUB_TEST_QUICK("%s assignor", assignor);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ test_conf_set(conf, "partition.assignment.strategy", assignor);
+
+ for (i = 0; i < _C_CNT; i++) {
+ char name[16];
+ rd_kafka_topic_partition_list_t *tlist =
+ rd_kafka_topic_partition_list_new(2);
+ int j;
+
+ rd_snprintf(name, sizeof(name), "c%d", i);
+ test_conf_set(conf, "client.id", name);
+
+ for (j = 0; j < _S_CNT && topics[i][j]; j++)
+ rd_kafka_topic_partition_list_add(
+ tlist, topics[i][j], RD_KAFKA_PARTITION_UA);
+
+ c[i] = test_create_consumer(groupid, NULL,
+ rd_kafka_conf_dup(conf), NULL);
+
+ TEST_CALL_ERR__(rd_kafka_subscribe(c[i], tlist));
+
+ rd_kafka_topic_partition_list_destroy(tlist);
+ }
+
+ rd_kafka_conf_destroy(conf);
+
+
+ /* Await assignments for all consumers */
+ for (i = 0; i < _C_CNT; i++)
+ test_consumer_wait_assignment(c[i], rd_true);
+
+ /* All have assignments, grab them. */
+ for (i = 0; i < _C_CNT; i++) {
+ int j;
+ int p;
+ rd_kafka_topic_partition_list_t *assignment;
+
+ TEST_CALL_ERR__(rd_kafka_assignment(c[i], &assignment));
+
+ TEST_ASSERT(assignment, "No assignment for %s",
+ rd_kafka_name(c[i]));
+
+ for (p = 0; p < assignment->cnt; p++) {
+ const rd_kafka_topic_partition_t *part =
+ &assignment->elems[p];
+ rd_bool_t found = rd_false;
+
+ for (j = 0; j < _S_CNT && expect[i][j].topic; j++) {
+ if (!strcmp(part->topic, expect[i][j].topic)) {
+ expect[i][j].seen++;
+ found = rd_true;
+ break;
+ }
+ }
+
+ TEST_ASSERT(found,
+ "%s was assigned unexpected topic %s",
+ rd_kafka_name(c[i]), part->topic);
+ }
+
+ for (j = 0; j < _S_CNT && expect[i][j].topic; j++) {
+ TEST_ASSERT(expect[i][j].seen == expect[i][j].cnt,
+ "%s expected %d assigned partitions "
+ "for %s, not %d",
+ rd_kafka_name(c[i]), expect[i][j].cnt,
+ expect[i][j].topic, expect[i][j].seen);
+ }
+
+ rd_kafka_topic_partition_list_destroy(assignment);
+ }
+
+
+ for (i = 0; i < _C_CNT; i++) {
+ if (strcmp(assignor, "range") && (i & 1) == 0)
+ test_consumer_close(c[i]);
+ rd_kafka_destroy(c[i]);
+ }
+
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0120_asymmetric_subscription(int argc, char **argv) {
+ const char *bootstraps;
+ rd_kafka_mock_cluster_t *mcluster;
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ mcluster = test_mock_cluster_new(3, &bootstraps);
+
+
+ /* Create topics */
+ rd_kafka_mock_topic_create(mcluster, "t1", _PART_CNT, 1);
+ rd_kafka_mock_topic_create(mcluster, "t2", _PART_CNT, 1);
+ rd_kafka_mock_topic_create(mcluster, "t3", _PART_CNT, 1);
+ rd_kafka_mock_topic_create(mcluster, "t4", _PART_CNT, 1);
+
+
+ do_test_asymmetric("roundrobin", bootstraps);
+ do_test_asymmetric("range", bootstraps);
+ do_test_asymmetric("cooperative-sticky", bootstraps);
+
+ test_mock_cluster_destroy(mcluster);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c
new file mode 100644
index 000000000..35f5d529e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0121-clusterid.c
@@ -0,0 +1,118 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Connecting to two different clusters should emit warning.
+ *
+ */
+
+static void
+log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+ rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
+ rd_bool_t matched = !strcmp(fac, "CLUSTERID") &&
+ strstr(buf, "reports different ClusterId");
+
+ TEST_SAY("%sLog: %s level %d fac %s: %s\n", matched ? _C_GRN : "",
+ rd_kafka_name(rk), level, fac, buf);
+
+ if (matched)
+ rd_atomic32_add(log_cntp, 1);
+}
+
+
+int main_0121_clusterid(int argc, char **argv) {
+ rd_kafka_mock_cluster_t *cluster_a, *cluster_b;
+ const char *bootstraps_a, *bootstraps_b;
+ size_t bs_size;
+ char *bootstraps;
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_atomic32_t log_cnt;
+ int cnt = 0;
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ /* Create two clusters */
+ cluster_a = test_mock_cluster_new(1, &bootstraps_a);
+ cluster_b = test_mock_cluster_new(1, &bootstraps_b);
+ rd_kafka_mock_broker_set_down(cluster_b, 1);
+
+ test_conf_init(&conf, NULL, 10);
+
+ /* Combine bootstraps from both clusters */
+ bs_size = strlen(bootstraps_a) + strlen(bootstraps_b) + 2;
+ bootstraps = malloc(bs_size);
+ rd_snprintf(bootstraps, bs_size, "%s,%s", bootstraps_a, bootstraps_b);
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+ free(bootstraps);
+
+ rd_atomic32_init(&log_cnt, 0);
+ rd_kafka_conf_set_log_cb(conf, log_cb);
+ rd_kafka_conf_set_opaque(conf, &log_cnt);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+
+ while (rd_atomic32_get(&log_cnt) == 0) {
+ const rd_kafka_metadata_t *md;
+
+ /* After 3 seconds bring down cluster a and bring up
+ * cluster b, this is to force the client to connect to
+ * the other cluster. */
+ if (cnt == 3) {
+ rd_kafka_mock_broker_set_down(cluster_a, 1);
+ rd_kafka_mock_broker_set_up(cluster_b, 1);
+ }
+
+ if (!rd_kafka_metadata(rk, 1, NULL, &md, 1000))
+ rd_kafka_metadata_destroy(md);
+ rd_sleep(1);
+
+ cnt++;
+ }
+
+
+ rd_kafka_destroy(rk);
+ test_mock_cluster_destroy(cluster_a);
+ test_mock_cluster_destroy(cluster_b);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c
new file mode 100644
index 000000000..4f8727017
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0122-buffer_cleaning_after_rebalance.c
@@ -0,0 +1,226 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+typedef struct consumer_s {
+ const char *what;
+ rd_kafka_queue_t *rkq;
+ int timeout_ms;
+ int consume_msg_cnt;
+ int expected_msg_cnt;
+ rd_kafka_t *rk;
+ uint64_t testid;
+ test_msgver_t *mv;
+ struct test *test;
+} consumer_t;
+
+static int consumer_batch_queue(void *arg) {
+ consumer_t *arguments = arg;
+ int msg_cnt = 0;
+ int i;
+ test_timing_t t_cons;
+
+ rd_kafka_queue_t *rkq = arguments->rkq;
+ int timeout_ms = arguments->timeout_ms;
+ const int consume_msg_cnt = arguments->consume_msg_cnt;
+ rd_kafka_t *rk = arguments->rk;
+ uint64_t testid = arguments->testid;
+ rd_kafka_message_t **rkmessage =
+ malloc(consume_msg_cnt * sizeof(*rkmessage));
+
+ if (arguments->test)
+ test_curr = arguments->test;
+
+ TEST_SAY(
+ "%s calling consume_batch_queue(timeout=%d, msgs=%d) "
+ "and expecting %d messages back\n",
+ rd_kafka_name(rk), timeout_ms, consume_msg_cnt,
+ arguments->expected_msg_cnt);
+
+ TIMING_START(&t_cons, "CONSUME");
+ msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage,
+ consume_msg_cnt);
+ TIMING_STOP(&t_cons);
+
+ TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk),
+ msg_cnt, arguments->consume_msg_cnt,
+ arguments->expected_msg_cnt);
+ TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt,
+ "consumed %d messages, expected %d", msg_cnt,
+ arguments->expected_msg_cnt);
+
+ for (i = 0; i < msg_cnt; i++) {
+ if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0)
+ TEST_FAIL(
+ "The message is not from testid "
+ "%" PRId64 " \n",
+ testid);
+ rd_kafka_message_destroy(rkmessage[i]);
+ }
+
+ free(rkmessage);
+
+ return 0;
+}
+
+
+/**
+ * @brief Produce 400 messages and consume 500 messages totally by 2 consumers
+ * using batch queue method, verify if there isn't any missed or
+ * duplicate messages received by the two consumers.
+ * The reasons for setting the consume messages number is higher than
+ * or equal to the produce messages number are:
+ * 1) Make sure each consumer can at most receive half of the produced
+ * messages even though the consumers expect more.
+ * 2) If the consume messages number is smaller than the produce
+ * messages number, it's hard to verify that the messages returned
+ * are added to the batch queue before or after the rebalancing.
+ * But if the consume messages number is larger than the produce
+ * messages number, and we still received half of the produced
+ * messages by each consumer, we can make sure that the buffer
+ * cleaning is happened during the batch queue process to guarantee
+ * only received messages added to the batch queue after the
+ * rebalance.
+ *
+ * 1. Produce 100 messages to each of the 4 partitions
+ * 2. First consumer subscribes to the topic, wait for it's assignment
+ * 3. The first consumer consumes 500 messages using the batch queue
+ * method
+ * 4. Second consumer subscribes to the topic, wait for it's assignment
+ * 5. Rebalance happenes
+ * 6. The second consumer consumes 500 messages using the batch queue
+ * method
+ * 7. Each consumer receives 200 messages finally
+ * 8. Combine all the messages received by the 2 consumers and
+ * verify if there isn't any missed or duplicate messages
+ *
+ */
+static void do_test_consume_batch(const char *strategy) {
+ const int partition_cnt = 4;
+ rd_kafka_queue_t *rkq1, *rkq2;
+ const char *topic;
+ rd_kafka_t *c1;
+ rd_kafka_t *c2;
+ int p;
+ const int timeout_ms = 12000; /* Must be > rebalance time */
+ uint64_t testid;
+ const int consume_msg_cnt = 500;
+ const int produce_msg_cnt = 400;
+ rd_kafka_conf_t *conf;
+ consumer_t c1_args = RD_ZERO_INIT;
+ consumer_t c2_args = RD_ZERO_INIT;
+ test_msgver_t mv;
+ thrd_t thread_id;
+
+ SUB_TEST("partition.assignment.strategy = %s", strategy);
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "partition.assignment.strategy", strategy);
+
+ testid = test_id_generate();
+ test_msgver_init(&mv, testid);
+
+ /* Produce messages */
+ topic = test_mk_topic_name("0122-buffer_cleaning", 1);
+
+ for (p = 0; p < partition_cnt; p++)
+ test_produce_msgs_easy(topic, testid, p,
+ produce_msg_cnt / partition_cnt);
+
+ /* Create consumers */
+ c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+ c2 = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(c1, topic);
+ test_consumer_wait_assignment(c1, rd_false);
+
+ /* Create generic consume queue */
+ rkq1 = rd_kafka_queue_get_consumer(c1);
+
+ c1_args.what = "C1.PRE";
+ c1_args.rkq = rkq1;
+ c1_args.timeout_ms = timeout_ms;
+ c1_args.consume_msg_cnt = consume_msg_cnt;
+ c1_args.expected_msg_cnt = produce_msg_cnt / 2;
+ c1_args.rk = c1;
+ c1_args.testid = testid;
+ c1_args.mv = &mv;
+ c1_args.test = test_curr;
+ if (thrd_create(&thread_id, consumer_batch_queue, &c1_args) !=
+ thrd_success)
+ TEST_FAIL("Failed to create thread for %s", "C1.PRE");
+
+ test_consumer_subscribe(c2, topic);
+ test_consumer_wait_assignment(c2, rd_false);
+
+ thrd_join(thread_id, NULL);
+
+ /* Create generic consume queue */
+ rkq2 = rd_kafka_queue_get_consumer(c2);
+
+ c2_args.what = "C2.PRE";
+ c2_args.rkq = rkq2;
+ /* Second consumer should be able to consume all messages right away */
+ c2_args.timeout_ms = 5000;
+ c2_args.consume_msg_cnt = consume_msg_cnt;
+ c2_args.expected_msg_cnt = produce_msg_cnt / 2;
+ c2_args.rk = c2;
+ c2_args.testid = testid;
+ c2_args.mv = &mv;
+
+ consumer_batch_queue(&c2_args);
+
+ test_msgver_verify("C1.PRE + C2.PRE", &mv,
+ TEST_MSGVER_ORDER | TEST_MSGVER_DUP, 0,
+ produce_msg_cnt);
+ test_msgver_clear(&mv);
+
+ rd_kafka_queue_destroy(rkq1);
+ rd_kafka_queue_destroy(rkq2);
+
+ test_consumer_close(c1);
+ test_consumer_close(c2);
+
+ rd_kafka_destroy(c1);
+ rd_kafka_destroy(c2);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0122_buffer_cleaning_after_rebalance(int argc, char **argv) {
+ do_test_consume_batch("range");
+ do_test_consume_batch("cooperative-sticky");
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c
new file mode 100644
index 000000000..734467017
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0123-connections_max_idle.c
@@ -0,0 +1,98 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#include "../src/rdkafka_proto.h"
+#include "../src/rdunittest.h"
+
+#include <stdarg.h>
+
+
+/**
+ * @name Verify connections.max.idle.ms
+ *
+ */
+
+static void
+log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+ rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
+
+ if (!strstr(buf, "Connection max idle time exceeded"))
+ return;
+
+ TEST_SAY("Log: %s level %d fac %s: %s\n", rd_kafka_name(rk), level, fac,
+ buf);
+
+ rd_atomic32_add(log_cntp, 1);
+}
+
+static void do_test_idle(rd_bool_t set_idle) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_atomic32_t log_cnt;
+
+ SUB_TEST_QUICK("set_idle = %s", set_idle ? "yes" : "no");
+
+ test_conf_init(&conf, NULL, 10);
+ test_conf_set(conf, "debug", "broker");
+ test_conf_set(conf, "connections.max.idle.ms", set_idle ? "5000" : "0");
+ rd_atomic32_init(&log_cnt, 0);
+ rd_kafka_conf_set_log_cb(conf, log_cb);
+ rd_kafka_conf_set_opaque(conf, &log_cnt);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rd_sleep(3);
+ TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
+ "Should not have seen an idle disconnect this soon");
+
+ rd_sleep(5);
+ if (set_idle)
+ TEST_ASSERT(rd_atomic32_get(&log_cnt) > 0,
+ "Should have seen at least one idle "
+ "disconnect by now");
+ else
+ TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
+ "Should not have seen an idle disconnect");
+
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0123_connections_max_idle(int argc, char **argv) {
+
+ do_test_idle(rd_true);
+ do_test_idle(rd_false);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c
new file mode 100644
index 000000000..5c61e5318
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0124-openssl_invalid_engine.c
@@ -0,0 +1,69 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+int main_0124_openssl_invalid_engine(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ char errstr[512];
+ rd_kafka_conf_res_t res;
+
+ test_conf_init(&conf, NULL, 30);
+ res = rd_kafka_conf_set(conf, "ssl.engine.location", "invalid_path",
+ errstr, sizeof(errstr));
+
+ if (res == RD_KAFKA_CONF_INVALID) {
+ rd_kafka_conf_destroy(conf);
+ TEST_SKIP("%s\n", errstr);
+ return 0;
+ }
+
+ if (res != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s", errstr);
+
+ if (rd_kafka_conf_set(conf, "security.protocol", "ssl", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s", errstr);
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(!rk,
+ "kafka_new() should not succeed with invalid engine"
+ " path, error: %s",
+ errstr);
+ TEST_SAY("rd_kafka_new() failed (as expected): %s\n", errstr);
+
+ TEST_ASSERT(strstr(errstr, "engine initialization failed in"),
+ "engine"
+ " initialization failure expected because of invalid engine"
+ " path, error: %s",
+ errstr);
+
+ rd_kafka_conf_destroy(conf);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c
new file mode 100644
index 000000000..12f36cf19
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0125-immediate_flush.c
@@ -0,0 +1,78 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * Verify that flush() overrides the linger.ms time.
+ *
+ */
+int main_0125_immediate_flush(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ const char *topic = test_mk_topic_name("0125_immediate_flush", 1);
+ const int msgcnt = 100;
+ int remains = 0;
+ test_timing_t t_time;
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_conf_set(conf, "linger.ms", "10000");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ test_create_topic(rk, topic, 1, 1);
+
+ /* Produce half set of messages without waiting for delivery. */
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50,
+ &remains);
+
+ TIMING_START(&t_time, "NO_FLUSH");
+ do {
+ rd_kafka_poll(rk, 1000);
+ } while (remains > 0);
+ TIMING_ASSERT(&t_time, 10000, 15000);
+
+ /* Produce remaining messages without waiting for delivery. */
+ test_produce_msgs2_nowait(rk, topic, 0, 0, 0, msgcnt / 2, NULL, 50,
+ &remains);
+
+ /* The linger time should be overriden when flushing */
+ TIMING_START(&t_time, "FLUSH");
+ TEST_CALL_ERR__(rd_kafka_flush(rk, 2000));
+ TIMING_ASSERT(&t_time, 0, 2500);
+
+ rd_kafka_destroy(rk);
+
+
+ /* Verify messages were actually produced by consuming them back. */
+ test_consume_msgs_easy(topic, topic, 0, 1, msgcnt, NULL);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c
new file mode 100644
index 000000000..8eb187068
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0126-oauthbearer_oidc.c
@@ -0,0 +1,213 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+static rd_bool_t error_seen;
+/**
+ * @brief After config OIDC, make sure the producer and consumer
+ * can work successfully.
+ *
+ */
+static void
+do_test_produce_consumer_with_OIDC(const rd_kafka_conf_t *base_conf) {
+ const char *topic;
+ uint64_t testid;
+ rd_kafka_t *p1;
+ rd_kafka_t *c1;
+ rd_kafka_conf_t *conf;
+
+ const char *url = test_getenv("VALID_OIDC_URL", NULL);
+
+ SUB_TEST("Test producer and consumer with oidc configuration");
+
+ if (!url) {
+ SUB_TEST_SKIP(
+ "VALID_OIDC_URL environment variable is not set\n");
+ return;
+ }
+
+ conf = rd_kafka_conf_dup(base_conf);
+ test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", url);
+
+ testid = test_id_generate();
+
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ p1 = test_create_handle(RD_KAFKA_PRODUCER, rd_kafka_conf_dup(conf));
+
+ topic = test_mk_topic_name("0126-oauthbearer_oidc", 1);
+ test_create_topic(p1, topic, 1, 3);
+ TEST_SAY("Topic: %s is created\n", topic);
+
+ test_produce_msgs2(p1, topic, testid, 0, 0, 1, NULL, 0);
+
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ c1 = test_create_consumer(topic, NULL, rd_kafka_conf_dup(conf), NULL);
+ test_consumer_subscribe(c1, topic);
+
+ /* Give it some time to trigger the token refresh. */
+ rd_usleep(5 * 1000 * 1000, NULL);
+ test_consumer_poll("OIDC.C1", c1, testid, 1, -1, 1, NULL);
+
+ test_consumer_close(c1);
+
+ rd_kafka_destroy(p1);
+ rd_kafka_destroy(c1);
+ SUB_TEST_PASS();
+}
+
+
+static void
+auth_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ if (err == RD_KAFKA_RESP_ERR__AUTHENTICATION ||
+ err == RD_KAFKA_RESP_ERR__ALL_BROKERS_DOWN) {
+ TEST_SAY("Expected error: %s: %s\n", rd_kafka_err2str(err),
+ reason);
+ error_seen = rd_true;
+ } else
+ TEST_FAIL("Unexpected error: %s: %s", rd_kafka_err2str(err),
+ reason);
+ rd_kafka_yield(rk);
+}
+
+
+/**
+ * @brief After config OIDC, if the token is expired, make sure
+ * the authentication fail as expected.
+ *
+ */
+static void do_test_produce_consumer_with_OIDC_expired_token_should_fail(
+ const rd_kafka_conf_t *base_conf) {
+ rd_kafka_t *c1;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+
+ const char *expired_url = test_getenv("EXPIRED_TOKEN_OIDC_URL", NULL);
+
+ SUB_TEST("Test OAUTHBEARER/OIDC failing with expired JWT");
+
+ if (!expired_url) {
+ SUB_TEST_SKIP(
+ "EXPIRED_TOKEN_OIDC_URL environment variable is not set\n");
+ return;
+ }
+
+ conf = rd_kafka_conf_dup(base_conf);
+
+ error_seen = rd_false;
+ test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", expired_url);
+
+ rd_kafka_conf_set_error_cb(conf, auth_error_cb);
+
+ testid = test_id_generate();
+
+ c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL);
+
+ test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000);
+ TEST_ASSERT(error_seen);
+
+ test_consumer_close(c1);
+ rd_kafka_destroy(c1);
+ SUB_TEST_PASS();
+}
+
+
+/**
+ * @brief After config OIDC, if the token is not valid, make sure the
+ * authentication fail as expected.
+ *
+ */
+static void do_test_produce_consumer_with_OIDC_should_fail(
+ const rd_kafka_conf_t *base_conf) {
+ rd_kafka_t *c1;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+
+ const char *invalid_url = test_getenv("INVALID_OIDC_URL", NULL);
+
+ SUB_TEST("Test OAUTHBEARER/OIDC failing with invalid JWT");
+
+ if (!invalid_url) {
+ SUB_TEST_SKIP(
+ "INVALID_OIDC_URL environment variable is not set\n");
+ return;
+ }
+
+ conf = rd_kafka_conf_dup(base_conf);
+
+ error_seen = rd_false;
+
+ test_conf_set(conf, "sasl.oauthbearer.token.endpoint.url", invalid_url);
+
+ rd_kafka_conf_set_error_cb(conf, auth_error_cb);
+
+ testid = test_id_generate();
+
+ c1 = test_create_consumer("OIDC.fail.C1", NULL, conf, NULL);
+
+ test_consumer_poll_no_msgs("OIDC.fail.C1", c1, testid, 10 * 1000);
+
+ TEST_ASSERT(error_seen);
+
+ test_consumer_close(c1);
+ rd_kafka_destroy(c1);
+ SUB_TEST_PASS();
+}
+
+
+int main_0126_oauthbearer_oidc(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ const char *sec;
+ const char *oidc;
+
+ test_conf_init(&conf, NULL, 60);
+
+ sec = test_conf_get(conf, "security.protocol");
+ if (!strstr(sec, "sasl")) {
+ TEST_SKIP("Apache Kafka cluster not configured for SASL\n");
+ return 0;
+ }
+
+ oidc = test_conf_get(conf, "sasl.oauthbearer.method");
+ if (rd_strcasecmp(oidc, "OIDC")) {
+ TEST_SKIP("`sasl.oauthbearer.method=OIDC` is required\n");
+ return 0;
+ }
+
+ do_test_produce_consumer_with_OIDC(conf);
+ do_test_produce_consumer_with_OIDC_should_fail(conf);
+ do_test_produce_consumer_with_OIDC_expired_token_should_fail(conf);
+
+ rd_kafka_conf_destroy(conf);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp
new file mode 100644
index 000000000..784f09bf6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0128-sasl_callback_queue.cpp
@@ -0,0 +1,125 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Verify that background SASL callback queues work by calling
+ * a non-polling API after client creation.
+ */
+#include "testcpp.h"
+#include "rdatomic.h"
+
+namespace {
+/* Provide our own token refresh callback */
+class MyCb : public RdKafka::OAuthBearerTokenRefreshCb {
+ public:
+ MyCb() {
+ rd_atomic32_init(&called_, 0);
+ }
+
+ bool called() {
+ return rd_atomic32_get(&called_) > 0;
+ }
+
+ void oauthbearer_token_refresh_cb(RdKafka::Handle *handle,
+ const std::string &oauthbearer_config) {
+ handle->oauthbearer_set_token_failure(
+ "Not implemented by this test, "
+ "but that's okay");
+ rd_atomic32_add(&called_, 1);
+ Test::Say("Callback called!\n");
+ }
+
+ rd_atomic32_t called_;
+};
+}; // namespace
+
+
+static void do_test(bool use_background_queue) {
+ SUB_TEST("Use background queue = %s", use_background_queue ? "yes" : "no");
+
+ bool expect_called = use_background_queue;
+
+ RdKafka::Conf *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+
+ Test::conf_set(conf, "security.protocol", "SASL_PLAINTEXT");
+ Test::conf_set(conf, "sasl.mechanism", "OAUTHBEARER");
+
+ std::string errstr;
+
+ MyCb mycb;
+ if (conf->set("oauthbearer_token_refresh_cb", &mycb, errstr))
+ Test::Fail("Failed to set refresh callback: " + errstr);
+
+ if (use_background_queue)
+ if (conf->enable_sasl_queue(true, errstr))
+ Test::Fail("Failed to enable SASL queue: " + errstr);
+
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ if (use_background_queue) {
+ RdKafka::Error *error = p->sasl_background_callbacks_enable();
+ if (error)
+ Test::Fail("sasl_background_callbacks_enable() failed: " + error->str());
+ }
+
+ /* This call should fail since the refresh callback fails,
+ * and there are no brokers configured anyway. */
+ const std::string clusterid = p->clusterid(5 * 1000);
+
+ TEST_ASSERT(clusterid.empty(),
+ "Expected clusterid() to fail since the token was not set");
+
+ if (expect_called)
+ TEST_ASSERT(mycb.called(),
+ "Expected refresh callback to have been called by now");
+ else
+ TEST_ASSERT(!mycb.called(),
+ "Did not expect refresh callback to have been called");
+
+ delete p;
+
+ SUB_TEST_PASS();
+}
+
+extern "C" {
+int main_0128_sasl_callback_queue(int argc, char **argv) {
+ if (!test_check_builtin("sasl_oauthbearer")) {
+ Test::Skip("Test requires OAUTHBEARER support\n");
+ return 0;
+ }
+
+ do_test(true);
+ do_test(false);
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c
new file mode 100644
index 000000000..cc150fecc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0129-fetch_aborted_msgs.c
@@ -0,0 +1,78 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2021, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * @brief Verify that a FetchResponse containing only aborted messages does not
+ * raise a ERR_MSG_SIZE_TOO_LARGE error. #2993.
+ *
+ * 1. Create topic with a small message.max.bytes to make sure that
+ * there's at least one full fetch response without any control messages,
+ * just aborted messages.
+ * 2. Transactionally produce 10x the message.max.bytes.
+ * 3. Abort the transaction.
+ * 4. Consume from start, verify that no error is received, wait for EOF.
+ *
+ */
+int main_0129_fetch_aborted_msgs(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ const char *topic = test_mk_topic_name("0129_fetch_aborted_msgs", 1);
+ const int msgcnt = 1000;
+ const size_t msgsize = 1000;
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_conf_set(conf, "linger.ms", "10000");
+ test_conf_set(conf, "transactional.id", topic);
+ test_conf_set(conf, "message.max.bytes", "10000");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ test_admin_create_topic(rk, topic, 1, 1,
+ (const char *[]) {"max.message.bytes", "10000",
+ "segment.bytes", "20000",
+ NULL});
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(rk, -1));
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(rk));
+
+ /* Produce half set of messages without waiting for delivery. */
+ test_produce_msgs2(rk, topic, 0, 0, 0, msgcnt, NULL, msgsize);
+
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(rk, -1));
+
+ rd_kafka_destroy(rk);
+
+ /* Verify messages were actually produced by consuming them back. */
+ test_consume_msgs_easy(topic, topic, 0, 1, 0, NULL);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c
new file mode 100644
index 000000000..9fb8d2350
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0130-store_offsets.c
@@ -0,0 +1,127 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+/**
+ * Verify that offsets_store() is not allowed for unassigned partitions,
+ * and that those offsets are not committed.
+ */
+static void do_test_store_unassigned(void) {
+ const char *topic = test_mk_topic_name("0130_store_unassigned", 1);
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *c;
+ rd_kafka_topic_partition_list_t *parts;
+ rd_kafka_resp_err_t err;
+ rd_kafka_message_t *rkmessage;
+ const int64_t proper_offset = 900, bad_offset = 300;
+
+ SUB_TEST_QUICK();
+
+ test_produce_msgs_easy(topic, 0, 0, 1000);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+ test_conf_set(conf, "enable.auto.offset.store", "false");
+ test_conf_set(conf, "enable.partition.eof", "true");
+
+ c = test_create_consumer(topic, NULL, conf, NULL);
+
+ parts = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(parts, topic, 0);
+ TEST_CALL_ERR__(rd_kafka_assign(c, parts));
+
+ TEST_SAY("Consume one message\n");
+ test_consumer_poll_once(c, NULL, tmout_multip(3000));
+
+ parts->elems[0].offset = proper_offset;
+ TEST_SAY("Storing offset %" PRId64 " while assigned: should succeed\n",
+ parts->elems[0].offset);
+ TEST_CALL_ERR__(rd_kafka_offsets_store(c, parts));
+
+ TEST_SAY("Committing\n");
+ TEST_CALL_ERR__(rd_kafka_commit(c, NULL, rd_false /*sync*/));
+
+ TEST_SAY("Unassigning partitions and trying to store again\n");
+ TEST_CALL_ERR__(rd_kafka_assign(c, NULL));
+
+ parts->elems[0].offset = bad_offset;
+ TEST_SAY("Storing offset %" PRId64 " while unassigned: should fail\n",
+ parts->elems[0].offset);
+ err = rd_kafka_offsets_store(c, parts);
+ TEST_ASSERT_LATER(err != RD_KAFKA_RESP_ERR_NO_ERROR,
+ "Expected offsets_store() to fail");
+ TEST_ASSERT(parts->cnt == 1);
+
+ TEST_ASSERT(parts->elems[0].err == RD_KAFKA_RESP_ERR__STATE,
+ "Expected %s [%" PRId32
+ "] to fail with "
+ "_STATE, not %s",
+ parts->elems[0].topic, parts->elems[0].partition,
+ rd_kafka_err2name(parts->elems[0].err));
+
+ TEST_SAY("Committing: should fail\n");
+ err = rd_kafka_commit(c, NULL, rd_false /*sync*/);
+ TEST_ASSERT(err == RD_KAFKA_RESP_ERR__NO_OFFSET,
+ "Expected commit() to fail with NO_OFFSET, not %s",
+ rd_kafka_err2name(err));
+
+ TEST_SAY("Assigning partition again\n");
+ parts->elems[0].offset = RD_KAFKA_OFFSET_INVALID; /* Use committed */
+ TEST_CALL_ERR__(rd_kafka_assign(c, parts));
+
+ TEST_SAY("Consuming message to verify committed offset\n");
+ rkmessage = rd_kafka_consumer_poll(c, tmout_multip(3000));
+ TEST_ASSERT(rkmessage != NULL, "Expected message");
+ TEST_SAY("Consumed message with offset %" PRId64 "\n",
+ rkmessage->offset);
+ TEST_ASSERT(!rkmessage->err, "Expected proper message, not error %s",
+ rd_kafka_message_errstr(rkmessage));
+ TEST_ASSERT(rkmessage->offset == proper_offset,
+ "Expected first message to be properly stored "
+ "offset %" PRId64 ", not %" PRId64,
+ proper_offset, rkmessage->offset);
+
+ rd_kafka_message_destroy(rkmessage);
+
+ rd_kafka_topic_partition_list_destroy(parts);
+
+ rd_kafka_consumer_close(c);
+ rd_kafka_destroy(c);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0130_store_offsets(int argc, char **argv) {
+
+ do_test_store_unassigned();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c
new file mode 100644
index 000000000..8cac87ea0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0131-connect_timeout.c
@@ -0,0 +1,81 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+
+/**
+ * @name Verify socket.connection.setup.timeout.ms by using
+ * a mock cluster with an rtt > timeout.
+ */
+
+static void
+log_cb(const rd_kafka_t *rk, int level, const char *fac, const char *buf) {
+ rd_atomic32_t *log_cntp = rd_kafka_opaque(rk);
+
+ if (!strstr(buf, "Connection setup timed out"))
+ return;
+
+ TEST_SAY("Log: %s level %d fac %s: %s\n", rd_kafka_name(rk), level, fac,
+ buf);
+
+ rd_atomic32_add(log_cntp, 1);
+}
+
+int main_0131_connect_timeout(int argc, char **argv) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_atomic32_t log_cnt;
+
+ test_conf_init(NULL, NULL, 20);
+ conf = rd_kafka_conf_new();
+ test_conf_set(conf, "test.mock.num.brokers", "2");
+ test_conf_set(conf, "test.mock.broker.rtt", "10000");
+ test_conf_set(conf, "socket.connection.setup.timeout.ms", "6000");
+ test_conf_set(conf, "debug", "broker");
+ rd_atomic32_init(&log_cnt, 0);
+ rd_kafka_conf_set_log_cb(conf, log_cb);
+ rd_kafka_conf_set_opaque(conf, &log_cnt);
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rd_sleep(3);
+ TEST_ASSERT(rd_atomic32_get(&log_cnt) == 0,
+ "Should not have seen a disconnect this soon");
+
+ rd_sleep(5);
+ TEST_ASSERT(rd_atomic32_get(&log_cnt) > 0,
+ "Should have seen at least one "
+ "disconnect by now");
+
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c
new file mode 100644
index 000000000..5199f4f81
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0132-strategy_ordering.c
@@ -0,0 +1,171 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+#define _PART_CNT 4
+
+static void verify_roundrobin_assignment(rd_kafka_t *c[]) {
+ rd_kafka_topic_partition_list_t *assignment1;
+ rd_kafka_topic_partition_list_t *assignment2;
+
+ TEST_CALL_ERR__(rd_kafka_assignment(c[0], &assignment1));
+
+ TEST_ASSERT(assignment1->cnt == _PART_CNT / 2,
+ "Roundrobin: Assignment partitions for %s"
+ "is %d, but the expected is %d\n",
+ rd_kafka_name(c[0]), assignment1->cnt, _PART_CNT / 2);
+
+ TEST_ASSERT(assignment1->elems[0].partition == 0,
+ "Roundrobin: First assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[0]), assignment1->elems[0].partition, 0);
+ TEST_ASSERT(assignment1->elems[1].partition == 2,
+ "Roundrobin: Second assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[0]), assignment1->elems[1].partition, 2);
+
+ TEST_CALL_ERR__(rd_kafka_assignment(c[1], &assignment2));
+ TEST_ASSERT(assignment2->cnt == _PART_CNT / 2,
+ "Roundrobin: Assignment partitions for %s"
+ "is %d, but the expected is %d\n",
+ rd_kafka_name(c[1]), assignment2->cnt, _PART_CNT / 2);
+
+ TEST_ASSERT(assignment2->elems[0].partition == 1,
+ "Roundrobin: First assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[1]), assignment2->elems[0].partition, 1);
+ TEST_ASSERT(assignment2->elems[1].partition == 3,
+ "Roundrobin: Second assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[1]), assignment2->elems[1].partition, 3);
+
+ rd_kafka_topic_partition_list_destroy(assignment1);
+ rd_kafka_topic_partition_list_destroy(assignment2);
+}
+
+static void verify_range_assignment(rd_kafka_t *c[]) {
+ rd_kafka_topic_partition_list_t *assignment1;
+ rd_kafka_topic_partition_list_t *assignment2;
+
+ TEST_CALL_ERR__(rd_kafka_assignment(c[0], &assignment1));
+
+ TEST_ASSERT(assignment1->cnt == _PART_CNT / 2,
+ "Range: Assignment partition for %s"
+ "is %d, but the expected is %d\n",
+ rd_kafka_name(c[0]), assignment1->cnt, _PART_CNT / 2);
+
+ TEST_ASSERT(assignment1->elems[0].partition == 0,
+ "Range: First assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[0]), assignment1->elems[0].partition, 0);
+ TEST_ASSERT(assignment1->elems[1].partition == 1,
+ "Range: Second assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[0]), assignment1->elems[1].partition, 1);
+
+ TEST_CALL_ERR__(rd_kafka_assignment(c[1], &assignment2));
+ TEST_ASSERT(assignment2->cnt == _PART_CNT / 2,
+ "Range: Assignment partition for %s"
+ "is %d, but the expected is %d\n",
+ rd_kafka_name(c[1]), assignment2->cnt, _PART_CNT / 2);
+
+ TEST_ASSERT(assignment2->elems[0].partition == 2,
+ "Range: First assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[1]), assignment2->elems[0].partition, 2);
+ TEST_ASSERT(assignment2->elems[1].partition == 3,
+ "Range: Second assignment partition for %s"
+ "is %d, but the expectation is %d\n",
+ rd_kafka_name(c[1]), assignment2->elems[1].partition, 3);
+
+ rd_kafka_topic_partition_list_destroy(assignment1);
+ rd_kafka_topic_partition_list_destroy(assignment2);
+}
+
+static void do_test_stragety_ordering(const char *assignor,
+ const char *expected_assignor) {
+ rd_kafka_conf_t *conf;
+#define _C_CNT 2
+ rd_kafka_t *c[_C_CNT];
+
+ const char *topic;
+ const int msgcnt = 100;
+ int i;
+ uint64_t testid;
+
+ SUB_TEST("partition.assignment.strategy = %s", assignor);
+
+ testid = test_id_generate();
+
+ topic = test_mk_topic_name("0132-strategy_ordering", 1);
+ test_create_topic(NULL, topic, _PART_CNT, 1);
+ test_produce_msgs_easy(topic, testid, RD_KAFKA_PARTITION_UA, msgcnt);
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "partition.assignment.strategy", assignor);
+
+ for (i = 0; i < _C_CNT; i++) {
+ char name[16];
+
+ rd_snprintf(name, sizeof(name), "c%d", i);
+ test_conf_set(conf, "client.id", name);
+
+ c[i] = test_create_consumer(assignor, NULL,
+ rd_kafka_conf_dup(conf), NULL);
+
+ test_consumer_subscribe(c[i], topic);
+ }
+
+ rd_kafka_conf_destroy(conf);
+
+ /* Await assignments for all consumers */
+ for (i = 0; i < _C_CNT; i++) {
+ test_consumer_wait_assignment(c[i], rd_true);
+ }
+
+ if (!strcmp(expected_assignor, "range"))
+ verify_range_assignment(c);
+ else
+ verify_roundrobin_assignment(c);
+
+ for (i = 0; i < _C_CNT; i++) {
+ test_consumer_close(c[i]);
+ rd_kafka_destroy(c[i]);
+ }
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0132_strategy_ordering(int argc, char **argv) {
+ do_test_stragety_ordering("roundrobin,range", "roundrobin");
+ do_test_stragety_ordering("range,roundrobin", "range");
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c
new file mode 100644
index 000000000..850fa2761
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0133-ssl_keys.c
@@ -0,0 +1,113 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+#include "rdstring.h"
+
+/**
+ * @brief Tests reading SSL PKCS#12 keystore or PEM certificate and key from
+ * file. Decoding it with the correct password or not.
+ *
+ * Ensures it's read correctly on Windows too.
+ * See https://github.com/edenhill/librdkafka/issues/3992
+ */
+static void do_test_ssl_keys(const char *type, rd_bool_t correct_password) {
+#define TEST_FIXTURES_FOLDER "./fixtures"
+#define TEST_FIXTURES_SSL_FOLDER TEST_FIXTURES_FOLDER "/ssl/"
+#define TEST_FIXTURES_KEYSTORE_PASSWORD "use_strong_password_keystore_client"
+#define TEST_FIXTURES_KEY_PASSWORD "use_strong_password_keystore_client2"
+#define TEST_KEYSTORE_LOCATION TEST_FIXTURES_SSL_FOLDER "client.keystore.p12"
+#define TEST_CERTIFICATE_LOCATION \
+ TEST_FIXTURES_SSL_FOLDER "client2.certificate.pem"
+#define TEST_KEY_LOCATION TEST_FIXTURES_SSL_FOLDER "client2.key"
+
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ char errstr[256];
+
+ SUB_TEST_QUICK("keystore type = %s, correct password = %s", type,
+ RD_STR_ToF(correct_password));
+
+ test_conf_init(&conf, NULL, 30);
+ test_conf_set(conf, "security.protocol", "SSL");
+
+ if (!strcmp(type, "PKCS12")) {
+ test_conf_set(conf, "ssl.keystore.location",
+ TEST_KEYSTORE_LOCATION);
+ if (correct_password)
+ test_conf_set(conf, "ssl.keystore.password",
+ TEST_FIXTURES_KEYSTORE_PASSWORD);
+ else
+ test_conf_set(conf, "ssl.keystore.password",
+ TEST_FIXTURES_KEYSTORE_PASSWORD
+ " and more");
+ } else if (!strcmp(type, "PEM")) {
+ test_conf_set(conf, "ssl.certificate.location",
+ TEST_CERTIFICATE_LOCATION);
+ test_conf_set(conf, "ssl.key.location", TEST_KEY_LOCATION);
+ if (correct_password)
+ test_conf_set(conf, "ssl.key.password",
+ TEST_FIXTURES_KEY_PASSWORD);
+ else
+ test_conf_set(conf, "ssl.keystore.password",
+ TEST_FIXTURES_KEYSTORE_PASSWORD
+ " and more");
+ } else {
+ TEST_FAIL("Unexpected key type\n");
+ }
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ if ((rk != NULL) != correct_password) {
+ TEST_FAIL("Expected rd_kafka creation to %s\n",
+ correct_password ? "succeed" : "fail");
+ }
+
+ if (rk)
+ rd_kafka_destroy(rk);
+ else
+ rd_kafka_conf_destroy(conf);
+
+ SUB_TEST_PASS();
+
+#undef TEST_FIXTURES_KEYSTORE_PASSWORD
+#undef TEST_FIXTURES_KEY_PASSWORD
+#undef TEST_KEYSTORE_LOCATION
+#undef TEST_CERTIFICATE_LOCATION
+#undef TEST_KEY_LOCATION
+#undef TEST_FIXTURES_FOLDER
+#undef TEST_FIXTURES_SSL_FOLDER
+}
+
+
+int main_0133_ssl_keys(int argc, char **argv) {
+ do_test_ssl_keys("PKCS12", rd_true);
+ do_test_ssl_keys("PKCS12", rd_false);
+ do_test_ssl_keys("PEM", rd_true);
+ do_test_ssl_keys("PEM", rd_false);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c
new file mode 100644
index 000000000..d24d52c64
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0134-ssl_provider.c
@@ -0,0 +1,92 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+
+static void test_providers(const char *providers,
+ rd_bool_t must_pass,
+ rd_bool_t must_fail) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ char errstr[512];
+
+ SUB_TEST_QUICK("providers=%s, %s pass, %s fail", providers,
+ must_pass ? "must" : "may", must_fail ? "must" : "may");
+
+ test_conf_init(&conf, NULL, 10);
+
+ /* Enable debugging so we get some extra information on
+ * OpenSSL version and provider versions in the test log. */
+ test_conf_set(conf, "debug", "security");
+ test_conf_set(conf, "ssl.providers", providers);
+ test_conf_set(conf, "security.protocol", "ssl");
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+
+ TEST_SAY("rd_kafka_new(ssl.providers=%s): %s\n", providers,
+ rk ? "success" : errstr);
+
+ if (must_pass && !rk)
+ TEST_FAIL("Expected ssl.providers=%s to work, got %s",
+ providers, errstr);
+ else if (must_fail && rk)
+ TEST_FAIL("Expected ssl.providers=%s to fail", providers);
+
+ if (!rk)
+ rd_kafka_conf_destroy(conf);
+ else
+ rd_kafka_destroy(rk);
+
+ SUB_TEST_PASS();
+}
+
+int main_0134_ssl_provider(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ char errstr[512];
+ rd_kafka_conf_res_t res;
+
+ test_conf_init(&conf, NULL, 10);
+
+ /* Check that we're linked/built with OpenSSL 3.x */
+ res = rd_kafka_conf_set(conf, "ssl.providers", "a,b", errstr,
+ sizeof(errstr));
+ rd_kafka_conf_destroy(conf);
+ if (res == RD_KAFKA_CONF_INVALID) {
+ TEST_SKIP("%s\n", errstr);
+ return 0;
+ }
+
+ /* Must pass since 'default' is always built in */
+ test_providers("default", rd_true, rd_false);
+ /* May fail, if legacy provider is not available. */
+ test_providers("default,legacy", rd_false, rd_false);
+ /* Must fail since non-existent provider */
+ test_providers("default,thisProviderDoesNotExist", rd_false, rd_true);
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp
new file mode 100644
index 000000000..20e2e4f65
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0135-sasl_credentials.cpp
@@ -0,0 +1,143 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Verify that SASL credentials can be updated.
+ */
+#include "testcpp.h"
+
+
+
+class authErrorEventCb : public RdKafka::EventCb {
+ public:
+ authErrorEventCb() : error_seen(false) {
+ }
+
+ void event_cb(RdKafka::Event &event) {
+ switch (event.type()) {
+ case RdKafka::Event::EVENT_ERROR:
+ Test::Say(tostr() << "Error: " << RdKafka::err2str(event.err()) << ": "
+ << event.str() << "\n");
+ if (event.err() == RdKafka::ERR__AUTHENTICATION)
+ error_seen = true;
+ break;
+
+ case RdKafka::Event::EVENT_LOG:
+ Test::Say(tostr() << "Log: " << event.str() << "\n");
+ break;
+
+ default:
+ break;
+ }
+ }
+
+ bool error_seen;
+};
+
+
+/**
+ * @brief Test setting SASL credentials.
+ *
+ * 1. Switch out the proper username/password for invalid ones.
+ * 2. Verify that we get an auth failure.
+ * 3. Set the proper username/passwords.
+ * 4. Verify that we can now connect.
+ */
+static void do_test(bool set_after_auth_failure) {
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 30);
+
+ SUB_TEST_QUICK("set_after_auth_failure=%s",
+ set_after_auth_failure ? "yes" : "no");
+
+ /* Get the correct sasl.username and sasl.password */
+ std::string username, password;
+ if (conf->get("sasl.username", username) ||
+ conf->get("sasl.password", password)) {
+ delete conf;
+ SUB_TEST_SKIP("sasl.username and/or sasl.password not configured\n");
+ return;
+ }
+
+ /* Replace with incorrect ones */
+ Test::conf_set(conf, "sasl.username", "ThisIsNotRight");
+ Test::conf_set(conf, "sasl.password", "Neither Is This");
+
+ /* Set up an event callback to track authentication errors */
+ authErrorEventCb pEvent = authErrorEventCb();
+ std::string errstr;
+ if (conf->set("event_cb", &pEvent, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail(errstr);
+
+ /* Create client */
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ if (set_after_auth_failure) {
+ Test::Say("Awaiting auth failure\n");
+
+ while (!pEvent.error_seen)
+ p->poll(1000);
+
+ Test::Say("Authentication error seen\n");
+ }
+
+ Test::Say("Setting proper credentials\n");
+ RdKafka::Error *error = p->sasl_set_credentials(username, password);
+ if (error)
+ Test::Fail("Failed to set credentials: " + error->str());
+
+ Test::Say("Expecting successful cluster authentication\n");
+ const std::string clusterid = p->clusterid(5 * 1000);
+
+ if (clusterid.empty())
+ Test::Fail("Expected clusterid() to succeed");
+
+ delete p;
+
+ SUB_TEST_PASS();
+}
+
+extern "C" {
+int main_0135_sasl_credentials(int argc, char **argv) {
+ const char *mech = test_conf_get(NULL, "sasl.mechanism");
+
+ if (strcmp(mech, "PLAIN") && strncmp(mech, "SCRAM", 5)) {
+ Test::Skip("Test requires SASL PLAIN or SASL SCRAM\n");
+ return 0;
+ }
+
+ do_test(false);
+ do_test(true);
+
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c
new file mode 100644
index 000000000..2c29bd14a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0136-resolve_cb.c
@@ -0,0 +1,181 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "rdkafka.h"
+
+#ifndef _WIN32
+#include <netdb.h>
+#else
+#define WIN32_MEAN_AND_LEAN
+#include <winsock2.h>
+#include <ws2ipdef.h>
+#include <ws2tcpip.h>
+#endif
+
+/**
+ * @name Test a custom address resolution callback.
+ *
+ * The test sets bogus bootstrap.servers, uses the resolution callback to
+ * resolve to a bogus address, and then verifies that the address is passed
+ * to the connect callback. If the resolution callback is not invoked, or if the
+ * connect callback is not invoked with the output of the resolution callback,
+ * the test will fail.
+ */
+
+/**
+ * Stage of the test:
+ * 0: expecting resolve_cb to be invoked with TESTING_RESOLVE_CB:1234
+ * 1: expecting resolve_cb to be invoked with NULL, NULL
+ * 2: expecting connect_cb to invoked with socket address 127.1.2.3:57616
+ * 3: done
+ */
+static rd_atomic32_t stage;
+
+/** Exposes current test struct (in TLS) to callbacks. */
+static struct test *this_test;
+
+static int resolve_cb(const char *node,
+ const char *service,
+ const struct addrinfo *hints,
+ struct addrinfo **res,
+ void *opaque) {
+
+ int32_t cnt;
+
+ test_curr = this_test;
+
+ cnt = rd_atomic32_get(&stage);
+
+ TEST_SAY("resolve_cb invoked: node=%s service=%s stage=%d\n", node,
+ service, cnt);
+
+ if (cnt == 0) {
+ /* Stage 0: return a bogus address. */
+
+ struct sockaddr_in *addr;
+
+ TEST_ASSERT(node != NULL);
+ TEST_ASSERT(strcmp(node, "TESTING_RESOLVE_CB") == 0,
+ "unexpected node: %s", node);
+ TEST_ASSERT(service != NULL);
+ TEST_ASSERT(strcmp(service, "1234") == 0,
+ "unexpected service: %s", service);
+
+ addr = calloc(1, sizeof(struct sockaddr_in));
+ addr->sin_family = AF_INET;
+ addr->sin_port = htons(4321);
+ addr->sin_addr.s_addr = htonl(0x7f010203) /* 127.1.2.3 */;
+
+ *res = calloc(1, sizeof(struct addrinfo));
+ (*res)->ai_family = AF_INET;
+ (*res)->ai_socktype = SOCK_STREAM;
+ (*res)->ai_protocol = IPPROTO_TCP;
+ (*res)->ai_addrlen = sizeof(struct sockaddr_in);
+ (*res)->ai_addr = (struct sockaddr *)addr;
+ } else if (cnt == 1) {
+ /* Stage 1: free the bogus address returned in stage 0. */
+
+ TEST_ASSERT(node == NULL);
+ TEST_ASSERT(service == NULL);
+ TEST_ASSERT(hints == NULL);
+ free((*res)->ai_addr);
+ free(*res);
+ } else {
+ /* Stage 2+: irrelevant, simply fail to resolve. */
+
+ return -1;
+ }
+
+ rd_atomic32_add(&stage, 1);
+ return 0;
+}
+
+static int connect_cb(int s,
+ const struct sockaddr *addr,
+ int addrlen,
+ const char *id,
+ void *opaque) {
+ /* Stage 3: assert address is expected bogus. */
+
+ int32_t cnt;
+ struct sockaddr_in *addr_in;
+
+ test_curr = this_test;
+
+ cnt = rd_atomic32_get(&stage);
+
+ TEST_SAY("connect_cb invoked: stage=%d\n", cnt);
+
+ TEST_ASSERT(cnt == 2, "connect_cb invoked in unexpected stage: %d",
+ cnt);
+
+ TEST_ASSERT(addr->sa_family == AF_INET,
+ "address has unexpected type: %d", addr->sa_family);
+
+ addr_in = (struct sockaddr_in *)(void *)addr;
+
+ TEST_ASSERT(addr_in->sin_port == htons(4321),
+ "address has unexpected port: %d",
+ ntohs(addr_in->sin_port));
+ TEST_ASSERT(addr_in->sin_addr.s_addr == htonl(0x7f010203),
+ "address has unexpected host: 0x%x",
+ ntohl(addr_in->sin_addr.s_addr));
+
+ rd_atomic32_add(&stage, 1);
+
+ /* The test has succeeded. Just report the connection as faile
+ * for simplicity. */
+ return -1;
+}
+
+int main_0136_resolve_cb(int argc, char **argv) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+
+ this_test = test_curr;
+
+ rd_atomic32_init(&stage, 0);
+
+ test_conf_init(&conf, NULL, 0);
+ rd_kafka_conf_set_resolve_cb(conf, resolve_cb);
+ rd_kafka_conf_set_connect_cb(conf, connect_cb);
+
+ TEST_SAY("Setting bogus broker list\n");
+ test_conf_set(conf, "bootstrap.servers", "TESTING_RESOLVE_CB:1234");
+
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ while (rd_atomic32_get(&stage) != 3)
+ rd_sleep(1);
+
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c
new file mode 100644
index 000000000..4e3c855d2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0137-barrier_batch_consume.c
@@ -0,0 +1,608 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2022, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+typedef struct consumer_s {
+ const char *what;
+ rd_kafka_queue_t *rkq;
+ int timeout_ms;
+ int consume_msg_cnt;
+ int expected_msg_cnt;
+ rd_kafka_t *rk;
+ uint64_t testid;
+ test_msgver_t *mv;
+ struct test *test;
+} consumer_t;
+
+static int consumer_batch_queue(void *arg) {
+ consumer_t *arguments = arg;
+ int msg_cnt = 0;
+ int i;
+ test_timing_t t_cons;
+
+ rd_kafka_queue_t *rkq = arguments->rkq;
+ int timeout_ms = arguments->timeout_ms;
+ const int consume_msg_cnt = arguments->consume_msg_cnt;
+ rd_kafka_t *rk = arguments->rk;
+ uint64_t testid = arguments->testid;
+ rd_kafka_message_t **rkmessage =
+ malloc(consume_msg_cnt * sizeof(*rkmessage));
+
+ if (arguments->test)
+ test_curr = arguments->test;
+
+ TEST_SAY(
+ "%s calling consume_batch_queue(timeout=%d, msgs=%d) "
+ "and expecting %d messages back\n",
+ rd_kafka_name(rk), timeout_ms, consume_msg_cnt,
+ arguments->expected_msg_cnt);
+
+ TIMING_START(&t_cons, "CONSUME");
+ msg_cnt = (int)rd_kafka_consume_batch_queue(rkq, timeout_ms, rkmessage,
+ consume_msg_cnt);
+ TIMING_STOP(&t_cons);
+
+ TEST_SAY("%s consumed %d/%d/%d message(s)\n", rd_kafka_name(rk),
+ msg_cnt, arguments->consume_msg_cnt,
+ arguments->expected_msg_cnt);
+ TEST_ASSERT(msg_cnt == arguments->expected_msg_cnt,
+ "consumed %d messages, expected %d", msg_cnt,
+ arguments->expected_msg_cnt);
+
+ for (i = 0; i < msg_cnt; i++) {
+ if (test_msgver_add_msg(rk, arguments->mv, rkmessage[i]) == 0)
+ TEST_FAIL(
+ "The message is not from testid "
+ "%" PRId64,
+ testid);
+ rd_kafka_message_destroy(rkmessage[i]);
+ }
+
+ rd_free(rkmessage);
+
+ return 0;
+}
+
+
+static void do_test_consume_batch_with_seek(void) {
+ rd_kafka_queue_t *rkq;
+ const char *topic;
+ rd_kafka_t *consumer;
+ int p;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ consumer_t consumer_args = RD_ZERO_INIT;
+ test_msgver_t mv;
+ thrd_t thread_id;
+ rd_kafka_error_t *err;
+ rd_kafka_topic_partition_list_t *seek_toppars;
+ const int partition_cnt = 2;
+ const int timeout_ms = 10000;
+ const int consume_msg_cnt = 10;
+ const int produce_msg_cnt = 8;
+ const int32_t seek_partition = 0;
+ const int64_t seek_offset = 1;
+ const int expected_msg_cnt = produce_msg_cnt - seek_offset;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ testid = test_id_generate();
+ test_msgver_init(&mv, testid);
+
+ /* Produce messages */
+ topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
+
+ test_create_topic(NULL, topic, partition_cnt, 1);
+
+ for (p = 0; p < partition_cnt; p++)
+ test_produce_msgs_easy(topic, testid, p,
+ produce_msg_cnt / partition_cnt);
+
+ /* Create consumers */
+ consumer = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(consumer, topic);
+ test_consumer_wait_assignment(consumer, rd_false);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_get_consumer(consumer);
+
+ consumer_args.what = "CONSUMER";
+ consumer_args.rkq = rkq;
+ consumer_args.timeout_ms = timeout_ms;
+ consumer_args.consume_msg_cnt = consume_msg_cnt;
+ consumer_args.expected_msg_cnt = expected_msg_cnt;
+ consumer_args.rk = consumer;
+ consumer_args.testid = testid;
+ consumer_args.mv = &mv;
+ consumer_args.test = test_curr;
+ if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
+ thrd_success)
+ TEST_FAIL("Failed to create thread for %s", "CONSUMER");
+
+ seek_toppars = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(seek_toppars, topic, seek_partition);
+ rd_kafka_topic_partition_list_set_offset(seek_toppars, topic,
+ seek_partition, seek_offset);
+ err = rd_kafka_seek_partitions(consumer, seek_toppars, 2000);
+
+ TEST_ASSERT(
+ !err, "Failed to seek partition %d for topic %s to offset %" PRId64,
+ seek_partition, topic, seek_offset);
+
+ thrd_join(thread_id, NULL);
+
+ test_msgver_verify("CONSUME", &mv,
+ TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
+ TEST_MSGVER_BY_OFFSET,
+ 0, expected_msg_cnt);
+ test_msgver_clear(&mv);
+
+ rd_kafka_topic_partition_list_destroy(seek_toppars);
+
+ rd_kafka_queue_destroy(rkq);
+
+ test_consumer_close(consumer);
+
+ rd_kafka_destroy(consumer);
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_test_consume_batch_with_pause_and_resume_different_batch(void) {
+ rd_kafka_queue_t *rkq;
+ const char *topic;
+ rd_kafka_t *consumer;
+ int p;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ consumer_t consumer_args = RD_ZERO_INIT;
+ test_msgver_t mv;
+ thrd_t thread_id;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *pause_partition_list;
+ const int timeout_ms = 2000;
+ const int consume_msg_cnt = 10;
+ const int produce_msg_cnt = 8;
+ const int partition_cnt = 2;
+ const int expected_msg_cnt = 4;
+ int32_t pause_partition = 0;
+ int32_t running_partition = 1;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ testid = test_id_generate();
+ test_msgver_init(&mv, testid);
+
+ /* Produce messages */
+ topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
+
+ test_create_topic(NULL, topic, partition_cnt, 1);
+
+ for (p = 0; p < partition_cnt; p++)
+ test_produce_msgs_easy(topic, testid, p,
+ produce_msg_cnt / partition_cnt);
+
+ /* Create consumers */
+ consumer = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(consumer, topic);
+ test_consumer_wait_assignment(consumer, rd_false);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_get_consumer(consumer);
+
+ consumer_args.what = "CONSUMER";
+ consumer_args.rkq = rkq;
+ consumer_args.timeout_ms = timeout_ms;
+ consumer_args.consume_msg_cnt = consume_msg_cnt;
+ consumer_args.expected_msg_cnt = expected_msg_cnt;
+ consumer_args.rk = consumer;
+ consumer_args.testid = testid;
+ consumer_args.mv = &mv;
+ consumer_args.test = test_curr;
+ if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
+ thrd_success)
+ TEST_FAIL("Failed to create thread for %s", "CONSUMER");
+
+ pause_partition_list = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(pause_partition_list, topic,
+ pause_partition);
+
+ rd_sleep(1);
+ err = rd_kafka_pause_partitions(consumer, pause_partition_list);
+
+ TEST_ASSERT(!err, "Failed to pause partition %d for topic %s",
+ pause_partition, topic);
+
+ thrd_join(thread_id, NULL);
+
+ test_msgver_verify_part("CONSUME", &mv,
+ TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
+ TEST_MSGVER_BY_OFFSET,
+ topic, running_partition, 0, expected_msg_cnt);
+
+ test_msgver_clear(&mv);
+ test_msgver_init(&mv, testid);
+ consumer_args.mv = &mv;
+
+ err = rd_kafka_resume_partitions(consumer, pause_partition_list);
+
+ TEST_ASSERT(!err, "Failed to resume partition %d for topic %s",
+ pause_partition, topic);
+
+ consumer_batch_queue(&consumer_args);
+
+ test_msgver_verify_part("CONSUME", &mv,
+ TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
+ TEST_MSGVER_BY_OFFSET,
+ topic, pause_partition, 0, expected_msg_cnt);
+
+ rd_kafka_topic_partition_list_destroy(pause_partition_list);
+
+ test_msgver_clear(&mv);
+
+ rd_kafka_queue_destroy(rkq);
+
+ test_consumer_close(consumer);
+
+ rd_kafka_destroy(consumer);
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_test_consume_batch_with_pause_and_resume_same_batch(void) {
+ rd_kafka_queue_t *rkq;
+ const char *topic;
+ rd_kafka_t *consumer;
+ int p;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ consumer_t consumer_args = RD_ZERO_INIT;
+ test_msgver_t mv;
+ thrd_t thread_id;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_partition_list_t *pause_partition_list;
+ const int timeout_ms = 10000;
+ const int consume_msg_cnt = 10;
+ const int produce_msg_cnt = 8;
+ const int partition_cnt = 2;
+ int32_t pause_partition = 0;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ testid = test_id_generate();
+ test_msgver_init(&mv, testid);
+
+ /* Produce messages */
+ topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
+
+ test_create_topic(NULL, topic, partition_cnt, 1);
+
+ for (p = 0; p < partition_cnt; p++)
+ test_produce_msgs_easy(topic, testid, p,
+ produce_msg_cnt / partition_cnt);
+
+ /* Create consumers */
+ consumer = test_create_consumer(topic, NULL, conf, NULL);
+
+ test_consumer_subscribe(consumer, topic);
+ test_consumer_wait_assignment(consumer, rd_false);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_get_consumer(consumer);
+
+ consumer_args.what = "CONSUMER";
+ consumer_args.rkq = rkq;
+ consumer_args.timeout_ms = timeout_ms;
+ consumer_args.consume_msg_cnt = consume_msg_cnt;
+ consumer_args.expected_msg_cnt = produce_msg_cnt;
+ consumer_args.rk = consumer;
+ consumer_args.testid = testid;
+ consumer_args.mv = &mv;
+ consumer_args.test = test_curr;
+ if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
+ thrd_success)
+ TEST_FAIL("Failed to create thread for %s", "CONSUMER");
+
+ pause_partition_list = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(pause_partition_list, topic,
+ pause_partition);
+
+ rd_sleep(1);
+ err = rd_kafka_pause_partitions(consumer, pause_partition_list);
+
+ TEST_ASSERT(!err, "Failed to pause partition %d for topic %s",
+ pause_partition, topic);
+
+ rd_sleep(1);
+
+ err = rd_kafka_resume_partitions(consumer, pause_partition_list);
+
+ TEST_ASSERT(!err, "Failed to resume partition %d for topic %s",
+ pause_partition, topic);
+
+ thrd_join(thread_id, NULL);
+
+ test_msgver_verify("CONSUME", &mv,
+ TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
+ TEST_MSGVER_BY_OFFSET,
+ 0, produce_msg_cnt);
+
+ rd_kafka_topic_partition_list_destroy(pause_partition_list);
+
+ test_msgver_clear(&mv);
+
+ rd_kafka_queue_destroy(rkq);
+
+ test_consumer_close(consumer);
+
+ rd_kafka_destroy(consumer);
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_test_consume_batch_store_offset(void) {
+ rd_kafka_queue_t *rkq;
+ const char *topic;
+ rd_kafka_t *consumer;
+ int p;
+ int i;
+ uint64_t testid;
+ rd_kafka_conf_t *conf;
+ consumer_t consumer_args = RD_ZERO_INIT;
+ test_msgver_t mv;
+ const int partition_cnt = 1;
+ const int timeout_ms = 10000;
+ const int consume_msg_cnt = 4;
+ const int no_of_consume = 2;
+ const int produce_msg_cnt = 8;
+ const int expected_msg_cnt = produce_msg_cnt;
+
+ SUB_TEST();
+
+ test_conf_init(&conf, NULL, 60);
+ test_conf_set(conf, "enable.auto.commit", "false");
+ test_conf_set(conf, "enable.auto.offset.store", "true");
+ test_conf_set(conf, "auto.offset.reset", "earliest");
+
+ testid = test_id_generate();
+ test_msgver_init(&mv, testid);
+
+ /* Produce messages */
+ topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
+
+ test_create_topic(NULL, topic, partition_cnt, 1);
+
+ for (p = 0; p < partition_cnt; p++)
+ test_produce_msgs_easy(topic, testid, p,
+ produce_msg_cnt / partition_cnt);
+
+ for (i = 0; i < no_of_consume; i++) {
+
+ /* Create consumers */
+ consumer = test_create_consumer(topic, NULL,
+ rd_kafka_conf_dup(conf), NULL);
+ test_consumer_subscribe(consumer, topic);
+ test_consumer_wait_assignment(consumer, rd_false);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_get_consumer(consumer);
+
+ consumer_args.what = "CONSUMER";
+ consumer_args.rkq = rkq;
+ consumer_args.timeout_ms = timeout_ms;
+ consumer_args.consume_msg_cnt = consume_msg_cnt;
+ consumer_args.expected_msg_cnt =
+ produce_msg_cnt / no_of_consume;
+ consumer_args.rk = consumer;
+ consumer_args.testid = testid;
+ consumer_args.mv = &mv;
+ consumer_args.test = test_curr;
+
+ consumer_batch_queue(&consumer_args);
+ rd_kafka_commit(consumer, NULL, rd_false);
+
+ rd_kafka_queue_destroy(rkq);
+ test_consumer_close(consumer);
+ rd_kafka_destroy(consumer);
+ }
+
+ test_msgver_verify("CONSUME", &mv,
+ TEST_MSGVER_ORDER | TEST_MSGVER_DUP |
+ TEST_MSGVER_BY_OFFSET,
+ 0, expected_msg_cnt);
+
+ test_msgver_clear(&mv);
+
+ rd_kafka_conf_destroy(conf);
+
+ SUB_TEST_PASS();
+}
+
+
+static void do_test_consume_batch_control_msgs(void) {
+ const char *topic = test_mk_topic_name("0137-barrier_batch_consume", 1);
+ const int32_t partition = 0;
+ rd_kafka_conf_t *conf, *c_conf;
+ rd_kafka_t *producer, *consumer;
+ uint64_t testid;
+ const int msgcnt[2] = {2, 3};
+ test_msgver_t mv;
+ rd_kafka_queue_t *rkq;
+ consumer_t consumer_args = RD_ZERO_INIT;
+ const int partition_cnt = 1;
+ const int timeout_ms = 5000;
+ const int consume_msg_cnt = 10;
+ const int expected_msg_cnt = 2;
+ int32_t pause_partition = 0;
+ int64_t expected_offset = msgcnt[0] + msgcnt[1] + 2;
+ rd_kafka_topic_partition_list_t *pause_partition_list;
+ rd_kafka_resp_err_t err;
+ thrd_t thread_id;
+
+ SUB_TEST("Testing control msgs flow");
+
+ testid = test_id_generate();
+
+ test_conf_init(&conf, NULL, 30);
+
+ test_conf_set(conf, "transactional.id", topic);
+ test_conf_set(conf, "batch.num.messages", "1");
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ producer = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ test_create_topic(producer, topic, partition_cnt, 1);
+
+ TEST_CALL_ERROR__(rd_kafka_init_transactions(producer, 30 * 1000));
+
+ /*
+ * Transaction 1
+ */
+ TEST_SAY("Transaction 1: %d msgs\n", msgcnt[0]);
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(producer));
+ test_produce_msgs2(producer, topic, testid, partition, 0, msgcnt[0],
+ NULL, 0);
+ TEST_CALL_ERROR__(rd_kafka_commit_transaction(producer, -1));
+
+ /*
+ * Transaction 2
+ */
+ TEST_SAY("Transaction 2: %d msgs\n", msgcnt[1]);
+ TEST_CALL_ERROR__(rd_kafka_begin_transaction(producer));
+ test_produce_msgs2(producer, topic, testid, partition, 0, msgcnt[1],
+ NULL, 0);
+ TEST_CALL_ERROR__(rd_kafka_abort_transaction(producer, -1));
+
+ rd_kafka_destroy(producer);
+
+ rd_sleep(2);
+
+ /*
+ * Consumer
+ */
+ test_conf_init(&c_conf, NULL, 0);
+ test_conf_set(c_conf, "enable.auto.commit", "false");
+ test_conf_set(c_conf, "enable.auto.offset.store", "true");
+ test_conf_set(c_conf, "auto.offset.reset", "earliest");
+ consumer = test_create_consumer(topic, NULL, c_conf, NULL);
+
+ test_consumer_subscribe(consumer, topic);
+ test_consumer_wait_assignment(consumer, rd_false);
+
+ /* Create generic consume queue */
+ rkq = rd_kafka_queue_get_consumer(consumer);
+
+ test_msgver_init(&mv, testid);
+ test_msgver_ignore_eof(&mv);
+
+ consumer_args.what = "CONSUMER";
+ consumer_args.rkq = rkq;
+ consumer_args.timeout_ms = timeout_ms;
+ consumer_args.consume_msg_cnt = consume_msg_cnt;
+ consumer_args.expected_msg_cnt = expected_msg_cnt;
+ consumer_args.rk = consumer;
+ consumer_args.testid = testid;
+ consumer_args.mv = &mv;
+ consumer_args.test = test_curr;
+
+
+ if (thrd_create(&thread_id, consumer_batch_queue, &consumer_args) !=
+ thrd_success)
+ TEST_FAIL("Failed to create thread for %s", "CONSUMER");
+
+ pause_partition_list = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(pause_partition_list, topic,
+ pause_partition);
+
+ rd_sleep(1);
+ err = rd_kafka_pause_partitions(consumer, pause_partition_list);
+
+ TEST_ASSERT(!err, "Failed to pause partition %d for topic %s",
+ pause_partition, topic);
+
+ rd_sleep(1);
+
+ err = rd_kafka_resume_partitions(consumer, pause_partition_list);
+
+ TEST_ASSERT(!err, "Failed to resume partition %d for topic %s",
+ pause_partition, topic);
+
+ thrd_join(thread_id, NULL);
+
+ rd_kafka_commit(consumer, NULL, rd_false);
+
+ rd_kafka_committed(consumer, pause_partition_list, timeout_ms);
+
+ TEST_ASSERT(pause_partition_list->elems[0].offset == expected_offset,
+ "Expected offset should be %" PRId64 ", but it is %" PRId64,
+ expected_offset, pause_partition_list->elems[0].offset);
+
+ rd_kafka_topic_partition_list_destroy(pause_partition_list);
+
+ rd_kafka_queue_destroy(rkq);
+
+ test_msgver_clear(&mv);
+
+ test_consumer_close(consumer);
+
+ rd_kafka_destroy(consumer);
+
+ SUB_TEST_PASS();
+}
+
+
+int main_0137_barrier_batch_consume(int argc, char **argv) {
+ do_test_consume_batch_with_seek();
+ do_test_consume_batch_store_offset();
+ do_test_consume_batch_with_pause_and_resume_different_batch();
+ do_test_consume_batch_with_pause_and_resume_same_batch();
+ do_test_consume_batch_control_msgs();
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c b/fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c
new file mode 100644
index 000000000..0f9021de9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/0138-admin_mock.c
@@ -0,0 +1,189 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2023, Confluent Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+#include "../src/rdkafka_proto.h"
+
+#include <stdarg.h>
+
+/**
+ * @brief Verify that a error codes returned by the OffsetCommit call of
+ * AlterConsumerGroupOffsets return the corresponding error code
+ * in the passed partition.
+ */
+static void do_test_AlterConsumerGroupOffsets_errors(int req_timeout_ms) {
+#define TEST_ERR_SIZE 10
+ int i, j;
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *rk;
+ rd_kafka_queue_t *q;
+ rd_kafka_mock_cluster_t *mcluster;
+ rd_kafka_topic_partition_list_t *to_alter;
+ const rd_kafka_topic_partition_list_t *partitions;
+ rd_kafka_AlterConsumerGroupOffsets_t *cgoffsets;
+ const rd_kafka_AlterConsumerGroupOffsets_result_t *res;
+ const rd_kafka_group_result_t **gres;
+ size_t gres_cnt;
+ char errstr[512];
+ const char *bootstraps;
+ const char *topic = "test";
+ const char *group_id = topic;
+ rd_kafka_AdminOptions_t *options = NULL;
+ rd_kafka_event_t *rkev = NULL;
+ rd_kafka_resp_err_t errs[TEST_ERR_SIZE] = {
+ RD_KAFKA_RESP_ERR_COORDINATOR_LOAD_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_REBALANCE_IN_PROGRESS,
+ RD_KAFKA_RESP_ERR_COORDINATOR_NOT_AVAILABLE,
+ RD_KAFKA_RESP_ERR_NOT_COORDINATOR,
+ RD_KAFKA_RESP_ERR_INVALID_GROUP_ID,
+ RD_KAFKA_RESP_ERR_INVALID_COMMIT_OFFSET_SIZE,
+ RD_KAFKA_RESP_ERR_GROUP_AUTHORIZATION_FAILED,
+ RD_KAFKA_RESP_ERR_UNKNOWN_TOPIC_OR_PART,
+ RD_KAFKA_RESP_ERR_OFFSET_METADATA_TOO_LARGE,
+ RD_KAFKA_RESP_ERR_TOPIC_AUTHORIZATION_FAILED};
+
+ SUB_TEST_QUICK("request timeout %d", req_timeout_ms);
+
+ test_conf_init(&conf, NULL, 60);
+
+ mcluster = test_mock_cluster_new(1, &bootstraps);
+
+ rd_kafka_mock_topic_create(mcluster, topic, 1, 1);
+
+ test_conf_set(conf, "bootstrap.servers", bootstraps);
+
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ q = rd_kafka_queue_get_main(rk);
+
+ if (req_timeout_ms > 0) {
+ /* Admin options */
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_ALTERCONSUMERGROUPOFFSETS);
+ TEST_CALL_ERR__(rd_kafka_AdminOptions_set_request_timeout(
+ options, req_timeout_ms, errstr, sizeof(errstr)));
+ }
+
+
+ for (i = 0; i < TEST_ERR_SIZE; i++) {
+ /* Offsets to alter */
+ to_alter = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(to_alter, topic, 0)->offset =
+ 3;
+ cgoffsets =
+ rd_kafka_AlterConsumerGroupOffsets_new(group_id, to_alter);
+
+ TEST_SAY("Call AlterConsumerGroupOffsets, err %s\n",
+ rd_kafka_err2name(errs[i]));
+ rd_kafka_mock_push_request_errors(
+ mcluster, RD_KAFKAP_OffsetCommit, 1, errs[i]);
+ rd_kafka_AlterConsumerGroupOffsets(rk, &cgoffsets, 1, options,
+ q);
+
+ rd_kafka_topic_partition_list_destroy(to_alter);
+ rd_kafka_AlterConsumerGroupOffsets_destroy(cgoffsets);
+
+ TEST_SAY("AlterConsumerGroupOffsets.queue_poll, err %s\n",
+ rd_kafka_err2name(errs[i]));
+ /* Poll result queue for AlterConsumerGroupOffsets result.
+ * Print but otherwise ignore other event types
+ * (typically generic Error events). */
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout_multip(10 * 1000));
+ TEST_SAY("AlterConsumerGroupOffsets: got %s\n",
+ rd_kafka_event_name(rkev));
+ if (rkev == NULL)
+ continue;
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("%s: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+
+ if (rd_kafka_event_type(rkev) ==
+ RD_KAFKA_EVENT_ALTERCONSUMERGROUPOFFSETS_RESULT)
+ break;
+
+ rd_kafka_event_destroy(rkev);
+ }
+
+ /* Convert event to proper result */
+ res = rd_kafka_event_AlterConsumerGroupOffsets_result(rkev);
+ TEST_ASSERT(res,
+ "expected AlterConsumerGroupOffsets_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ gres = rd_kafka_AlterConsumerGroupOffsets_result_groups(
+ res, &gres_cnt);
+ TEST_ASSERT(gres && gres_cnt == 1,
+ "expected gres_cnt == 1, not %" PRIusz, gres_cnt);
+
+ partitions = rd_kafka_group_result_partitions(gres[0]);
+
+ /* Verify expected errors */
+ for (j = 0; j < partitions->cnt; j++) {
+ rd_kafka_topic_partition_t *rktpar =
+ &partitions->elems[j];
+ TEST_ASSERT_LATER(rktpar->err == errs[i],
+ "Result %s [%" PRId32
+ "] has error %s, "
+ "expected %s",
+ topic, 0,
+ rd_kafka_err2name(rktpar->err),
+ rd_kafka_err2name(errs[i]));
+ }
+
+ rd_kafka_event_destroy(rkev);
+ }
+ if (options)
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_queue_destroy(q);
+
+ rd_kafka_destroy(rk);
+
+ test_mock_cluster_destroy(mcluster);
+
+ TEST_LATER_CHECK();
+
+ SUB_TEST_PASS();
+
+#undef TEST_ERR_SIZE
+}
+
+int main_0138_admin_mock(int argc, char **argv) {
+
+ if (test_needs_auth()) {
+ TEST_SKIP("Mock cluster does not support SSL/SASL\n");
+ return 0;
+ }
+
+ do_test_AlterConsumerGroupOffsets_errors(-1);
+ do_test_AlterConsumerGroupOffsets_errors(1000);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c b/fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c
new file mode 100644
index 000000000..ad2b7e870
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/1000-unktopic.c
@@ -0,0 +1,164 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * Tests that producing to unknown topic fails.
+ * Issue #39
+ *
+ * NOTE! This test requires auto.create.topics.enable=false to be
+ * configured on the broker!
+ */
+
+#define _GNU_SOURCE
+#include <sys/time.h>
+#include <time.h>
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+static int msgs_wait = 0; /* bitmask */
+
+/**
+ * Delivery report callback.
+ * Called for each message once to signal its delivery status.
+ */
+static void dr_cb(rd_kafka_t *rk,
+ void *payload,
+ size_t len,
+ rd_kafka_resp_err_t err,
+ void *opaque,
+ void *msg_opaque) {
+ int msgid = *(int *)msg_opaque;
+
+ free(msg_opaque);
+
+ if (!(msgs_wait & (1 << msgid)))
+ TEST_FAIL(
+ "Unwanted delivery report for message #%i "
+ "(waiting for 0x%x)\n",
+ msgid, msgs_wait);
+
+ TEST_SAY("Delivery report for message #%i: %s\n", msgid,
+ rd_kafka_err2str(err));
+
+ msgs_wait &= ~(1 << msgid);
+
+ if (err != RD_KAFKA_RESP_ERR__UNKNOWN_TOPIC)
+ TEST_FAIL("Message #%i failed with unexpected error %s\n",
+ msgid, rd_kafka_err2str(err));
+}
+
+
+int main(int argc, char **argv) {
+ char topic[64];
+ int partition = 0;
+ int r;
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ rd_kafka_conf_t *conf;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ char msg[128];
+ int msgcnt = 10;
+ int i;
+
+ /* Generate unique topic name */
+ test_conf_init(&conf, &topic_conf, 10);
+
+ rd_snprintf(topic, sizeof(topic), "rdkafkatest1_unk_%x%x", rand(),
+ rand());
+
+ TEST_SAY(
+ "\033[33mNOTE! This test requires "
+ "auto.create.topics.enable=false to be configured on "
+ "the broker!\033[0m\n");
+
+ /* Set delivery report callback */
+ rd_kafka_conf_set_dr_cb(conf, dr_cb);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n", strerror(errno));
+
+ /* Produce a message */
+ for (i = 0; i < msgcnt; i++) {
+ int *msgidp = malloc(sizeof(*msgidp));
+ *msgidp = i;
+ rd_snprintf(msg, sizeof(msg), "%s test message #%i", argv[0],
+ i);
+ r = rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, msg,
+ strlen(msg), NULL, 0, msgidp);
+ if (r == -1) {
+ if (errno == ENOENT)
+ TEST_SAY(
+ "Failed to produce message #%i: "
+ "unknown topic: good!\n",
+ i);
+ else
+ TEST_FAIL("Failed to produce message #%i: %s\n",
+ i, strerror(errno));
+ } else {
+ if (i > 5)
+ TEST_FAIL(
+ "Message #%i produced: "
+ "should've failed\n",
+ i);
+ msgs_wait |= (1 << i);
+ }
+
+ /* After half the messages: sleep to allow the metadata
+ * to be fetched from broker and update the actual partition
+ * count: this will make subsequent produce() calls fail
+ * immediately. */
+ if (i == 5)
+ sleep(2);
+ }
+
+ /* Wait for messages to time out */
+ while (rd_kafka_outq_len(rk) > 0)
+ rd_kafka_poll(rk, 50);
+
+ if (msgs_wait != 0)
+ TEST_FAIL("Still waiting for messages: 0x%x\n", msgs_wait);
+
+ /* Destroy topic */
+ rd_kafka_topic_destroy(rkt);
+
+ /* Destroy rdkafka instance */
+ TEST_SAY("Destroying kafka instance %s\n", rd_kafka_name(rk));
+ rd_kafka_destroy(rk);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp
new file mode 100644
index 000000000..9659ade97
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/8000-idle.cpp
@@ -0,0 +1,60 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2016, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "testcpp.h"
+#include <cstring>
+
+/**
+ * Manual test: idle producer
+ */
+
+
+static void do_test_idle_producer() {
+ RdKafka::Conf *conf;
+ Test::conf_init(&conf, NULL, 0);
+
+ std::string errstr;
+ RdKafka::Producer *p = RdKafka::Producer::create(conf, errstr);
+ if (!p)
+ Test::Fail("Failed to create Producer: " + errstr);
+ delete conf;
+
+ while (true)
+ p->poll(1000);
+
+ delete p;
+}
+
+
+extern "C" {
+int main_8000_idle(int argc, char **argv) {
+ do_test_idle_producer();
+ return 0;
+}
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt
new file mode 100644
index 000000000..a9dccfa5e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/CMakeLists.txt
@@ -0,0 +1,154 @@
+set(
+ sources
+ 0000-unittests.c
+ 0001-multiobj.c
+ 0002-unkpart.c
+ 0003-msgmaxsize.c
+ 0004-conf.c
+ 0005-order.c
+ 0006-symbols.c
+ 0007-autotopic.c
+ 0008-reqacks.c
+ 0009-mock_cluster.c
+ 0011-produce_batch.c
+ 0012-produce_consume.c
+ 0013-null-msgs.c
+ 0014-reconsume-191.c
+ 0015-offset_seeks.c
+ 0016-client_swname.c
+ 0017-compression.c
+ 0018-cgrp_term.c
+ 0019-list_groups.c
+ 0020-destroy_hang.c
+ 0021-rkt_destroy.c
+ 0022-consume_batch.c
+ 0025-timers.c
+ 0026-consume_pause.c
+ 0028-long_topicnames.c
+ 0029-assign_offset.c
+ 0030-offset_commit.c
+ 0031-get_offsets.c
+ 0033-regex_subscribe.c
+ 0034-offset_reset.c
+ 0035-api_version.c
+ 0036-partial_fetch.c
+ 0037-destroy_hang_local.c
+ 0038-performance.c
+ 0039-event.c
+ 0040-io_event.c
+ 0041-fetch_max_bytes.c
+ 0042-many_topics.c
+ 0043-no_connection.c
+ 0044-partition_cnt.c
+ 0045-subscribe_update.c
+ 0046-rkt_cache.c
+ 0047-partial_buf_tmout.c
+ 0048-partitioner.c
+ 0049-consume_conn_close.c
+ 0050-subscribe_adds.c
+ 0051-assign_adds.c
+ 0052-msg_timestamps.c
+ 0053-stats_cb.cpp
+ 0054-offset_time.cpp
+ 0055-producer_latency.c
+ 0056-balanced_group_mt.c
+ 0057-invalid_topic.cpp
+ 0058-log.cpp
+ 0059-bsearch.cpp
+ 0060-op_prio.cpp
+ 0061-consumer_lag.cpp
+ 0062-stats_event.c
+ 0063-clusterid.cpp
+ 0064-interceptors.c
+ 0065-yield.cpp
+ 0066-plugins.cpp
+ 0067-empty_topic.cpp
+ 0068-produce_timeout.c
+ 0069-consumer_add_parts.c
+ 0070-null_empty.cpp
+ 0072-headers_ut.c
+ 0073-headers.c
+ 0074-producev.c
+ 0075-retry.c
+ 0076-produce_retry.c
+ 0077-compaction.c
+ 0078-c_from_cpp.cpp
+ 0079-fork.c
+ 0080-admin_ut.c
+ 0081-admin.c
+ 0082-fetch_max_bytes.cpp
+ 0083-cb_event.c
+ 0084-destroy_flags.c
+ 0085-headers.cpp
+ 0086-purge.c
+ 0088-produce_metadata_timeout.c
+ 0089-max_poll_interval.c
+ 0090-idempotence.c
+ 0091-max_poll_interval_timeout.c
+ 0092-mixed_msgver.c
+ 0093-holb.c
+ 0094-idempotence_msg_timeout.c
+ 0095-all_brokers_down.cpp
+ 0097-ssl_verify.cpp
+ 0098-consumer-txn.cpp
+ 0099-commit_metadata.c
+ 0100-thread_interceptors.cpp
+ 0101-fetch-from-follower.cpp
+ 0102-static_group_rebalance.c
+ 0103-transactions.c
+ 0104-fetch_from_follower_mock.c
+ 0105-transactions_mock.c
+ 0106-cgrp_sess_timeout.c
+ 0107-topic_recreate.c
+ 0109-auto_create_topics.cpp
+ 0110-batch_size.cpp
+ 0111-delay_create_topics.cpp
+ 0112-assign_unknown_part.c
+ 0113-cooperative_rebalance.cpp
+ 0114-sticky_partitioning.cpp
+ 0115-producer_auth.cpp
+ 0116-kafkaconsumer_close.cpp
+ 0117-mock_errors.c
+ 0118-commit_rebalance.c
+ 0119-consumer_auth.cpp
+ 0120-asymmetric_subscription.c
+ 0121-clusterid.c
+ 0122-buffer_cleaning_after_rebalance.c
+ 0123-connections_max_idle.c
+ 0124-openssl_invalid_engine.c
+ 0125-immediate_flush.c
+ 0126-oauthbearer_oidc.c
+ 0128-sasl_callback_queue.cpp
+ 0129-fetch_aborted_msgs.c
+ 0130-store_offsets.c
+ 0131-connect_timeout.c
+ 0132-strategy_ordering.c
+ 0133-ssl_keys.c
+ 0134-ssl_provider.c
+ 0135-sasl_credentials.cpp
+ 0136-resolve_cb.c
+ 0137-barrier_batch_consume.c
+ 0138-admin_mock.c
+ 8000-idle.cpp
+ test.c
+ testcpp.cpp
+ rusage.c
+)
+
+if(NOT WIN32)
+ list(APPEND sources sockem.c sockem_ctrl.c)
+else()
+ list(APPEND sources ../src/tinycthread.c ../src/tinycthread_extra.c)
+endif()
+
+add_executable(test-runner ${sources})
+target_link_libraries(test-runner PUBLIC rdkafka++)
+
+add_test(NAME RdKafkaTestInParallel COMMAND test-runner -p5)
+add_test(NAME RdKafkaTestSequentially COMMAND test-runner -p1)
+add_test(NAME RdKafkaTestBrokerLess COMMAND test-runner -p5 -l)
+
+if(NOT WIN32 AND NOT APPLE)
+ set(tests_OUTPUT_DIRECTORY ${CMAKE_CURRENT_BINARY_DIR})
+ add_subdirectory(interceptor_test)
+endif()
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py b/fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py
new file mode 100644
index 000000000..696fa88cc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/LibrdkafkaTestApp.py
@@ -0,0 +1,256 @@
+#!/usr/bin/env python3
+#
+# librdkafka test trivup app module
+#
+# Requires:
+# trivup python module
+# gradle in your PATH
+
+from trivup.trivup import App, UuidAllocator
+from trivup.apps.ZookeeperApp import ZookeeperApp
+from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
+from trivup.apps.KerberosKdcApp import KerberosKdcApp
+from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
+
+import json
+
+
+class LibrdkafkaTestApp(App):
+ """ Sets up and executes the librdkafka regression tests.
+ Assumes tests are in the current directory.
+ Must be instantiated after ZookeeperApp and KafkaBrokerApp """
+
+ def __init__(self, cluster, version, conf=None,
+ tests=None, scenario="default"):
+ super(LibrdkafkaTestApp, self).__init__(cluster, conf=conf)
+
+ self.appid = UuidAllocator(self.cluster).next(self, trunc=8)
+ self.autostart = False
+ self.local_tests = True
+ self.test_mode = conf.get('test_mode', 'bare')
+ self.version = version
+
+ # Generate test config file
+ conf_blob = list()
+ self.security_protocol = 'PLAINTEXT'
+
+ f, self.test_conf_file = self.open_file('test.conf', 'perm')
+ f.write('broker.address.family=v4\n'.encode('ascii'))
+ f.write(('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
+ f.write('test.timeout.multiplier=2\n'.encode('ascii'))
+
+ sparse = conf.get('sparse_connections', None)
+ if sparse is not None:
+ f.write('enable.sparse.connections={}\n'.format(
+ sparse).encode('ascii'))
+
+ if version.startswith('0.9') or version.startswith('0.8'):
+ conf_blob.append('api.version.request=false')
+ conf_blob.append('broker.version.fallback=%s' % version)
+ else:
+ # any broker version with ApiVersion support
+ conf_blob.append('broker.version.fallback=0.10.0.0')
+ conf_blob.append('api.version.fallback.ms=0')
+
+ # SASL (only one mechanism supported at a time)
+ mech = self.conf.get('sasl_mechanisms', '').split(',')[0]
+ if mech != '':
+ conf_blob.append('sasl.mechanisms=%s' % mech)
+ if mech == 'PLAIN' or mech.find('SCRAM-') != -1:
+ self.security_protocol = 'SASL_PLAINTEXT'
+ # Use first user as SASL user/pass
+ for up in self.conf.get('sasl_users', '').split(','):
+ u, p = up.split('=')
+ conf_blob.append('sasl.username=%s' % u)
+ conf_blob.append('sasl.password=%s' % p)
+ break
+
+ elif mech == 'OAUTHBEARER':
+ self.security_protocol = 'SASL_PLAINTEXT'
+ oidc = cluster.find_app(OauthbearerOIDCApp)
+ if oidc is not None:
+ conf_blob.append('sasl.oauthbearer.method=%s\n' %
+ oidc.conf.get('sasl_oauthbearer_method'))
+ conf_blob.append('sasl.oauthbearer.client.id=%s\n' %
+ oidc.conf.get(
+ 'sasl_oauthbearer_client_id'))
+ conf_blob.append('sasl.oauthbearer.client.secret=%s\n' %
+ oidc.conf.get(
+ 'sasl_oauthbearer_client_secret'))
+ conf_blob.append('sasl.oauthbearer.extensions=%s\n' %
+ oidc.conf.get(
+ 'sasl_oauthbearer_extensions'))
+ conf_blob.append('sasl.oauthbearer.scope=%s\n' %
+ oidc.conf.get('sasl_oauthbearer_scope'))
+ conf_blob.append('sasl.oauthbearer.token.endpoint.url=%s\n'
+ % oidc.conf.get('valid_url'))
+ self.env_add('VALID_OIDC_URL', oidc.conf.get('valid_url'))
+ self.env_add(
+ 'INVALID_OIDC_URL',
+ oidc.conf.get('badformat_url'))
+ self.env_add(
+ 'EXPIRED_TOKEN_OIDC_URL',
+ oidc.conf.get('expired_url'))
+ else:
+ conf_blob.append(
+ 'enable.sasl.oauthbearer.unsecure.jwt=true\n')
+ conf_blob.append(
+ 'sasl.oauthbearer.config=%s\n' %
+ self.conf.get('sasl_oauthbearer_config'))
+
+ elif mech == 'GSSAPI':
+ self.security_protocol = 'SASL_PLAINTEXT'
+ kdc = cluster.find_app(KerberosKdcApp)
+ if kdc is None:
+ self.log(
+ 'WARNING: sasl_mechanisms is GSSAPI set but no '
+ 'KerberosKdcApp available: client SASL config will '
+ 'be invalid (which might be intentional)')
+ else:
+ self.env_add('KRB5_CONFIG', kdc.conf['krb5_conf'])
+ self.env_add('KRB5_KDC_PROFILE', kdc.conf['kdc_conf'])
+ principal, keytab = kdc.add_principal(
+ self.name,
+ conf.get('advertised_hostname', self.node.name))
+ conf_blob.append('sasl.kerberos.service.name=%s' %
+ self.conf.get('sasl_servicename',
+ 'kafka'))
+ conf_blob.append('sasl.kerberos.keytab=%s' % keytab)
+ conf_blob.append(
+ 'sasl.kerberos.principal=%s' %
+ principal.split('@')[0])
+
+ else:
+ self.log(
+ 'WARNING: FIXME: SASL %s client config not written to %s: unhandled mechanism' % # noqa: E501
+ (mech, self.test_conf_file))
+
+ # SSL config
+ if getattr(cluster, 'ssl', None) is not None:
+ ssl = cluster.ssl
+
+ key = ssl.create_cert('librdkafka%s' % self.appid)
+
+ conf_blob.append('ssl.ca.location=%s' % ssl.ca['pem'])
+ conf_blob.append('ssl.certificate.location=%s' % key['pub']['pem'])
+ conf_blob.append('ssl.key.location=%s' % key['priv']['pem'])
+ conf_blob.append('ssl.key.password=%s' % key['password'])
+
+ # Some tests need fine-grained access to various cert files,
+ # set up the env vars accordingly.
+ for k, v in ssl.ca.items():
+ self.env_add('SSL_ca_{}'.format(k), v)
+
+ # Set envs for all generated keys so tests can find them.
+ for k, v in key.items():
+ if isinstance(v, dict):
+ for k2, v2 in v.items():
+ # E.g. "SSL_priv_der=path/to/librdkafka-priv.der"
+ self.env_add('SSL_{}_{}'.format(k, k2), v2)
+ else:
+ self.env_add('SSL_{}'.format(k), v)
+
+ if 'SASL' in self.security_protocol:
+ self.security_protocol = 'SASL_SSL'
+ else:
+ self.security_protocol = 'SSL'
+
+ # Define bootstrap brokers based on selected security protocol
+ self.dbg('Using client security.protocol=%s' % self.security_protocol)
+ all_listeners = (
+ ','.join(
+ cluster.get_all(
+ 'advertised.listeners',
+ '',
+ KafkaBrokerApp))).split(',')
+ bootstrap_servers = ','.join(
+ [x for x in all_listeners if x.startswith(self.security_protocol)])
+ if len(bootstrap_servers) == 0:
+ bootstrap_servers = all_listeners[0]
+ self.log(
+ 'WARNING: No eligible listeners for security.protocol=%s in %s: falling back to first listener: %s: tests will fail (which might be the intention)' % # noqa: E501
+ (self.security_protocol, all_listeners, bootstrap_servers))
+
+ self.bootstrap_servers = bootstrap_servers
+
+ conf_blob.append('bootstrap.servers=%s' % bootstrap_servers)
+ conf_blob.append('security.protocol=%s' % self.security_protocol)
+
+ f.write(('\n'.join(conf_blob)).encode('ascii'))
+ f.close()
+
+ self.env_add('TEST_SCENARIO', scenario)
+ self.env_add('RDKAFKA_TEST_CONF', self.test_conf_file)
+ self.env_add('TEST_KAFKA_VERSION', version)
+ self.env_add('TRIVUP_ROOT', cluster.instance_path())
+
+ if self.test_mode != 'bash':
+ self.test_report_file = self.mkpath('test_report', pathtype='perm')
+ self.env_add('TEST_REPORT', self.test_report_file)
+
+ if tests is not None:
+ self.env_add('TESTS', ','.join(tests))
+
+ def start_cmd(self):
+ self.env_add(
+ 'KAFKA_PATH',
+ self.cluster.get_all(
+ 'destdir',
+ '',
+ KafkaBrokerApp)[0],
+ False)
+ self.env_add(
+ 'ZK_ADDRESS',
+ self.cluster.get_all(
+ 'address',
+ '',
+ ZookeeperApp)[0],
+ False)
+ self.env_add('BROKERS', self.cluster.bootstrap_servers(), False)
+
+ # Provide a HTTPS REST endpoint for the HTTP client tests.
+ self.env_add(
+ 'RD_UT_HTTP_URL',
+ 'https://jsonplaceholder.typicode.com/users')
+
+ # Per broker env vars
+ for b in [x for x in self.cluster.apps if isinstance(
+ x, KafkaBrokerApp)]:
+ self.env_add('BROKER_ADDRESS_%d' % b.appid,
+ ','.join([x for x in
+ b.conf['listeners'].split(',')
+ if x.startswith(self.security_protocol)]))
+ # Add each broker pid as an env so they can be killed
+ # indivdidually.
+ self.env_add('BROKER_PID_%d' % b.appid, str(b.proc.pid))
+ # JMX port, if available
+ jmx_port = b.conf.get('jmx_port', None)
+ if jmx_port is not None:
+ self.env_add('BROKER_JMX_PORT_%d' % b.appid, str(jmx_port))
+
+ extra_args = list()
+ if not self.local_tests:
+ extra_args.append('-L')
+ if self.conf.get('args', None) is not None:
+ extra_args.append(self.conf.get('args'))
+ extra_args.append('-E')
+ return './run-test.sh -p%d -K %s %s' % (
+ int(self.conf.get('parallel', 5)), ' '.join(extra_args),
+ self.test_mode)
+
+ def report(self):
+ if self.test_mode == 'bash':
+ return None
+
+ try:
+ with open(self.test_report_file, 'r') as f:
+ res = json.load(f)
+ except Exception as e:
+ self.log(
+ 'Failed to read report %s: %s' %
+ (self.test_report_file, str(e)))
+ return {'root_path': self.root_path(), 'error': str(e)}
+ return res
+
+ def deploy(self):
+ pass
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile
new file mode 100644
index 000000000..73eab2140
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/Makefile
@@ -0,0 +1,182 @@
+TESTSRCS_C = $(wildcard [08]*-*.c)
+TESTSRCS_CXX= $(wildcard [08]*-*.cpp)
+OBJS = $(TESTSRCS_C:%.c=%.o) $(TESTSRCS_CXX:%.cpp=%.o)
+
+BIN = test-runner
+LIBS += -lrdkafka++ -lrdkafka
+OBJS += test.o rusage.o testcpp.o \
+ tinycthread.o tinycthread_extra.o rdlist.o sockem.o \
+ sockem_ctrl.o
+CFLAGS += -I../src
+CXXFLAGS += -I../src -I../src-cpp
+LDFLAGS += -rdynamic -L../src -L../src-cpp
+
+# Latest Kafka version
+KAFKA_VERSION?=3.1.0
+# Kafka versions for compatibility tests
+COMPAT_KAFKA_VERSIONS?=0.8.2.2 0.9.0.1 0.11.0.3 1.0.2 2.4.1 2.8.1 $(KAFKA_VERSION)
+
+# Non-default scenarios (FIXME: read from scenarios/*)
+SCENARIOS?=noautocreate ak23
+
+# A subset of rudimentary (and quick) tests suitable for quick smoke testing.
+# The smoke test should preferably finish in under a minute.
+SMOKE_TESTS?=0000,0001,0004,0012,0017,0022,0030,0039,0049,0087,0103
+
+-include ../Makefile.config
+
+# Use C++ compiler as linker
+CC_LD=$(CXX)
+
+all: $(BIN) run_par
+
+#
+# These targets spin up a cluster and runs the test suite
+# with different parameters.
+#
+
+broker: $(BIN)
+ ./broker_version_tests.py --conf '{"parallel":1, "args":"-Q"}' $(KAFKA_VERSION)
+
+broker_idempotent: $(BIN)
+ ./broker_version_tests.py --conf '{"parallel":1, "args":"-P -L -Q"}' $(KAFKA_VERSION)
+
+sasl: $(BIN)
+ ./sasl_test.py --conf '{"parallel":1, "args":"-L -Q"}' $(KAFKA_VERSION)
+
+# Run the full test suite(s)
+full: broker broker_idempotent sasl
+
+
+#
+# The following targets require an existing cluster running (test.conf)
+#
+quick:
+ @echo "Running quick(er) test suite (without sockem)"
+ ./run-test.sh -Q -E
+
+smoke:
+ @echo "Running smoke tests: $(SMOKE_TESTS)"
+ TESTS="$(SMOKE_TESTS)" $(MAKE) quick
+
+run_par: $(BIN)
+ @echo "Running tests in parallel"
+ ./run-test.sh
+
+run_seq: $(BIN)
+ @echo "Running tests sequentially"
+ ./run-test.sh -p1
+
+run_local: $(BIN)
+ @echo "Running local broker-less tests with idempotent producer"
+ ./run-test.sh -l -P
+
+run_local_quick: $(BIN)
+ @echo "Running quick local broker-less tests with idempotent producer"
+ ./run-test.sh -l -Q -P
+
+idempotent_par: $(BIN)
+ ./run-test.sh -P
+
+idempotent_seq: $(BIN)
+ ./run-test.sh -P
+
+idempotent: idempotent_par
+
+transactions: $(BIN)
+ for _test in 0098 0101; do TESTS=$$_test ./run-test.sh ./$(BIN) ; done
+
+# Run unit tests
+unit: $(BIN)
+ TESTS=0000 ./run-test.sh -p1
+
+
+# Delete all test topics (based on prefix)
+delete_topics:
+ TESTS=none ./run-test.sh -D bare
+
+.PHONY:
+
+build: $(BIN) interceptor_test
+
+test.o: ../src/librdkafka.a ../src-cpp/librdkafka++.a interceptor_test
+
+
+
+include ../mklove/Makefile.base
+
+ifeq ($(_UNAME_S),Darwin)
+interceptor_test: .PHONY
+else
+interceptor_test: .PHONY
+ $(MAKE) -C $@
+endif
+
+
+tinycthread.o: ../src/tinycthread.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) -c $<
+
+tinycthread_extra.o: ../src/tinycthread_extra.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) -c $<
+
+rdlist.o: ../src/rdlist.c
+ $(CC) $(CPPFLAGS) $(CFLAGS) -c $<
+
+
+clean:
+ rm -f *.test $(OBJS) $(BIN)
+ $(MAKE) -C interceptor_test clean
+
+# Remove test reports, temporary test files, crash dumps, etc.
+clean-output:
+ rm -f *.offset stats_*.json core vgcore.* _until_fail_*.log gdbrun??????
+
+realclean: clean clean-output
+ rm -f test_report_*.json
+
+java: .PHONY
+ make -C java
+
+# Run test-suite with ASAN
+asan:
+ @(echo "### Running tests with AddressSanitizer")
+ (cd .. ; ./dev-conf.sh asan)
+ CI=true ./broker_version_tests.py --conf '{"args":"-Q"}' $(KAFKA_VERSION)
+
+# Run test-suite with TSAN
+tsan:
+ @(echo "### Running tests with ThreadSanitizer")
+ (cd .. ; ./dev-conf.sh tsan)
+ CI=true ./broker_version_tests.py --conf '{"args":"-Q"}' $(KAFKA_VERSION)
+
+# Run full test-suite with a clean release build
+pristine-full:
+ @(echo "### Running full test-suite with clean build")
+ (cd .. ; ./dev-conf.sh clean)
+ make full
+
+# Run backward compatibility tests
+compat:
+ @(echo "### Running compatibility tests with Apache Kafka versions $(COMPAT_KAFKA_VERSIONS)")
+ ./broker_version_tests.py --rdkconf '{"args": "-Q"}' \
+ $(COMPAT_KAFKA_VERSIONS)
+
+# Run non-default scenarios
+scenarios: .PHONY
+ @echo "### Running test scenarios: $(SCENARIOS)"
+ @(for _SCENARIO in $(SCENARIOS) ; do \
+ ./broker_version_tests.py --scenario "$$_SCENARIO" $(KAFKA_VERSION) ; \
+ done)
+
+
+# Run a full release / PR test.
+# (| is for not running suites in parallel)
+release-test: | asan tsan pristine-full scenarios compat
+
+# Check resource usage (requires a running cluster environment)
+rusage:
+ ./run-test.sh -R bare
+
+
+
+-include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/README.md
new file mode 100644
index 000000000..b0d99b0bb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/README.md
@@ -0,0 +1,505 @@
+# Automated regression tests for librdkafka
+
+
+## Supported test environments
+
+While the standard test suite works well on OSX and Windows,
+the full test suite (which must be run for PRs and releases) will
+only run on recent Linux distros due to its use of ASAN, Kerberos, etc.
+
+
+## Automated broker cluster setup using trivup
+
+A local broker cluster can be set up using
+[trivup](https://github.com/edenhill/trivup), which is a Python package
+available on PyPi.
+These self-contained clusters are used to run the librdkafka test suite
+on a number of different broker versions or with specific broker configs.
+
+trivup will download the specified Kafka version into its root directory,
+the root directory is also used for cluster instances, where Kafka will
+write messages, logs, etc.
+The trivup root directory is by default `tmp` in the current directory but
+may be specified by setting the `TRIVUP_ROOT` environment variable
+to alternate directory, e.g., `TRIVUP_ROOT=$HOME/trivup make full`.
+
+First install required Python packages (trivup with friends):
+
+ $ python3 -m pip install -U -r requirements.txt
+
+Bring up a Kafka cluster (with the specified version) and start an interactive
+shell, when the shell is exited the cluster is brought down and deleted.
+
+ $ python3 -m trivup.clusters.KafkaCluster 2.3.0 # Broker version
+ # You can also try adding:
+ # --ssl To enable SSL listeners
+ # --sasl <mechanism> To enable SASL authentication
+ # --sr To provide a Schema-Registry instance
+ # .. and so on, see --help for more.
+
+In the trivup shell, run the test suite:
+
+ $ make
+
+
+If you'd rather use an existing cluster, you may omit trivup and
+provide a `test.conf` file that specifies the brokers and possibly other
+librdkafka configuration properties:
+
+ $ cp test.conf.example test.conf
+ $ $EDITOR test.conf
+
+
+
+## Run specific tests
+
+To run tests:
+
+ # Run tests in parallel (quicker, but harder to troubleshoot)
+ $ make
+
+ # Run a condensed test suite (quickest)
+ # This is what is run on CI builds.
+ $ make quick
+
+ # Run tests in sequence
+ $ make run_seq
+
+ # Run specific test
+ $ TESTS=0004 make
+
+ # Run test(s) with helgrind, valgrind, gdb
+ $ TESTS=0009 ./run-test.sh valgrind|helgrind|gdb
+
+
+All tests in the 0000-0999 series are run automatically with `make`.
+
+Tests 1000-1999 are subject to specific non-standard setups or broker
+configuration, these tests are run with `TESTS=1nnn make`.
+See comments in the test's source file for specific requirements.
+
+To insert test results into SQLite database make sure the `sqlite3` utility
+is installed, then add this to `test.conf`:
+
+ test.sql.command=sqlite3 rdktests
+
+
+
+## Adding a new test
+
+The simplest way to add a new test is to copy one of the recent
+(higher `0nnn-..` number) tests to the next free
+`0nnn-<what-is-tested>` file.
+
+If possible and practical, try to use the C++ API in your test as that will
+cover both the C and C++ APIs and thus provide better test coverage.
+Do note that the C++ test framework is not as feature rich as the C one,
+so if you need message verification, etc, you're better off with a C test.
+
+After creating your test file it needs to be added in a couple of places:
+
+ * Add to [tests/CMakeLists.txt](tests/CMakeLists.txt)
+ * Add to [win32/tests/tests.vcxproj](win32/tests/tests.vcxproj)
+ * Add to both locations in [tests/test.c](tests/test.c) - search for an
+ existing test number to see what needs to be done.
+
+You don't need to add the test to the Makefile, it is picked up automatically.
+
+Some additional guidelines:
+ * If your test depends on a minimum broker version, make sure to specify it
+ in test.c using `TEST_BRKVER()` (see 0091 as an example).
+ * If your test can run without an active cluster, flag the test
+ with `TEST_F_LOCAL`.
+ * If your test runs for a long time or produces/consumes a lot of messages
+ it might not be suitable for running on CI (which should run quickly
+ and are bound by both time and resources). In this case it is preferred
+ if you modify your test to be able to run quicker and/or with less messages
+ if the `test_quick` variable is true.
+ * There's plenty of helper wrappers in test.c for common librdkafka functions
+ that makes tests easier to write by not having to deal with errors, etc.
+ * Fail fast, use `TEST_ASSERT()` et.al., the sooner an error is detected
+ the better since it makes troubleshooting easier.
+ * Use `TEST_SAY()` et.al. to inform the developer what your test is doing,
+ making it easier to troubleshoot upon failure. But try to keep output
+ down to reasonable levels. There is a `TEST_LEVEL` environment variable
+ that can be used with `TEST_SAYL()` to only emit certain printouts
+ if the test level is increased. The default test level is 2.
+ * The test runner will automatically adjust timeouts (it knows about)
+ if running under valgrind, on CI, or similar environment where the
+ execution speed may be slower.
+ To make sure your test remains sturdy in these type of environments, make
+ sure to use the `tmout_multip(milliseconds)` macro when passing timeout
+ values to non-test functions, e.g, `rd_kafka_poll(rk, tmout_multip(3000))`.
+ * If your test file contains multiple separate sub-tests, use the
+ `SUB_TEST()`, `SUB_TEST_QUICK()` and `SUB_TEST_PASS()` from inside
+ the test functions to help differentiate test failures.
+
+
+## Test scenarios
+
+A test scenario defines the cluster configuration used by tests.
+The majority of tests use the "default" scenario which matches the
+Apache Kafka default broker configuration (topic auto creation enabled, etc).
+
+If a test relies on cluster configuration that is mutually exclusive with
+the default configuration an alternate scenario must be defined in
+`scenarios/<scenario>.json` which is a configuration object which
+is passed to [trivup](https://github.com/edenhill/trivup).
+
+Try to reuse an existing test scenario as far as possible to speed up
+test times, since each new scenario will require a new cluster incarnation.
+
+
+## A guide to testing, verifying, and troubleshooting, librdkafka
+
+
+### Creating a development build
+
+The [dev-conf.sh](../dev-conf.sh) script configures and builds librdkafka and
+the test suite for development use, enabling extra runtime
+checks (`ENABLE_DEVEL`, `rd_dassert()`, etc), disabling optimization
+(to get accurate stack traces and line numbers), enable ASAN, etc.
+
+ # Reconfigure librdkafka for development use and rebuild.
+ $ ./dev-conf.sh
+
+**NOTE**: Performance tests and benchmarks should not use a development build.
+
+
+### Controlling the test framework
+
+A test run may be dynamically set up using a number of environment variables.
+These environment variables work for all different ways of invocing the tests,
+be it `make`, `run-test.sh`, `until-fail.sh`, etc.
+
+ * `TESTS=0nnn` - only run a single test identified by its full number, e.g.
+ `TESTS=0102 make`. (Yes, the var should have been called TEST)
+ * `SUBTESTS=...` - only run sub-tests (tests that are using `SUB_TEST()`)
+ that contains this string.
+ * `TESTS_SKIP=...` - skip these tests.
+ * `TEST_DEBUG=...` - this will automatically set the `debug` config property
+ of all instantiated clients to the value.
+ E.g.. `TEST_DEBUG=broker,protocol TESTS=0001 make`
+ * `TEST_LEVEL=n` - controls the `TEST_SAY()` output level, a higher number
+ yields more test output. Default level is 2.
+ * `RD_UT_TEST=name` - only run unittest containing `name`, should be used
+ with `TESTS=0000`.
+ See [../src/rdunittest.c](../src/rdunittest.c) for
+ unit test names.
+
+
+Let's say that you run the full test suite and get a failure in test 0061,
+which is a consumer test. You want to quickly reproduce the issue
+and figure out what is wrong, so limit the tests to just 0061, and provide
+the relevant debug options (which is typically `cgrp,fetch` for consumers):
+
+ $ TESTS=0061 TEST_DEBUG=cgrp,fetch make
+
+If the test did not fail you've found an intermittent issue, this is where
+[until-fail.sh](until-fail.sh) comes in to play, so run the test until it fails:
+
+ # bare means to run the test without valgrind
+ $ TESTS=0061 TEST_DEBUG=cgrp,fetch ./until-fail.sh bare
+
+
+### How to run tests
+
+The standard way to run the test suite is firing up a trivup cluster
+in an interactive shell:
+
+ $ ./interactive_broker_version.py 2.3.0 # Broker version
+
+
+And then running the test suite in parallel:
+
+ $ make
+
+
+Run one test at a time:
+
+ $ make run_seq
+
+
+Run a single test:
+
+ $ TESTS=0034 make
+
+
+Run test suite with valgrind (see instructions below):
+
+ $ ./run-test.sh valgrind # memory checking
+
+or with helgrind (the valgrind thread checker):
+
+ $ ./run-test.sh helgrind # thread checking
+
+
+To run the tests in gdb:
+
+**NOTE**: gdb support is flaky on OSX due to signing issues.
+
+ $ ./run-test.sh gdb
+ (gdb) run
+
+ # wait for test to crash, or interrupt with Ctrl-C
+
+ # backtrace of current thread
+ (gdb) bt
+ # move up or down a stack frame
+ (gdb) up
+ (gdb) down
+ # select specific stack frame
+ (gdb) frame 3
+ # show code at location
+ (gdb) list
+
+ # print variable content
+ (gdb) p rk.rk_conf.group_id
+ (gdb) p *rkb
+
+ # continue execution (if interrupted)
+ (gdb) cont
+
+ # single-step one instruction
+ (gdb) step
+
+ # restart
+ (gdb) run
+
+ # see all threads
+ (gdb) info threads
+
+ # see backtraces of all threads
+ (gdb) thread apply all bt
+
+ # exit gdb
+ (gdb) exit
+
+
+If a test crashes and produces a core file (make sure your shell has
+`ulimit -c unlimited` set!), do:
+
+ # On linux
+ $ LD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner <core-file>
+ (gdb) bt
+
+ # On OSX
+ $ DYLD_LIBRARY_PATH=../src:../src-cpp gdb ./test-runner /cores/core.<pid>
+ (gdb) bt
+
+
+To run all tests repeatedly until one fails, this is a good way of finding
+intermittent failures, race conditions, etc:
+
+ $ ./until-fail.sh bare # bare is to run the test without valgrind,
+ # may also be one or more of the modes supported
+ # by run-test.sh:
+ # bare valgrind helgrind gdb, etc..
+
+To run a single test repeatedly with valgrind until failure:
+
+ $ TESTS=0103 ./until-fail.sh valgrind
+
+
+
+### Finding memory leaks, memory corruption, etc.
+
+There are two ways to verifying there are no memory leaks, out of bound
+memory accesses, use after free, etc. ASAN or valgrind.
+
+#### ASAN - AddressSanitizer
+
+The first option is using AddressSanitizer, this is build-time instrumentation
+provided by clang and gcc to insert memory checks in the build library.
+
+To enable AddressSanitizer (ASAN), run `./dev-conf.sh asan` from the
+librdkafka root directory.
+This script will rebuild librdkafka and the test suite with ASAN enabled.
+
+Then run tests as usual. Memory access issues will be reported on stderr
+in real time as they happen (and the test will fail eventually), while
+memory leaks will be reported on stderr when the test run exits successfully,
+i.e., no tests failed.
+
+Test failures will typically cause the current test to exit hard without
+cleaning up, in which case there will be a large number of reported memory
+leaks, these shall be ignored. The memory leak report is only relevant
+when the test suite passes.
+
+**NOTE**: The OSX version of ASAN does not provide memory leak protection,
+ you will need to run the test suite on Linux (native or in Docker).
+
+**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
+
+
+#### Valgrind - memory checker
+
+Valgrind is a powerful virtual machine that intercepts all memory accesses
+of an unmodified program, reporting memory access violations, use after free,
+memory leaks, etc.
+
+Valgrind provides additional checks over ASAN and is mostly useful
+for troubleshooting crashes, memory issues and leaks when ASAN falls short.
+
+To use valgrind, make sure librdkafka and the test suite is built without
+ASAN or TSAN, it must be a clean build without any other instrumentation,
+then simply run:
+
+ $ ./run-test.sh valgrind
+
+Valgrind will report to stderr, just like ASAN.
+
+
+**NOTE**: Valgrind only runs on Linux.
+
+**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
+
+
+### TSAN - Thread and locking issues
+
+librdkafka uses a number of internal threads which communicate and share state
+through op queues, conditional variables, mutexes and atomics.
+
+While the docstrings in the librdkafka source code specify what locking is
+required it is very hard to manually verify that the correct locks
+are acquired, and in the correct order (to avoid deadlocks).
+
+TSAN, ThreadSanitizer, is of great help here. As with ASAN, TSAN is a
+build-time option: run `./dev-conf.sh tsan` to rebuild with TSAN.
+
+Run the test suite as usual, preferably in parallel. TSAN will output
+thread errors to stderr and eventually fail the test run.
+
+If you're having threading issues and TSAN does not provide enough information
+to sort it out, you can also try running the test with helgrind, which
+is valgrind's thread checker (`./run-test.sh helgrind`).
+
+
+**NOTE**: ASAN, TSAN and valgrind are mutually exclusive.
+
+
+### Resource usage thresholds (experimental)
+
+**NOTE**: This is an experimental feature, some form of system-specific
+ calibration will be needed.
+
+If the `-R` option is passed to the `test-runner`, or the `make rusage`
+target is used, the test framework will monitor each test's resource usage
+and fail the test if the default or test-specific thresholds are exceeded.
+
+Per-test thresholds are specified in test.c using the `_THRES()` macro.
+
+Currently monitored resources are:
+ * `utime` - User CPU time in seconds (default 1.0s)
+ * `stime` - System/Kernel CPU time in seconds (default 0.5s).
+ * `rss` - RSS (memory) usage (default 10.0 MB)
+ * `ctxsw` - Number of voluntary context switches, e.g. syscalls (default 10000).
+
+Upon successful test completion a log line will be emitted with a resource
+usage summary, e.g.:
+
+ Test resource usage summary: 20.161s (32.3%) User CPU time, 12.976s (20.8%) Sys CPU time, 0.000MB RSS memory increase, 4980 Voluntary context switches
+
+The User and Sys CPU thresholds are based on observations running the
+test suite on an Intel(R) Core(TM) i7-2600 CPU @ 3.40GHz (8 cores)
+which define the base line system.
+
+Since no two development environments are identical a manual CPU calibration
+value can be passed as `-R<C>`, where `C` is the CPU calibration for
+the local system compared to the base line system.
+The CPU threshold will be multiplied by the CPU calibration value (default 1.0),
+thus a value less than 1.0 means the local system is faster than the
+base line system, and a value larger than 1.0 means the local system is
+slower than the base line system.
+I.e., if you are on an i5 system, pass `-R2.0` to allow higher CPU usages,
+or `-R0.8` if your system is faster than the base line system.
+The the CPU calibration value may also be set with the
+`TEST_CPU_CALIBRATION=1.5` environment variable.
+
+In an ideal future, the test suite would be able to auto-calibrate.
+
+
+**NOTE**: The resource usage threshold checks will run tests in sequence,
+ not parallell, to be able to effectively measure per-test usage.
+
+
+# PR and release verification
+
+Prior to pushing your PR you must verify that your code change has not
+introduced any regression or new issues, this requires running the test
+suite in multiple different modes:
+
+ * PLAINTEXT, SSL transports
+ * All SASL mechanisms (PLAIN, GSSAPI, SCRAM, OAUTHBEARER)
+ * Idempotence enabled for all tests
+ * With memory checking
+ * With thread checking
+ * Compatibility with older broker versions
+
+These tests must also be run for each release candidate that is created.
+
+ $ make release-test
+
+This will take approximately 30 minutes.
+
+**NOTE**: Run this on Linux (for ASAN and Kerberos tests to work properly), not OSX.
+
+
+# Test mode specifics
+
+The following sections rely on trivup being installed.
+
+
+### Compatbility tests with multiple broker versions
+
+To ensure compatibility across all supported broker versions the entire
+test suite is run in a trivup based cluster, one test run for each
+relevant broker version.
+
+ $ ./broker_version_tests.py
+
+
+### SASL tests
+
+Testing SASL requires a bit of configuration on the brokers, to automate
+this the entire test suite is run on trivup based clusters.
+
+ $ ./sasl_tests.py
+
+
+
+### Full test suite(s) run
+
+To run all tests, including the broker version and SASL tests, etc, use
+
+ $ make full
+
+**NOTE**: `make full` is a sub-set of the more complete `make release-test` target.
+
+
+### Idempotent Producer tests
+
+To run the entire test suite with `enable.idempotence=true` enabled, use
+`make idempotent_seq` or `make idempotent_par` for sequencial or
+parallel testing.
+Some tests are skipped or slightly modified when idempotence is enabled.
+
+
+## Manual testing notes
+
+The following manual tests are currently performed manually, they should be
+implemented as automatic tests.
+
+### LZ4 interop
+
+ $ ./interactive_broker_version.py -c ./lz4_manual_test.py 0.8.2.2 0.9.0.1 2.3.0
+
+Check the output and follow the instructions.
+
+
+
+
+## Test numbers
+
+Automated tests: 0000-0999
+Manual tests: 8000-8999
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh
new file mode 100755
index 000000000..9d17706f3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/autotest.sh
@@ -0,0 +1,33 @@
+#!/bin/bash
+#
+# autotest.sh runs the integration tests using a temporary Kafka cluster.
+# This is intended to be used on CI.
+#
+
+set -e
+
+KAFKA_VERSION=$1
+
+if [[ -z $KAFKA_VERSION ]]; then
+ echo "Usage: $0 <broker-version>"
+ exit 1
+fi
+
+set -x
+
+pushd tests
+
+[[ -d _venv ]] || virtualenv _venv
+source _venv/bin/activate
+
+# Install the requirements
+pip3 install -U -r requirements.txt
+
+# Run tests that automatically spin up their clusters
+export KAFKA_VERSION
+
+echo "## Running full test suite for broker version $KAFKA_VERSION ##"
+time make full
+
+
+popd # tests
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb b/fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb
new file mode 100644
index 000000000..f98d9b462
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/backtrace.gdb
@@ -0,0 +1,30 @@
+p *test
+bt full
+list
+
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+up
+p *rk
+p *rkb
+p *rkb.rkb_rk
+
+thread apply all bt
+quit
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py b/fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py
new file mode 100755
index 000000000..717da28d5
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/broker_version_tests.py
@@ -0,0 +1,297 @@
+#!/usr/bin/env python3
+#
+#
+# Run librdkafka regression tests on with different SASL parameters
+# and broker verisons.
+#
+# Requires:
+# trivup python module
+# gradle in your PATH
+
+from cluster_testing import (
+ LibrdkafkaTestCluster,
+ print_report_summary,
+ read_scenario_conf)
+from LibrdkafkaTestApp import LibrdkafkaTestApp
+
+import subprocess
+import tempfile
+import os
+import sys
+import argparse
+import json
+
+
+def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None,
+ interact=False, debug=False, scenario="default"):
+ """
+ @brief Create, deploy and start a Kafka cluster using Kafka \\p version
+ Then run librdkafka's regression tests.
+ """
+
+ cluster = LibrdkafkaTestCluster(version, conf,
+ num_brokers=int(conf.get('broker_cnt', 3)),
+ debug=debug, scenario=scenario)
+
+ # librdkafka's regression tests, as an App.
+ _rdkconf = conf.copy() # Base rdkconf on cluster conf + rdkconf
+ _rdkconf.update(rdkconf)
+ rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests,
+ scenario=scenario)
+ rdkafka.do_cleanup = False
+
+ if deploy:
+ cluster.deploy()
+
+ cluster.start(timeout=30)
+
+ if conf.get('test_mode', '') == 'bash':
+ cmd = 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % ( # noqa: E501
+ cluster.name, version)
+ subprocess.call(
+ cmd,
+ env=rdkafka.env,
+ shell=True,
+ executable='/bin/bash')
+ report = None
+
+ else:
+ rdkafka.start()
+ print(
+ '# librdkafka regression tests started, logs in %s' %
+ rdkafka.root_path())
+ rdkafka.wait_stopped(timeout=60 * 30)
+
+ report = rdkafka.report()
+ report['root_path'] = rdkafka.root_path()
+
+ if report.get('tests_failed', 0) > 0 and interact:
+ print(
+ '# Connect to cluster with bootstrap.servers %s' %
+ cluster.bootstrap_servers())
+ print('# Exiting the shell will bring down the cluster. '
+ 'Good luck.')
+ subprocess.call(
+ 'bash --rcfile <(cat ~/.bashrc; echo \'PS1="[TRIVUP:%s@%s] \\u@\\h:\\w$ "\')' % # noqa: E501
+ (cluster.name, version), env=rdkafka.env, shell=True,
+ executable='/bin/bash')
+
+ cluster.stop(force=True)
+
+ cluster.cleanup()
+ return report
+
+
+def handle_report(report, version, suite):
+ """ Parse test report and return tuple (Passed(bool), Reason(str)) """
+ test_cnt = report.get('tests_run', 0)
+
+ if test_cnt == 0:
+ return (False, 'No tests run')
+
+ passed = report.get('tests_passed', 0)
+ failed = report.get('tests_failed', 0)
+ if 'all' in suite.get('expect_fail', []) or version in suite.get(
+ 'expect_fail', []):
+ expect_fail = True
+ else:
+ expect_fail = False
+
+ if expect_fail:
+ if failed == test_cnt:
+ return (True, 'All %d/%d tests failed as expected' %
+ (failed, test_cnt))
+ else:
+ return (False, '%d/%d tests failed: expected all to fail' %
+ (failed, test_cnt))
+ else:
+ if failed > 0:
+ return (False, '%d/%d tests passed: expected all to pass' %
+ (passed, test_cnt))
+ else:
+ return (True, 'All %d/%d tests passed as expected' %
+ (passed, test_cnt))
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser(
+ description='Run librdkafka tests on a range of broker versions')
+
+ parser.add_argument('--debug', action='store_true', default=False,
+ help='Enable trivup debugging')
+ parser.add_argument('--conf', type=str, dest='conf', default=None,
+ help='trivup JSON config object (not file)')
+ parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None,
+ help='trivup JSON config object (not file) '
+ 'for LibrdkafkaTestApp')
+ parser.add_argument('--scenario', type=str, dest='scenario',
+ default='default',
+ help='Test scenario (see scenarios/ directory)')
+ parser.add_argument('--tests', type=str, dest='tests', default=None,
+ help='Test to run (e.g., "0002")')
+ parser.add_argument('--report', type=str, dest='report', default=None,
+ help='Write test suites report to this filename')
+ parser.add_argument('--interact', action='store_true', dest='interact',
+ default=False,
+ help='On test failure start a shell before bringing '
+ 'the cluster down.')
+ parser.add_argument('versions', type=str, nargs='*',
+ default=['0.8.1.1', '0.8.2.2', '0.9.0.1', '2.3.0'],
+ help='Broker versions to test')
+ parser.add_argument('--interactive', action='store_true',
+ dest='interactive',
+ default=False,
+ help='Start a shell instead of running tests')
+ parser.add_argument(
+ '--root',
+ type=str,
+ default=os.environ.get(
+ 'TRIVUP_ROOT',
+ 'tmp'),
+ help='Root working directory')
+ parser.add_argument(
+ '--port',
+ default=None,
+ help='Base TCP port to start allocating from')
+ parser.add_argument(
+ '--kafka-src',
+ dest='kafka_path',
+ type=str,
+ default=None,
+ help='Path to Kafka git repo checkout (used for version=trunk)')
+ parser.add_argument(
+ '--brokers',
+ dest='broker_cnt',
+ type=int,
+ default=3,
+ help='Number of Kafka brokers')
+ parser.add_argument('--ssl', dest='ssl', action='store_true',
+ default=False,
+ help='Enable SSL endpoints')
+ parser.add_argument(
+ '--sasl',
+ dest='sasl',
+ type=str,
+ default=None,
+ help='SASL mechanism (PLAIN, GSSAPI)')
+
+ args = parser.parse_args()
+
+ conf = dict()
+ rdkconf = dict()
+
+ if args.conf is not None:
+ args.conf = json.loads(args.conf)
+ else:
+ args.conf = {}
+
+ if args.port is not None:
+ args.conf['port_base'] = int(args.port)
+ if args.kafka_path is not None:
+ args.conf['kafka_path'] = args.kafka_path
+ if args.ssl:
+ args.conf['security.protocol'] = 'SSL'
+ if args.sasl:
+ if args.sasl == 'PLAIN' and 'sasl_users' not in args.conf:
+ args.conf['sasl_users'] = 'testuser=testpass'
+ args.conf['sasl_mechanisms'] = args.sasl
+ args.conf['sasl_servicename'] = 'kafka'
+ if args.interactive:
+ args.conf['test_mode'] = 'bash'
+ args.conf['broker_cnt'] = args.broker_cnt
+
+ conf.update(args.conf)
+ if args.rdkconf is not None:
+ rdkconf.update(json.loads(args.rdkconf))
+
+ conf.update(read_scenario_conf(args.scenario))
+
+ if args.tests is not None:
+ tests = args.tests.split(',')
+ elif 'tests' in conf:
+ tests = conf.get('tests', '').split(',')
+ else:
+ tests = None
+
+ # Test version + suite matrix
+ if 'versions' in conf:
+ versions = conf.get('versions')
+ else:
+ versions = args.versions
+ suites = [{'name': 'standard'}]
+
+ pass_cnt = 0
+ fail_cnt = 0
+ for version in versions:
+ for suite in suites:
+ _conf = conf.copy()
+ _conf.update(suite.get('conf', {}))
+ _rdkconf = rdkconf.copy()
+ _rdkconf.update(suite.get('rdkconf', {}))
+
+ if 'version' not in suite:
+ suite['version'] = dict()
+
+ # Run tests
+ print('#### Version %s, suite %s, scenario %s: STARTING' %
+ (version, suite['name'], args.scenario))
+ report = test_it(version, tests=tests, conf=_conf,
+ rdkconf=_rdkconf,
+ interact=args.interact, debug=args.debug,
+ scenario=args.scenario)
+
+ if not report:
+ continue
+
+ # Handle test report
+ report['version'] = version
+ passed, reason = handle_report(report, version, suite)
+ report['PASSED'] = passed
+ report['REASON'] = reason
+
+ if passed:
+ print('\033[42m#### Version %s, suite %s: PASSED: %s\033[0m' %
+ (version, suite['name'], reason))
+ pass_cnt += 1
+ else:
+ print('\033[41m#### Version %s, suite %s: FAILED: %s\033[0m' %
+ (version, suite['name'], reason))
+ fail_cnt += 1
+
+ # Emit hopefully relevant parts of the log on failure
+ subprocess.call(
+ "grep --color=always -B100 -A10 FAIL %s" %
+ (os.path.join(
+ report['root_path'],
+ 'stderr.log')),
+ shell=True)
+
+ print('#### Test output: %s/stderr.log' % (report['root_path']))
+
+ suite['version'][version] = report
+
+ # Write test suite report JSON file
+ if args.report is not None:
+ test_suite_report_file = args.report
+ f = open(test_suite_report_file, 'w')
+ else:
+ fd, test_suite_report_file = tempfile.mkstemp(prefix='test_suite_',
+ suffix='.json',
+ dir='.')
+ f = os.fdopen(fd, 'w')
+
+ full_report = {'suites': suites, 'pass_cnt': pass_cnt,
+ 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + fail_cnt}
+
+ f.write(json.dumps(full_report))
+ f.close()
+
+ print('\n\n\n')
+ print_report_summary(full_report)
+ print('#### Full test suites report in: %s' % test_suite_report_file)
+
+ if pass_cnt == 0 or fail_cnt > 0:
+ sys.exit(1)
+ else:
+ sys.exit(0)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh
new file mode 100755
index 000000000..bce137109
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/buildbox.sh
@@ -0,0 +1,17 @@
+#!/bin/bash
+#
+# Build script for buildbox.io
+# Must be ran from top-level directory.
+
+PFX=tmp_install
+
+[ -d $PFX ] && rm -rf "$PFX"
+
+make clean || true
+./configure --clean
+./configure "--prefix=$PFX" || exit 1
+make || exit 1
+make install || exit 1
+
+
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh
new file mode 100755
index 000000000..f396d8bed
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/cleanup-checker-tests.sh
@@ -0,0 +1,20 @@
+#!/bin/bash
+#
+#
+# This script runs all tests with valgrind, one by one, forever, to
+# make sure there aren't any memory leaks.
+
+ALL=$(seq 0 15)
+CNT=0
+while true ; do
+ for T in $ALL; do
+ echo "#################### Test $T run #$CNT #################"
+ TESTS=$(printf %04d $T) ./run-test.sh -p valgrind || exit 1
+ CNT=$(expr $CNT + 1)
+ done
+ echo "################## Cleaning up"
+ rm -f *.offset
+ ./delete-test-topics.sh 0
+done
+done
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py b/fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py
new file mode 100755
index 000000000..cfdc08db6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/cluster_testing.py
@@ -0,0 +1,183 @@
+#!/usr/bin/env python3
+#
+#
+# Cluster testing helper
+#
+# Requires:
+# trivup python module
+# gradle in your PATH
+
+from trivup.trivup import Cluster
+from trivup.apps.ZookeeperApp import ZookeeperApp
+from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
+from trivup.apps.KerberosKdcApp import KerberosKdcApp
+from trivup.apps.SslApp import SslApp
+from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
+
+import os
+import sys
+import json
+import argparse
+import re
+from jsoncomment import JsonComment
+
+
+def version_as_list(version):
+ if version == 'trunk':
+ return [sys.maxsize]
+ return [int(a) for a in re.findall('\\d+', version)][0:3]
+
+
+def read_scenario_conf(scenario):
+ """ Read scenario configuration from scenarios/<scenario>.json """
+ parser = JsonComment(json)
+ with open(os.path.join('scenarios', scenario + '.json'), 'r') as f:
+ return parser.load(f)
+
+
+class LibrdkafkaTestCluster(Cluster):
+ def __init__(self, version, conf={}, num_brokers=3, debug=False,
+ scenario="default"):
+ """
+ @brief Create, deploy and start a Kafka cluster using Kafka \\p version
+
+ Supported \\p conf keys:
+ * security.protocol - PLAINTEXT, SASL_PLAINTEXT, SASL_SSL
+
+ \\p conf dict is passed to KafkaBrokerApp classes, etc.
+ """
+
+ super(LibrdkafkaTestCluster, self).__init__(
+ self.__class__.__name__,
+ os.environ.get('TRIVUP_ROOT', 'tmp'), debug=debug)
+
+ # Read trivup config from scenario definition.
+ defconf = read_scenario_conf(scenario)
+ defconf.update(conf)
+
+ # Enable SSL if desired
+ if 'SSL' in conf.get('security.protocol', ''):
+ self.ssl = SslApp(self, defconf)
+
+ self.brokers = list()
+
+ # One ZK (from Kafka repo)
+ ZookeeperApp(self)
+
+ # Start Kerberos KDC if GSSAPI (Kerberos) is configured
+ if 'GSSAPI' in defconf.get('sasl_mechanisms', []):
+ kdc = KerberosKdcApp(self, 'MYREALM')
+ # Kerberos needs to be started prior to Kafka so that principals
+ # and keytabs are available at the time of Kafka config generation.
+ kdc.start()
+
+ if 'OAUTHBEARER'.casefold() == \
+ defconf.get('sasl_mechanisms', "").casefold() and \
+ 'OIDC'.casefold() == \
+ defconf.get('sasl_oauthbearer_method', "").casefold():
+ self.oidc = OauthbearerOIDCApp(self)
+
+ # Brokers
+ defconf.update({'replication_factor': min(num_brokers, 3),
+ 'version': version,
+ 'security.protocol': 'PLAINTEXT'})
+ self.conf = defconf
+
+ for n in range(0, num_brokers):
+ # Configure rack & replica selector if broker supports
+ # fetch-from-follower
+ if version_as_list(version) >= [2, 4, 0]:
+ defconf.update(
+ {
+ 'conf': [
+ 'broker.rack=RACK${appid}',
+ 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) # noqa: E501
+ self.brokers.append(KafkaBrokerApp(self, defconf))
+
+ def bootstrap_servers(self):
+ """ @return Kafka bootstrap servers based on security.protocol """
+ all_listeners = (
+ ','.join(
+ self.get_all(
+ 'advertised_listeners',
+ '',
+ KafkaBrokerApp))).split(',')
+ return ','.join([x for x in all_listeners if x.startswith(
+ self.conf.get('security.protocol'))])
+
+
+def result2color(res):
+ if res == 'PASSED':
+ return '\033[42m'
+ elif res == 'FAILED':
+ return '\033[41m'
+ else:
+ return ''
+
+
+def print_test_report_summary(name, report):
+ """ Print summary for a test run. """
+ passed = report.get('PASSED', False)
+ if passed:
+ resstr = '\033[42mPASSED\033[0m'
+ else:
+ resstr = '\033[41mFAILED\033[0m'
+
+ print('%6s %-50s: %s' % (resstr, name, report.get('REASON', 'n/a')))
+ if not passed:
+ # Print test details
+ for name, test in report.get('tests', {}).items():
+ testres = test.get('state', '')
+ if testres == 'SKIPPED':
+ continue
+ print('%s --> %-20s \033[0m' %
+ ('%s%s\033[0m' %
+ (result2color(test.get('state', 'n/a')),
+ test.get('state', 'n/a')),
+ test.get('name', 'n/a')))
+ print('%8s --> %s/%s' %
+ ('', report.get('root_path', '.'), 'stderr.log'))
+
+
+def print_report_summary(fullreport):
+ """ Print summary from a full report suite """
+ suites = fullreport.get('suites', list())
+ print('#### Full test suite report (%d suite(s))' % len(suites))
+ for suite in suites:
+ for version, report in suite.get('version', {}).items():
+ print_test_report_summary('%s @ %s' %
+ (suite.get('name', 'n/a'), version),
+ report)
+
+ pass_cnt = fullreport.get('pass_cnt', -1)
+ if pass_cnt == 0:
+ pass_clr = ''
+ else:
+ pass_clr = '\033[42m'
+
+ fail_cnt = fullreport.get('fail_cnt', -1)
+ if fail_cnt == 0:
+ fail_clr = ''
+ else:
+ fail_clr = '\033[41m'
+
+ print('#### %d suites %sPASSED\033[0m, %d suites %sFAILED\033[0m' %
+ (pass_cnt, pass_clr, fail_cnt, fail_clr))
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser(description='Show test suite report')
+ parser.add_argument('report', type=str, nargs=1,
+ help='Show summary from test suites report file')
+
+ args = parser.parse_args()
+
+ passed = False
+ with open(args.report[0], 'r') as f:
+ passed = print_report_summary(json.load(f))
+
+ if passed:
+ sys.exit(0)
+ else:
+ sys.exit(1)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh
new file mode 100755
index 000000000..bc40bf65d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/delete-test-topics.sh
@@ -0,0 +1,56 @@
+#!/bin/bash
+#
+
+set -e
+
+if [[ "$1" == "-n" ]]; then
+ DO_DELETE=0
+ shift
+else
+ DO_DELETE=1
+fi
+
+ZK=$1
+KATOPS=$2
+RE=$3
+
+if [[ -z "$ZK" ]]; then
+ ZK="$ZK_ADDRESS"
+fi
+
+if [[ -z "$KATOPS" ]]; then
+ if [[ -d "$KAFKA_PATH" ]]; then
+ KATOPS="$KAFKA_PATH/bin/kafka-topics.sh"
+ fi
+fi
+
+if [[ -z "$RE" ]]; then
+ RE="^rdkafkatest_"
+fi
+
+if [[ -z "$KATOPS" ]]; then
+ echo "Usage: $0 [-n] <zookeeper-address> <kafka-topics.sh> [<topic-name-regex>]"
+ echo ""
+ echo "Deletes all topics matching regex $RE"
+ echo ""
+ echo " -n - Just collect, dont actually delete anything"
+ exit 1
+fi
+
+set -u
+echo -n "Collecting list of matching topics... "
+TOPICS=$($KATOPS --zookeeper $ZK --list 2>/dev/null | grep "$RE") || true
+N_TOPICS=$(echo "$TOPICS" | wc -w)
+echo "$N_TOPICS topics found"
+
+
+for t in $TOPICS; do
+ if [[ $DO_DELETE == 1 ]]; then
+ echo -n "Deleting topic $t... "
+ ($KATOPS --zookeeper $ZK --delete --topic "$t" 2>/dev/null && echo "deleted") || echo "failed"
+ else
+ echo "Topic $t"
+ fi
+done
+
+echo "Done"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore
new file mode 100644
index 000000000..e58fd014d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/.gitignore
@@ -0,0 +1,11 @@
+*.key
+*.crt
+*.jks
+*.csr
+*.pem
+*.p12
+*.srl
+extfile
+!client.keystore.p12
+!client2.certificate.pem
+!client2.key
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile
new file mode 100644
index 000000000..d12bbda9f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/Makefile
@@ -0,0 +1,8 @@
+ssl_keys: clear_keys
+ @./create_keys.sh client client2
+
+clear_keys:
+ @rm -f *.key *.crt *.jks \
+ *.csr *.pem *.p12 *.srl extfile
+
+.PHONY: ssl_keys
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md
new file mode 100644
index 000000000..43204036c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/README.md
@@ -0,0 +1,13 @@
+# SSL keys generation for tests
+
+The Makefile in this directory generates a PKCS#12 keystore
+and corresponding PEM certificate and key for testing
+SSL keys and keystore usage in librdkafka.
+
+To update those files with a newer OpenSSL version, just run `make`.
+
+# Requirements
+
+* OpenSSL >= 1.1.1
+* Java keytool >= Java 11
+* GNU Make >= 4.2 \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12 b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12
new file mode 100644
index 000000000..e8c8347ee
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client.keystore.p12
Binary files differ
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem
new file mode 100644
index 000000000..34a1da408
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.certificate.pem
@@ -0,0 +1,109 @@
+Bag Attributes
+ friendlyName: client2
+ localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32
+Key Attributes: <No Attributes>
+-----BEGIN PRIVATE KEY-----
+MIIEuwIBADANBgkqhkiG9w0BAQEFAASCBKUwggShAgEAAoIBAQDMrI+QK7Q6L9TU
+cVjEbl4sMu3KhXgs71JNgQl8joFPVjb3PZF6YHegZo0FAOU1F6lysD3NNnI21HIz
+LbCe6BJRogNFKtcFvWS6uQok1HperDO/DVQkH9ARAcvlxE/I6dPbb1YCi7EMHrjM
+Dle+NXWV3nKCe7BcMkETkki5Bj5fNA5oa/pmS0gSS/HXnB8rxyFv4mB/R+oGC1wO
+WOvgn6ip5bKdjMEEnyqYsDCH8w3xYkKlZ6Ag5w1yxnr6D41J64Go2R62MuLrScVr
++4CM+XJl3Y08+emlCz5m5wuh6A31bp7MFY+f3Gs9AI5qiN3tyjZ//EzoIrfb68tQ
+td+UvT4fAgMBAAECggEALoLkWQHlgfeOqPxdDL57/hVQvl4YUjXMgTpamoiT0CCq
+ewLtxV6YsMW9NC7g53DKG/r7AGBoEhezH/g5E9NvHkfv8E7s8Cv68QfNy1LRwCPn
+2nm/7jmggczjtgInk2O3tj0V0ZxHDpcIra5wuBPT9cvIP+i1yi3NZhIvHoTRtbZp
+lWelovML6SGcbmYDZHWwL8C/quX2/Vp72dJa7ySatlJCe8lcdolazUAhe6W3FGf2
+DojupWddAbwcogQsjQ0WNgtIov5JDF1vHjLkw0uCvh24P+DYBA0JjHybLTR70Ypp
+POwCV5O96JntWfcXYivi4LQrSDFCIDyDwwrbkIkdoQKBgQDuNesfC7C0LJikB+I1
+UgrDJiu4lFVoXwbaWRRuZD58j0mDGeTY9gZzBJ7pJgv3qJbfk1iwpUU25R2Np946
+h63EqpSSoP/TnMBePUBjnu+C5iXxk2KPjNb9Xu8m4Q8tgYvYf5IJ7iLllY2uiT6B
+e+0EGAEPvP1HLbPP22IUMsG6jwKBgQDb9X6fHMeHtP6Du+qhqiMmLK6R2lB7cQ1j
+2FSDySekabucaFhDpK3n2klw2MfF2oZHMrxAfYFySV1kGMil4dvFox8mGBJHc/d5
+lNXGNOfQbVV8P1NRjaPwjyAAgAPZfZgFr+6s+pawMRGnGw5Y6p03sLnD5FWU9Wfa
+vM6RLE5LcQJ/FHiNvB1FEjbC51XGGs7yHdMp7rLQpCeGbz04hEQZGps1tg6DnCGI
+bFn5Tg/291GFpbED7ipFyHHoGERU1LLUPBJssi0jzwupfG/HGMiPzK/6ksgXsD5q
+O1vtMWol48M+QVy1MCVG2nP/uQASXw5HUBLABJo5KeTDjxlLVHEINQKBgAe54c64
+9hFAPEhoS1+OWFm47BDXeEg9ulitepp+cFQIGrzttVv65tjkA/xgwPOkL19E2vPw
+9KENDqi7biDVhCC3EBsIcWvtGN4+ahviM9pQXNZWaxjMPtvuSxN5a6kyDir0+Q8+
+ZhieQJ58Bs78vrT8EipdVNw8mn9GboMO6VkhAoGBAJ+NUvcO3nIVJOCEG3qnweHA
+zqa4JyxFonljwsUFKCIHoiKYlp0KW4wTJJIkTKvLYcRY6kMzP/H1Ja9GqdVnf8ou
+tJOe793M+HkYUMTxscYGoCXXtsWKN2ZOv8aVBA7RvpJS8gE6ApScUrjeM76h20CS
+xxqrrSc37NSjuiaTyOTG
+-----END PRIVATE KEY-----
+Bag Attributes
+ friendlyName: client2
+ localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32
+subject=C = , ST = , L = , O = , OU = , CN = client2
+
+issuer=CN = caroot
+
+-----BEGIN CERTIFICATE-----
+MIIDCzCCAfOgAwIBAgIUIRg5w7eGA6xivHxzAmzh2PLUJq8wDQYJKoZIhvcNAQEL
+BQAwETEPMA0GA1UEAwwGY2Fyb290MCAXDTIyMTAwNzE1MTI0NFoYDzIwNTAwMjIx
+MTUxMjQ0WjBJMQkwBwYDVQQGEwAxCTAHBgNVBAgTADEJMAcGA1UEBxMAMQkwBwYD
+VQQKEwAxCTAHBgNVBAsTADEQMA4GA1UEAxMHY2xpZW50MjCCASIwDQYJKoZIhvcN
+AQEBBQADggEPADCCAQoCggEBAMysj5ArtDov1NRxWMRuXiwy7cqFeCzvUk2BCXyO
+gU9WNvc9kXpgd6BmjQUA5TUXqXKwPc02cjbUcjMtsJ7oElGiA0Uq1wW9ZLq5CiTU
+el6sM78NVCQf0BEBy+XET8jp09tvVgKLsQweuMwOV741dZXecoJ7sFwyQROSSLkG
+Pl80Dmhr+mZLSBJL8decHyvHIW/iYH9H6gYLXA5Y6+CfqKnlsp2MwQSfKpiwMIfz
+DfFiQqVnoCDnDXLGevoPjUnrgajZHrYy4utJxWv7gIz5cmXdjTz56aULPmbnC6Ho
+DfVunswVj5/caz0AjmqI3e3KNn/8TOgit9vry1C135S9Ph8CAwEAAaMhMB8wHQYD
+VR0RBBYwFIIHY2xpZW50MoIJbG9jYWxob3N0MA0GCSqGSIb3DQEBCwUAA4IBAQBd
+d5Sl51/aLcCnc5vo2h2fyNQIVbZGbgEyWRbYdHv5a4X7JxUalipvRhXTpYLQ+0R5
+Fzgl5Mwo6dUpJjtzwXZUOAt59WhqVV5+TMe8eDHBl+lKM/YUgZ+kOlGMExEaygrh
+cG+/rVZLAgcC+HnHNaIo2guyn6RqFtBMzkRmjhH96AcygbsN5OFHY0NOzGV9WTDJ
++A9dlJIy2bEU/yYpXerdXp9lM8fKaPc0JDYwwESMS7ND70dcpGmrRa9pSTSDPUaK
+KSzzOyK+8E5mzcqEbUCrlpz0sklNYDNMIn48Qjkz52Kv8XHvcYS1gv0XvQZtIH3M
+x6X3/J+ivx6L72BOm+ar
+-----END CERTIFICATE-----
+Bag Attributes
+ friendlyName: CN=caroot
+subject=CN = caroot
+
+issuer=CN = caroot
+
+-----BEGIN CERTIFICATE-----
+MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL
+BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1
+MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj
+FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk
+daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA
+xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4
+B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m
+bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH
+18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N
+L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF
+UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn
+KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc
+MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK
+0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7
+MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e
+QHIFE8+PTQ==
+-----END CERTIFICATE-----
+Bag Attributes
+ friendlyName: caroot
+ 2.16.840.1.113894.746875.1.1: <Unsupported tag 6>
+subject=CN = caroot
+
+issuer=CN = caroot
+
+-----BEGIN CERTIFICATE-----
+MIIDAzCCAeugAwIBAgIUPj85Dz0tuzZERfolrR54arwFPSIwDQYJKoZIhvcNAQEL
+BQAwETEPMA0GA1UEAwwGY2Fyb290MB4XDTIyMTAwNzE1MTI0MVoXDTMyMTAwNDE1
+MTI0MVowETEPMA0GA1UEAwwGY2Fyb290MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8A
+MIIBCgKCAQEAxfb08Gd64ilCYePn821WJsnCC2/nEYxOHlBzT9tkx6edzpdsvIvj
+FO6Weeyb2f1vv6eJsmBaZUdV2CfOHNIhBvw5IemzUaSiCr8688jHUS6uHCxBYCXk
+daFDXKO+JhaPN/ys6wOC8SHYRRynIhp6QVNSBzoO/1WT/J3i58R8TErDi5txr+JA
+xJd3mnAW4lDiqRLSVQFq3W4jvba3Dy2zK1l4NcShzlYgfsAd9cCi6b+T2mcz9Vl4
+B1qvsOfOMi8AmVTbS77oaxLczBpLyFIrzI5OPNmMw3A7uObgws9QTyYxUfYqc/0m
+bO7bHPX0Iz+WPqrzTHZ+3k5QE/bfGIRnsQIDAQABo1MwUTAdBgNVHQ4EFgQUCgQH
+18kzzHsk3KbdDB4g+94NL70wHwYDVR0jBBgwFoAUCgQH18kzzHsk3KbdDB4g+94N
+L70wDwYDVR0TAQH/BAUwAwEB/zANBgkqhkiG9w0BAQsFAAOCAQEAhKlj3zPuYaMF
+UFROvAWeOXIdDIExbHd5qukYj5UStLhoVKe/1ZKMvdAICejMs51QSJ05d22KqeHn
+KaTrq3al61rvufkNhrQo2B+qwM5dEV8qGVZGI/oSaWkk5W33FrKHqSUvwdi/saOc
+MfQDUuyS7IznLMlR8g0ZcmIPO3cyHPXQhgk80SNJODqpkfgCgHAa1kDz9PmT7VMK
+0f/6U3XEkdRdsvWyWDXMSBFx1m/pu9n7fnL8+6QLczyhoX0NhPnOICC3oSYVVuN7
+MOtCLIhwxsv5BlDFnOeBFxq+VKqZDH+z6587Wl0KQyxsJmuJKZ1kYR3XO7j5jw1e
+QHIFE8+PTQ==
+-----END CERTIFICATE-----
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key
new file mode 100644
index 000000000..6b0b0f87d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/client2.key
@@ -0,0 +1,34 @@
+Bag Attributes
+ friendlyName: client2
+ localKeyID: 54 69 6D 65 20 31 36 36 35 31 35 35 35 36 34 38 38 32
+Key Attributes: <No Attributes>
+-----BEGIN ENCRYPTED PRIVATE KEY-----
+MIIFFDBOBgkqhkiG9w0BBQ0wQTApBgkqhkiG9w0BBQwwHAQILalIN2MbG7QCAggA
+MAwGCCqGSIb3DQIJBQAwFAYIKoZIhvcNAwcECD+gqk7gSkEFBIIEwETSFzC1yYTM
+/O6lA8BMkl5Wzt4e7Jw7WnfWSmOFTtpXZqOgxvN9dNPsMIpxvU7nF3Iwhqw0WXMF
+lpKqCy2FLM+XWqaQYV+2++s23lH0Eqfofc0IZoYk7FB92MAO1dUI7iDJeT0kwrmU
+mgAKAqa6e4REZgDEUXYVAOiAHqszs0JjXlsxlPSws2EZQyU8kEALggy+60Jozviq
+a9fUZ9JnbtCPkuSOipC8N+erNIEkruzbXRbookTQF+qAyTyXMciL0fTqdAJB/xfO
+h66TQvr1XZorqqVPYI+yXwRBF7oVfJyk0kVfhcpo6SoedNJ3onUlyktcF2RPj1xh
+612L4ytNp/TN8jvSs5EKHTuwS2+dnYp2jTS4rcbSRe53RylhFudAn9/aZad0/C72
+JXeiax3i0071sWbvKX3YsW/2QCaeMALhiqbzx+8PcgVV9BVfjO8qxJSNjaOwmVRy
+I/22pufTDkoNL/aQSiw1NAL22IPdD0uvLCHj27nBct4KancvgSdTxMK9lfwJZet1
+D0S9ChUa2tCY0pDH7F9XUfcS7VAij+VWtlGIyEw7rPOWx6fGT15fj/QnepuJ5xON
+qiAH7IhJesWWhG7xp7c3QsdeGNowkMtoLBlz5fEKDRaauPlbLI5IoXy+ZyOO1tIo
+kH5wHDE1bn5cWn7qRy5X5HtPga1OjF11R+XquJ88+6gqmxPlsrK45/FiGdP4iLN/
+dp10cnFgAVA2kEaTXCH1LctGlR+3XQgfrwWDfvk7uMtvybqFcEEBv8vBih1UsF6v
+RFfoUYq8Zle2x9kX/cfad52FxtDWnhZAgNtT53tWRUb/oAt7fXQxJMlRXKjSV05q
+S/uwevnj49eVFdyiroPofipB8LAK4I+gzZ8AYJob5GoRTlPonC1pj/n3vKRsDMOA
+Lwy3gXoyQ+/MBUPcDG/ewdusrJncnkAlFNt0w97CmOJU0czuJJw5rRozfvZF1Hs9
+2BVcwVPmZH9Nr3+6Yb+GTCRvsM7DBuLZIEN4WzjoLYAcrjZ2XYLsC6XmnDzIp1HF
+nZwrXUROp4MhKuy+SIdFqZLoU/+AIB28WI3euIDDuERSZLff11hphRG5S9wZ8EJH
+Jyl2WgP4r8wQtHs71iT06KDFuBcNqGYPwCjnvE86WFXE3wOJ91+l9u8MYvOSVOHq
+4iUIpRFD4hlCWOIc1V9QYKf2s8Vkeoop/pUutK5NpLtMFgJpFPNYxyfBL13fo9lM
+0iVuoG3W+iDjqZyUPoDxG4rI6Q9WvkswLxVwpMgzDUbUl2aKHcm4Z215dBMm40zh
+ft+QzZEnMVzln2eTCcH91IXcsyPPACmKwraAik5ULEn4m++KtdwDZ6R1zzgRJrn9
+FI6L7C0nfKKemBdzGMCzQuciuPLIjfzXHdKr5bb0C1WS88IB0lYIs+pzpvms2P0F
+AQ2nDgFKA9xlzX2f1O/YQNKA1ctc8RH5tpZUUVfheIqd0U4udp9Rqecd+/r23ENU
+7kjeuxXfUbH83P0hrsQQFkkOeRWWz8+UYvqIEwWaSObdZCvTdIjRpNmmamWsAmsJ
+D5Q2AMMMmNwIi5fUKYJgwTfsgY0XIekk6wmugKs3gCj1RKX930b9fniiol/Gv2VS
+fJRrqds7F0s=
+-----END ENCRYPTED PRIVATE KEY-----
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh
new file mode 100755
index 000000000..36e92bd30
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fixtures/ssl/create_keys.sh
@@ -0,0 +1,93 @@
+#!/bin/sh
+set -e
+CA_PASSWORD="${CA_PASSWORD:-use_strong_password_ca}"
+KEYSTORE_PASSWORD="${KEYSTORE_PASSWORD:-use_strong_password_keystore}"
+TRUSTSTORE_PASSWORD="${TRUSTSTORE_PASSWORD:-use_strong_password_truststore}"
+OUTPUT_FOLDER=${OUTPUT_FOLDER:-$( dirname "$0" )}
+CNS=${@:-client}
+
+cd ${OUTPUT_FOLDER}
+CA_ROOT_KEY=caroot.key
+CA_ROOT_CRT=caroot.crt
+
+echo "# Generate CA"
+openssl req -new -x509 -keyout $CA_ROOT_KEY \
+ -out $CA_ROOT_CRT -days 3650 -subj \
+ '/CN=caroot/OU=/O=/L=/ST=/C=' -passin "pass:${CA_PASSWORD}" \
+ -passout "pass:${CA_PASSWORD}"
+
+for CN in $CNS; do
+ KEYSTORE=$CN.keystore.p12
+ TRUSTSTORE=$CN.truststore.p12
+ SIGNED_CRT=$CN-ca-signed.crt
+ CERTIFICATE=$CN.certificate.pem
+ KEY=$CN.key
+ # Get specific password for this CN
+ CN_KEYSTORE_PASSWORD="$(eval echo \$${CN}_KEYSTORE_PASSWORD)"
+ if [ -z "$CN_KEYSTORE_PASSWORD" ]; then
+ CN_KEYSTORE_PASSWORD=${KEYSTORE_PASSWORD}_$CN
+ fi
+
+ echo ${CN_KEYSTORE_PASSWORD}
+
+ echo "# $CN: Generate Keystore"
+ keytool -genkey -noprompt \
+ -alias $CN \
+ -dname "CN=$CN,OU=,O=,L=,S=,C=" \
+ -ext "SAN=dns:$CN,dns:localhost" \
+ -keystore $KEYSTORE \
+ -keyalg RSA \
+ -storepass "${CN_KEYSTORE_PASSWORD}" \
+ -storetype pkcs12
+
+ echo "# $CN: Generate Truststore"
+ keytool -noprompt -keystore \
+ $TRUSTSTORE -alias caroot -import \
+ -file $CA_ROOT_CRT -storepass "${TRUSTSTORE_PASSWORD}"
+
+ echo "# $CN: Generate CSR"
+ keytool -keystore $KEYSTORE -alias $CN \
+ -certreq -file $CN.csr -storepass "${CN_KEYSTORE_PASSWORD}" \
+ -keypass "${CN_KEYSTORE_PASSWORD}" \
+ -ext "SAN=dns:$CN,dns:localhost"
+
+ echo "# $CN: Generate extfile"
+ cat << EOF > extfile
+[req]
+distinguished_name = req_distinguished_name
+x509_extensions = v3_req
+prompt = no
+[req_distinguished_name]
+CN = $CN
+[v3_req]
+subjectAltName = @alt_names
+[alt_names]
+DNS.1 = $CN
+DNS.2 = localhost
+EOF
+
+ echo "# $CN: Sign the certificate with the CA"
+ openssl x509 -req -CA $CA_ROOT_CRT -CAkey $CA_ROOT_KEY \
+ -in $CN.csr \
+ -out $CN-ca-signed.crt -days 9999 \
+ -CAcreateserial -passin "pass:${CA_PASSWORD}" \
+ -extensions v3_req -extfile extfile
+
+ echo "# $CN: Import root certificate"
+ keytool -noprompt -keystore $KEYSTORE \
+ -alias caroot -import -file $CA_ROOT_CRT -storepass "${CN_KEYSTORE_PASSWORD}"
+
+ echo "# $CN: Import signed certificate"
+ keytool -noprompt -keystore $KEYSTORE -alias $CN \
+ -import -file $SIGNED_CRT -storepass "${CN_KEYSTORE_PASSWORD}" \
+ -ext "SAN=dns:$CN,dns:localhost"
+
+ echo "# $CN: Export PEM certificate"
+ openssl pkcs12 -in "$KEYSTORE" -out "$CERTIFICATE" \
+ -nodes -passin "pass:${CN_KEYSTORE_PASSWORD}"
+
+ echo "# $CN: Export PEM key"
+ openssl pkcs12 -in "$KEYSTORE" -out "$KEY" \
+ -nocerts -passin "pass:${CN_KEYSTORE_PASSWORD}" \
+ -passout "pass:${CN_KEYSTORE_PASSWORD}"
+done
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore
new file mode 100644
index 000000000..ee48ae07b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/.gitignore
@@ -0,0 +1 @@
+fuzz_regex
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile
new file mode 100644
index 000000000..dc3e78bf3
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/Makefile
@@ -0,0 +1,12 @@
+PROGRAMS?=fuzz_regex
+
+all: $(PROGRAMS)
+
+
+fuzz_%:
+ $(CC) -fsanitize=address -D WITH_MAIN -g -Wall \
+ -I../../src $@.c -o $@ ../../src/librdkafka.a
+
+
+clean:
+ rm -f $(PROGRAMS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md
new file mode 100644
index 000000000..b5a0333b1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/README.md
@@ -0,0 +1,31 @@
+# Fuzzing
+librdkafka supports fuzzing by way of Libfuzzer and OSS-Fuzz. This is ongoing work.
+
+## Launching the fuzzers
+The easiest way to launch the fuzzers are to go through OSS-Fuzz. The only prerequisite to this is having Docker installed.
+
+With Docker installed, the following commands will build and run the fuzzers in this directory:
+
+```
+git clone https://github.com/google/oss-fuzz
+cd oss-fuzz
+python3 infra/helper.py build_image librdkafka
+python3 infra/helper.py build_fuzzers librdkafka
+python3 infra/helper.py run_fuzzer librdkafka FUZZ_NAME
+```
+where FUZZ_NAME references the name of the fuzzer. Currently the only fuzzer we have is fuzz_regex
+
+Notice that the OSS-Fuzz `helper.py` script above will create a Docker image in which the code of librdkafka will be built. As such, depending on how you installed Docker, you may be asked to have root access (i.e. run with `sudo`).
+
+
+## Running a single reproducer
+
+Download the reproducer file from the OSS-Fuzz issue tracker, then build
+the failed test case by running `make` in this directory, and then
+run the test case and pass it the reproducer files, e.g:
+
+ $ make
+ $ ./fuzz_regex ~/Downloads/clusterfuzz-testcase-...
+
+**Note:** Some test cases, such as fuzz_regex, requires specific librdkafka
+ build configuration. See the test case source for details.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c
new file mode 100644
index 000000000..2facc19f0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/fuzz_regex.c
@@ -0,0 +1,74 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * Fuzzer test case for the builtin regexp engine in src/regexp.c
+ *
+ * librdkafka must be built with --disable-regex-ext
+ */
+
+#include "rd.h"
+
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+
+#include "regexp.h"
+
+int LLVMFuzzerTestOneInput(uint8_t *data, size_t size) {
+ /* wrap random data in a null-terminated string */
+ char *null_terminated = malloc(size + 1);
+ memcpy(null_terminated, data, size);
+ null_terminated[size] = '\0';
+
+ const char *error;
+ Reprog *p = re_regcomp(null_terminated, 0, &error);
+ if (p != NULL) {
+ re_regfree(p);
+ }
+
+ /* cleanup */
+ free(null_terminated);
+
+ return 0;
+}
+
+#if WITH_MAIN
+#include "helpers.h"
+
+int main(int argc, char **argv) {
+ int i;
+ for (i = 1; i < argc; i++) {
+ size_t size;
+ uint8_t *buf = read_file(argv[i], &size);
+ LLVMFuzzerTestOneInput(buf, size);
+ free(buf);
+ }
+}
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h
new file mode 100644
index 000000000..cfab03777
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/fuzzers/helpers.h
@@ -0,0 +1,90 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _HELPERS_H_
+#define _HELPERS_H_
+
+#include <stdio.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+
+
+/**
+ * Fuzz program helpers
+ */
+
+static __attribute__((unused)) uint8_t *read_file(const char *path,
+ size_t *sizep) {
+ int fd;
+ uint8_t *buf;
+ struct stat st;
+
+ if ((fd = open(path, O_RDONLY)) == -1) {
+ fprintf(stderr, "Failed to open %s: %s\n", path,
+ strerror(errno));
+ exit(2);
+ return NULL; /* NOTREACHED */
+ }
+
+ if (fstat(fd, &st) == -1) {
+ fprintf(stderr, "Failed to stat %s: %s\n", path,
+ strerror(errno));
+ close(fd);
+ exit(2);
+ return NULL; /* NOTREACHED */
+ }
+
+
+ buf = malloc(st.st_size + 1);
+ if (!buf) {
+ fprintf(stderr, "Failed to malloc %d bytes for %s\n",
+ (int)st.st_size, path);
+ close(fd);
+ exit(2);
+ return NULL; /* NOTREACHED */
+ }
+
+ buf[st.st_size] = '\0';
+
+ *sizep = read(fd, buf, st.st_size);
+ if (*sizep != st.st_size) {
+ fprintf(stderr, "Could only read %d/%d bytes from %s\n",
+ (int)*sizep, (int)st.st_size, path);
+ free(buf);
+ close(fd);
+ exit(2);
+ return NULL; /* NOTREACHED */
+ }
+
+ return buf;
+}
+
+
+#endif /* _HELPERS_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh
new file mode 100755
index 000000000..0e04c149d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/gen-ssl-certs.sh
@@ -0,0 +1,165 @@
+#!/bin/bash
+#
+#
+# This scripts generates:
+# - root CA certificate
+# - server certificate and keystore
+# - client keys
+#
+# https://cwiki.apache.org/confluence/display/KAFKA/Deploying+SSL+for+Kafka
+#
+
+
+if [[ "$1" == "-k" ]]; then
+ USE_KEYTOOL=1
+ shift
+else
+ USE_KEYTOOL=0
+fi
+
+OP="$1"
+CA_CERT="$2"
+PFX="$3"
+HOST="$4"
+
+C=NN
+ST=NN
+L=NN
+O=NN
+OU=NN
+CN="$HOST"
+
+
+# Password
+PASS="abcdefgh"
+
+# Cert validity, in days
+VALIDITY=10000
+
+set -e
+
+export LC_ALL=C
+
+if [[ $OP == "ca" && ! -z "$CA_CERT" && ! -z "$3" ]]; then
+ CN="$3"
+ openssl req -new -x509 -keyout ${CA_CERT}.key -out $CA_CERT -days $VALIDITY -passin "pass:$PASS" -passout "pass:$PASS" <<EOF
+${C}
+${ST}
+${L}
+${O}
+${OU}
+${CN}
+$USER@${CN}
+.
+.
+EOF
+
+
+
+elif [[ $OP == "server" && ! -z "$CA_CERT" && ! -z "$PFX" && ! -z "$CN" ]]; then
+
+ #Step 1
+ echo "############ Generating key"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias localhost -validity $VALIDITY -genkey -keyalg RSA <<EOF
+$CN
+$OU
+$O
+$L
+$ST
+$C
+yes
+yes
+EOF
+
+ #Step 2
+ echo "############ Adding CA"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.truststore.jks -alias CARoot -import -file $CA_CERT <<EOF
+yes
+EOF
+
+ #Step 3
+ echo "############ Export certificate"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias localhost -certreq -file ${PFX}cert-file
+
+ echo "############ Sign certificate"
+ openssl x509 -req -CA $CA_CERT -CAkey ${CA_CERT}.key -in ${PFX}cert-file -out ${PFX}cert-signed -days $VALIDITY -CAcreateserial -passin "pass:$PASS"
+
+
+ echo "############ Import CA"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias CARoot -import -file $CA_CERT <<EOF
+yes
+EOF
+
+ echo "############ Import signed CA"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}server.keystore.jks -alias localhost -import -file ${PFX}cert-signed
+
+
+elif [[ $OP == "client" && ! -z "$CA_CERT" && ! -z "$PFX" && ! -z "$CN" ]]; then
+
+ if [[ $USE_KEYTOOL == 1 ]]; then
+ echo "############ Creating client truststore"
+
+ [[ -f ${PFX}client.truststore.jks ]] || keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.truststore.jks -alias CARoot -import -file $CA_CERT <<EOF
+yes
+EOF
+
+ echo "############ Generating key"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.keystore.jks -alias localhost -validity $VALIDITY -genkey -keyalg RSA <<EOF
+$CN
+$OU
+$O
+$L
+$ST
+$C
+yes
+yes
+EOF
+ echo "########### Export certificate"
+ keytool -storepass "$PASS" -keystore ${PFX}client.keystore.jks -alias localhost -certreq -file ${PFX}cert-file
+
+ echo "########### Sign certificate"
+ openssl x509 -req -CA ${CA_CERT} -CAkey ${CA_CERT}.key -in ${PFX}cert-file -out ${PFX}cert-signed -days $VALIDITY -CAcreateserial -passin pass:$PASS
+
+ echo "########### Import CA"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.keystore.jks -alias CARoot -import -file ${CA_CERT} <<EOF
+yes
+EOF
+
+ echo "########### Import signed CA"
+ keytool -storepass "$PASS" -keypass "$PASS" -keystore ${PFX}client.keystore.jks -alias localhost -import -file ${PFX}cert-signed
+
+ else
+ # Standard OpenSSL keys
+ echo "############ Generating key"
+ openssl genrsa -des3 -passout "pass:$PASS" -out ${PFX}client.key 2048
+
+ echo "############ Generating request"
+ openssl req -passin "pass:$PASS" -passout "pass:$PASS" -key ${PFX}client.key -new -out ${PFX}client.req \
+ <<EOF
+$C
+$ST
+$L
+$O
+$OU
+$CN
+.
+$PASS
+.
+EOF
+
+ echo "########### Signing key"
+ openssl x509 -req -passin "pass:$PASS" -in ${PFX}client.req -CA $CA_CERT -CAkey ${CA_CERT}.key -CAcreateserial -out ${PFX}client.pem -days $VALIDITY
+
+ fi
+
+
+
+
+else
+ echo "Usage: $0 ca <ca-cert-file> <CN>"
+ echo " $0 [-k] server|client <ca-cert-file> <file_prefix> <hostname>"
+ echo ""
+ echo " -k = Use keytool/Java Keystore, else standard SSL keys"
+ exit 1
+fi
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py b/fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py
new file mode 100755
index 000000000..bcd4931f9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/interactive_broker_version.py
@@ -0,0 +1,363 @@
+#!/usr/bin/env python3
+#
+#
+# Run librdkafka regression tests on different supported broker versions.
+#
+# Requires:
+# trivup python module
+# gradle in your PATH
+
+from trivup.trivup import Cluster
+from trivup.apps.ZookeeperApp import ZookeeperApp
+from trivup.apps.KafkaBrokerApp import KafkaBrokerApp
+from trivup.apps.KerberosKdcApp import KerberosKdcApp
+from trivup.apps.SslApp import SslApp
+from trivup.apps.OauthbearerOIDCApp import OauthbearerOIDCApp
+
+from cluster_testing import read_scenario_conf
+
+import subprocess
+import tempfile
+import os
+import sys
+import argparse
+import json
+
+
+def version_as_number(version):
+ if version == 'trunk':
+ return sys.maxsize
+ tokens = version.split('.')
+ return float('%s.%s' % (tokens[0], tokens[1]))
+
+
+def test_version(version, cmd=None, deploy=True, conf={}, debug=False,
+ exec_cnt=1,
+ root_path='tmp', broker_cnt=3, scenario='default'):
+ """
+ @brief Create, deploy and start a Kafka cluster using Kafka \\p version
+ Then run librdkafka's regression tests.
+ """
+
+ print('## Test version %s' % version)
+
+ cluster = Cluster('LibrdkafkaTestCluster', root_path, debug=debug)
+
+ if conf.get('sasl_oauthbearer_method') == 'OIDC':
+ oidc = OauthbearerOIDCApp(cluster)
+
+ # Enable SSL if desired
+ if 'SSL' in conf.get('security.protocol', ''):
+ cluster.ssl = SslApp(cluster, conf)
+
+ # One ZK (from Kafka repo)
+ zk1 = ZookeeperApp(cluster)
+ zk_address = zk1.get('address')
+
+ # Start Kerberos KDC if GSSAPI is configured
+ if 'GSSAPI' in args.conf.get('sasl_mechanisms', []):
+ KerberosKdcApp(cluster, 'MYREALM').start()
+
+ defconf = {'version': version}
+ defconf.update(conf)
+
+ print('conf: ', defconf)
+
+ brokers = []
+ for n in range(0, broker_cnt):
+ # Configure rack & replica selector if broker supports
+ # fetch-from-follower
+ if version_as_number(version) >= 2.4:
+ defconf.update(
+ {
+ 'conf': [
+ 'broker.rack=RACK${appid}',
+ 'replica.selector.class=org.apache.kafka.common.replica.RackAwareReplicaSelector']}) # noqa: E501
+ brokers.append(KafkaBrokerApp(cluster, defconf))
+
+ cmd_env = os.environ.copy()
+
+ # Generate test config file
+ security_protocol = 'PLAINTEXT'
+ fd, test_conf_file = tempfile.mkstemp(prefix='test_conf', text=True)
+ os.write(fd, ('test.sql.command=sqlite3 rdktests\n').encode('ascii'))
+ os.write(fd, 'broker.address.family=v4\n'.encode('ascii'))
+ if version.startswith('0.9') or version.startswith('0.8'):
+ os.write(fd, 'api.version.request=false\n'.encode('ascii'))
+ os.write(
+ fd, ('broker.version.fallback=%s\n' %
+ version).encode('ascii'))
+ # SASL (only one mechanism supported)
+ mech = defconf.get('sasl_mechanisms', '').split(',')[0]
+ if mech != '':
+ os.write(fd, ('sasl.mechanisms=%s\n' % mech).encode('ascii'))
+ if mech == 'PLAIN' or mech.find('SCRAM') != -1:
+ print(
+ '# Writing SASL %s client config to %s' %
+ (mech, test_conf_file))
+ security_protocol = 'SASL_PLAINTEXT'
+ # Use first user as SASL user/pass
+ for up in defconf.get('sasl_users', '').split(','):
+ u, p = up.split('=')
+ os.write(fd, ('sasl.username=%s\n' % u).encode('ascii'))
+ os.write(fd, ('sasl.password=%s\n' % p).encode('ascii'))
+ break
+ elif mech == 'OAUTHBEARER':
+ security_protocol = 'SASL_PLAINTEXT'
+ if defconf.get('sasl_oauthbearer_method') == 'OIDC':
+ os.write(
+ fd, ('sasl.oauthbearer.method=OIDC\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.client.id=123\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.client.secret=abc\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.extensions=\
+ ExtensionworkloadIdentity=develC348S,\
+ Extensioncluster=lkc123\n'.encode(
+ 'ascii')))
+ os.write(
+ fd, ('sasl.oauthbearer.scope=test\n'.encode(
+ 'ascii')))
+ cmd_env['VALID_OIDC_URL'] = oidc.conf.get('valid_url')
+ cmd_env['INVALID_OIDC_URL'] = oidc.conf.get('badformat_url')
+ cmd_env['EXPIRED_TOKEN_OIDC_URL'] = oidc.conf.get(
+ 'expired_url')
+
+ else:
+ os.write(
+ fd, ('enable.sasl.oauthbearer.unsecure.jwt=true\n'.encode(
+ 'ascii')))
+ os.write(fd, ('sasl.oauthbearer.config=%s\n' %
+ 'scope=requiredScope principal=admin').encode(
+ 'ascii'))
+ else:
+ print(
+ '# FIXME: SASL %s client config not written to %s' %
+ (mech, test_conf_file))
+
+ # SSL support
+ ssl = getattr(cluster, 'ssl', None)
+ if ssl is not None:
+ if 'SASL' in security_protocol:
+ security_protocol = 'SASL_SSL'
+ else:
+ security_protocol = 'SSL'
+
+ key = ssl.create_cert('librdkafka')
+
+ os.write(fd, ('ssl.ca.location=%s\n' % ssl.ca['pem']).encode('ascii'))
+ os.write(fd, ('ssl.certificate.location=%s\n' %
+ key['pub']['pem']).encode('ascii'))
+ os.write(
+ fd, ('ssl.key.location=%s\n' %
+ key['priv']['pem']).encode('ascii'))
+ os.write(
+ fd, ('ssl.key.password=%s\n' %
+ key['password']).encode('ascii'))
+
+ for k, v in ssl.ca.items():
+ cmd_env['SSL_ca_{}'.format(k)] = v
+
+ # Set envs for all generated keys so tests can find them.
+ for k, v in key.items():
+ if isinstance(v, dict):
+ for k2, v2 in v.items():
+ # E.g. "SSL_priv_der=path/to/librdkafka-priv.der"
+ cmd_env['SSL_{}_{}'.format(k, k2)] = v2
+ else:
+ cmd_env['SSL_{}'.format(k)] = v
+
+ # Define bootstrap brokers based on selected security protocol
+ print('# Using client security.protocol=%s' % security_protocol)
+ all_listeners = (
+ ','.join(
+ cluster.get_all(
+ 'listeners',
+ '',
+ KafkaBrokerApp))).split(',')
+ bootstrap_servers = ','.join(
+ [x for x in all_listeners if x.startswith(security_protocol)])
+ os.write(fd, ('bootstrap.servers=%s\n' %
+ bootstrap_servers).encode('ascii'))
+ os.write(fd, ('security.protocol=%s\n' %
+ security_protocol).encode('ascii'))
+ os.close(fd)
+
+ if deploy:
+ print('# Deploying cluster')
+ cluster.deploy()
+ else:
+ print('# Not deploying')
+
+ print('# Starting cluster, instance path %s' % cluster.instance_path())
+ cluster.start()
+
+ print('# Waiting for brokers to come up')
+
+ if not cluster.wait_operational(30):
+ cluster.stop(force=True)
+ raise Exception('Cluster %s did not go operational, see logs in %s/%s' % # noqa: E501
+ (cluster.name, cluster.root_path, cluster.instance))
+
+ print('# Connect to cluster with bootstrap.servers %s' % bootstrap_servers)
+
+ cmd_env['KAFKA_PATH'] = brokers[0].conf.get('destdir')
+ cmd_env['RDKAFKA_TEST_CONF'] = test_conf_file
+ cmd_env['ZK_ADDRESS'] = zk_address
+ cmd_env['BROKERS'] = bootstrap_servers
+ cmd_env['TEST_KAFKA_VERSION'] = version
+ cmd_env['TRIVUP_ROOT'] = cluster.instance_path()
+ cmd_env['TEST_SCENARIO'] = scenario
+
+ # Provide a HTTPS REST endpoint for the HTTP client tests.
+ cmd_env['RD_UT_HTTP_URL'] = 'https://jsonplaceholder.typicode.com/users'
+
+ # Per broker env vars
+ for b in [x for x in cluster.apps if isinstance(x, KafkaBrokerApp)]:
+ cmd_env['BROKER_ADDRESS_%d' % b.appid] = \
+ ','.join([x for x in b.conf['listeners'].split(
+ ',') if x.startswith(security_protocol)])
+ # Add each broker pid as an env so they can be killed indivdidually.
+ cmd_env['BROKER_PID_%d' % b.appid] = str(b.proc.pid)
+ # JMX port, if available
+ jmx_port = b.conf.get('jmx_port', None)
+ if jmx_port is not None:
+ cmd_env['BROKER_JMX_PORT_%d' % b.appid] = str(jmx_port)
+
+ if not cmd:
+ cmd_env['PS1'] = '[TRIVUP:%s@%s] \\u@\\h:\\w$ ' % (
+ cluster.name, version)
+ cmd = 'bash --rcfile <(cat ~/.bashrc)'
+
+ ret = True
+
+ for i in range(0, exec_cnt):
+ retcode = subprocess.call(
+ cmd,
+ env=cmd_env,
+ shell=True,
+ executable='/bin/bash')
+ if retcode != 0:
+ print('# Command failed with returncode %d: %s' % (retcode, cmd))
+ ret = False
+
+ try:
+ os.remove(test_conf_file)
+ except BaseException:
+ pass
+
+ cluster.stop(force=True)
+
+ cluster.cleanup(keeptypes=['log'])
+ return ret
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser(
+ description='Start a Kafka cluster and provide an interactive shell')
+
+ parser.add_argument('versions', type=str, default=None, nargs='+',
+ help='Kafka version(s) to deploy')
+ parser.add_argument('--no-deploy', action='store_false', dest='deploy',
+ default=True,
+ help='Dont deploy applications, '
+ 'assume already deployed.')
+ parser.add_argument('--conf', type=str, dest='conf', default=None,
+ help='JSON config object (not file)')
+ parser.add_argument('--scenario', type=str, dest='scenario',
+ default='default',
+ help='Test scenario (see scenarios/ directory)')
+ parser.add_argument('-c', type=str, dest='cmd', default=None,
+ help='Command to execute instead of shell')
+ parser.add_argument('-n', type=int, dest='exec_cnt', default=1,
+ help='Number of times to execute -c ..')
+ parser.add_argument('--debug', action='store_true', dest='debug',
+ default=False,
+ help='Enable trivup debugging')
+ parser.add_argument(
+ '--root',
+ type=str,
+ default=os.environ.get(
+ 'TRIVUP_ROOT',
+ 'tmp'),
+ help='Root working directory')
+ parser.add_argument(
+ '--port',
+ default=None,
+ help='Base TCP port to start allocating from')
+ parser.add_argument(
+ '--kafka-src',
+ dest='kafka_path',
+ type=str,
+ default=None,
+ help='Path to Kafka git repo checkout (used for version=trunk)')
+ parser.add_argument(
+ '--brokers',
+ dest='broker_cnt',
+ type=int,
+ default=3,
+ help='Number of Kafka brokers')
+ parser.add_argument('--ssl', dest='ssl', action='store_true',
+ default=False,
+ help='Enable SSL endpoints')
+ parser.add_argument(
+ '--sasl',
+ dest='sasl',
+ type=str,
+ default=None,
+ help='SASL mechanism (PLAIN, SCRAM-SHA-nnn, GSSAPI, OAUTHBEARER)')
+ parser.add_argument(
+ '--oauthbearer-method',
+ dest='sasl_oauthbearer_method',
+ type=str,
+ default=None,
+ help='OAUTHBEARER/OIDC method (DEFAULT, OIDC), \
+ must config SASL mechanism to OAUTHBEARER')
+
+ args = parser.parse_args()
+ if args.conf is not None:
+ args.conf = json.loads(args.conf)
+ else:
+ args.conf = {}
+
+ args.conf.update(read_scenario_conf(args.scenario))
+
+ if args.port is not None:
+ args.conf['port_base'] = int(args.port)
+ if args.kafka_path is not None:
+ args.conf['kafka_path'] = args.kafka_path
+ if args.ssl:
+ args.conf['security.protocol'] = 'SSL'
+ if args.sasl:
+ if (args.sasl == 'PLAIN' or args.sasl.find('SCRAM')
+ != -1) and 'sasl_users' not in args.conf:
+ args.conf['sasl_users'] = 'testuser=testpass'
+ args.conf['sasl_mechanisms'] = args.sasl
+ retcode = 0
+ if args.sasl_oauthbearer_method:
+ if args.sasl_oauthbearer_method == "OIDC" and \
+ args.conf['sasl_mechanisms'] != 'OAUTHBEARER':
+ print('If config `--oauthbearer-method=OIDC`, '
+ '`--sasl` must be set to `OAUTHBEARER`')
+ retcode = 3
+ sys.exit(retcode)
+ args.conf['sasl_oauthbearer_method'] = \
+ args.sasl_oauthbearer_method
+
+ args.conf.get('conf', list()).append("log.retention.bytes=1000000000")
+
+ for version in args.versions:
+ r = test_version(version, cmd=args.cmd, deploy=args.deploy,
+ conf=args.conf, debug=args.debug,
+ exec_cnt=args.exec_cnt,
+ root_path=args.root, broker_cnt=args.broker_cnt,
+ scenario=args.scenario)
+ if not r:
+ retcode = 2
+
+ sys.exit(retcode)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore
new file mode 100644
index 000000000..6fd0ef029
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/.gitignore
@@ -0,0 +1 @@
+*.pc
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt
new file mode 100644
index 000000000..c606bc426
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/CMakeLists.txt
@@ -0,0 +1,16 @@
+set(
+ sources
+ interceptor_test.c
+)
+
+
+add_library(interceptor_test SHARED ${sources})
+
+target_include_directories(interceptor_test PUBLIC ${PROJECT_SOURCE_DIR}/src)
+
+target_link_libraries(interceptor_test PUBLIC rdkafka)
+
+# Remove "lib" prefix
+set_target_properties(interceptor_test PROPERTIES PREFIX "")
+set_target_properties(interceptor_test PROPERTIES
+ LIBRARY_OUTPUT_DIRECTORY ${tests_OUTPUT_DIRECTORY}/interceptor_test/)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile
new file mode 100644
index 000000000..125e36032
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/Makefile
@@ -0,0 +1,22 @@
+PKGNAME= interceptor_test
+LIBNAME= interceptor_test
+LIBVER= 1
+
+-include ../../Makefile.config
+
+SRCS= interceptor_test.c
+
+OBJS= $(SRCS:.c=.o)
+
+# For rdkafka.h
+CPPFLAGS+=-I../../src
+LDFLAGS+=-L../../src
+LIBS+=-lrdkafka
+
+all: lib
+
+include ../../mklove/Makefile.base
+
+clean: lib-clean
+
+-include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c
new file mode 100644
index 000000000..ee8a63ba9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.c
@@ -0,0 +1,314 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @brief Interceptor plugin test library
+ *
+ * Interceptors can be implemented in the app itself and use
+ * the direct API to set the interceptors methods, or be implemented
+ * as an external plugin library that uses the direct APIs.
+ *
+ * This file implements the latter, an interceptor plugin library.
+ */
+
+#define _CRT_SECURE_NO_WARNINGS /* Silence MSVC nonsense */
+
+#include "../test.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <assert.h>
+
+/* typical include path outside tests is <librdkafka/rdkafka.h> */
+#include "rdkafka.h"
+
+#include "interceptor_test.h"
+
+#ifdef _WIN32
+#define DLL_EXPORT __declspec(dllexport)
+#else
+#define DLL_EXPORT
+#endif
+
+/**
+ * @brief Interceptor instance.
+ *
+ * An interceptor instance is created for each intercepted configuration
+ * object (triggered through conf_init() which is the plugin loader,
+ * or by conf_dup() which is a copying of a conf previously seen by conf_init())
+ */
+struct ici {
+ rd_kafka_conf_t *conf; /**< Interceptor config */
+ char *config1; /**< Interceptor-specific config */
+ char *config2;
+
+ int on_new_cnt;
+ int on_conf_destroy_cnt;
+};
+
+static char *my_interceptor_plug_opaque = "my_interceptor_plug_opaque";
+
+
+
+/* Producer methods */
+rd_kafka_resp_err_t
+on_send(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ printf("on_send: %p\n", ici);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+rd_kafka_resp_err_t on_acknowledgement(rd_kafka_t *rk,
+ rd_kafka_message_t *rkmessage,
+ void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ printf("on_acknowledgement: %p: err %d, partition %" PRId32 "\n", ici,
+ rkmessage->err, rkmessage->partition);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/* Consumer methods */
+rd_kafka_resp_err_t
+on_consume(rd_kafka_t *rk, rd_kafka_message_t *rkmessage, void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ printf("on_consume: %p: partition %" PRId32 " @ %" PRId64 "\n", ici,
+ rkmessage->partition, rkmessage->offset);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+rd_kafka_resp_err_t on_commit(rd_kafka_t *rk,
+ const rd_kafka_topic_partition_list_t *offsets,
+ rd_kafka_resp_err_t err,
+ void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ printf("on_commit: %p: err %d\n", ici, err);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static void ici_destroy(struct ici *ici) {
+ if (ici->conf)
+ rd_kafka_conf_destroy(ici->conf);
+ if (ici->config1)
+ free(ici->config1);
+ if (ici->config2)
+ free(ici->config2);
+ free(ici);
+}
+
+rd_kafka_resp_err_t on_destroy(rd_kafka_t *rk, void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ printf("on_destroy: %p\n", ici);
+ /* the ici is freed from on_conf_destroy() */
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Called from rd_kafka_new(). We use it to set up interceptors.
+ */
+static rd_kafka_resp_err_t on_new(rd_kafka_t *rk,
+ const rd_kafka_conf_t *conf,
+ void *ic_opaque,
+ char *errstr,
+ size_t errstr_size) {
+ struct ici *ici = ic_opaque;
+
+ ictest.on_new.cnt++;
+ ici->on_new_cnt++;
+
+ TEST_SAY("on_new(rk %p, conf %p, ici->conf %p): %p: #%d\n", rk, conf,
+ ici->conf, ici, ictest.on_new.cnt);
+
+ ICTEST_CNT_CHECK(on_new);
+ TEST_ASSERT(ici->on_new_cnt == 1);
+
+ TEST_ASSERT(!ictest.session_timeout_ms);
+ TEST_ASSERT(!ictest.socket_timeout_ms);
+ /* Extract some well known config properties from the interceptor's
+ * configuration. */
+ ictest.session_timeout_ms =
+ rd_strdup(test_conf_get(ici->conf, "session.timeout.ms"));
+ ictest.socket_timeout_ms =
+ rd_strdup(test_conf_get(ici->conf, "socket.timeout.ms"));
+ ictest.config1 = rd_strdup(ici->config1);
+ ictest.config2 = rd_strdup(ici->config2);
+
+ rd_kafka_interceptor_add_on_send(rk, __FILE__, on_send, ici);
+ rd_kafka_interceptor_add_on_acknowledgement(rk, __FILE__,
+ on_acknowledgement, ici);
+ rd_kafka_interceptor_add_on_consume(rk, __FILE__, on_consume, ici);
+ rd_kafka_interceptor_add_on_commit(rk, __FILE__, on_commit, ici);
+ rd_kafka_interceptor_add_on_destroy(rk, __FILE__, on_destroy, ici);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+/**
+ * @brief Configuration set handler
+ */
+static rd_kafka_conf_res_t on_conf_set(rd_kafka_conf_t *conf,
+ const char *name,
+ const char *val,
+ char *errstr,
+ size_t errstr_size,
+ void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ int level = 3;
+
+ if (!strcmp(name, "session.timeout.ms") ||
+ !strcmp(name, "socket.timeout.ms") ||
+ !strncmp(name, "interceptor_test", strlen("interceptor_test")))
+ level = 2;
+
+ TEST_SAYL(level, "on_conf_set(conf %p, \"%s\", \"%s\"): %p\n", conf,
+ name, val, ici);
+
+ if (!strcmp(name, "interceptor_test.good"))
+ return RD_KAFKA_CONF_OK;
+ else if (!strcmp(name, "interceptor_test.bad")) {
+ strncpy(errstr, "on_conf_set failed deliberately",
+ errstr_size - 1);
+ errstr[errstr_size - 1] = '\0';
+ return RD_KAFKA_CONF_INVALID;
+ } else if (!strcmp(name, "interceptor_test.config1")) {
+ if (ici->config1) {
+ free(ici->config1);
+ ici->config1 = NULL;
+ }
+ if (val)
+ ici->config1 = rd_strdup(val);
+ TEST_SAY("on_conf_set(conf %p, %s, %s): %p\n", conf, name, val,
+ ici);
+ return RD_KAFKA_CONF_OK;
+ } else if (!strcmp(name, "interceptor_test.config2")) {
+ if (ici->config2) {
+ free(ici->config2);
+ ici->config2 = NULL;
+ }
+ if (val)
+ ici->config2 = rd_strdup(val);
+ return RD_KAFKA_CONF_OK;
+ } else {
+ /* Apply intercepted client's config properties on
+ * interceptor config. */
+ rd_kafka_conf_set(ici->conf, name, val, errstr, errstr_size);
+ /* UNKNOWN makes the conf_set() call continue with
+ * other interceptors and finally the librdkafka properties. */
+ return RD_KAFKA_CONF_UNKNOWN;
+ }
+
+ return RD_KAFKA_CONF_UNKNOWN;
+}
+
+static void conf_init0(rd_kafka_conf_t *conf);
+
+
+/**
+ * @brief Set up new configuration on copy.
+ */
+static rd_kafka_resp_err_t on_conf_dup(rd_kafka_conf_t *new_conf,
+ const rd_kafka_conf_t *old_conf,
+ size_t filter_cnt,
+ const char **filter,
+ void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ TEST_SAY("on_conf_dup(new_conf %p, old_conf %p, filter_cnt %" PRIusz
+ ", ici %p)\n",
+ new_conf, old_conf, filter_cnt, ici);
+ conf_init0(new_conf);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+static rd_kafka_resp_err_t on_conf_destroy(void *ic_opaque) {
+ struct ici *ici = ic_opaque;
+ ici->on_conf_destroy_cnt++;
+ printf("conf_destroy called (opaque %p vs %p) ici %p\n", ic_opaque,
+ my_interceptor_plug_opaque, ici);
+ TEST_ASSERT(ici->on_conf_destroy_cnt == 1);
+ ici_destroy(ici);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+
+
+/**
+ * @brief Configuration init is intercepted both from plugin.library.paths
+ * as well as rd_kafka_conf_dup().
+ * This internal method serves both cases.
+ */
+static void conf_init0(rd_kafka_conf_t *conf) {
+ struct ici *ici;
+ const char *filter[] = {"plugin.library.paths", "interceptor_test."};
+ size_t filter_cnt = sizeof(filter) / sizeof(*filter);
+
+ /* Create new interceptor instance */
+ ici = calloc(1, sizeof(*ici));
+
+ ictest.conf_init.cnt++;
+ ICTEST_CNT_CHECK(conf_init);
+
+ /* Create own copy of configuration, after filtering out what
+ * brought us here (plugins and our own interceptor config). */
+ ici->conf = rd_kafka_conf_dup_filter(conf, filter_cnt, filter);
+ TEST_SAY("conf_init0(conf %p) for ici %p with ici->conf %p\n", conf,
+ ici, ici->conf);
+
+
+ /* Add interceptor methods */
+ rd_kafka_conf_interceptor_add_on_new(conf, __FILE__, on_new, ici);
+
+ rd_kafka_conf_interceptor_add_on_conf_set(conf, __FILE__, on_conf_set,
+ ici);
+ rd_kafka_conf_interceptor_add_on_conf_dup(conf, __FILE__, on_conf_dup,
+ ici);
+ rd_kafka_conf_interceptor_add_on_conf_destroy(conf, __FILE__,
+ on_conf_destroy, ici);
+}
+
+/**
+ * @brief Plugin conf initializer called when plugin.library.paths is set.
+ */
+DLL_EXPORT
+rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf,
+ void **plug_opaquep,
+ char *errstr,
+ size_t errstr_size) {
+ *plug_opaquep = (void *)my_interceptor_plug_opaque;
+
+ TEST_SAY("conf_init(conf %p) called (setting opaque to %p)\n", conf,
+ *plug_opaquep);
+
+ conf_init0(conf);
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h
new file mode 100644
index 000000000..646b4b4d6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/interceptor_test/interceptor_test.h
@@ -0,0 +1,54 @@
+#ifndef _INTERCEPTOR_TEST_H_
+#define _INTERCEPTOR_TEST_H_
+
+
+struct ictcnt {
+ int cnt;
+ int min;
+ int max;
+};
+
+struct ictest {
+ struct ictcnt conf_init;
+ struct ictcnt on_new;
+
+ /* intercepted interceptor_test.config1 and .config2 properties */
+ char *config1;
+ char *config2;
+
+ /* intercepted session.timeout.ms and socket.timeout.ms */
+ char *session_timeout_ms;
+ char *socket_timeout_ms;
+};
+
+#define ictest_init(ICT) memset((ICT), 0, sizeof(ictest))
+#define ictest_cnt_init(CNT, MIN, MAX) \
+ do { \
+ (CNT)->cnt = 0; \
+ (CNT)->min = MIN; \
+ (CNT)->max = MAX; \
+ } while (0)
+
+#define ictest_free(ICT) \
+ do { \
+ if ((ICT)->config1) \
+ free((ICT)->config1); \
+ if ((ICT)->config2) \
+ free((ICT)->config2); \
+ if ((ICT)->session_timeout_ms) \
+ free((ICT)->session_timeout_ms); \
+ if ((ICT)->socket_timeout_ms) \
+ free((ICT)->socket_timeout_ms); \
+ } while (0)
+
+#define ICTEST_CNT_CHECK(F) \
+ do { \
+ if (ictest.F.cnt > ictest.F.max) \
+ TEST_FAIL("interceptor %s count %d > max %d", #F, \
+ ictest.F.cnt, ictest.F.max); \
+ } while (0)
+
+/* The ictest struct is defined and set up by the calling test. */
+extern struct ictest ictest;
+
+#endif /* _INTERCEPTOR_TEST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore
new file mode 100644
index 000000000..5241a7220
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/java/.gitignore
@@ -0,0 +1 @@
+*.class \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java b/fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java
new file mode 100644
index 000000000..de044ae58
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/java/IncrementalRebalanceCli.java
@@ -0,0 +1,97 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import org.apache.kafka.clients.consumer.KafkaConsumer;
+import org.apache.kafka.clients.consumer.Consumer;
+import org.apache.kafka.clients.consumer.ConsumerConfig;
+import org.apache.kafka.clients.consumer.ConsumerRecord;
+import org.apache.kafka.clients.consumer.ConsumerRecords;
+import org.apache.kafka.clients.consumer.CooperativeStickyAssignor;
+import org.apache.kafka.common.KafkaException;
+
+import java.lang.Integer;
+import java.util.HashMap;
+import java.util.List;
+import java.util.ArrayList;
+import java.util.Properties;
+import java.time.Duration;
+
+
+public class IncrementalRebalanceCli {
+ public static void main (String[] args) throws Exception {
+ String testName = args[0];
+ String brokerList = args[1];
+ String topic1 = args[2];
+ String topic2 = args[3];
+ String group = args[4];
+
+ if (!testName.equals("test1")) {
+ throw new Exception("Unknown command: " + testName);
+ }
+
+ Properties consumerConfig = new Properties();
+ consumerConfig.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
+ consumerConfig.put(ConsumerConfig.GROUP_ID_CONFIG, group);
+ consumerConfig.put(ConsumerConfig.CLIENT_ID_CONFIG, "java_incrreb_consumer");
+ consumerConfig.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
+ consumerConfig.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArrayDeserializer");
+ consumerConfig.put(ConsumerConfig.PARTITION_ASSIGNMENT_STRATEGY_CONFIG, CooperativeStickyAssignor.class.getName());
+ Consumer<byte[], byte[]> consumer = new KafkaConsumer<>(consumerConfig);
+
+ List<String> topics = new ArrayList<>();
+ topics.add(topic1);
+ topics.add(topic2);
+ consumer.subscribe(topics);
+
+ long startTime = System.currentTimeMillis();
+ long timeout_s = 300;
+
+ try {
+ boolean running = true;
+ while (running) {
+ ConsumerRecords<byte[], byte[]> records = consumer.poll(Duration.ofMillis(1000));
+ if (System.currentTimeMillis() - startTime > 1000 * timeout_s) {
+ // Ensure process exits eventually no matter what happens.
+ System.out.println("IncrementalRebalanceCli timed out");
+ running = false;
+ }
+ if (consumer.assignment().size() == 6) {
+ // librdkafka has unsubscribed from topic #2, exit cleanly.
+ running = false;
+ }
+ }
+ } finally {
+ consumer.close();
+ }
+
+ System.out.println("Java consumer process exiting");
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile
new file mode 100644
index 000000000..68847075a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/java/Makefile
@@ -0,0 +1,12 @@
+
+KAFKA_JARS?=$(KAFKA_PATH)/libs
+
+CLASSES=Murmur2Cli.class TransactionProducerCli.class IncrementalRebalanceCli.class
+
+all: $(CLASSES)
+
+%.class: %.java
+ javac -classpath $(KAFKA_JARS)/kafka-clients-*.jar $^
+
+clean:
+ rm -f *.class
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java b/fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java
new file mode 100644
index 000000000..22444532d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/java/Murmur2Cli.java
@@ -0,0 +1,46 @@
+
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+import org.apache.kafka.common.utils.Utils;
+
+public class Murmur2Cli {
+ public static int toPositive(int number) {
+ return number & 0x7fffffff;
+ }
+ public static void main (String[] args) throws Exception {
+ for (String key : args) {
+ System.out.println(String.format("%s\t0x%08x", key,
+ toPositive(Utils.murmur2(key.getBytes()))));
+ }
+ /* If no args, print hash for empty string */
+ if (args.length == 0)
+ System.out.println(String.format("%s\t0x%08x", "",
+ toPositive(Utils.murmur2("".getBytes()))));
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md
new file mode 100644
index 000000000..a2754c258
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/java/README.md
@@ -0,0 +1,14 @@
+# Misc Java tools
+
+## Murmur2 CLI
+
+Build:
+
+ $ KAFKA_JARS=/your/kafka/libs make
+
+Run:
+
+ $ KAFKA_JARS=/your/kafka/libs ./run-class.sh Murmur2Cli "a sentence" and a word
+
+If KAFKA_JARS is not set it will default to $KAFKA_PATH/libs
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java b/fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java
new file mode 100644
index 000000000..f880c1422
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/java/TransactionProducerCli.java
@@ -0,0 +1,162 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2020, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+import java.io.IOException;
+import java.io.PrintWriter;
+
+import org.apache.kafka.clients.producer.KafkaProducer;
+import org.apache.kafka.clients.producer.Producer;
+import org.apache.kafka.clients.producer.ProducerConfig;
+import org.apache.kafka.clients.producer.ProducerRecord;
+import org.apache.kafka.common.KafkaException;
+
+import java.lang.Integer;
+import java.util.HashMap;
+import java.util.Properties;
+
+
+public class TransactionProducerCli {
+
+ enum TransactionType {
+ None,
+ BeginAbort,
+ BeginCommit,
+ BeginOpen,
+ ContinueAbort,
+ ContinueCommit,
+ ContinueOpen
+ }
+
+ enum FlushType {
+ DoFlush,
+ DontFlush
+ }
+
+ static Producer<byte[], byte[]> createProducer(String testid, String id, String brokerList, boolean transactional) {
+ Properties producerConfig = new Properties();
+ producerConfig.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);
+ producerConfig.put(ProducerConfig.CLIENT_ID_CONFIG, transactional ? "transactional-producer-" + id : "producer-" + id);
+ producerConfig.put(ProducerConfig.ENABLE_IDEMPOTENCE_CONFIG, true);
+ if (transactional) {
+ producerConfig.put(ProducerConfig.TRANSACTIONAL_ID_CONFIG, "test-transactional-id-" + testid + "-" + id);
+ }
+ producerConfig.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+ producerConfig.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.ByteArraySerializer");
+ producerConfig.put(ProducerConfig.LINGER_MS_CONFIG, "5"); // ensure batching.
+ Producer<byte[], byte[]> producer = new KafkaProducer<>(producerConfig);
+ if (transactional) {
+ producer.initTransactions();
+ }
+ return producer;
+ }
+
+ static void makeTestMessages(
+ Producer<byte[], byte[]> producer,
+ String topic, int partition,
+ int idStart, int count,
+ TransactionType tt,
+ FlushType flush) throws InterruptedException {
+ byte[] payload = { 0x10, 0x20, 0x30, 0x40 };
+ if (tt != TransactionType.None &&
+ tt != TransactionType.ContinueOpen &&
+ tt != TransactionType.ContinueCommit &&
+ tt != TransactionType.ContinueAbort) {
+ producer.beginTransaction();
+ }
+ for (int i = 0; i <count; ++i) {
+ ProducerRecord<byte[], byte[]> r = partition != -1
+ ? new ProducerRecord<byte[],byte[]>(topic, partition, new byte[] { (byte)(i + idStart) }, payload)
+ : new ProducerRecord<byte[], byte[]>(topic, new byte[] { (byte)(i + idStart) }, payload);
+ producer.send(r);
+ }
+ if (flush == FlushType.DoFlush) {
+ producer.flush();
+ }
+ if (tt == TransactionType.BeginAbort || tt == TransactionType.ContinueAbort) {
+ producer.abortTransaction();
+ } else if (tt == TransactionType.BeginCommit || tt == TransactionType.ContinueCommit) {
+ producer.commitTransaction();
+ }
+ }
+
+ static String[] csvSplit(String input) {
+ return input.split("\\s*,\\s*");
+ }
+
+ public static void main (String[] args) throws Exception {
+
+ String bootstrapServers = args[0];
+
+ HashMap<String, Producer<byte[], byte[]>> producers = new HashMap<String, Producer<byte[], byte[]>>();
+
+ String topic = null;
+ String testid = null;
+
+ /* Parse commands */
+ for (int i = 1 ; i < args.length ; i++) {
+ String cmd[] = csvSplit(args[i]);
+
+ System.out.println("TransactionProducerCli.java: command: '" + args[i] + "'");
+
+ if (cmd[0].equals("sleep")) {
+ Thread.sleep(Integer.decode(cmd[1]));
+
+ } else if (cmd[0].equals("exit")) {
+ System.exit(Integer.decode(cmd[1]));
+
+ } else if (cmd[0].equals("topic")) {
+ topic = cmd[1];
+
+ } else if (cmd[0].equals("testid")) {
+ testid = cmd[1];
+
+ } else if (cmd[0].startsWith("producer")) {
+ Producer<byte[], byte[]> producer = producers.get(cmd[0]);
+
+ if (producer == null) {
+ producer = createProducer(testid, cmd[0], bootstrapServers,
+ TransactionType.valueOf(cmd[4]) != TransactionType.None);
+ producers.put(cmd[0], producer);
+ }
+
+ makeTestMessages(producer, /* producer */
+ topic, /* topic */
+ Integer.decode(cmd[1]), /* partition, or -1 for any */
+ Integer.decode(cmd[2]), /* idStart */
+ Integer.decode(cmd[3]), /* msg count */
+ TransactionType.valueOf(cmd[4]), /* TransactionType */
+ FlushType.valueOf(cmd[5])); /* Flush */
+
+ } else {
+ throw new Exception("Unknown command: " + args[i]);
+ }
+ }
+
+ producers.forEach((k,p) -> p.close());
+ }
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh
new file mode 100755
index 000000000..e3e52b1cc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/java/run-class.sh
@@ -0,0 +1,11 @@
+#!/bin/bash
+#
+
+if [[ -z $KAFKA_PATH ]]; then
+ echo "$0: requires \$KAFKA_PATH to point to the kafka release top directory"
+ exit 1
+fi
+
+JAVA_TESTS_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" >/dev/null 2>&1 && pwd )"
+
+CLASSPATH=$JAVA_TESTS_DIR $KAFKA_PATH/bin/kafka-run-class.sh "$@"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions b/fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions
new file mode 100644
index 000000000..6259dadb1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/librdkafka.suppressions
@@ -0,0 +1,483 @@
+# Valgrind suppression file for librdkafka
+{
+ allocate_tls_despite_detached_1
+ Memcheck:Leak
+ fun:calloc
+ fun:_dl_allocate_tls
+ fun:pthread_create@@GLIBC_2.2.5
+}
+
+{
+ helgrind---_dl_allocate_tls
+ Helgrind:Race
+ fun:mempcpy
+ fun:_dl_allocate_tls_init
+ ...
+ fun:pthread_create@@GLIBC_2.2*
+ fun:pthread_create_WRK
+ fun:pthread_create@*
+}
+{
+ drd_nss1
+ drd:ConflictingAccess
+ fun:pthread_mutex_lock
+ fun:_nss_files_gethostbyname4_r
+ fun:gaih_inet
+ fun:getaddrinfo
+ fun:rd_getaddrinfo
+ fun:rd_kafka_broker_resolve
+ fun:rd_kafka_broker_connect
+ fun:rd_kafka_broker_thread_main
+ fun:_thrd_wrapper_function
+ obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
+ fun:start_thread
+ fun:clone
+}
+
+{
+ drd_nss2
+ drd:ConflictingAccess
+ fun:strlen
+ fun:nss_load_library
+ fun:__nss_lookup_function
+ fun:gaih_inet
+ fun:getaddrinfo
+ fun:rd_getaddrinfo
+ fun:rd_kafka_broker_resolve
+ fun:rd_kafka_broker_connect
+ fun:rd_kafka_broker_thread_main
+ fun:_thrd_wrapper_function
+ obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
+ fun:start_thread
+ fun:clone
+}
+{
+ drd_nss3
+ drd:ConflictingAccess
+ fun:__GI_stpcpy
+ fun:nss_load_library
+ fun:__nss_lookup_function
+ fun:gaih_inet
+ fun:getaddrinfo
+ fun:rd_getaddrinfo
+ fun:rd_kafka_broker_resolve
+ fun:rd_kafka_broker_connect
+ fun:rd_kafka_broker_thread_main
+ fun:_thrd_wrapper_function
+ obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
+ fun:start_thread
+ fun:clone
+}
+{
+ drd_nss4
+ drd:ConflictingAccess
+ fun:strlen
+ fun:__nss_lookup_function
+ fun:gaih_inet
+ fun:getaddrinfo
+ fun:rd_getaddrinfo
+ fun:rd_kafka_broker_resolve
+ fun:rd_kafka_broker_connect
+ fun:rd_kafka_broker_thread_main
+ fun:_thrd_wrapper_function
+ obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
+ fun:start_thread
+ fun:clone
+}
+{
+ drd_nss5
+ drd:ConflictingAccess
+ fun:strlen
+ fun:__nss_lookup_function
+ fun:gaih_inet
+ fun:getaddrinfo
+ fun:rd_getaddrinfo
+ fun:rd_kafka_broker_resolve
+ fun:rd_kafka_broker_connect
+ fun:rd_kafka_broker_thread_main
+ fun:_thrd_wrapper_function
+ obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
+ fun:start_thread
+ fun:clone
+}
+{
+ drd_nss6
+ drd:ConflictingAccess
+ fun:internal_setent
+ fun:_nss_files_gethostbyname4_r
+ fun:gaih_inet
+ fun:getaddrinfo
+ fun:rd_getaddrinfo
+ fun:rd_kafka_broker_resolve
+ fun:rd_kafka_broker_connect
+ fun:rd_kafka_broker_thread_main
+ fun:_thrd_wrapper_function
+ obj:/usr/lib/valgrind/vgpreload_drd-amd64-linux.so
+ fun:start_thread
+ fun:clone
+}
+{
+ ssl_read
+ Memcheck:Cond
+ fun:ssl3_read_bytes
+ fun:ssl3_read_internal
+}
+
+
+
+{
+ ssl_noterm_leak1
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:SSL_library_init
+}
+{
+ ssl_noterm_leak2
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:OPENSSL_add_all_algorithms_noconf
+}
+{
+ ssl_noterm_leak3
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:OpenSSL_add_all_digests
+}
+{
+ ssl_noterm_leak3b
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:realloc
+ ...
+ fun:OpenSSL_add_all_digests
+}
+{
+ ssl_noterm_leak4
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:EVP_add_digest
+}
+{
+ ssl_noterm_leak5
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:SSL_load_error_strings
+}
+{
+ ssl_noterm_leak6
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:realloc
+ ...
+ fun:OPENSSL_add_all_algorithms_noconf
+}
+{
+ ssl_noterm_leak7
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:ERR_load_SSL_strings
+}
+{
+ ssl_noterm_leak8
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:err_load_strings
+}
+{
+ ssl_noterm_leak8b
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:ERR_load_strings
+}
+{
+ ssl_noterm_leak8c
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:realloc
+ ...
+ fun:ERR_load_strings
+}
+{
+ ssl_noterm_leak9
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:realloc
+ ...
+ fun:ERR_load_SSL_strings
+}
+{
+ ssl_noterm_leak10
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:OPENSSL_init_library
+}
+{
+ ssl_noterm_leak10b
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:calloc
+ ...
+ fun:OPENSSL_init_library
+}
+{
+ ssl_noterm_leak11
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:EVP_SignFinal
+}
+{
+ ssl_noterm_leak12
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:FIPS_mode_set
+}
+{
+ thrd_tls_alloc_stack
+ Memcheck:Leak
+ match-leak-kinds: possible
+ fun:calloc
+ fun:allocate_dtv
+ fun:_dl_allocate_tls
+ fun:allocate_stack
+ fun:pthread_create@@GLIBC_2.2.5
+ fun:thrd_create
+}
+{
+ more_tls1
+ Memcheck:Leak
+ match-leak-kinds: possible
+ fun:calloc
+ fun:allocate_dtv
+ fun:_dl_allocate_tls
+ fun:allocate_stack
+}
+
+{
+ ssl_uninit1
+ Memcheck:Cond
+ fun:rd_kafka_metadata_handle
+ fun:rd_kafka_broker_metadata_reply
+}
+{
+ ssl_uninit2
+ Memcheck:Value8
+ fun:rd_kafka_metadata_handle
+ fun:rd_kafka_broker_metadata_reply
+}
+{
+ ssl_uninit3
+ Memcheck:Cond
+ fun:memcpy@@GLIBC_2.14
+ fun:rd_kafka_metadata_handle
+ fun:rd_kafka_broker_metadata_reply
+}
+
+{
+ log_races0
+ Helgrind:Race
+ fun:rd_kafka_log0
+}
+{
+ glibc_tls
+ Helgrind:Race
+ fun:mempcpy
+ fun:_dl_allocate_tls_init
+ fun:get_cached_stack
+ fun:allocate_stack
+ fun:pthread_create@@GLIBC_2.2.5
+}
+{
+ false_tls
+ Helgrind:Race
+ fun:thrd_detach
+}
+
+
+# cyrus libsasl2 global/once memory "leaks"
+{
+ leak_sasl_global_init1
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:sasl_client_init
+}
+{
+ leak_sasl_global_init6
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:calloc
+ ...
+ fun:sasl_client_init
+}
+
+{
+ leak_sasl_dlopen
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:?alloc
+ ...
+ fun:_dl_catch_error
+}
+{
+ leak_sasl_add_plugin
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ ...
+ fun:sasl_client_add_plugin
+}
+{
+ leak_sasl_add_plugin2
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:calloc
+ ...
+ fun:sasl_client_add_plugin
+}
+{
+ debian_testing_ld_uninitialized
+ Memcheck:Cond
+ fun:index
+ fun:expand_dynamic_string_token
+ ...
+ fun:_dl_start
+ ...
+}
+{
+ glibc_internals_nss_race1
+ Helgrind:Race
+ ...
+ fun:getaddrinfo
+ ...
+}
+{
+ nss_files
+ Helgrind:Race
+ ...
+ fun:_dl_runtime_resolve_avx
+ ...
+}
+{
+ cpp_glibc_globals
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ fun:pool
+ fun:__static_initialization_and_destruction_0
+ fun:_GLOBAL__sub_I_eh_alloc.cc
+}
+{
+ mtx_unlock_plus_destroy
+ Helgrind:Race
+ obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
+ obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
+ fun:rd_kafka_q_destroy_final
+}
+{
+ mtx_unlock_plus_destroy2
+ Helgrind:Race
+ obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
+ obj:/usr/lib/valgrind/vgpreload_helgrind-amd64-linux.so
+ fun:rd_refcnt_destroy
+}
+{
+ nss_dl_lookup
+ Helgrind:Race
+ ...
+ fun:do_lookup_x
+ fun:_dl_lookup_symbol_x
+ ...
+}
+{
+ dlopen1
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ ...
+ fun:_dl_open
+}
+
+{
+ atomics32_set
+ Helgrind:Race
+ fun:rd_atomic32_set
+}
+
+{
+ atomics32_get
+ Helgrind:Race
+ fun:rd_atomic32_get
+}
+
+{
+ atomics64_set
+ Helgrind:Race
+ fun:rd_atomic64_set
+}
+
+{
+ atomics64_get
+ Helgrind:Race
+ fun:rd_atomic64_get
+}
+
+{
+ osx_dyld_img
+ Memcheck:Leak
+ match-leak-kinds: reachable
+ fun:malloc
+ fun:strdup
+ fun:__si_module_static_ds_block_invoke
+ fun:_dispatch_client_callout
+ fun:_dispatch_once_callout
+ fun:si_module_static_ds
+ fun:si_module_with_name
+ fun:si_module_config_modules_for_category
+ fun:__si_module_static_search_block_invoke
+ fun:_dispatch_client_callout
+ fun:_dispatch_once_callout
+ fun:si_module_static_search
+ fun:si_module_with_name
+ fun:si_search
+ fun:getpwuid_r
+ fun:_CFRuntimeBridgeClasses
+ fun:__CFInitialize
+ fun:_ZN16ImageLoaderMachO11doImageInitERKN11ImageLoader11LinkContextE
+ fun:_ZN16ImageLoaderMachO16doInitializationERKN11ImageLoader11LinkContextE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader23recursiveInitializationERKNS_11LinkContextEjPKcRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader19processInitializersERKNS_11LinkContextEjRNS_21InitializerTimingListERNS_15UninitedUpwardsE
+ fun:_ZN11ImageLoader15runInitializersERKNS_11LinkContextERNS_21InitializerTimingListE
+ fun:_ZN4dyld24initializeMainExecutableEv
+ fun:_ZN4dyld5_mainEPK12macho_headermiPPKcS5_S5_Pm
+ fun:_ZN13dyldbootstrap5startEPKN5dyld311MachOLoadedEiPPKcS3_Pm
+ fun:_dyld_start
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh
new file mode 100755
index 000000000..7c604df73
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/lz4_manual_test.sh
@@ -0,0 +1,59 @@
+#!/bin/bash
+#
+
+#
+# Manual test (verification) of LZ4
+# See README for details
+#
+
+set -e
+# Debug what commands are being executed:
+#set -x
+
+TOPIC=lz4
+
+if [[ $TEST_KAFKA_VERSION == "trunk" ]]; then
+ RDK_ARGS="$RDK_ARGS -X api.version.request=true"
+else
+ if [[ $TEST_KAFKA_VERSION == 0.8.* ]]; then
+ BROKERS=$(echo $BROKERS | sed -e 's/PLAINTEXT:\/\///g')
+ fi
+ RDK_ARGS="$RDK_ARGS -X broker.version.fallback=$TEST_KAFKA_VERSION"
+fi
+
+# Create topic
+${KAFKA_PATH}/bin/kafka-topics.sh --zookeeper $ZK_ADDRESS --create \
+ --topic $TOPIC --partitions 1 --replication-factor 1
+
+# Produce messages with rdkafka
+echo "### Producing with librdkafka: ids 1000-1010"
+seq 1000 1010 | ../examples/rdkafka_example -P -b $BROKERS -t $TOPIC \
+ -z lz4 $RDK_ARGS
+
+# Produce with Kafka
+echo "### Producing with Kafka: ids 2000-2010"
+seq 2000 2010 | ${KAFKA_PATH}/bin/kafka-console-producer.sh \
+ --broker-list $BROKERS --compression-codec lz4 \
+ --topic $TOPIC
+
+# Consume with rdkafka
+echo "### Consuming with librdkafka: expect 1000-1010 and 2000-2010"
+../examples/rdkafka_example -C -b $BROKERS -t $TOPIC -p 0 -o beginning -e -q -A \
+ $RDK_ARGS
+
+# Consume with Kafka
+echo "### Consuming with Kafka: expect 1000-1010 and 2000-2010"
+if [[ $TEST_KAFKA_VERSION == "trunk" ]]; then
+ ${KAFKA_PATH}/bin/kafka-console-consumer.sh -new-consumer \
+ --bootstrap-server $BROKERS --from-beginning --topic $TOPIC \
+ --timeout-ms 1000
+else
+ ${KAFKA_PATH}/bin/kafka-console-consumer.sh \
+ --zookeeper $ZK_ADDRESS --from-beginning --topic $TOPIC \
+ --max-messages 22
+fi
+
+
+echo ""
+echo "### $TEST_KAFKA_VERSION: Did you see messages 1000-1010 and 2000-2010 from both consumers?"
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh
new file mode 100755
index 000000000..3a0a9d104
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/multi-broker-version-test.sh
@@ -0,0 +1,50 @@
+#!/bin/bash
+#
+
+set -e
+
+# Test current librdkafka with multiple broker versions.
+
+if [[ ! -z $TEST_KAFKA_VERSION ]]; then
+ echo "Must not be run from within a trivup session"
+ exit 1
+fi
+
+
+VERSIONS="$*"
+if [[ -z $VERSIONS ]]; then
+ VERSIONS="0.8.2.1 0.9.0.1 0.10.0.1 0.10.1.1 0.10.2.1 0.11.0.0"
+fi
+
+FAILED_VERSIONS=""
+PASSED_VERSIONS=""
+for VERSION in $VERSIONS ; do
+ echo "Testing broker version $VERSION"
+ if [[ $VERSION == "trunk" ]]; then
+ extra_args="--kafka-src ~/src/kafka --no-deploy"
+ else
+ extra_args=""
+ fi
+ ./interactive_broker_version.py \
+ --root ~/old/kafka -c "make run_seq" $extra_args "$VERSION"
+
+ if [[ $? == 0 ]] ; then
+ echo "#### broker $VERSION passed ####"
+ PASSED_VERSIONS="${PASSED_VERSIONS}${VERSION} "
+ else
+ echo "#### broker $VERSION FAILED ####"
+ FAILED_VERSIONS="${FAILED_VERSIONS}${VERSION} "
+ fi
+done
+
+
+echo "broker versions PASSED: ${PASSED_VERSIONS}"
+echo "broker versions FAILED: ${FAILED_VERSIONS}"
+
+if [[ ! -z $FAILED_VERSIONS ]]; then
+ exit 1
+else
+ exit 0
+fi
+
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh
new file mode 100755
index 000000000..f77b2a127
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/parse-refcnt.sh
@@ -0,0 +1,43 @@
+#!/bin/bash
+#
+#
+
+set -e
+
+# Parse a log with --enable-refcnt output enabled.
+
+log="$1"
+
+if [[ ! -f $log ]]; then
+ echo "Usage: $0 <log-file>"
+ exit 1
+fi
+
+
+# Create a file with all refcnt creations
+cfile=$(mktemp)
+grep 'REFCNT.* 0 +1:' $log | awk '{print $6}' | sort > $cfile
+
+# .. and one file with all refcnt destructions
+dfile=$(mktemp)
+grep 'REFCNT.* 1 -1:' $log | awk '{print $6}' | sort > $dfile
+
+# For each refcnt that was never destructed (never reached 0), find it
+# in the input log.
+
+seen=
+for p in $(grep -v -f $dfile $cfile) ; do
+ echo "=== REFCNT $p never reached 0 ==="
+ grep -nH "$p" $log
+ echo ""
+ seen=yes
+done
+
+rm -f "$cfile" "$dfile"
+
+if [[ -z $seen ]]; then
+ echo "No refcount leaks found"
+ exit 0
+fi
+
+exit 2
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py b/fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py
new file mode 100755
index 000000000..b699377f1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/performance_plot.py
@@ -0,0 +1,115 @@
+#!/usr/bin/env python3
+#
+
+import sys
+import json
+import numpy as np
+import matplotlib.pyplot as plt
+
+from collections import defaultdict
+
+
+def semver2int(semver):
+ if semver == 'trunk':
+ semver = '0.10.0.0'
+ vi = 0
+ i = 0
+ for v in reversed(semver.split('.')):
+ vi += int(v) * (i * 10)
+ i += 1
+ return vi
+
+
+def get_perf_data(perfname, stats):
+ """ Return [labels,x,y,errs] for perfname 'mb_per_sec' as a numpy arrays
+ labels: broker versions
+ x: list with identical value (to plot on same x point)
+ y: perfname counter (average)
+ errs: errors
+ """
+ ver = defaultdict(list)
+
+ # Per version:
+ # * accumulate values
+ # * calculate average
+ # * calculate error
+
+ # Accumulate values per version
+ for x in stats:
+ v = str(x[0])
+ ver[v].append(x[1][perfname])
+ print('%s is %s' % (perfname, ver))
+
+ labels0 = sorted(ver.keys(), key=semver2int)
+ y0 = list()
+ errs0 = list()
+
+ # Maintain order by using labels0
+ for v in labels0:
+ # Calculate average
+ avg = sum(ver[v]) / float(len(ver[v]))
+ y0.append(avg)
+ # Calculate error
+ errs0.append(max(ver[v]) - avg)
+
+ labels = np.array(labels0)
+ y1 = np.array(y0)
+ x1 = np.array(range(0, len(labels)))
+ errs = np.array(errs0)
+ return [labels, x1, y1, errs]
+
+
+def plot(description, name, stats, perfname, outfile=None):
+ labels, x, y, errs = get_perf_data(perfname, stats)
+ plt.title('%s: %s %s' % (description, name, perfname))
+ plt.xlabel('Kafka version')
+ plt.ylabel(perfname)
+ plt.errorbar(x, y, yerr=errs, alpha=0.5)
+ plt.xticks(x, labels, rotation='vertical')
+ plt.margins(0.2)
+ plt.subplots_adjust(bottom=0.2)
+ if outfile is None:
+ plt.show()
+ else:
+ plt.savefig(outfile, bbox_inches='tight')
+ return
+
+
+if __name__ == '__main__':
+
+ outfile = sys.argv[1]
+
+ reports = []
+ for rf in sys.argv[2:]:
+ with open(rf) as f:
+ reports.append(json.load(f))
+
+ stats = defaultdict(list)
+
+ # Extract performance test data
+ for rep in reports:
+ perfs = rep.get(
+ 'tests',
+ dict()).get(
+ '0038_performance',
+ list).get(
+ 'report',
+ None)
+ if perfs is None:
+ continue
+
+ for perf in perfs:
+ for n in ['producer', 'consumer']:
+ o = perf.get(n, None)
+ if o is None:
+ print('no %s in %s' % (n, perf))
+ continue
+
+ stats[n].append((rep.get('broker_version', 'unknown'), o))
+
+ for t in ['producer', 'consumer']:
+ for perfname in ['mb_per_sec', 'records_per_sec']:
+ plot('librdkafka 0038_performance test: %s (%d samples)' %
+ (outfile, len(reports)),
+ t, stats[t], perfname, outfile='%s_%s_%s.png' % (
+ outfile, t, perfname))
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile b/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile
new file mode 100644
index 000000000..a39f18270
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/Makefile
@@ -0,0 +1,19 @@
+LIBNAME= plugin_test
+LIBVER= 1
+
+-include ../../Makefile.config
+
+SRCS= plugin_test.c
+
+OBJS= $(SRCS:.c=.o)
+
+# For rdkafka.h
+CPPFLAGS+=-I../../src
+
+all: lib
+
+include ../../mklove/Makefile.base
+
+clean: lib-clean
+
+-include $(DEPS)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c b/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c
new file mode 100644
index 000000000..54639a5a8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/plugin_test/plugin_test.c
@@ -0,0 +1,58 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2017 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @brief Plugin test library
+ */
+
+#include <stdio.h>
+#include <assert.h>
+
+/* typical include path outside tests is <librdkafka/rdkafka.h> */
+#include "rdkafka.h"
+
+
+
+static void *my_opaque = (void *)0x5678;
+/*
+ * Common methods
+ */
+rd_kafka_resp_err_t conf_init(rd_kafka_conf_t *conf,
+ void **plug_opaquep,
+ char *errstr,
+ size_t errstr_size) {
+ printf("plugin conf_init called!\n");
+ *plug_opaquep = my_opaque;
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+void conf_destroy(const rd_kafka_conf_t *conf, void *plug_opaque) {
+ assert(plug_opaque == plug_opaque);
+ printf("plugin destroy called\n");
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt
new file mode 100644
index 000000000..c15a66f47
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/requirements.txt
@@ -0,0 +1,2 @@
+trivup >= 0.12.1
+jsoncomment
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh
new file mode 100755
index 000000000..32165c2d4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/run-consumer-tests.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+#
+# Run all tests that employ a consumer.
+#
+
+set -e
+
+TESTS=$(for t in $(grep -l '[Cc]onsume' 0*.{c,cpp}); do \
+ echo $t | sed -e 's/^\([0-9][0-9][0-9][0-9]\)-.*/\1/g' ; \
+ done)
+
+export TESTS
+echo "# Running consumer tests: $TESTS"
+
+./run-test.sh $*
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh
new file mode 100755
index 000000000..7f1035cbb
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/run-producer-tests.sh
@@ -0,0 +1,16 @@
+#!/bin/bash
+#
+#
+# Run all tests that employ a producer.
+#
+
+set -e
+
+TESTS=$(for t in $(grep -l '[pp]roduce' 0*.{c,cpp}); do \
+ echo $t | sed -e 's/^\([0-9][0-9][0-9][0-9]\)-.*/\1/g' ; \
+ done)
+
+export TESTS
+echo "# Running producer tests: $TESTS"
+
+./run-test.sh $*
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh
new file mode 100755
index 000000000..2f531c61f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/run-test.sh
@@ -0,0 +1,140 @@
+#!/usr/bin/env bash
+#
+
+RED='\033[31m'
+GREEN='\033[32m'
+CYAN='\033[36m'
+CCLR='\033[0m'
+
+if [[ $1 == -h ]]; then
+ echo "Usage: $0 [-..] [modes..]"
+ echo ""
+ echo " Modes: bare valgrind helgrind cachegrind drd gdb lldb bash"
+ echo " Options:"
+ echo " -.. - test-runner command arguments (pass thru)"
+ exit 0
+fi
+
+ARGS=
+
+while [[ $1 == -* ]]; do
+ ARGS="$ARGS $1"
+ shift
+done
+
+TEST=./test-runner
+
+if [ ! -z "$1" ]; then
+ MODES=$1
+else
+ MODES="bare"
+ # Enable valgrind:
+ #MODES="bare valgrind"
+fi
+
+FAILED=0
+
+export RDKAFKA_GITVER="$(git rev-parse --short HEAD)@$(git symbolic-ref -q --short HEAD)"
+
+# Enable valgrind suppressions for false positives
+SUPP="--suppressions=librdkafka.suppressions"
+
+# Uncomment to generate valgrind suppressions
+#GEN_SUPP="--gen-suppressions=yes"
+
+# Common valgrind arguments
+VALGRIND_ARGS="--error-exitcode=3"
+
+# Enable vgdb on valgrind errors.
+#VALGRIND_ARGS="$VALGRIND_ARGS --vgdb-error=1"
+
+# Exit valgrind on first error
+VALGRIND_ARGS="$VALGRIND_ARGS --exit-on-first-error=yes"
+
+export LD_LIBRARY_PATH=$LD_LIBRARY_PATH:../src:../src-cpp
+export DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH:../src:../src-cpp
+
+echo -e "${CYAN}############## $TEST ################${CCLR}"
+
+for mode in $MODES; do
+ echo -e "${CYAN}### Running test $TEST in $mode mode ###${CCLR}"
+ export TEST_MODE=$mode
+ case "$mode" in
+ valgrind)
+ valgrind $VALGRIND_ARGS --leak-check=full --show-leak-kinds=all \
+ --errors-for-leak-kinds=all \
+ --track-origins=yes \
+ --track-fds=yes \
+ $SUPP $GEN_SUPP \
+ $TEST $ARGS
+ RET=$?
+ ;;
+ helgrind)
+ valgrind $VALGRIND_ARGS --tool=helgrind \
+ --sim-hints=no-nptl-pthread-stackcache \
+ $SUPP $GEN_SUPP \
+ $TEST $ARGS
+ RET=$?
+ ;;
+ cachegrind|callgrind)
+ valgrind $VALGRIND_ARGS --tool=$mode \
+ $SUPP $GEN_SUPP \
+ $TEST $ARGS
+ RET=$?
+ ;;
+ drd)
+ valgrind $VALGRIND_ARGS --tool=drd $SUPP $GEN_SUPP \
+ $TEST $ARGS
+ RET=$?
+ ;;
+ callgrind)
+ valgrind $VALGRIND_ARGS --tool=callgrind $SUPP $GEN_SUPP \
+ $TEST $ARGS
+ RET=$?
+ ;;
+ gdb)
+ grun=$(mktemp gdbrunXXXXXX)
+ cat >$grun <<EOF
+set \$_exitcode = -999
+run $ARGS
+if \$_exitcode != -999
+ quit
+end
+EOF
+ export ASAN_OPTIONS="$ASAN_OPTIONS:abort_on_error=1"
+ gdb -x $grun $TEST
+ RET=$?
+ rm $grun
+ ;;
+ bare)
+ $TEST $ARGS
+ RET=$?
+ ;;
+ lldb)
+ lldb -b -o "process launch --environment DYLD_LIBRARY_PATH=$DYLD_LIBRARY_PATH" -- $TEST $ARGS
+ RET=$?
+ ;;
+ bash)
+ PS1="[run-test.sh] $PS1" bash
+ RET=$?
+ ;;
+ *)
+ echo -e "${RED}### Unknown mode $mode for $TEST ###${CCLR}"
+ RET=1
+ ;;
+ esac
+
+ if [ $RET -gt 0 ]; then
+ echo -e "${RED}###"
+ echo -e "### Test $TEST in $mode mode FAILED! (return code $RET) ###"
+ echo -e "###${CCLR}"
+ FAILED=1
+ else
+ echo -e "${GREEN}###"
+ echo -e "### $Test $TEST in $mode mode PASSED! ###"
+ echo -e "###${CCLR}"
+ fi
+done
+
+exit $FAILED
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c b/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c
new file mode 100644
index 000000000..48e702f3f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/rusage.c
@@ -0,0 +1,249 @@
+/*
+ * librdkafka - The Apache Kafka C/C++ library
+ *
+ * Copyright (c) 2019 Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name Track test resource usage.
+ */
+
+#ifdef __APPLE__
+#define _DARWIN_C_SOURCE /* required for rusage.ru_maxrss, etc. */
+#endif
+
+#include "test.h"
+
+#if HAVE_GETRUSAGE
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include "rdfloat.h"
+
+
+/**
+ * @brief Call getrusage(2)
+ */
+static int test_getrusage(struct rusage *ru) {
+ if (getrusage(RUSAGE_SELF, ru) == -1) {
+ TEST_WARN("getrusage() failed: %s\n", rd_strerror(errno));
+ return -1;
+ }
+
+ return 0;
+}
+
+/* Convert timeval to seconds */
+#define _tv2s(TV) \
+ (double)((double)(TV).tv_sec + ((double)(TV).tv_usec / 1000000.0))
+
+/* Convert timeval to CPU usage percentage (5 = 5%, 130.3 = 130.3%) */
+#define _tv2cpu(TV, DURATION) ((_tv2s(TV) / (DURATION)) * 100.0)
+
+
+/**
+ * @brief Calculate difference between \p end and \p start rusage.
+ *
+ * @returns the delta
+ */
+static struct rusage test_rusage_calc(const struct rusage *start,
+ const struct rusage *end,
+ double duration) {
+ struct rusage delta = RD_ZERO_INIT;
+
+ timersub(&end->ru_utime, &start->ru_utime, &delta.ru_utime);
+ timersub(&end->ru_stime, &start->ru_stime, &delta.ru_stime);
+ /* FIXME: maxrss doesn't really work when multiple tests are
+ * run in the same process since it only registers the
+ * maximum RSS, not the current one.
+ * Read this from /proc/<pid>/.. instead */
+ delta.ru_maxrss = end->ru_maxrss - start->ru_maxrss;
+ delta.ru_nvcsw = end->ru_nvcsw - start->ru_nvcsw;
+ /* skip fields we're not interested in */
+
+ TEST_SAY(_C_MAG
+ "Test resource usage summary: "
+ "%.3fs (%.1f%%) User CPU time, "
+ "%.3fs (%.1f%%) Sys CPU time, "
+ "%.3fMB RSS memory increase, "
+ "%ld Voluntary context switches\n",
+ _tv2s(delta.ru_utime), _tv2cpu(delta.ru_utime, duration),
+ _tv2s(delta.ru_stime), _tv2cpu(delta.ru_stime, duration),
+ (double)delta.ru_maxrss / (1024.0 * 1024.0), delta.ru_nvcsw);
+
+ return delta;
+}
+
+
+/**
+ * @brief Check that test ran within threshold levels
+ */
+static int test_rusage_check_thresholds(struct test *test,
+ const struct rusage *ru,
+ double duration) {
+ static const struct rusage_thres defaults = {
+ .ucpu = 5.0, /* min value, see below */
+ .scpu = 2.5, /* min value, see below */
+ .rss = 10.0, /* 10 megs */
+ .ctxsw = 100, /* this is the default number of context switches
+ * per test second.
+ * note: when ctxsw is specified on a test
+ * it should be specified as the total
+ * number of context switches. */
+ };
+ /* CPU usage thresholds are too blunt for very quick tests.
+ * Use a forgiving default CPU threshold for any test that
+ * runs below a certain duration. */
+ const double min_duration = 2.0; /* minimum test duration for
+ * CPU thresholds to have effect. */
+ const double lax_cpu = 1000.0; /* 1000% CPU usage (e.g 10 cores
+ * at full speed) allowed for any
+ * test that finishes in under 2s */
+ const struct rusage_thres *thres = &test->rusage_thres;
+ double cpu, mb, uthres, uthres_orig, sthres, rssthres;
+ int csthres;
+ char reasons[3][128];
+ int fails = 0;
+
+ if (duration < min_duration)
+ uthres = lax_cpu;
+ else if (rd_dbl_zero((uthres = thres->ucpu)))
+ uthres = defaults.ucpu;
+
+ uthres_orig = uthres;
+ uthres *= test_rusage_cpu_calibration;
+
+ cpu = _tv2cpu(ru->ru_utime, duration);
+ if (cpu > uthres) {
+ rd_snprintf(reasons[fails], sizeof(reasons[fails]),
+ "User CPU time (%.3fs) exceeded: %.1f%% > %.1f%%",
+ _tv2s(ru->ru_utime), cpu, uthres);
+ TEST_WARN("%s\n", reasons[fails]);
+ fails++;
+ }
+
+ /* Let the default Sys CPU be the maximum of the defaults.cpu
+ * and 20% of the User CPU. */
+ if (rd_dbl_zero((sthres = thres->scpu)))
+ sthres = duration < min_duration
+ ? lax_cpu
+ : RD_MAX(uthres_orig * 0.20, defaults.scpu);
+
+ sthres *= test_rusage_cpu_calibration;
+
+ cpu = _tv2cpu(ru->ru_stime, duration);
+ if (cpu > sthres) {
+ rd_snprintf(reasons[fails], sizeof(reasons[fails]),
+ "Sys CPU time (%.3fs) exceeded: %.1f%% > %.1f%%",
+ _tv2s(ru->ru_stime), cpu, sthres);
+ TEST_WARN("%s\n", reasons[fails]);
+ fails++;
+ }
+
+ rssthres = thres->rss > 0.0 ? thres->rss : defaults.rss;
+ if ((mb = (double)ru->ru_maxrss / (1024.0 * 1024.0)) > rssthres) {
+ rd_snprintf(reasons[fails], sizeof(reasons[fails]),
+ "RSS memory exceeded: %.2fMB > %.2fMB", mb,
+ rssthres);
+ TEST_WARN("%s\n", reasons[fails]);
+ fails++;
+ }
+
+
+ if (!(csthres = thres->ctxsw))
+ csthres = duration < min_duration
+ ? defaults.ctxsw * 100
+ : (int)(duration * (double)defaults.ctxsw);
+
+ /* FIXME: not sure how to use this */
+ if (0 && ru->ru_nvcsw > csthres) {
+ TEST_WARN(
+ "Voluntary context switches exceeded: "
+ "%ld > %d\n",
+ ru->ru_nvcsw, csthres);
+ fails++;
+ }
+
+ TEST_ASSERT(fails <= (int)RD_ARRAYSIZE(reasons),
+ "reasons[] array not big enough (needs %d slots)", fails);
+
+ if (!fails || !test_rusage)
+ return 0;
+
+ TEST_FAIL("Test resource usage exceeds %d threshold(s): %s%s%s%s%s",
+ fails, reasons[0], fails > 1 ? ", " : "",
+ fails > 1 ? reasons[1] : "", fails > 2 ? ", " : "",
+ fails > 2 ? reasons[2] : "");
+
+
+ return -1;
+}
+#endif
+
+
+
+void test_rusage_start(struct test *test) {
+#if HAVE_GETRUSAGE
+ /* Can't do per-test rusage checks when tests run in parallel. */
+ if (test_concurrent_max > 1)
+ return;
+
+ if (test_getrusage(&test->rusage) == -1)
+ return;
+#endif
+}
+
+
+/**
+ * @brief Stop test rusage and check if thresholds were exceeded.
+ * Call when test has finished.
+ *
+ * @returns -1 if thresholds were exceeded, else 0.
+ */
+int test_rusage_stop(struct test *test, double duration) {
+#if HAVE_GETRUSAGE
+ struct rusage start, end;
+
+ /* Can't do per-test rusage checks when tests run in parallel. */
+ if (test_concurrent_max > 1)
+ return 0;
+
+ if (test_getrusage(&end) == -1)
+ return 0;
+
+ /* Let duration be at least 1ms to avoid
+ * too-close-to-zero comparisons */
+ if (duration < 0.001)
+ duration = 0.001;
+
+ start = test->rusage;
+ test->rusage = test_rusage_calc(&start, &end, duration);
+
+ return test_rusage_check_thresholds(test, &test->rusage, duration);
+#else
+ return 0;
+#endif
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py b/fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py
new file mode 100755
index 000000000..9cb7d194a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/sasl_test.py
@@ -0,0 +1,328 @@
+#!/usr/bin/env python3
+#
+#
+# Run librdkafka regression tests on with different SASL parameters
+# and broker verisons.
+#
+# Requires:
+# trivup python module
+# gradle in your PATH
+
+from cluster_testing import (
+ LibrdkafkaTestCluster,
+ print_report_summary,
+ print_test_report_summary,
+ read_scenario_conf)
+from LibrdkafkaTestApp import LibrdkafkaTestApp
+
+import os
+import sys
+import argparse
+import json
+import tempfile
+
+
+def test_it(version, deploy=True, conf={}, rdkconf={}, tests=None, debug=False,
+ scenario="default"):
+ """
+ @brief Create, deploy and start a Kafka cluster using Kafka \\p version
+ Then run librdkafka's regression tests.
+ """
+
+ cluster = LibrdkafkaTestCluster(
+ version, conf, debug=debug, scenario=scenario)
+
+ # librdkafka's regression tests, as an App.
+ rdkafka = LibrdkafkaTestApp(cluster, version, _rdkconf, tests=tests,
+ scenario=scenario)
+ rdkafka.do_cleanup = False
+ rdkafka.local_tests = False
+
+ if deploy:
+ cluster.deploy()
+
+ cluster.start(timeout=30)
+
+ print(
+ '# Connect to cluster with bootstrap.servers %s' %
+ cluster.bootstrap_servers())
+ rdkafka.start()
+ print(
+ '# librdkafka regression tests started, logs in %s' %
+ rdkafka.root_path())
+ try:
+ rdkafka.wait_stopped(timeout=60 * 30)
+ rdkafka.dbg(
+ 'wait stopped: %s, runtime %ds' %
+ (rdkafka.state, rdkafka.runtime()))
+ except KeyboardInterrupt:
+ print('# Aborted by user')
+
+ report = rdkafka.report()
+ if report is not None:
+ report['root_path'] = rdkafka.root_path()
+
+ cluster.stop(force=True)
+
+ cluster.cleanup()
+ return report
+
+
+def handle_report(report, version, suite):
+ """ Parse test report and return tuple (Passed(bool), Reason(str)) """
+ test_cnt = report.get('tests_run', 0)
+
+ if test_cnt == 0:
+ return (False, 'No tests run')
+
+ passed = report.get('tests_passed', 0)
+ failed = report.get('tests_failed', 0)
+ if 'all' in suite.get('expect_fail', []) or version in suite.get(
+ 'expect_fail', []):
+ expect_fail = True
+ else:
+ expect_fail = False
+
+ if expect_fail:
+ if failed == test_cnt:
+ return (True, 'All %d/%d tests failed as expected' %
+ (failed, test_cnt))
+ else:
+ return (False, '%d/%d tests failed: expected all to fail' %
+ (failed, test_cnt))
+ else:
+ if failed > 0:
+ return (False, '%d/%d tests passed: expected all to pass' %
+ (passed, test_cnt))
+ else:
+ return (True, 'All %d/%d tests passed as expected' %
+ (passed, test_cnt))
+
+
+if __name__ == '__main__':
+
+ parser = argparse.ArgumentParser(
+ description='Run librdkafka test suit using SASL on a '
+ 'trivupped cluster')
+
+ parser.add_argument('--conf', type=str, dest='conf', default=None,
+ help='trivup JSON config object (not file)')
+ parser.add_argument('--rdkconf', type=str, dest='rdkconf', default=None,
+ help='trivup JSON config object (not file) '
+ 'for LibrdkafkaTestApp')
+ parser.add_argument('--scenario', type=str, dest='scenario',
+ default='default',
+ help='Test scenario (see scenarios/ directory)')
+ parser.add_argument('--tests', type=str, dest='tests', default=None,
+ help='Test to run (e.g., "0002")')
+ parser.add_argument('--no-ssl', action='store_false', dest='ssl',
+ default=True,
+ help='Don\'t run SSL tests')
+ parser.add_argument('--no-sasl', action='store_false', dest='sasl',
+ default=True,
+ help='Don\'t run SASL tests')
+ parser.add_argument('--no-oidc', action='store_false', dest='oidc',
+ default=True,
+ help='Don\'t run OAuth/OIDC tests')
+ parser.add_argument('--no-plaintext', action='store_false',
+ dest='plaintext', default=True,
+ help='Don\'t run PLAINTEXT tests')
+
+ parser.add_argument('--report', type=str, dest='report', default=None,
+ help='Write test suites report to this filename')
+ parser.add_argument('--debug', action='store_true', dest='debug',
+ default=False,
+ help='Enable trivup debugging')
+ parser.add_argument('--suite', type=str, default=None,
+ help='Only run matching suite(s) (substring match)')
+ parser.add_argument('versions', type=str, default=None,
+ nargs='*', help='Limit broker versions to these')
+ args = parser.parse_args()
+
+ conf = dict()
+ rdkconf = dict()
+
+ if args.conf is not None:
+ conf.update(json.loads(args.conf))
+ if args.rdkconf is not None:
+ rdkconf.update(json.loads(args.rdkconf))
+ if args.tests is not None:
+ tests = args.tests.split(',')
+ else:
+ tests = None
+
+ conf.update(read_scenario_conf(args.scenario))
+
+ # Test version,supported mechs + suite matrix
+ versions = list()
+ if len(args.versions):
+ for v in args.versions:
+ versions.append(
+ (v, ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']))
+ else:
+ versions = [('3.1.0',
+ ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']),
+ ('2.1.0',
+ ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI', 'OAUTHBEARER']),
+ ('0.10.2.0', ['SCRAM-SHA-512', 'PLAIN', 'GSSAPI']),
+ ('0.9.0.1', ['GSSAPI']),
+ ('0.8.2.2', [])]
+ sasl_plain_conf = {'sasl_mechanisms': 'PLAIN',
+ 'sasl_users': 'myuser=mypassword'}
+ sasl_scram_conf = {'sasl_mechanisms': 'SCRAM-SHA-512',
+ 'sasl_users': 'myuser=mypassword'}
+ ssl_sasl_plain_conf = {'sasl_mechanisms': 'PLAIN',
+ 'sasl_users': 'myuser=mypassword',
+ 'security.protocol': 'SSL'}
+ sasl_oauthbearer_conf = {'sasl_mechanisms': 'OAUTHBEARER',
+ 'sasl_oauthbearer_config':
+ 'scope=requiredScope principal=admin'}
+ sasl_oauth_oidc_conf = {'sasl_mechanisms': 'OAUTHBEARER',
+ 'sasl_oauthbearer_method': 'OIDC'}
+ sasl_kerberos_conf = {'sasl_mechanisms': 'GSSAPI',
+ 'sasl_servicename': 'kafka'}
+ suites = [{'name': 'SASL PLAIN',
+ 'run': (args.sasl and args.plaintext),
+ 'conf': sasl_plain_conf,
+ 'tests': ['0001'],
+ 'expect_fail': ['0.9.0.1', '0.8.2.2']},
+ {'name': 'SASL SCRAM',
+ 'run': (args.sasl and args.plaintext),
+ 'conf': sasl_scram_conf,
+ 'expect_fail': ['0.9.0.1', '0.8.2.2']},
+ {'name': 'PLAINTEXT (no SASL)',
+ 'run': args.plaintext,
+ 'tests': ['0001']},
+ {'name': 'SSL (no SASL)',
+ 'run': args.ssl,
+ 'conf': {'security.protocol': 'SSL'},
+ 'expect_fail': ['0.8.2.2']},
+ {'name': 'SASL_SSL PLAIN',
+ 'run': (args.sasl and args.ssl and args.plaintext),
+ 'conf': ssl_sasl_plain_conf,
+ 'expect_fail': ['0.9.0.1', '0.8.2.2']},
+ {'name': 'SASL PLAIN with wrong username',
+ 'run': (args.sasl and args.plaintext),
+ 'conf': sasl_plain_conf,
+ 'rdkconf': {'sasl_users': 'wrongjoe=mypassword'},
+ 'tests': ['0001'],
+ 'expect_fail': ['all']},
+ {'name': 'SASL OAUTHBEARER',
+ 'run': args.sasl,
+ 'conf': sasl_oauthbearer_conf,
+ 'tests': ['0001'],
+ 'expect_fail': ['0.10.2.0', '0.9.0.1', '0.8.2.2']},
+ {'name': 'SASL OAUTHBEARER with wrong scope',
+ 'run': args.sasl,
+ 'conf': sasl_oauthbearer_conf,
+ 'rdkconf': {'sasl_oauthbearer_config': 'scope=wrongScope'},
+ 'tests': ['0001'],
+ 'expect_fail': ['all']},
+ {'name': 'OAuth/OIDC',
+ 'run': args.oidc,
+ 'tests': ['0001', '0126'],
+ 'conf': sasl_oauth_oidc_conf,
+ 'minver': '3.1.0',
+ 'expect_fail': ['2.8.1', '2.1.0', '0.10.2.0',
+ '0.9.0.1', '0.8.2.2']},
+ {'name': 'SASL Kerberos',
+ 'run': args.sasl,
+ 'conf': sasl_kerberos_conf,
+ 'expect_fail': ['0.8.2.2']}]
+
+ pass_cnt = 0
+ fail_cnt = 0
+ for version, supported in versions:
+ if len(args.versions) > 0 and version not in args.versions:
+ print('### Skipping version %s' % version)
+ continue
+
+ for suite in suites:
+ if not suite.get('run', True):
+ continue
+
+ if args.suite is not None and suite['name'].find(args.suite) == -1:
+ print(
+ f'# Skipping {suite["name"]} due to --suite {args.suite}')
+ continue
+
+ if 'minver' in suite:
+ minver = [int(x) for x in suite['minver'].split('.')][:3]
+ this_version = [int(x) for x in version.split('.')][:3]
+ if this_version < minver:
+ print(
+ f'# Skipping {suite["name"]} due to version {version} < minimum required version {suite["minver"]}') # noqa: E501
+ continue
+
+ _conf = conf.copy()
+ _conf.update(suite.get('conf', {}))
+ _rdkconf = _conf.copy()
+ _rdkconf.update(rdkconf)
+ _rdkconf.update(suite.get('rdkconf', {}))
+
+ if 'version' not in suite:
+ suite['version'] = dict()
+
+ # Disable SASL broker config if broker version does
+ # not support the selected mechanism
+ mech = suite.get('conf', dict()).get('sasl_mechanisms', None)
+ if mech is not None and mech not in supported:
+ print('# Disabled SASL for broker version %s' % version)
+ _conf.pop('sasl_mechanisms', None)
+
+ # Run tests
+ print(
+ '#### Version %s, suite %s: STARTING' %
+ (version, suite['name']))
+ if tests is None:
+ tests_to_run = suite.get('tests', None)
+ else:
+ tests_to_run = tests
+ report = test_it(version, tests=tests_to_run, conf=_conf,
+ rdkconf=_rdkconf,
+ debug=args.debug, scenario=args.scenario)
+
+ # Handle test report
+ report['version'] = version
+ passed, reason = handle_report(report, version, suite)
+ report['PASSED'] = passed
+ report['REASON'] = reason
+
+ if passed:
+ print('\033[42m#### Version %s, suite %s: PASSED: %s\033[0m' %
+ (version, suite['name'], reason))
+ pass_cnt += 1
+ else:
+ print('\033[41m#### Version %s, suite %s: FAILED: %s\033[0m' %
+ (version, suite['name'], reason))
+ print_test_report_summary('%s @ %s' %
+ (suite['name'], version), report)
+ fail_cnt += 1
+ print('#### Test output: %s/stderr.log' % (report['root_path']))
+
+ suite['version'][version] = report
+
+ # Write test suite report JSON file
+ if args.report is not None:
+ test_suite_report_file = args.report
+ f = open(test_suite_report_file, 'w')
+ else:
+ fd, test_suite_report_file = tempfile.mkstemp(prefix='test_suite_',
+ suffix='.json',
+ dir='.')
+ f = os.fdopen(fd, 'w')
+
+ full_report = {'suites': suites, 'pass_cnt': pass_cnt,
+ 'fail_cnt': fail_cnt, 'total_cnt': pass_cnt + fail_cnt}
+
+ f.write(json.dumps(full_report))
+ f.close()
+
+ print('\n\n\n')
+ print_report_summary(full_report)
+ print('#### Full test suites report in: %s' % test_suite_report_file)
+
+ if pass_cnt == 0 or fail_cnt > 0:
+ sys.exit(1)
+ else:
+ sys.exit(0)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md
new file mode 100644
index 000000000..97027f386
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/README.md
@@ -0,0 +1,6 @@
+# Test scenarios
+
+A test scenario defines the trivup Kafka cluster setup.
+
+The scenario name is the name of the file (without .json extension)
+and the contents is the trivup configuration dict.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json
new file mode 100644
index 000000000..80a587589
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/ak23.json
@@ -0,0 +1,6 @@
+{
+ "versions": ["2.3.0"],
+ "auto_create_topics": "true",
+ "num_partitions": 4,
+ "replication_factor": 3,
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json
new file mode 100644
index 000000000..92287a763
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/default.json
@@ -0,0 +1,5 @@
+{
+ "auto_create_topics": "true",
+ "num_partitions": 4,
+ "replication_factor": 3,
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json
new file mode 100644
index 000000000..8727995bd
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/scenarios/noautocreate.json
@@ -0,0 +1,5 @@
+{
+ "auto_create_topics": "false",
+ "num_partitions": 4,
+ "replication_factor": 3,
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c
new file mode 100644
index 000000000..2de01627d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.c
@@ -0,0 +1,801 @@
+/*
+ * sockem - socket-level network emulation
+ *
+ * Copyright (c) 2016, Magnus Edenhill, Andreas Smas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#define _GNU_SOURCE /* for strdupa() and RTLD_NEXT */
+#include <errno.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <poll.h>
+#include <assert.h>
+#include <netinet/in.h>
+#include <dlfcn.h>
+
+#include "sockem.h"
+
+#include <sys/queue.h>
+
+#ifdef __APPLE__
+#include <sys/time.h> /* for gettimeofday() */
+#endif
+
+#ifdef _WIN32
+#define socket_errno() WSAGetLastError()
+#else
+#define socket_errno() errno
+#define SOCKET_ERROR -1
+#endif
+
+#ifndef strdupa
+#define strdupa(s) \
+ ({ \
+ const char *_s = (s); \
+ size_t _len = strlen(_s) + 1; \
+ char *_d = (char *)alloca(_len); \
+ (char *)memcpy(_d, _s, _len); \
+ })
+#endif
+
+#include <pthread.h>
+typedef pthread_mutex_t mtx_t;
+#define mtx_init(M) pthread_mutex_init(M, NULL)
+#define mtx_destroy(M) pthread_mutex_destroy(M)
+#define mtx_lock(M) pthread_mutex_lock(M)
+#define mtx_unlock(M) pthread_mutex_unlock(M)
+
+typedef pthread_t thrd_t;
+#define thrd_create(THRD, START_ROUTINE, ARG) \
+ pthread_create(THRD, NULL, START_ROUTINE, ARG)
+#define thrd_join0(THRD) pthread_join(THRD, NULL)
+
+
+static mtx_t sockem_lock;
+static LIST_HEAD(, sockem_s) sockems;
+
+static pthread_once_t sockem_once = PTHREAD_ONCE_INIT;
+static char *sockem_conf_str = "";
+
+typedef int64_t sockem_ts_t;
+
+
+#ifdef LIBSOCKEM_PRELOAD
+static int (*sockem_orig_connect)(int, const struct sockaddr *, socklen_t);
+static int (*sockem_orig_close)(int);
+
+#define sockem_close0(S) (sockem_orig_close(S))
+#define sockem_connect0(S, A, AL) (sockem_orig_connect(S, A, AL))
+#else
+#define sockem_close0(S) close(S)
+#define sockem_connect0(S, A, AL) connect(S, A, AL)
+#endif
+
+
+struct sockem_conf {
+ /* FIXME: these needs to be implemented */
+ int tx_thruput; /* app->peer bytes/second */
+ int rx_thruput; /* peer->app bytes/second */
+ int delay; /* latency in ms */
+ int jitter; /* latency variation in ms */
+ int debug; /* enable sockem printf debugging */
+ size_t recv_bufsz; /* recv chunk/buffer size */
+ int direct; /* direct forward, no delay or rate-limiting */
+};
+
+
+typedef struct sockem_buf_s {
+ TAILQ_ENTRY(sockem_buf_s) sb_link;
+ size_t sb_size;
+ size_t sb_of;
+ char *sb_data;
+ int64_t sb_at; /* Transmit at this absolute time. */
+} sockem_buf_t;
+
+
+struct sockem_s {
+ LIST_ENTRY(sockem_s) link;
+
+ enum {
+ /* Forwarder thread run states */
+ SOCKEM_INIT,
+ SOCKEM_START,
+ SOCKEM_RUN,
+ SOCKEM_TERM
+ } run;
+
+ int as; /* application's socket. */
+ int ls; /* internal application listen socket */
+ int ps; /* internal peer socket connecting sockem to the peer.*/
+
+ void *recv_buf; /* Receive buffer */
+ size_t recv_bufsz; /* .. size */
+
+ int linked; /* On sockems list */
+
+ thrd_t thrd; /* Forwarder thread */
+
+ mtx_t lock;
+
+ struct sockem_conf conf; /* application-set config.
+ * protected by .lock */
+
+ struct sockem_conf use; /* last copy of .conf
+ * local to skm thread */
+
+ TAILQ_HEAD(, sockem_buf_s)
+ bufs; /* Buffers in queue waiting for
+ * transmission (delayed) */
+
+ size_t bufs_size; /* Total number of bytes currently enqueued
+ * for transmission */
+ size_t bufs_size_max; /* Soft max threshold for bufs_size,
+ * when this value is exceeded the app fd
+ * is removed from the poll set until
+ * bufs_size falls below the threshold again. */
+ int poll_fd_cnt;
+ int64_t ts_last_fwd; /* For rate-limiter: timestamp of last forward */
+};
+
+
+static int sockem_vset(sockem_t *skm, va_list ap);
+
+
+/**
+ * A microsecond monotonic clock
+ */
+static __attribute__((unused)) __inline int64_t sockem_clock(void) {
+#ifdef __APPLE__
+ /* No monotonic clock on Darwin */
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((int64_t)tv.tv_sec * 1000000LLU) + (int64_t)tv.tv_usec;
+#elif defined(_WIN32)
+ return (int64_t)GetTickCount64() * 1000LLU;
+#else
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ((int64_t)ts.tv_sec * 1000000LLU) +
+ ((int64_t)ts.tv_nsec / 1000LLU);
+#endif
+}
+
+/**
+ * @brief Initialize libsockem once.
+ */
+static void sockem_init(void) {
+ mtx_init(&sockem_lock);
+ sockem_conf_str = getenv("SOCKEM_CONF");
+ if (!sockem_conf_str)
+ sockem_conf_str = "";
+ if (strstr(sockem_conf_str, "debug"))
+ fprintf(stderr, "%% libsockem pre-loaded (%s)\n",
+ sockem_conf_str);
+#ifdef LIBSOCKEM_PRELOAD
+ sockem_orig_connect = dlsym(RTLD_NEXT, "connect");
+ sockem_orig_close = dlsym(RTLD_NEXT, "close");
+#endif
+}
+
+
+/**
+ * @returns the maximum waittime in ms for poll(), at most 1000 ms.
+ * @remark lock must be held
+ */
+static int sockem_calc_waittime(sockem_t *skm, int64_t now) {
+ const sockem_buf_t *sb;
+ int64_t r;
+
+ if (!(sb = TAILQ_FIRST(&skm->bufs)))
+ return 1000;
+ else if (now >= sb->sb_at || skm->use.direct)
+ return 0;
+ else if ((r = (sb->sb_at - now)) < 1000000) {
+ if (r < 1000)
+ return 1; /* Ceil to 1 to avoid busy-loop during
+ * last millisecond. */
+ else
+ return (int)(r / 1000);
+ } else
+ return 1000;
+}
+
+
+/**
+ * @brief Unlink and destroy a buffer
+ */
+static void sockem_buf_destroy(sockem_t *skm, sockem_buf_t *sb) {
+ skm->bufs_size -= sb->sb_size - sb->sb_of;
+ TAILQ_REMOVE(&skm->bufs, sb, sb_link);
+ free(sb);
+}
+
+/**
+ * @brief Add delayed buffer to transmit.
+ */
+static sockem_buf_t *
+sockem_buf_add(sockem_t *skm, size_t size, const void *data) {
+ sockem_buf_t *sb;
+
+ skm->bufs_size += size;
+ if (skm->bufs_size > skm->bufs_size_max) {
+ /* No more buffer space, halt recv fd until
+ * queued buffers drop below threshold. */
+ skm->poll_fd_cnt = 1;
+ }
+
+ sb = malloc(sizeof(*sb) + size);
+
+ sb->sb_of = 0;
+ sb->sb_size = size;
+ sb->sb_data = (char *)(sb + 1);
+ sb->sb_at = sockem_clock() +
+ ((skm->use.delay + (skm->use.jitter / 2) /*FIXME*/) * 1000);
+ memcpy(sb->sb_data, data, size);
+
+ TAILQ_INSERT_TAIL(&skm->bufs, sb, sb_link);
+
+ return sb;
+}
+
+
+/**
+ * @brief Forward any delayed buffers that have passed their deadline
+ * @remark lock must be held but will be released momentarily while
+ * performing send syscall.
+ */
+static int sockem_fwd_bufs(sockem_t *skm, int ofd) {
+ sockem_buf_t *sb;
+ int64_t now = sockem_clock();
+ size_t to_write;
+ int64_t elapsed;
+
+
+ if (skm->use.direct)
+ to_write = 1024 * 1024 * 100;
+ else if ((elapsed = now - skm->ts_last_fwd)) {
+ /* Calculate how many bytes to send to adhere to rate-limit */
+ to_write = (size_t)((double)skm->use.tx_thruput *
+ ((double)elapsed / 1000000.0));
+ } else
+ return 0;
+
+ while (to_write > 0 && (sb = TAILQ_FIRST(&skm->bufs)) &&
+ (skm->use.direct || sb->sb_at <= now)) {
+ ssize_t r;
+ size_t remain = sb->sb_size - sb->sb_of;
+ size_t wr = to_write < remain ? to_write : remain;
+
+ if (wr == 0)
+ break;
+
+ mtx_unlock(&skm->lock);
+
+ r = send(ofd, sb->sb_data + sb->sb_of, wr, 0);
+
+ mtx_lock(&skm->lock);
+
+ if (r == -1) {
+ if (errno == ENOBUFS || errno == EAGAIN ||
+ errno == EWOULDBLOCK)
+ return 0;
+ return -1;
+ }
+
+ skm->ts_last_fwd = now;
+
+ sb->sb_of += r;
+ to_write -= r;
+
+ if (sb->sb_of < sb->sb_size)
+ break;
+
+ sockem_buf_destroy(skm, sb);
+
+ now = sockem_clock();
+ }
+
+ /* Re-enable app fd poll if queued buffers are below threshold */
+ if (skm->bufs_size < skm->bufs_size_max)
+ skm->poll_fd_cnt = 2;
+
+ return 0;
+}
+
+
+/**
+ * @brief read from \p ifd, write to \p ofd in a blocking fashion.
+ *
+ * @returns the number of bytes forwarded, or -1 on error.
+ */
+static int sockem_recv_fwd(sockem_t *skm, int ifd, int ofd, int direct) {
+ ssize_t r, wr;
+
+ r = recv(ifd, skm->recv_buf, skm->recv_bufsz, MSG_DONTWAIT);
+ if (r == -1) {
+ int serr = socket_errno();
+ if (serr == EAGAIN || serr == EWOULDBLOCK)
+ return 0;
+ return -1;
+
+ } else if (r == 0) {
+ /* Socket closed */
+ return -1;
+ }
+
+ if (direct) {
+ /* No delay, rate limit, or buffered data: send right away */
+ wr = send(ofd, skm->recv_buf, r, 0);
+ if (wr < r)
+ return -1;
+
+ return wr;
+ } else {
+ sockem_buf_add(skm, r, skm->recv_buf);
+ return r;
+ }
+}
+
+
+/**
+ * @brief Close all sockets and unsets ->run.
+ * @remark Preserves caller's errno.
+ * @remark lock must be held.
+ */
+static void sockem_close_all(sockem_t *skm) {
+ int serr = socket_errno();
+
+ if (skm->ls != -1) {
+ sockem_close0(skm->ls);
+ skm->ls = -1;
+ }
+
+ if (skm->ps != -1) {
+ sockem_close0(skm->ps);
+ skm->ps = -1;
+ }
+
+ skm->run = SOCKEM_TERM;
+
+ errno = serr;
+}
+
+
+/**
+ * @brief Copy desired (app) config to internally use(d) configuration.
+ * @remark lock must be held
+ */
+static __inline void sockem_conf_use(sockem_t *skm) {
+ skm->use = skm->conf;
+ /* Figure out if direct forward is to be used */
+ skm->use.direct = !(skm->use.delay || skm->use.jitter ||
+ (skm->use.tx_thruput < (1 << 30)));
+}
+
+/**
+ * @brief sockem internal per-socket forwarder thread
+ */
+static void *sockem_run(void *arg) {
+ sockem_t *skm = arg;
+ int cs = -1;
+ int ls;
+ struct pollfd pfd[2];
+
+ mtx_lock(&skm->lock);
+ if (skm->run == SOCKEM_START)
+ skm->run = SOCKEM_RUN;
+ sockem_conf_use(skm);
+ ls = skm->ls;
+ mtx_unlock(&skm->lock);
+
+ skm->recv_bufsz = skm->use.recv_bufsz;
+ skm->recv_buf = malloc(skm->recv_bufsz);
+
+ /* Accept connection from sockfd in sockem_connect() */
+ cs = accept(ls, NULL, 0);
+ if (cs == -1) {
+ mtx_lock(&skm->lock);
+ if (skm->run == SOCKEM_TERM) {
+ /* App socket was closed. */
+ goto done;
+ }
+ fprintf(stderr, "%% sockem: accept(%d) failed: %s\n", ls,
+ strerror(socket_errno()));
+ mtx_unlock(&skm->lock);
+ assert(cs != -1);
+ }
+
+ /* Set up poll (blocking IO) */
+ memset(pfd, 0, sizeof(pfd));
+ pfd[1].fd = cs;
+ pfd[1].events = POLLIN;
+
+ mtx_lock(&skm->lock);
+ pfd[0].fd = skm->ps;
+ mtx_unlock(&skm->lock);
+ pfd[0].events = POLLIN;
+
+ skm->poll_fd_cnt = 2;
+
+ mtx_lock(&skm->lock);
+ while (skm->run == SOCKEM_RUN) {
+ int r;
+ int i;
+ int waittime = sockem_calc_waittime(skm, sockem_clock());
+
+ mtx_unlock(&skm->lock);
+ r = poll(pfd, skm->poll_fd_cnt, waittime);
+ if (r == -1)
+ break;
+
+ /* Send/forward delayed buffers */
+ mtx_lock(&skm->lock);
+ sockem_conf_use(skm);
+
+ if (sockem_fwd_bufs(skm, skm->ps) == -1) {
+ mtx_unlock(&skm->lock);
+ skm->run = SOCKEM_TERM;
+ break;
+ }
+ mtx_unlock(&skm->lock);
+
+ for (i = 0; r > 0 && i < 2; i++) {
+ if (pfd[i].revents & (POLLHUP | POLLERR)) {
+ skm->run = SOCKEM_TERM;
+
+ } else if (pfd[i].revents & POLLIN) {
+ if (sockem_recv_fwd(
+ skm, pfd[i].fd, pfd[i ^ 1].fd,
+ /* direct mode for app socket
+ * without delay, and always for
+ * peer socket (receive channel) */
+ i == 0 || (skm->use.direct &&
+ skm->bufs_size == 0)) ==
+ -1) {
+ skm->run = SOCKEM_TERM;
+ break;
+ }
+ }
+ }
+
+ mtx_lock(&skm->lock);
+ }
+done:
+ if (cs != -1)
+ sockem_close0(cs);
+ sockem_close_all(skm);
+
+ mtx_unlock(&skm->lock);
+ free(skm->recv_buf);
+
+
+ return NULL;
+}
+
+
+
+/**
+ * @brief Connect socket \p s to \p addr
+ */
+static int
+sockem_do_connect(int s, const struct sockaddr *addr, socklen_t addrlen) {
+ int r;
+
+ r = sockem_connect0(s, addr, addrlen);
+ if (r == SOCKET_ERROR) {
+ int serr = socket_errno();
+ if (serr != EINPROGRESS
+#ifdef _WIN32
+ && serr != WSAEWOULDBLOCK
+#endif
+ ) {
+#ifndef _WIN32
+ errno = serr;
+#endif
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+
+sockem_t *sockem_connect(int sockfd,
+ const struct sockaddr *addr,
+ socklen_t addrlen,
+ ...) {
+ sockem_t *skm;
+ int ls, ps;
+ struct sockaddr_in6 sin6 = {.sin6_family = addr->sa_family};
+ socklen_t addrlen2 = addrlen;
+ va_list ap;
+
+ pthread_once(&sockem_once, sockem_init);
+
+ /* Create internal app listener socket */
+ ls = socket(addr->sa_family, SOCK_STREAM, IPPROTO_TCP);
+ if (ls == -1)
+ return NULL;
+
+ if (bind(ls, (struct sockaddr *)&sin6, addrlen) == -1) {
+ sockem_close0(ls);
+ return NULL;
+ }
+
+ /* Get bound address */
+ if (getsockname(ls, (struct sockaddr *)&sin6, &addrlen2) == -1) {
+ sockem_close0(ls);
+ return NULL;
+ }
+
+ if (listen(ls, 1) == -1) {
+ sockem_close0(ls);
+ return NULL;
+ }
+
+ /* Create internal peer socket */
+ ps = socket(addr->sa_family, SOCK_STREAM, IPPROTO_TCP);
+ if (ps == -1) {
+ sockem_close0(ls);
+ return NULL;
+ }
+
+ /* Connect to peer */
+ if (sockem_do_connect(ps, addr, addrlen) == -1) {
+ sockem_close0(ls);
+ sockem_close0(ps);
+ return NULL;
+ }
+
+ /* Create sockem handle */
+ skm = calloc(1, sizeof(*skm));
+ skm->as = sockfd;
+ skm->ls = ls;
+ skm->ps = ps;
+ skm->bufs_size_max = 16 * 1024 * 1024; /* 16kb of queue buffer */
+ TAILQ_INIT(&skm->bufs);
+ mtx_init(&skm->lock);
+
+ /* Default config */
+ skm->conf.rx_thruput = 1 << 30;
+ skm->conf.tx_thruput = 1 << 30;
+ skm->conf.delay = 0;
+ skm->conf.jitter = 0;
+ skm->conf.recv_bufsz = 1024 * 1024;
+ skm->conf.direct = 1;
+
+ /* Apply passed configuration */
+ va_start(ap, addrlen);
+ if (sockem_vset(skm, ap) == -1) {
+ va_end(ap);
+ sockem_close(skm);
+ return NULL;
+ }
+ va_end(ap);
+
+ mtx_lock(&skm->lock);
+ skm->run = SOCKEM_START;
+
+ /* Create pipe thread */
+ if (thrd_create(&skm->thrd, sockem_run, skm) != 0) {
+ mtx_unlock(&skm->lock);
+ sockem_close(skm);
+ return NULL;
+ }
+ mtx_unlock(&skm->lock);
+
+ /* Connect application socket to listen socket */
+ if (sockem_do_connect(sockfd, (struct sockaddr *)&sin6, addrlen2) ==
+ -1) {
+ sockem_close(skm);
+ return NULL;
+ }
+
+ mtx_lock(&sockem_lock);
+ LIST_INSERT_HEAD(&sockems, skm, link);
+ mtx_lock(&skm->lock);
+ skm->linked = 1;
+ mtx_unlock(&skm->lock);
+ mtx_unlock(&sockem_lock);
+
+ return skm;
+}
+
+
+/**
+ * @brief Purge/drop all queued buffers
+ */
+static void sockem_bufs_purge(sockem_t *skm) {
+ sockem_buf_t *sb;
+
+ while ((sb = TAILQ_FIRST(&skm->bufs)))
+ sockem_buf_destroy(skm, sb);
+}
+
+
+void sockem_close(sockem_t *skm) {
+ mtx_lock(&sockem_lock);
+ mtx_lock(&skm->lock);
+ if (skm->linked)
+ LIST_REMOVE(skm, link);
+ mtx_unlock(&sockem_lock);
+
+ /* If thread is running let it close the sockets
+ * to avoid race condition. */
+ if (skm->run == SOCKEM_START || skm->run == SOCKEM_RUN)
+ skm->run = SOCKEM_TERM;
+ else
+ sockem_close_all(skm);
+
+ mtx_unlock(&skm->lock);
+
+ thrd_join0(skm->thrd);
+
+ sockem_bufs_purge(skm);
+
+ mtx_destroy(&skm->lock);
+
+
+ free(skm);
+}
+
+
+/**
+ * @brief Set single conf key.
+ * @remark lock must be held.
+ * @returns 0 on success or -1 if key is unknown
+ */
+static int sockem_set0(sockem_t *skm, const char *key, int val) {
+ if (!strcmp(key, "rx.thruput") || !strcmp(key, "rx.throughput"))
+ skm->conf.rx_thruput = val;
+ else if (!strcmp(key, "tx.thruput") || !strcmp(key, "tx.throughput"))
+ skm->conf.tx_thruput = val;
+ else if (!strcmp(key, "delay"))
+ skm->conf.delay = val;
+ else if (!strcmp(key, "jitter"))
+ skm->conf.jitter = val;
+ else if (!strcmp(key, "rx.bufsz"))
+ skm->conf.recv_bufsz = val;
+ else if (!strcmp(key, "debug"))
+ skm->conf.debug = val;
+ else if (!strcmp(key, "true"))
+ ; /* dummy key for allowing non-empty but default config */
+ else if (!strchr(key, ',')) {
+ char *s = strdupa(key);
+ while (*s) {
+ char *t = strchr(s, ',');
+ char *d = strchr(s, '=');
+ if (t)
+ *t = '\0';
+ if (!d)
+ return -1;
+ *(d++) = '\0';
+
+ if (sockem_set0(skm, s, atoi(d)) == -1)
+ return -1;
+
+ if (!t)
+ break;
+ s += 1;
+ }
+ } else
+ return -1;
+
+ return 0;
+}
+
+
+/**
+ * @brief Set sockem config parameters
+ */
+static int sockem_vset(sockem_t *skm, va_list ap) {
+ const char *key;
+ int val;
+
+ mtx_lock(&skm->lock);
+ while ((key = va_arg(ap, const char *))) {
+ val = va_arg(ap, int);
+ if (sockem_set0(skm, key, val) == -1) {
+ mtx_unlock(&skm->lock);
+ return -1;
+ }
+ }
+ mtx_unlock(&skm->lock);
+
+ return 0;
+}
+
+int sockem_set(sockem_t *skm, ...) {
+ va_list ap;
+ int r;
+
+ va_start(ap, skm);
+ r = sockem_vset(skm, ap);
+ va_end(ap);
+
+ return r;
+}
+
+
+sockem_t *sockem_find(int sockfd) {
+ sockem_t *skm;
+
+ pthread_once(&sockem_once, sockem_init);
+
+ mtx_lock(&sockem_lock);
+ LIST_FOREACH(skm, &sockems, link)
+ if (skm->as == sockfd)
+ break;
+ mtx_unlock(&sockem_lock);
+
+ return skm;
+}
+
+
+#ifdef LIBSOCKEM_PRELOAD
+/**
+ * Provide overloading socket APIs and conf bootstrapping from env vars.
+ *
+ */
+
+
+
+/**
+ * @brief connect(2) overload
+ */
+int connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen) {
+ sockem_t *skm;
+
+ pthread_once(&sockem_once, sockem_init);
+
+ skm = sockem_connect(sockfd, addr, addrlen, sockem_conf_str, 0, NULL);
+ if (!skm)
+ return -1;
+
+ return 0;
+}
+
+/**
+ * @brief close(2) overload
+ */
+int close(int fd) {
+ sockem_t *skm;
+
+ pthread_once(&sockem_once, sockem_init);
+
+ mtx_lock(&sockem_lock);
+ skm = sockem_find(fd);
+
+ if (skm)
+ sockem_close(skm);
+ mtx_unlock(&sockem_lock);
+
+ return sockem_close0(fd);
+}
+
+#endif
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h
new file mode 100644
index 000000000..8a2ddcd87
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem.h
@@ -0,0 +1,85 @@
+/*
+ * sockem - socket-level network emulation
+ *
+ * Copyright (c) 2016, Magnus Edenhill, Andreas Smas
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _RD_SOCKEM_H_
+#define _RD_SOCKEM_H_
+
+#include <sys/types.h>
+#include <sys/socket.h>
+
+
+typedef struct sockem_s sockem_t;
+
+
+
+/**
+ * @brief Connect to \p addr
+ *
+ * See sockem_set for the va-arg list definition.
+ *
+ * @returns a sockem handle on success or NULL on failure.
+ */
+sockem_t *
+sockem_connect(int sockfd, const struct sockaddr *addr, socklen_t addrlen, ...);
+
+/**
+ * @brief Close the connection and destroy the sockem.
+ */
+void sockem_close(sockem_t *skm);
+
+
+
+/**
+ * @brief Set sockem parameters by `char *key, int val` tuples.
+ *
+ * Keys:
+ * rx.thruput
+ * tx.thruput
+ * delay
+ * jitter
+ * rx.bufsz
+ * true (dummy, ignored)
+ *
+ * The key may also be a CSV-list of "key=val,key2=val2" pairs in which case
+ * val must be 0 and the sentinel NULL.
+ *
+ * The va-arg list must be terminated with a NULL sentinel
+ *
+ * @returns 0 on success or -1 if a key was unknown.
+ */
+int sockem_set(sockem_t *skm, ...);
+
+
+
+/**
+ * @brief Find sockem by (application) socket.
+ * @remark Application is responsible for locking.
+ */
+sockem_t *sockem_find(int sockfd);
+
+#endif /* _RD_SOCKEM_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c
new file mode 100644
index 000000000..c3e8ce92e
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.c
@@ -0,0 +1,145 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+/**
+ * @name Thin abstraction on top of sockem to provide scheduled delays,
+ * e.g.; set delay to 500ms in 2000ms
+ */
+
+#include "test.h"
+#include "sockem.h"
+#include "sockem_ctrl.h"
+
+static int sockem_ctrl_thrd_main(void *arg) {
+ sockem_ctrl_t *ctrl = (sockem_ctrl_t *)arg;
+ int64_t next_wakeup = 0;
+ mtx_lock(&ctrl->lock);
+
+ test_curr = ctrl->test;
+
+ while (!ctrl->term) {
+ int64_t now;
+ struct sockem_cmd *cmd;
+ int wait_time = 1000;
+
+ if (next_wakeup)
+ wait_time = (int)(next_wakeup - test_clock()) / 1000;
+
+ if (wait_time > 0)
+ cnd_timedwait_ms(&ctrl->cnd, &ctrl->lock, wait_time);
+
+ /* Ack last command */
+ if (ctrl->cmd_ack != ctrl->cmd_seq) {
+ ctrl->cmd_ack = ctrl->cmd_seq;
+ cnd_signal(&ctrl->cnd); /* signal back to caller */
+ }
+
+ /* Serve expired commands */
+ next_wakeup = 0;
+ now = test_clock();
+ while ((cmd = TAILQ_FIRST(&ctrl->cmds))) {
+ if (!ctrl->term) {
+ if (cmd->ts_at > now) {
+ next_wakeup = cmd->ts_at;
+ break;
+ }
+
+ printf(_C_CYA
+ "## %s: "
+ "sockem: setting socket delay to "
+ "%d\n" _C_CLR,
+ __FILE__, cmd->delay);
+ test_socket_sockem_set_all("delay", cmd->delay);
+ }
+ TAILQ_REMOVE(&ctrl->cmds, cmd, link);
+ free(cmd);
+ }
+ }
+ mtx_unlock(&ctrl->lock);
+
+ return 0;
+}
+
+
+
+/**
+ * @brief Set socket delay to kick in after \p after ms
+ */
+void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay) {
+ struct sockem_cmd *cmd;
+ int wait_seq;
+
+ TEST_SAY("Set delay to %dms (after %dms)\n", delay, after);
+
+ cmd = calloc(1, sizeof(*cmd));
+ cmd->ts_at = test_clock() + (after * 1000);
+ cmd->delay = delay;
+
+ mtx_lock(&ctrl->lock);
+ wait_seq = ++ctrl->cmd_seq;
+ TAILQ_INSERT_TAIL(&ctrl->cmds, cmd, link);
+ cnd_broadcast(&ctrl->cnd);
+
+ /* Wait for ack from sockem thread */
+ while (ctrl->cmd_ack < wait_seq) {
+ TEST_SAY("Waiting for sockem control ack\n");
+ cnd_timedwait_ms(&ctrl->cnd, &ctrl->lock, 1000);
+ }
+ mtx_unlock(&ctrl->lock);
+}
+
+
+void sockem_ctrl_init(sockem_ctrl_t *ctrl) {
+ memset(ctrl, 0, sizeof(*ctrl));
+ mtx_init(&ctrl->lock, mtx_plain);
+ cnd_init(&ctrl->cnd);
+ TAILQ_INIT(&ctrl->cmds);
+ ctrl->test = test_curr;
+
+ mtx_lock(&ctrl->lock);
+ if (thrd_create(&ctrl->thrd, sockem_ctrl_thrd_main, ctrl) !=
+ thrd_success)
+ TEST_FAIL("Failed to create sockem ctrl thread");
+ mtx_unlock(&ctrl->lock);
+}
+
+void sockem_ctrl_term(sockem_ctrl_t *ctrl) {
+ int res;
+
+ /* Join controller thread */
+ mtx_lock(&ctrl->lock);
+ ctrl->term = 1;
+ cnd_broadcast(&ctrl->cnd);
+ mtx_unlock(&ctrl->lock);
+
+ thrd_join(ctrl->thrd, &res);
+
+ cnd_destroy(&ctrl->cnd);
+ mtx_destroy(&ctrl->lock);
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h
new file mode 100644
index 000000000..d33c87fca
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/sockem_ctrl.h
@@ -0,0 +1,61 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2018, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef _SOCKEM_CTRL_H_
+#define _SOCKEM_CTRL_H_
+
+#include <sys/queue.h>
+
+struct sockem_cmd {
+ TAILQ_ENTRY(sockem_cmd) link;
+ int64_t ts_at; /**< to ctrl thread: at this time, set delay*/
+ int delay;
+};
+
+
+typedef struct sockem_ctrl_s {
+ mtx_t lock;
+ cnd_t cnd;
+ thrd_t thrd;
+
+ int cmd_seq; /**< Command sequence id */
+ int cmd_ack; /**< Last acked (seen) command sequence id */
+
+ TAILQ_HEAD(, sockem_cmd) cmds; /**< Queue of commands. */
+
+ int term; /**< Terminate */
+
+ struct test *test;
+} sockem_ctrl_t;
+
+
+void sockem_ctrl_set_delay(sockem_ctrl_t *ctrl, int after, int delay);
+void sockem_ctrl_init(sockem_ctrl_t *ctrl);
+void sockem_ctrl_term(sockem_ctrl_t *ctrl);
+
+#endif /* _SOCKEM_CTRL_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/test.c b/fluent-bit/lib/librdkafka-2.1.0/tests/test.c
new file mode 100644
index 000000000..71180c8f4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/test.c
@@ -0,0 +1,6960 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2013, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#define _CRT_RAND_S // rand_s() on MSVC
+#include <stdarg.h>
+#include "test.h"
+#include <signal.h>
+#include <stdlib.h>
+#include <stdio.h>
+
+#ifdef _WIN32
+#include <direct.h> /* _getcwd */
+#else
+#include <sys/wait.h> /* waitpid */
+#endif
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h"
+
+int test_level = 2;
+int test_seed = 0;
+
+char test_mode[64] = "bare";
+char test_scenario[64] = "default";
+static volatile sig_atomic_t test_exit = 0;
+static char test_topic_prefix[128] = "rdkafkatest";
+static int test_topic_random = 0;
+int tests_running_cnt = 0;
+int test_concurrent_max = 5;
+int test_assert_on_fail = 0;
+double test_timeout_multiplier = 1.0;
+static char *test_sql_cmd = NULL;
+int test_session_timeout_ms = 6000;
+int test_broker_version;
+static const char *test_broker_version_str = "2.4.0.0";
+int test_flags = 0;
+int test_neg_flags = TEST_F_KNOWN_ISSUE;
+/* run delete-test-topics.sh between each test (when concurrent_max = 1) */
+static int test_delete_topics_between = 0;
+static const char *test_git_version = "HEAD";
+static const char *test_sockem_conf = "";
+int test_on_ci = 0; /* Tests are being run on CI, be more forgiving
+ * with regards to timeouts, etc. */
+int test_quick = 0; /** Run tests quickly */
+int test_idempotent_producer = 0;
+int test_rusage = 0; /**< Check resource usage */
+/**< CPU speed calibration for rusage threshold checks.
+ * >1.0: CPU is slower than base line system,
+ * <1.0: CPU is faster than base line system. */
+double test_rusage_cpu_calibration = 1.0;
+static const char *tests_to_run = NULL; /* all */
+static const char *subtests_to_run = NULL; /* all */
+static const char *tests_to_skip = NULL; /* none */
+int test_write_report = 0; /**< Write test report file */
+
+static int show_summary = 1;
+static int test_summary(int do_lock);
+
+/**
+ * Protects shared state, such as tests[]
+ */
+mtx_t test_mtx;
+cnd_t test_cnd;
+
+static const char *test_states[] = {
+ "DNS", "SKIPPED", "RUNNING", "PASSED", "FAILED",
+};
+
+
+
+#define _TEST_DECL(NAME) extern int main_##NAME(int, char **)
+#define _TEST(NAME, FLAGS, ...) \
+ { .name = #NAME, .mainfunc = main_##NAME, .flags = FLAGS, __VA_ARGS__ }
+
+
+/**
+ * Declare all tests here
+ */
+_TEST_DECL(0000_unittests);
+_TEST_DECL(0001_multiobj);
+_TEST_DECL(0002_unkpart);
+_TEST_DECL(0003_msgmaxsize);
+_TEST_DECL(0004_conf);
+_TEST_DECL(0005_order);
+_TEST_DECL(0006_symbols);
+_TEST_DECL(0007_autotopic);
+_TEST_DECL(0008_reqacks);
+_TEST_DECL(0009_mock_cluster);
+_TEST_DECL(0011_produce_batch);
+_TEST_DECL(0012_produce_consume);
+_TEST_DECL(0013_null_msgs);
+_TEST_DECL(0014_reconsume_191);
+_TEST_DECL(0015_offsets_seek);
+_TEST_DECL(0016_client_swname);
+_TEST_DECL(0017_compression);
+_TEST_DECL(0018_cgrp_term);
+_TEST_DECL(0019_list_groups);
+_TEST_DECL(0020_destroy_hang);
+_TEST_DECL(0021_rkt_destroy);
+_TEST_DECL(0022_consume_batch);
+_TEST_DECL(0022_consume_batch_local);
+_TEST_DECL(0025_timers);
+_TEST_DECL(0026_consume_pause);
+_TEST_DECL(0028_long_topicnames);
+_TEST_DECL(0029_assign_offset);
+_TEST_DECL(0030_offset_commit);
+_TEST_DECL(0031_get_offsets);
+_TEST_DECL(0033_regex_subscribe);
+_TEST_DECL(0033_regex_subscribe_local);
+_TEST_DECL(0034_offset_reset);
+_TEST_DECL(0034_offset_reset_mock);
+_TEST_DECL(0035_api_version);
+_TEST_DECL(0036_partial_fetch);
+_TEST_DECL(0037_destroy_hang_local);
+_TEST_DECL(0038_performance);
+_TEST_DECL(0039_event_dr);
+_TEST_DECL(0039_event_log);
+_TEST_DECL(0039_event);
+_TEST_DECL(0040_io_event);
+_TEST_DECL(0041_fetch_max_bytes);
+_TEST_DECL(0042_many_topics);
+_TEST_DECL(0043_no_connection);
+_TEST_DECL(0044_partition_cnt);
+_TEST_DECL(0045_subscribe_update);
+_TEST_DECL(0045_subscribe_update_topic_remove);
+_TEST_DECL(0045_subscribe_update_non_exist_and_partchange);
+_TEST_DECL(0045_subscribe_update_mock);
+_TEST_DECL(0046_rkt_cache);
+_TEST_DECL(0047_partial_buf_tmout);
+_TEST_DECL(0048_partitioner);
+_TEST_DECL(0049_consume_conn_close);
+_TEST_DECL(0050_subscribe_adds);
+_TEST_DECL(0051_assign_adds);
+_TEST_DECL(0052_msg_timestamps);
+_TEST_DECL(0053_stats_timing);
+_TEST_DECL(0053_stats);
+_TEST_DECL(0054_offset_time);
+_TEST_DECL(0055_producer_latency);
+_TEST_DECL(0056_balanced_group_mt);
+_TEST_DECL(0057_invalid_topic);
+_TEST_DECL(0058_log);
+_TEST_DECL(0059_bsearch);
+_TEST_DECL(0060_op_prio);
+_TEST_DECL(0061_consumer_lag);
+_TEST_DECL(0062_stats_event);
+_TEST_DECL(0063_clusterid);
+_TEST_DECL(0064_interceptors);
+_TEST_DECL(0065_yield);
+_TEST_DECL(0066_plugins);
+_TEST_DECL(0067_empty_topic);
+_TEST_DECL(0068_produce_timeout);
+_TEST_DECL(0069_consumer_add_parts);
+_TEST_DECL(0070_null_empty);
+_TEST_DECL(0072_headers_ut);
+_TEST_DECL(0073_headers);
+_TEST_DECL(0074_producev);
+_TEST_DECL(0075_retry);
+_TEST_DECL(0076_produce_retry);
+_TEST_DECL(0077_compaction);
+_TEST_DECL(0078_c_from_cpp);
+_TEST_DECL(0079_fork);
+_TEST_DECL(0080_admin_ut);
+_TEST_DECL(0081_admin);
+_TEST_DECL(0082_fetch_max_bytes);
+_TEST_DECL(0083_cb_event);
+_TEST_DECL(0084_destroy_flags_local);
+_TEST_DECL(0084_destroy_flags);
+_TEST_DECL(0085_headers);
+_TEST_DECL(0086_purge_local);
+_TEST_DECL(0086_purge_remote);
+_TEST_DECL(0088_produce_metadata_timeout);
+_TEST_DECL(0089_max_poll_interval);
+_TEST_DECL(0090_idempotence);
+_TEST_DECL(0091_max_poll_interval_timeout);
+_TEST_DECL(0092_mixed_msgver);
+_TEST_DECL(0093_holb_consumer);
+_TEST_DECL(0094_idempotence_msg_timeout);
+_TEST_DECL(0095_all_brokers_down);
+_TEST_DECL(0097_ssl_verify);
+_TEST_DECL(0097_ssl_verify_local);
+_TEST_DECL(0098_consumer_txn);
+_TEST_DECL(0099_commit_metadata);
+_TEST_DECL(0100_thread_interceptors);
+_TEST_DECL(0101_fetch_from_follower);
+_TEST_DECL(0102_static_group_rebalance);
+_TEST_DECL(0103_transactions_local);
+_TEST_DECL(0103_transactions);
+_TEST_DECL(0104_fetch_from_follower_mock);
+_TEST_DECL(0105_transactions_mock);
+_TEST_DECL(0106_cgrp_sess_timeout);
+_TEST_DECL(0107_topic_recreate);
+_TEST_DECL(0109_auto_create_topics);
+_TEST_DECL(0110_batch_size);
+_TEST_DECL(0111_delay_create_topics);
+_TEST_DECL(0112_assign_unknown_part);
+_TEST_DECL(0113_cooperative_rebalance_local);
+_TEST_DECL(0113_cooperative_rebalance);
+_TEST_DECL(0114_sticky_partitioning);
+_TEST_DECL(0115_producer_auth);
+_TEST_DECL(0116_kafkaconsumer_close);
+_TEST_DECL(0117_mock_errors);
+_TEST_DECL(0118_commit_rebalance);
+_TEST_DECL(0119_consumer_auth);
+_TEST_DECL(0120_asymmetric_subscription);
+_TEST_DECL(0121_clusterid);
+_TEST_DECL(0122_buffer_cleaning_after_rebalance);
+_TEST_DECL(0123_connections_max_idle);
+_TEST_DECL(0124_openssl_invalid_engine);
+_TEST_DECL(0125_immediate_flush);
+_TEST_DECL(0126_oauthbearer_oidc);
+_TEST_DECL(0128_sasl_callback_queue);
+_TEST_DECL(0129_fetch_aborted_msgs);
+_TEST_DECL(0130_store_offsets);
+_TEST_DECL(0131_connect_timeout);
+_TEST_DECL(0132_strategy_ordering);
+_TEST_DECL(0133_ssl_keys);
+_TEST_DECL(0134_ssl_provider);
+_TEST_DECL(0135_sasl_credentials);
+_TEST_DECL(0136_resolve_cb);
+_TEST_DECL(0137_barrier_batch_consume);
+_TEST_DECL(0138_admin_mock);
+
+/* Manual tests */
+_TEST_DECL(8000_idle);
+
+
+/* Define test resource usage thresholds if the default limits
+ * are not tolerable.
+ *
+ * Fields:
+ * .ucpu - Max User CPU percentage (double)
+ * .scpu - Max System/Kernel CPU percentage (double)
+ * .rss - Max RSS (memory) in megabytes (double)
+ * .ctxsw - Max number of voluntary context switches (int)
+ *
+ * Also see test_rusage_check_thresholds() in rusage.c
+ *
+ * Make a comment in the _THRES() below why the extra thresholds are required.
+ *
+ * Usage:
+ * _TEST(00...., ...,
+ * _THRES(.ucpu = 15.0)), <-- Max 15% User CPU usage
+ */
+#define _THRES(...) .rusage_thres = {__VA_ARGS__}
+
+/**
+ * Define all tests here
+ */
+struct test tests[] = {
+ /* Special MAIN test to hold over-all timings, etc. */
+ {.name = "<MAIN>", .flags = TEST_F_LOCAL},
+ _TEST(0000_unittests,
+ TEST_F_LOCAL,
+ /* The msgq insert order tests are heavy on
+ * user CPU (memory scan), RSS, and
+ * system CPU (lots of allocations -> madvise(2)). */
+ _THRES(.ucpu = 100.0, .scpu = 20.0, .rss = 900.0)),
+ _TEST(0001_multiobj, 0),
+ _TEST(0002_unkpart, 0),
+ _TEST(0003_msgmaxsize, 0),
+ _TEST(0004_conf, TEST_F_LOCAL),
+ _TEST(0005_order, 0),
+ _TEST(0006_symbols, TEST_F_LOCAL),
+ _TEST(0007_autotopic, 0),
+ _TEST(0008_reqacks, 0),
+ _TEST(0009_mock_cluster,
+ TEST_F_LOCAL,
+ /* Mock cluster requires MsgVersion 2 */
+ TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0011_produce_batch,
+ 0,
+ /* Produces a lot of messages */
+ _THRES(.ucpu = 40.0, .scpu = 8.0)),
+ _TEST(0012_produce_consume, 0),
+ _TEST(0013_null_msgs, 0),
+ _TEST(0014_reconsume_191, 0),
+ _TEST(0015_offsets_seek, 0),
+ _TEST(0016_client_swname, 0),
+ _TEST(0017_compression, 0),
+ _TEST(0018_cgrp_term, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0019_list_groups, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0020_destroy_hang, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0021_rkt_destroy, 0),
+ _TEST(0022_consume_batch, 0),
+ _TEST(0022_consume_batch_local, TEST_F_LOCAL),
+ _TEST(0025_timers, TEST_F_LOCAL),
+ _TEST(0026_consume_pause, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0028_long_topicnames,
+ TEST_F_KNOWN_ISSUE,
+ TEST_BRKVER(0, 9, 0, 0),
+ .extra = "https://github.com/edenhill/librdkafka/issues/529"),
+ _TEST(0029_assign_offset, 0),
+ _TEST(0030_offset_commit,
+ 0,
+ TEST_BRKVER(0, 9, 0, 0),
+ /* Loops over committed() until timeout */
+ _THRES(.ucpu = 10.0, .scpu = 5.0)),
+ _TEST(0031_get_offsets, 0),
+ _TEST(0033_regex_subscribe, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0033_regex_subscribe_local, TEST_F_LOCAL),
+ _TEST(0034_offset_reset, 0),
+ _TEST(0034_offset_reset_mock, TEST_F_LOCAL),
+ _TEST(0035_api_version, 0),
+ _TEST(0036_partial_fetch, 0),
+ _TEST(0037_destroy_hang_local, TEST_F_LOCAL),
+ _TEST(0038_performance,
+ 0,
+ /* Produces and consumes a lot of messages */
+ _THRES(.ucpu = 150.0, .scpu = 10)),
+ _TEST(0039_event_dr, 0),
+ _TEST(0039_event_log, TEST_F_LOCAL),
+ _TEST(0039_event, TEST_F_LOCAL),
+ _TEST(0040_io_event, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0041_fetch_max_bytes,
+ 0,
+ /* Re-fetches large messages multiple times */
+ _THRES(.ucpu = 20.0, .scpu = 10.0)),
+ _TEST(0042_many_topics, 0),
+ _TEST(0043_no_connection, TEST_F_LOCAL),
+ _TEST(0044_partition_cnt,
+ 0,
+ TEST_BRKVER(1, 0, 0, 0),
+ /* Produces a lot of messages */
+ _THRES(.ucpu = 30.0)),
+ _TEST(0045_subscribe_update, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0045_subscribe_update_topic_remove,
+ 0,
+ TEST_BRKVER(0, 9, 0, 0),
+ .scenario = "noautocreate"),
+ _TEST(0045_subscribe_update_non_exist_and_partchange,
+ 0,
+ TEST_BRKVER(0, 9, 0, 0),
+ .scenario = "noautocreate"),
+ _TEST(0045_subscribe_update_mock, TEST_F_LOCAL),
+ _TEST(0046_rkt_cache, TEST_F_LOCAL),
+ _TEST(0047_partial_buf_tmout, TEST_F_KNOWN_ISSUE),
+ _TEST(0048_partitioner,
+ 0,
+ /* Produces many small messages */
+ _THRES(.ucpu = 10.0, .scpu = 5.0)),
+#if WITH_SOCKEM
+ _TEST(0049_consume_conn_close, TEST_F_SOCKEM, TEST_BRKVER(0, 9, 0, 0)),
+#endif
+ _TEST(0050_subscribe_adds, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0051_assign_adds, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0052_msg_timestamps, 0, TEST_BRKVER(0, 10, 0, 0)),
+ _TEST(0053_stats_timing, TEST_F_LOCAL),
+ _TEST(0053_stats, 0),
+ _TEST(0054_offset_time, 0, TEST_BRKVER(0, 10, 1, 0)),
+ _TEST(0055_producer_latency, TEST_F_KNOWN_ISSUE_WIN32),
+ _TEST(0056_balanced_group_mt, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0057_invalid_topic, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0058_log, TEST_F_LOCAL),
+ _TEST(0059_bsearch, 0, TEST_BRKVER(0, 10, 0, 0)),
+ _TEST(0060_op_prio, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0061_consumer_lag, 0),
+ _TEST(0062_stats_event, TEST_F_LOCAL),
+ _TEST(0063_clusterid, 0, TEST_BRKVER(0, 10, 1, 0)),
+ _TEST(0064_interceptors, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0065_yield, 0),
+ _TEST(0066_plugins,
+ TEST_F_LOCAL | TEST_F_KNOWN_ISSUE_WIN32 | TEST_F_KNOWN_ISSUE_OSX,
+ .extra =
+ "dynamic loading of tests might not be fixed for this platform"),
+ _TEST(0067_empty_topic, 0),
+#if WITH_SOCKEM
+ _TEST(0068_produce_timeout, TEST_F_SOCKEM),
+#endif
+ _TEST(0069_consumer_add_parts,
+ TEST_F_KNOWN_ISSUE_WIN32,
+ TEST_BRKVER(1, 0, 0, 0)),
+ _TEST(0070_null_empty, 0),
+ _TEST(0072_headers_ut, TEST_F_LOCAL),
+ _TEST(0073_headers, 0, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0074_producev, TEST_F_LOCAL),
+#if WITH_SOCKEM
+ _TEST(0075_retry, TEST_F_SOCKEM),
+#endif
+ _TEST(0076_produce_retry, TEST_F_SOCKEM),
+ _TEST(0077_compaction,
+ 0,
+ /* The test itself requires message headers */
+ TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0078_c_from_cpp, TEST_F_LOCAL),
+ _TEST(0079_fork,
+ TEST_F_LOCAL | TEST_F_KNOWN_ISSUE,
+ .extra = "using a fork():ed rd_kafka_t is not supported and will "
+ "most likely hang"),
+ _TEST(0080_admin_ut, TEST_F_LOCAL),
+ _TEST(0081_admin, 0, TEST_BRKVER(0, 10, 2, 0)),
+ _TEST(0082_fetch_max_bytes, 0, TEST_BRKVER(0, 10, 1, 0)),
+ _TEST(0083_cb_event, 0, TEST_BRKVER(0, 9, 0, 0)),
+ _TEST(0084_destroy_flags_local, TEST_F_LOCAL),
+ _TEST(0084_destroy_flags, 0),
+ _TEST(0085_headers, 0, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0086_purge_local, TEST_F_LOCAL),
+ _TEST(0086_purge_remote, 0),
+#if WITH_SOCKEM
+ _TEST(0088_produce_metadata_timeout, TEST_F_SOCKEM),
+#endif
+ _TEST(0089_max_poll_interval, 0, TEST_BRKVER(0, 10, 1, 0)),
+ _TEST(0090_idempotence, 0, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0091_max_poll_interval_timeout, 0, TEST_BRKVER(0, 10, 1, 0)),
+ _TEST(0092_mixed_msgver, 0, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0093_holb_consumer, 0, TEST_BRKVER(0, 10, 1, 0)),
+#if WITH_SOCKEM
+ _TEST(0094_idempotence_msg_timeout,
+ TEST_F_SOCKEM,
+ TEST_BRKVER(0, 11, 0, 0)),
+#endif
+ _TEST(0095_all_brokers_down, TEST_F_LOCAL),
+ _TEST(0097_ssl_verify, 0),
+ _TEST(0097_ssl_verify_local, TEST_F_LOCAL),
+ _TEST(0098_consumer_txn, 0, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0099_commit_metadata, 0),
+ _TEST(0100_thread_interceptors, TEST_F_LOCAL),
+ _TEST(0101_fetch_from_follower, 0, TEST_BRKVER(2, 4, 0, 0)),
+ _TEST(0102_static_group_rebalance, 0, TEST_BRKVER(2, 3, 0, 0)),
+ _TEST(0103_transactions_local, TEST_F_LOCAL),
+ _TEST(0103_transactions,
+ 0,
+ TEST_BRKVER(0, 11, 0, 0),
+ .scenario = "default,ak23"),
+ _TEST(0104_fetch_from_follower_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)),
+ _TEST(0105_transactions_mock, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0106_cgrp_sess_timeout, TEST_F_LOCAL, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0107_topic_recreate,
+ 0,
+ TEST_BRKVER_TOPIC_ADMINAPI,
+ .scenario = "noautocreate"),
+ _TEST(0109_auto_create_topics, 0),
+ _TEST(0110_batch_size, 0),
+ _TEST(0111_delay_create_topics,
+ 0,
+ TEST_BRKVER_TOPIC_ADMINAPI,
+ .scenario = "noautocreate"),
+ _TEST(0112_assign_unknown_part, 0),
+ _TEST(0113_cooperative_rebalance_local,
+ TEST_F_LOCAL,
+ TEST_BRKVER(2, 4, 0, 0)),
+ _TEST(0113_cooperative_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)),
+ _TEST(0114_sticky_partitioning, 0),
+ _TEST(0115_producer_auth, 0, TEST_BRKVER(2, 1, 0, 0)),
+ _TEST(0116_kafkaconsumer_close, TEST_F_LOCAL),
+ _TEST(0117_mock_errors, TEST_F_LOCAL),
+ _TEST(0118_commit_rebalance, 0),
+ _TEST(0119_consumer_auth, 0, TEST_BRKVER(2, 1, 0, 0)),
+ _TEST(0120_asymmetric_subscription, TEST_F_LOCAL),
+ _TEST(0121_clusterid, TEST_F_LOCAL),
+ _TEST(0122_buffer_cleaning_after_rebalance, 0, TEST_BRKVER(2, 4, 0, 0)),
+ _TEST(0123_connections_max_idle, 0),
+ _TEST(0124_openssl_invalid_engine, TEST_F_LOCAL),
+ _TEST(0125_immediate_flush, 0),
+ _TEST(0126_oauthbearer_oidc, 0, TEST_BRKVER(3, 1, 0, 0)),
+ _TEST(0128_sasl_callback_queue, TEST_F_LOCAL, TEST_BRKVER(2, 0, 0, 0)),
+ _TEST(0129_fetch_aborted_msgs, 0, TEST_BRKVER(0, 11, 0, 0)),
+ _TEST(0130_store_offsets, 0),
+ _TEST(0131_connect_timeout, TEST_F_LOCAL),
+ _TEST(0132_strategy_ordering, 0, TEST_BRKVER(2, 4, 0, 0)),
+ _TEST(0133_ssl_keys, TEST_F_LOCAL),
+ _TEST(0134_ssl_provider, TEST_F_LOCAL),
+ _TEST(0135_sasl_credentials, 0),
+ _TEST(0136_resolve_cb, TEST_F_LOCAL),
+ _TEST(0137_barrier_batch_consume, 0),
+ _TEST(0138_admin_mock, TEST_F_LOCAL, TEST_BRKVER(2, 4, 0, 0)),
+
+ /* Manual tests */
+ _TEST(8000_idle, TEST_F_MANUAL),
+
+ {NULL}};
+
+
+RD_TLS struct test *test_curr = &tests[0];
+
+
+
+#if WITH_SOCKEM
+/**
+ * Socket network emulation with sockem
+ */
+
+static void test_socket_add(struct test *test, sockem_t *skm) {
+ TEST_LOCK();
+ rd_list_add(&test->sockets, skm);
+ TEST_UNLOCK();
+}
+
+static void test_socket_del(struct test *test, sockem_t *skm, int do_lock) {
+ if (do_lock)
+ TEST_LOCK();
+ /* Best effort, skm might not have been added if connect_cb failed */
+ rd_list_remove(&test->sockets, skm);
+ if (do_lock)
+ TEST_UNLOCK();
+}
+
+int test_socket_sockem_set_all(const char *key, int val) {
+ int i;
+ sockem_t *skm;
+ int cnt = 0;
+
+ TEST_LOCK();
+
+ cnt = rd_list_cnt(&test_curr->sockets);
+ TEST_SAY("Setting sockem %s=%d on %s%d socket(s)\n", key, val,
+ cnt > 0 ? "" : _C_RED, cnt);
+
+ RD_LIST_FOREACH(skm, &test_curr->sockets, i) {
+ if (sockem_set(skm, key, val, NULL) == -1)
+ TEST_FAIL("sockem_set(%s, %d) failed", key, val);
+ }
+
+ TEST_UNLOCK();
+
+ return cnt;
+}
+
+void test_socket_sockem_set(int s, const char *key, int value) {
+ sockem_t *skm;
+
+ TEST_LOCK();
+ skm = sockem_find(s);
+ if (skm)
+ sockem_set(skm, key, value, NULL);
+ TEST_UNLOCK();
+}
+
+void test_socket_close_all(struct test *test, int reinit) {
+ TEST_LOCK();
+ rd_list_destroy(&test->sockets);
+ if (reinit)
+ rd_list_init(&test->sockets, 16, (void *)sockem_close);
+ TEST_UNLOCK();
+}
+
+
+static int test_connect_cb(int s,
+ const struct sockaddr *addr,
+ int addrlen,
+ const char *id,
+ void *opaque) {
+ struct test *test = opaque;
+ sockem_t *skm;
+ int r;
+
+ skm = sockem_connect(s, addr, addrlen, test_sockem_conf, 0, NULL);
+ if (!skm)
+ return errno;
+
+ if (test->connect_cb) {
+ r = test->connect_cb(test, skm, id);
+ if (r)
+ return r;
+ }
+
+ test_socket_add(test, skm);
+
+ return 0;
+}
+
+static int test_closesocket_cb(int s, void *opaque) {
+ struct test *test = opaque;
+ sockem_t *skm;
+
+ TEST_LOCK();
+ skm = sockem_find(s);
+ if (skm) {
+ /* Close sockem's sockets */
+ sockem_close(skm);
+ test_socket_del(test, skm, 0 /*nolock*/);
+ }
+ TEST_UNLOCK();
+
+ /* Close librdkafka's socket */
+#ifdef _WIN32
+ closesocket(s);
+#else
+ close(s);
+#endif
+
+ return 0;
+}
+
+
+void test_socket_enable(rd_kafka_conf_t *conf) {
+ rd_kafka_conf_set_connect_cb(conf, test_connect_cb);
+ rd_kafka_conf_set_closesocket_cb(conf, test_closesocket_cb);
+ rd_kafka_conf_set_opaque(conf, test_curr);
+}
+#endif /* WITH_SOCKEM */
+
+/**
+ * @brief For use as the is_fatal_cb(), treating no errors as test-fatal.
+ */
+int test_error_is_not_fatal_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *reason) {
+ return 0;
+}
+
+static void
+test_error_cb(rd_kafka_t *rk, int err, const char *reason, void *opaque) {
+ if (test_curr->is_fatal_cb &&
+ !test_curr->is_fatal_cb(rk, err, reason)) {
+ TEST_SAY(_C_YEL "%s rdkafka error (non-testfatal): %s: %s\n",
+ rd_kafka_name(rk), rd_kafka_err2str(err), reason);
+ } else {
+ if (err == RD_KAFKA_RESP_ERR__FATAL) {
+ char errstr[512];
+ TEST_SAY(_C_RED "%s Fatal error: %s\n",
+ rd_kafka_name(rk), reason);
+
+ err = rd_kafka_fatal_error(rk, errstr, sizeof(errstr));
+
+ if (test_curr->is_fatal_cb &&
+ !test_curr->is_fatal_cb(rk, err, reason))
+ TEST_SAY(_C_YEL
+ "%s rdkafka ignored FATAL error: "
+ "%s: %s\n",
+ rd_kafka_name(rk),
+ rd_kafka_err2str(err), errstr);
+ else
+ TEST_FAIL("%s rdkafka FATAL error: %s: %s",
+ rd_kafka_name(rk),
+ rd_kafka_err2str(err), errstr);
+
+ } else {
+ TEST_FAIL("%s rdkafka error: %s: %s", rd_kafka_name(rk),
+ rd_kafka_err2str(err), reason);
+ }
+ }
+}
+
+static int
+test_stats_cb(rd_kafka_t *rk, char *json, size_t json_len, void *opaque) {
+ struct test *test = test_curr;
+ if (test->stats_fp)
+ fprintf(test->stats_fp,
+ "{\"test\": \"%s\", \"instance\":\"%s\", "
+ "\"stats\": %s}\n",
+ test->name, rd_kafka_name(rk), json);
+ return 0;
+}
+
+
+/**
+ * @brief Limit the test run time (in seconds)
+ */
+void test_timeout_set(int timeout) {
+ TEST_LOCK();
+ TEST_SAY("Setting test timeout to %ds * %.1f\n", timeout,
+ test_timeout_multiplier);
+ timeout = (int)((double)timeout * test_timeout_multiplier);
+ test_curr->timeout = test_clock() + ((int64_t)timeout * 1000000);
+ TEST_UNLOCK();
+}
+
+int tmout_multip(int msecs) {
+ int r;
+ TEST_LOCK();
+ r = (int)(((double)(msecs)) * test_timeout_multiplier);
+ TEST_UNLOCK();
+ return r;
+}
+
+
+
+#ifdef _WIN32
+static void test_init_win32(void) {
+ /* Enable VT emulation to support colored output. */
+ HANDLE hOut = GetStdHandle(STD_OUTPUT_HANDLE);
+ DWORD dwMode = 0;
+
+ if (hOut == INVALID_HANDLE_VALUE || !GetConsoleMode(hOut, &dwMode))
+ return;
+
+#ifndef ENABLE_VIRTUAL_TERMINAL_PROCESSING
+#define ENABLE_VIRTUAL_TERMINAL_PROCESSING 0x4
+#endif
+ dwMode |= ENABLE_VIRTUAL_TERMINAL_PROCESSING;
+ SetConsoleMode(hOut, dwMode);
+}
+#endif
+
+
+static void test_init(void) {
+ int seed;
+ const char *tmp;
+
+
+ if (test_seed)
+ return;
+
+ if ((tmp = test_getenv("TEST_LEVEL", NULL)))
+ test_level = atoi(tmp);
+ if ((tmp = test_getenv("TEST_MODE", NULL)))
+ strncpy(test_mode, tmp, sizeof(test_mode) - 1);
+ if ((tmp = test_getenv("TEST_SCENARIO", NULL)))
+ strncpy(test_scenario, tmp, sizeof(test_scenario) - 1);
+ if ((tmp = test_getenv("TEST_SOCKEM", NULL)))
+ test_sockem_conf = tmp;
+ if ((tmp = test_getenv("TEST_SEED", NULL)))
+ seed = atoi(tmp);
+ else
+ seed = test_clock() & 0xffffffff;
+ if ((tmp = test_getenv("TEST_CPU_CALIBRATION", NULL))) {
+ test_rusage_cpu_calibration = strtod(tmp, NULL);
+ if (test_rusage_cpu_calibration < 0.00001) {
+ fprintf(stderr,
+ "%% Invalid CPU calibration "
+ "value (from TEST_CPU_CALIBRATION env): %s\n",
+ tmp);
+ exit(1);
+ }
+ }
+
+#ifdef _WIN32
+ test_init_win32();
+ {
+ LARGE_INTEGER cycl;
+ QueryPerformanceCounter(&cycl);
+ seed = (int)cycl.QuadPart;
+ }
+#endif
+ srand(seed);
+ test_seed = seed;
+}
+
+
+const char *test_mk_topic_name(const char *suffix, int randomized) {
+ static RD_TLS char ret[512];
+
+ /* Strip main_ prefix (caller is using __FUNCTION__) */
+ if (!strncmp(suffix, "main_", 5))
+ suffix += 5;
+
+ if (test_topic_random || randomized)
+ rd_snprintf(ret, sizeof(ret), "%s_rnd%" PRIx64 "_%s",
+ test_topic_prefix, test_id_generate(), suffix);
+ else
+ rd_snprintf(ret, sizeof(ret), "%s_%s", test_topic_prefix,
+ suffix);
+
+ TEST_SAY("Using topic \"%s\"\n", ret);
+
+ return ret;
+}
+
+
+/**
+ * @brief Set special test config property
+ * @returns 1 if property was known, else 0.
+ */
+int test_set_special_conf(const char *name, const char *val, int *timeoutp) {
+ if (!strcmp(name, "test.timeout.multiplier")) {
+ TEST_LOCK();
+ test_timeout_multiplier = strtod(val, NULL);
+ TEST_UNLOCK();
+ *timeoutp = tmout_multip((*timeoutp) * 1000) / 1000;
+ } else if (!strcmp(name, "test.topic.prefix")) {
+ rd_snprintf(test_topic_prefix, sizeof(test_topic_prefix), "%s",
+ val);
+ } else if (!strcmp(name, "test.topic.random")) {
+ if (!strcmp(val, "true") || !strcmp(val, "1"))
+ test_topic_random = 1;
+ else
+ test_topic_random = 0;
+ } else if (!strcmp(name, "test.concurrent.max")) {
+ TEST_LOCK();
+ test_concurrent_max = (int)strtod(val, NULL);
+ TEST_UNLOCK();
+ } else if (!strcmp(name, "test.sql.command")) {
+ TEST_LOCK();
+ if (test_sql_cmd)
+ rd_free(test_sql_cmd);
+ test_sql_cmd = rd_strdup(val);
+ TEST_UNLOCK();
+ } else
+ return 0;
+
+ return 1;
+}
+
+static void test_read_conf_file(const char *conf_path,
+ rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *topic_conf,
+ int *timeoutp) {
+ FILE *fp;
+ char buf[1024];
+ int line = 0;
+
+#ifndef _WIN32
+ fp = fopen(conf_path, "r");
+#else
+ fp = NULL;
+ errno = fopen_s(&fp, conf_path, "r");
+#endif
+ if (!fp) {
+ if (errno == ENOENT) {
+ TEST_SAY("Test config file %s not found\n", conf_path);
+ return;
+ } else
+ TEST_FAIL("Failed to read %s: %s", conf_path,
+ strerror(errno));
+ }
+
+ while (fgets(buf, sizeof(buf) - 1, fp)) {
+ char *t;
+ char *b = buf;
+ rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN;
+ char *name, *val;
+ char errstr[512];
+
+ line++;
+ if ((t = strchr(b, '\n')))
+ *t = '\0';
+
+ if (*b == '#' || !*b)
+ continue;
+
+ if (!(t = strchr(b, '=')))
+ TEST_FAIL("%s:%i: expected name=value format\n",
+ conf_path, line);
+
+ name = b;
+ *t = '\0';
+ val = t + 1;
+
+ if (test_set_special_conf(name, val, timeoutp))
+ continue;
+
+ if (!strncmp(name, "topic.", strlen("topic."))) {
+ name += strlen("topic.");
+ if (topic_conf)
+ res = rd_kafka_topic_conf_set(topic_conf, name,
+ val, errstr,
+ sizeof(errstr));
+ else
+ res = RD_KAFKA_CONF_OK;
+ name -= strlen("topic.");
+ }
+
+ if (res == RD_KAFKA_CONF_UNKNOWN) {
+ if (conf)
+ res = rd_kafka_conf_set(conf, name, val, errstr,
+ sizeof(errstr));
+ else
+ res = RD_KAFKA_CONF_OK;
+ }
+
+ if (res != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s:%i: %s\n", conf_path, line, errstr);
+ }
+
+ fclose(fp);
+}
+
+/**
+ * @brief Get path to test config file
+ */
+const char *test_conf_get_path(void) {
+ return test_getenv("RDKAFKA_TEST_CONF", "test.conf");
+}
+
+const char *test_getenv(const char *env, const char *def) {
+ return rd_getenv(env, def);
+}
+
+void test_conf_common_init(rd_kafka_conf_t *conf, int timeout) {
+ if (conf) {
+ const char *tmp = test_getenv("TEST_DEBUG", NULL);
+ if (tmp)
+ test_conf_set(conf, "debug", tmp);
+ }
+
+ if (timeout)
+ test_timeout_set(timeout);
+}
+
+
+/**
+ * Creates and sets up kafka configuration objects.
+ * Will read "test.conf" file if it exists.
+ */
+void test_conf_init(rd_kafka_conf_t **conf,
+ rd_kafka_topic_conf_t **topic_conf,
+ int timeout) {
+ const char *test_conf = test_conf_get_path();
+
+ if (conf) {
+ *conf = rd_kafka_conf_new();
+ rd_kafka_conf_set(*conf, "client.id", test_curr->name, NULL, 0);
+ if (test_idempotent_producer)
+ test_conf_set(*conf, "enable.idempotence", "true");
+ rd_kafka_conf_set_error_cb(*conf, test_error_cb);
+ rd_kafka_conf_set_stats_cb(*conf, test_stats_cb);
+
+ /* Allow higher request timeouts on CI */
+ if (test_on_ci)
+ test_conf_set(*conf, "request.timeout.ms", "10000");
+
+#ifdef SIGIO
+ {
+ char buf[64];
+
+ /* Quick termination */
+ rd_snprintf(buf, sizeof(buf), "%i", SIGIO);
+ rd_kafka_conf_set(*conf, "internal.termination.signal",
+ buf, NULL, 0);
+ signal(SIGIO, SIG_IGN);
+ }
+#endif
+ }
+
+#if WITH_SOCKEM
+ if (*test_sockem_conf && conf)
+ test_socket_enable(*conf);
+#endif
+
+ if (topic_conf)
+ *topic_conf = rd_kafka_topic_conf_new();
+
+ /* Open and read optional local test configuration file, if any. */
+ test_read_conf_file(test_conf, conf ? *conf : NULL,
+ topic_conf ? *topic_conf : NULL, &timeout);
+
+ test_conf_common_init(conf ? *conf : NULL, timeout);
+}
+
+
+static RD_INLINE unsigned int test_rand(void) {
+ unsigned int r;
+#ifdef _WIN32
+ rand_s(&r);
+#else
+ r = rand();
+#endif
+ return r;
+}
+/**
+ * Generate a "unique" test id.
+ */
+uint64_t test_id_generate(void) {
+ return (((uint64_t)test_rand()) << 32) | (uint64_t)test_rand();
+}
+
+
+/**
+ * Generate a "unique" string id
+ */
+char *test_str_id_generate(char *dest, size_t dest_size) {
+ rd_snprintf(dest, dest_size, "%" PRId64, test_id_generate());
+ return dest;
+}
+
+/**
+ * Same as test_str_id_generate but returns a temporary string.
+ */
+const char *test_str_id_generate_tmp(void) {
+ static RD_TLS char ret[64];
+ return test_str_id_generate(ret, sizeof(ret));
+}
+
+/**
+ * Format a message token.
+ * Pad's to dest_size.
+ */
+void test_msg_fmt(char *dest,
+ size_t dest_size,
+ uint64_t testid,
+ int32_t partition,
+ int msgid) {
+ size_t of;
+
+ of = rd_snprintf(dest, dest_size,
+ "testid=%" PRIu64 ", partition=%" PRId32 ", msg=%i\n",
+ testid, partition, msgid);
+ if (of < dest_size - 1) {
+ memset(dest + of, '!', dest_size - of);
+ dest[dest_size - 1] = '\0';
+ }
+}
+
+/**
+ * @brief Prepare message value and key for test produce.
+ */
+void test_prepare_msg(uint64_t testid,
+ int32_t partition,
+ int msg_id,
+ char *val,
+ size_t val_size,
+ char *key,
+ size_t key_size) {
+ size_t of = 0;
+
+ test_msg_fmt(key, key_size, testid, partition, msg_id);
+
+ while (of < val_size) {
+ /* Copy-repeat key into val until val_size */
+ size_t len = RD_MIN(val_size - of, key_size);
+ memcpy(val + of, key, len);
+ of += len;
+ }
+}
+
+
+
+/**
+ * Parse a message token
+ */
+void test_msg_parse00(const char *func,
+ int line,
+ uint64_t testid,
+ int32_t exp_partition,
+ int *msgidp,
+ const char *topic,
+ int32_t partition,
+ int64_t offset,
+ const char *key,
+ size_t key_size) {
+ char buf[128];
+ uint64_t in_testid;
+ int in_part;
+
+ if (!key)
+ TEST_FAIL("%s:%i: Message (%s [%" PRId32 "] @ %" PRId64
+ ") "
+ "has empty key\n",
+ func, line, topic, partition, offset);
+
+ rd_snprintf(buf, sizeof(buf), "%.*s", (int)key_size, key);
+
+ if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n",
+ &in_testid, &in_part, msgidp) != 3)
+ TEST_FAIL("%s:%i: Incorrect key format: %s", func, line, buf);
+
+
+ if (testid != in_testid ||
+ (exp_partition != -1 && exp_partition != in_part))
+ TEST_FAIL("%s:%i: Our testid %" PRIu64
+ ", part %i did "
+ "not match message: \"%s\"\n",
+ func, line, testid, (int)exp_partition, buf);
+}
+
+void test_msg_parse0(const char *func,
+ int line,
+ uint64_t testid,
+ rd_kafka_message_t *rkmessage,
+ int32_t exp_partition,
+ int *msgidp) {
+ test_msg_parse00(func, line, testid, exp_partition, msgidp,
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ (const char *)rkmessage->key, rkmessage->key_len);
+}
+
+
+struct run_args {
+ struct test *test;
+ int argc;
+ char **argv;
+};
+
+static int run_test0(struct run_args *run_args) {
+ struct test *test = run_args->test;
+ test_timing_t t_run;
+ int r;
+ char stats_file[256];
+
+ rd_snprintf(stats_file, sizeof(stats_file), "stats_%s_%" PRIu64 ".json",
+ test->name, test_id_generate());
+ if (!(test->stats_fp = fopen(stats_file, "w+")))
+ TEST_SAY("=== Failed to create stats file %s: %s ===\n",
+ stats_file, strerror(errno));
+
+ test_curr = test;
+
+#if WITH_SOCKEM
+ rd_list_init(&test->sockets, 16, (void *)sockem_close);
+#endif
+ /* Don't check message status by default */
+ test->exp_dr_status = (rd_kafka_msg_status_t)-1;
+
+ TEST_SAY("================= Running test %s =================\n",
+ test->name);
+ if (test->stats_fp)
+ TEST_SAY("==== Stats written to file %s ====\n", stats_file);
+
+ test_rusage_start(test_curr);
+ TIMING_START(&t_run, "%s", test->name);
+ test->start = t_run.ts_start;
+
+ /* Run test main function */
+ r = test->mainfunc(run_args->argc, run_args->argv);
+
+ TIMING_STOP(&t_run);
+ test_rusage_stop(test_curr,
+ (double)TIMING_DURATION(&t_run) / 1000000.0);
+
+ TEST_LOCK();
+ test->duration = TIMING_DURATION(&t_run);
+
+ if (test->state == TEST_SKIPPED) {
+ TEST_SAY(
+ "================= Test %s SKIPPED "
+ "=================\n",
+ run_args->test->name);
+ } else if (r) {
+ test->state = TEST_FAILED;
+ TEST_SAY(
+ "\033[31m"
+ "================= Test %s FAILED ================="
+ "\033[0m\n",
+ run_args->test->name);
+ } else {
+ test->state = TEST_PASSED;
+ TEST_SAY(
+ "\033[32m"
+ "================= Test %s PASSED ================="
+ "\033[0m\n",
+ run_args->test->name);
+ }
+ TEST_UNLOCK();
+
+ cnd_broadcast(&test_cnd);
+
+#if WITH_SOCKEM
+ test_socket_close_all(test, 0);
+#endif
+
+ if (test->stats_fp) {
+ long pos = ftell(test->stats_fp);
+ fclose(test->stats_fp);
+ test->stats_fp = NULL;
+ /* Delete file if nothing was written */
+ if (pos == 0) {
+#ifndef _WIN32
+ unlink(stats_file);
+#else
+ _unlink(stats_file);
+#endif
+ }
+ }
+
+ if (test_delete_topics_between && test_concurrent_max == 1)
+ test_delete_all_test_topics(60 * 1000);
+
+ return r;
+}
+
+
+
+static int run_test_from_thread(void *arg) {
+ struct run_args *run_args = arg;
+
+ thrd_detach(thrd_current());
+
+ run_test0(run_args);
+
+ TEST_LOCK();
+ tests_running_cnt--;
+ TEST_UNLOCK();
+
+ free(run_args);
+
+ return 0;
+}
+
+
+/**
+ * @brief Check running tests for timeouts.
+ * @locks TEST_LOCK MUST be held
+ */
+static void check_test_timeouts(void) {
+ int64_t now = test_clock();
+ struct test *test;
+
+ for (test = tests; test->name; test++) {
+ if (test->state != TEST_RUNNING)
+ continue;
+
+ /* Timeout check */
+ if (now > test->timeout) {
+ struct test *save_test = test_curr;
+ test_curr = test;
+ test->state = TEST_FAILED;
+ test_summary(0 /*no-locks*/);
+ TEST_FAIL0(
+ __FILE__, __LINE__, 0 /*nolock*/, 0 /*fail-later*/,
+ "Test %s%s%s%s timed out "
+ "(timeout set to %d seconds)\n",
+ test->name, *test->subtest ? " (" : "",
+ test->subtest, *test->subtest ? ")" : "",
+ (int)(test->timeout - test->start) / 1000000);
+ test_curr = save_test;
+ tests_running_cnt--; /* fail-later misses this*/
+#ifdef _WIN32
+ TerminateThread(test->thrd, -1);
+#else
+ pthread_kill(test->thrd, SIGKILL);
+#endif
+ }
+ }
+}
+
+
+static int run_test(struct test *test, int argc, char **argv) {
+ struct run_args *run_args = calloc(1, sizeof(*run_args));
+ int wait_cnt = 0;
+
+ run_args->test = test;
+ run_args->argc = argc;
+ run_args->argv = argv;
+
+ TEST_LOCK();
+ while (tests_running_cnt >= test_concurrent_max) {
+ if (!(wait_cnt++ % 100))
+ TEST_SAY(
+ "Too many tests running (%d >= %d): "
+ "postponing %s start...\n",
+ tests_running_cnt, test_concurrent_max, test->name);
+ cnd_timedwait_ms(&test_cnd, &test_mtx, 100);
+
+ check_test_timeouts();
+ }
+ tests_running_cnt++;
+ test->timeout = test_clock() +
+ (int64_t)(30.0 * 1000000.0 * test_timeout_multiplier);
+ test->state = TEST_RUNNING;
+ TEST_UNLOCK();
+
+ if (thrd_create(&test->thrd, run_test_from_thread, run_args) !=
+ thrd_success) {
+ TEST_LOCK();
+ tests_running_cnt--;
+ test->state = TEST_FAILED;
+ TEST_UNLOCK();
+
+ TEST_FAIL("Failed to start thread for test %s\n", test->name);
+ }
+
+ return 0;
+}
+
+static void run_tests(int argc, char **argv) {
+ struct test *test;
+
+ for (test = tests; test->name; test++) {
+ char testnum[128];
+ char *t;
+ const char *skip_reason = NULL;
+ rd_bool_t skip_silent = rd_false;
+ char tmp[128];
+ const char *scenario =
+ test->scenario ? test->scenario : "default";
+
+ if (!test->mainfunc)
+ continue;
+
+ /* Extract test number, as string */
+ strncpy(testnum, test->name, sizeof(testnum) - 1);
+ testnum[sizeof(testnum) - 1] = '\0';
+ if ((t = strchr(testnum, '_')))
+ *t = '\0';
+
+ if ((test_flags && (test_flags & test->flags) != test_flags)) {
+ skip_reason = "filtered due to test flags";
+ skip_silent = rd_true;
+ }
+ if ((test_neg_flags & ~test_flags) & test->flags)
+ skip_reason = "Filtered due to negative test flags";
+ if (test_broker_version &&
+ (test->minver > test_broker_version ||
+ (test->maxver && test->maxver < test_broker_version))) {
+ rd_snprintf(tmp, sizeof(tmp),
+ "not applicable for broker "
+ "version %d.%d.%d.%d",
+ TEST_BRKVER_X(test_broker_version, 0),
+ TEST_BRKVER_X(test_broker_version, 1),
+ TEST_BRKVER_X(test_broker_version, 2),
+ TEST_BRKVER_X(test_broker_version, 3));
+ skip_reason = tmp;
+ }
+
+ if (!strstr(scenario, test_scenario)) {
+ rd_snprintf(tmp, sizeof(tmp),
+ "requires test scenario %s", scenario);
+ skip_silent = rd_true;
+ skip_reason = tmp;
+ }
+
+ if (tests_to_run && !strstr(tests_to_run, testnum)) {
+ skip_reason = "not included in TESTS list";
+ skip_silent = rd_true;
+ } else if (!tests_to_run && (test->flags & TEST_F_MANUAL)) {
+ skip_reason = "manual test";
+ skip_silent = rd_true;
+ } else if (tests_to_skip && strstr(tests_to_skip, testnum))
+ skip_reason = "included in TESTS_SKIP list";
+
+ if (!skip_reason) {
+ run_test(test, argc, argv);
+ } else {
+ if (skip_silent) {
+ TEST_SAYL(3,
+ "================= Skipping test %s "
+ "(%s) ================\n",
+ test->name, skip_reason);
+ TEST_LOCK();
+ test->state = TEST_SKIPPED;
+ TEST_UNLOCK();
+ } else {
+ test_curr = test;
+ TEST_SKIP("%s\n", skip_reason);
+ test_curr = &tests[0];
+ }
+ }
+ }
+}
+
+/**
+ * @brief Print summary for all tests.
+ *
+ * @returns the number of failed tests.
+ */
+static int test_summary(int do_lock) {
+ struct test *test;
+ FILE *report_fp = NULL;
+ char report_path[128];
+ time_t t;
+ struct tm *tm;
+ char datestr[64];
+ int64_t total_duration = 0;
+ int tests_run = 0;
+ int tests_failed = 0;
+ int tests_failed_known = 0;
+ int tests_passed = 0;
+ FILE *sql_fp = NULL;
+ const char *tmp;
+
+ t = time(NULL);
+ tm = localtime(&t);
+ strftime(datestr, sizeof(datestr), "%Y%m%d%H%M%S", tm);
+
+ if ((tmp = test_getenv("TEST_REPORT", NULL)))
+ rd_snprintf(report_path, sizeof(report_path), "%s", tmp);
+ else if (test_write_report)
+ rd_snprintf(report_path, sizeof(report_path),
+ "test_report_%s.json", datestr);
+ else
+ report_path[0] = '\0';
+
+ if (*report_path) {
+ report_fp = fopen(report_path, "w+");
+ if (!report_fp)
+ TEST_WARN("Failed to create report file %s: %s\n",
+ report_path, strerror(errno));
+ else
+ fprintf(report_fp,
+ "{ \"id\": \"%s_%s\", \"mode\": \"%s\", "
+ "\"scenario\": \"%s\", "
+ "\"date\": \"%s\", "
+ "\"git_version\": \"%s\", "
+ "\"broker_version\": \"%s\", "
+ "\"tests\": {",
+ datestr, test_mode, test_mode, test_scenario,
+ datestr, test_git_version,
+ test_broker_version_str);
+ }
+
+ if (do_lock)
+ TEST_LOCK();
+
+ if (test_sql_cmd) {
+#ifdef _WIN32
+ sql_fp = _popen(test_sql_cmd, "w");
+#else
+ sql_fp = popen(test_sql_cmd, "w");
+#endif
+ if (!sql_fp)
+ TEST_WARN("Failed to execute test.sql.command: %s",
+ test_sql_cmd);
+ else
+ fprintf(sql_fp,
+ "CREATE TABLE IF NOT EXISTS "
+ "runs(runid text PRIMARY KEY, mode text, "
+ "date datetime, cnt int, passed int, "
+ "failed int, duration numeric);\n"
+ "CREATE TABLE IF NOT EXISTS "
+ "tests(runid text, mode text, name text, "
+ "state text, extra text, duration numeric);\n");
+ }
+
+ if (show_summary)
+ printf(
+ "TEST %s (%s, scenario %s) SUMMARY\n"
+ "#========================================================="
+ "=========#\n",
+ datestr, test_mode, test_scenario);
+
+ for (test = tests; test->name; test++) {
+ const char *color;
+ int64_t duration;
+ char extra[128] = "";
+ int do_count = 1;
+
+ if (!(duration = test->duration) && test->start > 0)
+ duration = test_clock() - test->start;
+
+ if (test == tests) {
+ /* <MAIN> test:
+ * test accounts for total runtime.
+ * dont include in passed/run/failed counts. */
+ total_duration = duration;
+ do_count = 0;
+ }
+
+ switch (test->state) {
+ case TEST_PASSED:
+ color = _C_GRN;
+ if (do_count) {
+ tests_passed++;
+ tests_run++;
+ }
+ break;
+ case TEST_FAILED:
+ if (test->flags & TEST_F_KNOWN_ISSUE) {
+ rd_snprintf(extra, sizeof(extra),
+ " <-- known issue%s%s",
+ test->extra ? ": " : "",
+ test->extra ? test->extra : "");
+ if (do_count)
+ tests_failed_known++;
+ }
+ color = _C_RED;
+ if (do_count) {
+ tests_failed++;
+ tests_run++;
+ }
+ break;
+ case TEST_RUNNING:
+ color = _C_MAG;
+ if (do_count) {
+ tests_failed++; /* All tests should be finished
+ */
+ tests_run++;
+ }
+ break;
+ case TEST_NOT_STARTED:
+ color = _C_YEL;
+ if (test->extra)
+ rd_snprintf(extra, sizeof(extra), " %s",
+ test->extra);
+ break;
+ default:
+ color = _C_CYA;
+ break;
+ }
+
+ if (show_summary &&
+ (test->state != TEST_SKIPPED || *test->failstr ||
+ (tests_to_run && !strncmp(tests_to_run, test->name,
+ strlen(tests_to_run))))) {
+ printf("|%s %-40s | %10s | %7.3fs %s|", color,
+ test->name, test_states[test->state],
+ (double)duration / 1000000.0, _C_CLR);
+ if (test->state == TEST_FAILED)
+ printf(_C_RED " %s" _C_CLR, test->failstr);
+ else if (test->state == TEST_SKIPPED)
+ printf(_C_CYA " %s" _C_CLR, test->failstr);
+ printf("%s\n", extra);
+ }
+
+ if (report_fp) {
+ int i;
+ fprintf(report_fp,
+ "%s\"%s\": {"
+ "\"name\": \"%s\", "
+ "\"state\": \"%s\", "
+ "\"known_issue\": %s, "
+ "\"extra\": \"%s\", "
+ "\"duration\": %.3f, "
+ "\"report\": [ ",
+ test == tests ? "" : ", ", test->name,
+ test->name, test_states[test->state],
+ test->flags & TEST_F_KNOWN_ISSUE ? "true"
+ : "false",
+ test->extra ? test->extra : "",
+ (double)duration / 1000000.0);
+
+ for (i = 0; i < test->report_cnt; i++) {
+ fprintf(report_fp, "%s%s ", i == 0 ? "" : ",",
+ test->report_arr[i]);
+ }
+
+ fprintf(report_fp, "] }");
+ }
+
+ if (sql_fp)
+ fprintf(sql_fp,
+ "INSERT INTO tests VALUES("
+ "'%s_%s', '%s', '%s', '%s', '%s', %f);\n",
+ datestr, test_mode, test_mode, test->name,
+ test_states[test->state],
+ test->extra ? test->extra : "",
+ (double)duration / 1000000.0);
+ }
+ if (do_lock)
+ TEST_UNLOCK();
+
+ if (show_summary)
+ printf(
+ "#========================================================="
+ "=========#\n");
+
+ if (report_fp) {
+ fprintf(report_fp,
+ "}, "
+ "\"tests_run\": %d, "
+ "\"tests_passed\": %d, "
+ "\"tests_failed\": %d, "
+ "\"duration\": %.3f"
+ "}\n",
+ tests_run, tests_passed, tests_failed,
+ (double)total_duration / 1000000.0);
+
+ fclose(report_fp);
+ TEST_SAY("# Test report written to %s\n", report_path);
+ }
+
+ if (sql_fp) {
+ fprintf(sql_fp,
+ "INSERT INTO runs VALUES('%s_%s', '%s', datetime(), "
+ "%d, %d, %d, %f);\n",
+ datestr, test_mode, test_mode, tests_run, tests_passed,
+ tests_failed, (double)total_duration / 1000000.0);
+ fclose(sql_fp);
+ }
+
+ return tests_failed - tests_failed_known;
+}
+
+#ifndef _WIN32
+static void test_sig_term(int sig) {
+ if (test_exit)
+ exit(1);
+ fprintf(stderr,
+ "Exiting tests, waiting for running tests to finish.\n");
+ test_exit = 1;
+}
+#endif
+
+/**
+ * Wait 'timeout' seconds for rdkafka to kill all its threads and clean up.
+ */
+static void test_wait_exit(int timeout) {
+ int r;
+ time_t start = time(NULL);
+
+ while ((r = rd_kafka_thread_cnt()) && timeout-- >= 0) {
+ TEST_SAY("%i thread(s) in use by librdkafka, waiting...\n", r);
+ rd_sleep(1);
+ }
+
+ TEST_SAY("%i thread(s) in use by librdkafka\n", r);
+
+ if (r > 0)
+ TEST_FAIL("%i thread(s) still active in librdkafka", r);
+
+ timeout -= (int)(time(NULL) - start);
+ if (timeout > 0) {
+ TEST_SAY(
+ "Waiting %d seconds for all librdkafka memory "
+ "to be released\n",
+ timeout);
+ if (rd_kafka_wait_destroyed(timeout * 1000) == -1)
+ TEST_FAIL(
+ "Not all internal librdkafka "
+ "objects destroyed\n");
+ }
+}
+
+
+
+/**
+ * @brief Test framework cleanup before termination.
+ */
+static void test_cleanup(void) {
+ struct test *test;
+
+ /* Free report arrays */
+ for (test = tests; test->name; test++) {
+ int i;
+ if (!test->report_arr)
+ continue;
+ for (i = 0; i < test->report_cnt; i++)
+ rd_free(test->report_arr[i]);
+ rd_free(test->report_arr);
+ test->report_arr = NULL;
+ }
+
+ if (test_sql_cmd)
+ rd_free(test_sql_cmd);
+}
+
+
+int main(int argc, char **argv) {
+ int i, r;
+ test_timing_t t_all;
+ int a, b, c, d;
+ const char *tmpver;
+
+ mtx_init(&test_mtx, mtx_plain);
+ cnd_init(&test_cnd);
+
+ test_init();
+
+#ifndef _WIN32
+ signal(SIGINT, test_sig_term);
+#endif
+ tests_to_run = test_getenv("TESTS", NULL);
+ subtests_to_run = test_getenv("SUBTESTS", NULL);
+ tests_to_skip = test_getenv("TESTS_SKIP", NULL);
+ tmpver = test_getenv("TEST_KAFKA_VERSION", NULL);
+ if (!tmpver)
+ tmpver = test_getenv("KAFKA_VERSION", test_broker_version_str);
+ test_broker_version_str = tmpver;
+
+ test_git_version = test_getenv("RDKAFKA_GITVER", "HEAD");
+
+ /* Are we running on CI? */
+ if (test_getenv("CI", NULL)) {
+ test_on_ci = 1;
+ test_concurrent_max = 3;
+ }
+
+ test_conf_init(NULL, NULL, 10);
+
+ for (i = 1; i < argc; i++) {
+ if (!strncmp(argv[i], "-p", 2) && strlen(argv[i]) > 2) {
+ if (test_rusage) {
+ fprintf(stderr,
+ "%% %s ignored: -R takes preceedence\n",
+ argv[i]);
+ continue;
+ }
+ test_concurrent_max = (int)strtod(argv[i] + 2, NULL);
+ } else if (!strcmp(argv[i], "-l"))
+ test_flags |= TEST_F_LOCAL;
+ else if (!strcmp(argv[i], "-L"))
+ test_neg_flags |= TEST_F_LOCAL;
+ else if (!strcmp(argv[i], "-a"))
+ test_assert_on_fail = 1;
+ else if (!strcmp(argv[i], "-k"))
+ test_flags |= TEST_F_KNOWN_ISSUE;
+ else if (!strcmp(argv[i], "-K"))
+ test_neg_flags |= TEST_F_KNOWN_ISSUE;
+ else if (!strcmp(argv[i], "-E"))
+ test_neg_flags |= TEST_F_SOCKEM;
+ else if (!strcmp(argv[i], "-V") && i + 1 < argc)
+ test_broker_version_str = argv[++i];
+ else if (!strcmp(argv[i], "-s") && i + 1 < argc)
+ strncpy(test_scenario, argv[++i],
+ sizeof(test_scenario) - 1);
+ else if (!strcmp(argv[i], "-S"))
+ show_summary = 0;
+ else if (!strcmp(argv[i], "-D"))
+ test_delete_topics_between = 1;
+ else if (!strcmp(argv[i], "-P"))
+ test_idempotent_producer = 1;
+ else if (!strcmp(argv[i], "-Q"))
+ test_quick = 1;
+ else if (!strcmp(argv[i], "-r"))
+ test_write_report = 1;
+ else if (!strncmp(argv[i], "-R", 2)) {
+ test_rusage = 1;
+ test_concurrent_max = 1;
+ if (strlen(argv[i]) > strlen("-R")) {
+ test_rusage_cpu_calibration =
+ strtod(argv[i] + 2, NULL);
+ if (test_rusage_cpu_calibration < 0.00001) {
+ fprintf(stderr,
+ "%% Invalid CPU calibration "
+ "value: %s\n",
+ argv[i] + 2);
+ exit(1);
+ }
+ }
+ } else if (*argv[i] != '-')
+ tests_to_run = argv[i];
+ else {
+ printf(
+ "Unknown option: %s\n"
+ "\n"
+ "Usage: %s [options] [<test-match-substr>]\n"
+ "Options:\n"
+ " -p<N> Run N tests in parallel\n"
+ " -l/-L Only/dont run local tests (no broker "
+ "needed)\n"
+ " -k/-K Only/dont run tests with known issues\n"
+ " -E Don't run sockem tests\n"
+ " -a Assert on failures\n"
+ " -r Write test_report_...json file.\n"
+ " -S Dont show test summary\n"
+ " -s <scenario> Test scenario.\n"
+ " -V <N.N.N.N> Broker version.\n"
+ " -D Delete all test topics between each test "
+ "(-p1) or after all tests\n"
+ " -P Run all tests with "
+ "`enable.idempotency=true`\n"
+ " -Q Run tests in quick mode: faster tests, "
+ "fewer iterations, less data.\n"
+ " -R Check resource usage thresholds.\n"
+ " -R<C> Check resource usage thresholds but "
+ "adjust CPU thresholds by C (float):\n"
+ " C < 1.0: CPU is faster than base line "
+ "system.\n"
+ " C > 1.0: CPU is slower than base line "
+ "system.\n"
+ " E.g. -R2.5 = CPU is 2.5x slower than "
+ "base line system.\n"
+ "\n"
+ "Environment variables:\n"
+ " TESTS - substring matched test to run (e.g., "
+ "0033)\n"
+ " SUBTESTS - substring matched subtest to run "
+ "(e.g., n_wildcard)\n"
+ " TEST_KAFKA_VERSION - broker version (e.g., "
+ "0.9.0.1)\n"
+ " TEST_SCENARIO - Test scenario\n"
+ " TEST_LEVEL - Test verbosity level\n"
+ " TEST_MODE - bare, helgrind, valgrind\n"
+ " TEST_SEED - random seed\n"
+ " RDKAFKA_TEST_CONF - test config file "
+ "(test.conf)\n"
+ " KAFKA_PATH - Path to kafka source dir\n"
+ " ZK_ADDRESS - Zookeeper address\n"
+ "\n",
+ argv[i], argv[0]);
+ exit(1);
+ }
+ }
+
+ TEST_SAY("Git version: %s\n", test_git_version);
+
+ if (!strcmp(test_broker_version_str, "trunk"))
+ test_broker_version_str = "9.9.9.9"; /* for now */
+
+ d = 0;
+ if (sscanf(test_broker_version_str, "%d.%d.%d.%d", &a, &b, &c, &d) <
+ 3) {
+ printf(
+ "%% Expected broker version to be in format "
+ "N.N.N (N=int), not %s\n",
+ test_broker_version_str);
+ exit(1);
+ }
+ test_broker_version = TEST_BRKVER(a, b, c, d);
+ TEST_SAY("Broker version: %s (%d.%d.%d.%d)\n", test_broker_version_str,
+ TEST_BRKVER_X(test_broker_version, 0),
+ TEST_BRKVER_X(test_broker_version, 1),
+ TEST_BRKVER_X(test_broker_version, 2),
+ TEST_BRKVER_X(test_broker_version, 3));
+
+ /* Set up fake "<MAIN>" test for all operations performed in
+ * the main thread rather than the per-test threads.
+ * Nice side effect is that we get timing and status for main as well.*/
+ test_curr = &tests[0];
+ test_curr->state = TEST_PASSED;
+ test_curr->start = test_clock();
+
+ if (test_on_ci) {
+ TEST_LOCK();
+ test_timeout_multiplier += 2;
+ TEST_UNLOCK();
+ }
+
+ if (!strcmp(test_mode, "helgrind") || !strcmp(test_mode, "drd")) {
+ TEST_LOCK();
+ test_timeout_multiplier += 5;
+ TEST_UNLOCK();
+ } else if (!strcmp(test_mode, "valgrind")) {
+ TEST_LOCK();
+ test_timeout_multiplier += 3;
+ TEST_UNLOCK();
+ }
+
+ /* Broker version 0.9 and api.version.request=true (which is default)
+ * will cause a 10s stall per connection. Instead of fixing
+ * that for each affected API in every test we increase the timeout
+ * multiplier accordingly instead. The typical consume timeout is 5
+ * seconds, so a multiplier of 3 should be good. */
+ if ((test_broker_version & 0xffff0000) == 0x00090000)
+ test_timeout_multiplier += 3;
+
+ if (test_concurrent_max > 1)
+ test_timeout_multiplier += (double)test_concurrent_max / 3;
+
+ TEST_SAY("Tests to run : %s\n", tests_to_run ? tests_to_run : "all");
+ if (subtests_to_run)
+ TEST_SAY("Sub tests : %s\n", subtests_to_run);
+ if (tests_to_skip)
+ TEST_SAY("Skip tests : %s\n", tests_to_skip);
+ TEST_SAY("Test mode : %s%s%s\n", test_quick ? "quick, " : "",
+ test_mode, test_on_ci ? ", CI" : "");
+ TEST_SAY("Test scenario: %s\n", test_scenario);
+ TEST_SAY("Test filter : %s\n", (test_flags & TEST_F_LOCAL)
+ ? "local tests only"
+ : "no filter");
+ TEST_SAY("Test timeout multiplier: %.1f\n", test_timeout_multiplier);
+ TEST_SAY("Action on test failure: %s\n",
+ test_assert_on_fail ? "assert crash" : "continue other tests");
+ if (test_rusage)
+ TEST_SAY("Test rusage : yes (%.2fx CPU calibration)\n",
+ test_rusage_cpu_calibration);
+ if (test_idempotent_producer)
+ TEST_SAY("Test Idempotent Producer: enabled\n");
+
+ {
+ char cwd[512], *pcwd;
+#ifdef _WIN32
+ pcwd = _getcwd(cwd, sizeof(cwd) - 1);
+#else
+ pcwd = getcwd(cwd, sizeof(cwd) - 1);
+#endif
+ if (pcwd)
+ TEST_SAY("Current directory: %s\n", cwd);
+ }
+
+ test_timeout_set(30);
+
+ TIMING_START(&t_all, "ALL-TESTS");
+
+ /* Run tests */
+ run_tests(argc, argv);
+
+ TEST_LOCK();
+ while (tests_running_cnt > 0 && !test_exit) {
+ struct test *test;
+
+ if (!test_quick && test_level >= 2) {
+ TEST_SAY("%d test(s) running:", tests_running_cnt);
+
+ for (test = tests; test->name; test++) {
+ if (test->state != TEST_RUNNING)
+ continue;
+
+ TEST_SAY0(" %s", test->name);
+ }
+
+ TEST_SAY0("\n");
+ }
+
+ check_test_timeouts();
+
+ TEST_UNLOCK();
+
+ if (test_quick)
+ rd_usleep(200 * 1000, NULL);
+ else
+ rd_sleep(1);
+ TEST_LOCK();
+ }
+
+ TIMING_STOP(&t_all);
+
+ test_curr = &tests[0];
+ test_curr->duration = test_clock() - test_curr->start;
+
+ TEST_UNLOCK();
+
+ if (test_delete_topics_between)
+ test_delete_all_test_topics(60 * 1000);
+
+ r = test_summary(1 /*lock*/) ? 1 : 0;
+
+ /* Wait for everything to be cleaned up since broker destroys are
+ * handled in its own thread. */
+ test_wait_exit(0);
+
+ /* If we havent failed at this point then
+ * there were no threads leaked */
+ if (r == 0)
+ TEST_SAY("\n============== ALL TESTS PASSED ==============\n");
+
+ test_cleanup();
+
+ if (r > 0)
+ TEST_FAIL("%d test(s) failed, see previous errors", r);
+
+ return r;
+}
+
+
+
+/******************************************************************************
+ *
+ * Helpers
+ *
+ ******************************************************************************/
+
+void test_dr_msg_cb(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque) {
+ int *remainsp = rkmessage->_private;
+ static const char *status_names[] = {
+ [RD_KAFKA_MSG_STATUS_NOT_PERSISTED] = "NotPersisted",
+ [RD_KAFKA_MSG_STATUS_POSSIBLY_PERSISTED] = "PossiblyPersisted",
+ [RD_KAFKA_MSG_STATUS_PERSISTED] = "Persisted"};
+
+ TEST_SAYL(4,
+ "Delivery report: %s (%s) to %s [%" PRId32
+ "] "
+ "at offset %" PRId64 " latency %.2fms\n",
+ rd_kafka_err2str(rkmessage->err),
+ status_names[rd_kafka_message_status(rkmessage)],
+ rd_kafka_topic_name(rkmessage->rkt), rkmessage->partition,
+ rkmessage->offset,
+ (float)rd_kafka_message_latency(rkmessage) / 1000.0);
+
+ if (!test_curr->produce_sync) {
+ if (!test_curr->ignore_dr_err &&
+ rkmessage->err != test_curr->exp_dr_err)
+ TEST_FAIL("Message delivery (to %s [%" PRId32
+ "]) "
+ "failed: expected %s, got %s",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition,
+ rd_kafka_err2str(test_curr->exp_dr_err),
+ rd_kafka_err2str(rkmessage->err));
+
+ if ((int)test_curr->exp_dr_status != -1) {
+ rd_kafka_msg_status_t status =
+ rd_kafka_message_status(rkmessage);
+
+ TEST_ASSERT(status == test_curr->exp_dr_status,
+ "Expected message status %s, not %s",
+ status_names[test_curr->exp_dr_status],
+ status_names[status]);
+ }
+
+ /* Add message to msgver */
+ if (!rkmessage->err && test_curr->dr_mv)
+ test_msgver_add_msg(rk, test_curr->dr_mv, rkmessage);
+ }
+
+ if (remainsp) {
+ TEST_ASSERT(*remainsp > 0,
+ "Too many messages delivered (remains %i)",
+ *remainsp);
+
+ (*remainsp)--;
+ }
+
+ if (test_curr->produce_sync)
+ test_curr->produce_sync_err = rkmessage->err;
+}
+
+
+rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf) {
+ rd_kafka_t *rk;
+ char errstr[512];
+
+ if (!conf) {
+ test_conf_init(&conf, NULL, 0);
+#if WITH_SOCKEM
+ if (*test_sockem_conf)
+ test_socket_enable(conf);
+#endif
+ } else {
+ if (!strcmp(test_conf_get(conf, "client.id"), "rdkafka"))
+ test_conf_set(conf, "client.id", test_curr->name);
+ }
+
+
+
+ /* Creat kafka instance */
+ rk = rd_kafka_new(mode, conf, errstr, sizeof(errstr));
+ if (!rk)
+ TEST_FAIL("Failed to create rdkafka instance: %s\n", errstr);
+
+ TEST_SAY("Created kafka instance %s\n", rd_kafka_name(rk));
+
+ return rk;
+}
+
+
+rd_kafka_t *test_create_producer(void) {
+ rd_kafka_conf_t *conf;
+
+ test_conf_init(&conf, NULL, 0);
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ return test_create_handle(RD_KAFKA_PRODUCER, conf);
+}
+
+
+/**
+ * Create topic_t object with va-arg list as key-value config pairs
+ * terminated by NULL.
+ */
+rd_kafka_topic_t *
+test_create_topic_object(rd_kafka_t *rk, const char *topic, ...) {
+ rd_kafka_topic_t *rkt;
+ rd_kafka_topic_conf_t *topic_conf;
+ va_list ap;
+ const char *name, *val;
+
+ test_conf_init(NULL, &topic_conf, 0);
+
+ va_start(ap, topic);
+ while ((name = va_arg(ap, const char *)) &&
+ (val = va_arg(ap, const char *))) {
+ test_topic_conf_set(topic_conf, name, val);
+ }
+ va_end(ap);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ return rkt;
+}
+
+
+rd_kafka_topic_t *
+test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...) {
+ rd_kafka_topic_t *rkt;
+ rd_kafka_topic_conf_t *topic_conf;
+ char errstr[512];
+ va_list ap;
+ const char *name, *val;
+
+ test_conf_init(NULL, &topic_conf, 0);
+
+ va_start(ap, topic);
+ while ((name = va_arg(ap, const char *)) &&
+ (val = va_arg(ap, const char *))) {
+ if (rd_kafka_topic_conf_set(topic_conf, name, val, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("Conf failed: %s\n", errstr);
+ }
+ va_end(ap);
+
+ /* Make sure all replicas are in-sync after producing
+ * so that consume test wont fail. */
+ rd_kafka_topic_conf_set(topic_conf, "request.required.acks", "-1",
+ errstr, sizeof(errstr));
+
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ return rkt;
+}
+
+
+
+/**
+ * Produces \p cnt messages and returns immediately.
+ * Does not wait for delivery.
+ * \p msgcounterp is incremented for each produced messages and passed
+ * as \p msg_opaque which is later used in test_dr_msg_cb to decrement
+ * the counter on delivery.
+ *
+ * If \p payload is NULL the message key and payload will be formatted
+ * according to standard test format, otherwise the key will be NULL and
+ * payload send as message payload.
+ *
+ * Default message size is 128 bytes, if \p size is non-zero and \p payload
+ * is NULL the message size of \p size will be used.
+ */
+void test_produce_msgs_nowait(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size,
+ int msgrate,
+ int *msgcounterp) {
+ int msg_id;
+ test_timing_t t_all, t_poll;
+ char key[128];
+ void *buf;
+ int64_t tot_bytes = 0;
+ int64_t tot_time_poll = 0;
+ int64_t per_msg_wait = 0;
+
+ if (msgrate > 0)
+ per_msg_wait = 1000000 / (int64_t)msgrate;
+
+
+ if (payload)
+ buf = (void *)payload;
+ else {
+ if (size == 0)
+ size = 128;
+ buf = calloc(1, size);
+ }
+
+ TEST_SAY("Produce to %s [%" PRId32 "]: messages #%d..%d\n",
+ rd_kafka_topic_name(rkt), partition, msg_base, msg_base + cnt);
+
+ TIMING_START(&t_all, "PRODUCE");
+ TIMING_START(&t_poll, "SUM(POLL)");
+
+ for (msg_id = msg_base; msg_id < msg_base + cnt; msg_id++) {
+ int wait_time = 0;
+
+ if (!payload)
+ test_prepare_msg(testid, partition, msg_id, buf, size,
+ key, sizeof(key));
+
+
+ if (rd_kafka_produce(rkt, partition, RD_KAFKA_MSG_F_COPY, buf,
+ size, !payload ? key : NULL,
+ !payload ? strlen(key) : 0,
+ msgcounterp) == -1)
+ TEST_FAIL(
+ "Failed to produce message %i "
+ "to partition %i: %s",
+ msg_id, (int)partition,
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ (*msgcounterp)++;
+ tot_bytes += size;
+
+ TIMING_RESTART(&t_poll);
+ do {
+ if (per_msg_wait) {
+ wait_time = (int)(per_msg_wait -
+ TIMING_DURATION(&t_poll)) /
+ 1000;
+ if (wait_time < 0)
+ wait_time = 0;
+ }
+ rd_kafka_poll(rk, wait_time);
+ } while (wait_time > 0);
+
+ tot_time_poll = TIMING_DURATION(&t_poll);
+
+ if (TIMING_EVERY(&t_all, 3 * 1000000))
+ TEST_SAY(
+ "produced %3d%%: %d/%d messages "
+ "(%d msgs/s, %d bytes/s)\n",
+ ((msg_id - msg_base) * 100) / cnt,
+ msg_id - msg_base, cnt,
+ (int)((msg_id - msg_base) /
+ (TIMING_DURATION(&t_all) / 1000000)),
+ (int)((tot_bytes) /
+ (TIMING_DURATION(&t_all) / 1000000)));
+ }
+
+ if (!payload)
+ free(buf);
+
+ t_poll.duration = tot_time_poll;
+ TIMING_STOP(&t_poll);
+ TIMING_STOP(&t_all);
+}
+
+/**
+ * Waits for the messages tracked by counter \p msgcounterp to be delivered.
+ */
+void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp) {
+ test_timing_t t_all;
+ int start_cnt = *msgcounterp;
+
+ TIMING_START(&t_all, "PRODUCE.DELIVERY.WAIT");
+
+ /* Wait for messages to be delivered */
+ while (*msgcounterp > 0 && rd_kafka_outq_len(rk) > 0) {
+ rd_kafka_poll(rk, 10);
+ if (TIMING_EVERY(&t_all, 3 * 1000000)) {
+ int delivered = start_cnt - *msgcounterp;
+ TEST_SAY(
+ "wait_delivery: "
+ "%d/%d messages delivered: %d msgs/s\n",
+ delivered, start_cnt,
+ (int)(delivered /
+ (TIMING_DURATION(&t_all) / 1000000)));
+ }
+ }
+
+ TIMING_STOP(&t_all);
+
+ TEST_ASSERT(*msgcounterp == 0,
+ "Not all messages delivered: msgcounter still at %d, "
+ "outq_len %d",
+ *msgcounterp, rd_kafka_outq_len(rk));
+}
+
+/**
+ * Produces \p cnt messages and waits for succesful delivery
+ */
+void test_produce_msgs(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size) {
+ int remains = 0;
+
+ test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
+ payload, size, 0, &remains);
+
+ test_wait_delivery(rk, &remains);
+}
+
+
+/**
+ * @brief Produces \p cnt messages and waits for succesful delivery
+ */
+void test_produce_msgs2(rd_kafka_t *rk,
+ const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size) {
+ int remains = 0;
+ rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL);
+
+ test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
+ payload, size, 0, &remains);
+
+ test_wait_delivery(rk, &remains);
+
+ rd_kafka_topic_destroy(rkt);
+}
+
+/**
+ * @brief Produces \p cnt messages without waiting for delivery.
+ */
+void test_produce_msgs2_nowait(rd_kafka_t *rk,
+ const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size,
+ int *remainsp) {
+ rd_kafka_topic_t *rkt = test_create_topic_object(rk, topic, NULL);
+
+ test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
+ payload, size, 0, remainsp);
+
+ rd_kafka_topic_destroy(rkt);
+}
+
+
+/**
+ * Produces \p cnt messages at \p msgs/s, and waits for succesful delivery
+ */
+void test_produce_msgs_rate(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size,
+ int msgrate) {
+ int remains = 0;
+
+ test_produce_msgs_nowait(rk, rkt, testid, partition, msg_base, cnt,
+ payload, size, msgrate, &remains);
+
+ test_wait_delivery(rk, &remains);
+}
+
+
+
+/**
+ * Create producer, produce \p msgcnt messages to \p topic \p partition,
+ * destroy consumer, and returns the used testid.
+ */
+uint64_t test_produce_msgs_easy_size(const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msgcnt,
+ size_t size) {
+ rd_kafka_t *rk;
+ rd_kafka_topic_t *rkt;
+ test_timing_t t_produce;
+
+ if (!testid)
+ testid = test_id_generate();
+ rk = test_create_producer();
+ rkt = test_create_producer_topic(rk, topic, NULL);
+
+ TIMING_START(&t_produce, "PRODUCE");
+ test_produce_msgs(rk, rkt, testid, partition, 0, msgcnt, NULL, size);
+ TIMING_STOP(&t_produce);
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(rk);
+
+ return testid;
+}
+
+rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition) {
+ test_curr->produce_sync = 1;
+ test_produce_msgs(rk, rkt, testid, partition, 0, 1, NULL, 0);
+ test_curr->produce_sync = 0;
+ return test_curr->produce_sync_err;
+}
+
+
+/**
+ * @brief Easy produce function.
+ *
+ * @param ... is a NULL-terminated list of key, value config property pairs.
+ */
+void test_produce_msgs_easy_v(const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ size_t size,
+ ...) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *p;
+ rd_kafka_topic_t *rkt;
+ va_list ap;
+ const char *key, *val;
+
+ test_conf_init(&conf, NULL, 0);
+
+ va_start(ap, size);
+ while ((key = va_arg(ap, const char *)) &&
+ (val = va_arg(ap, const char *)))
+ test_conf_set(conf, key, val);
+ va_end(ap);
+
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ rkt = test_create_producer_topic(p, topic, NULL);
+
+ test_produce_msgs(p, rkt, testid, partition, msg_base, cnt, NULL, size);
+
+ rd_kafka_topic_destroy(rkt);
+ rd_kafka_destroy(p);
+}
+
+
+/**
+ * @brief Produce messages to multiple topic-partitions.
+ *
+ * @param ...vararg is a tuple of:
+ * const char *topic
+ * int32_t partition (or UA)
+ * int msg_base
+ * int msg_cnt
+ *
+ * End with a NULL topic
+ */
+void test_produce_msgs_easy_multi(uint64_t testid, ...) {
+ rd_kafka_conf_t *conf;
+ rd_kafka_t *p;
+ va_list ap;
+ const char *topic;
+ int msgcounter = 0;
+
+ test_conf_init(&conf, NULL, 0);
+
+ rd_kafka_conf_set_dr_msg_cb(conf, test_dr_msg_cb);
+
+ p = test_create_handle(RD_KAFKA_PRODUCER, conf);
+
+ va_start(ap, testid);
+ while ((topic = va_arg(ap, const char *))) {
+ int32_t partition = va_arg(ap, int32_t);
+ int msg_base = va_arg(ap, int);
+ int msg_cnt = va_arg(ap, int);
+ rd_kafka_topic_t *rkt;
+
+ rkt = test_create_producer_topic(p, topic, NULL);
+
+ test_produce_msgs_nowait(p, rkt, testid, partition, msg_base,
+ msg_cnt, NULL, 0, 0, &msgcounter);
+
+ rd_kafka_topic_destroy(rkt);
+ }
+ va_end(ap);
+
+ test_flush(p, tmout_multip(10 * 1000));
+
+ rd_kafka_destroy(p);
+}
+
+
+
+/**
+ * @brief A standard incremental rebalance callback.
+ */
+void test_incremental_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+ TEST_SAY("%s: incremental rebalance: %s: %d partition(s)%s\n",
+ rd_kafka_name(rk), rd_kafka_err2name(err), parts->cnt,
+ rd_kafka_assignment_lost(rk) ? ", assignment lost" : "");
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ test_consumer_incremental_assign("rebalance_cb", rk, parts);
+ break;
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ test_consumer_incremental_unassign("rebalance_cb", rk, parts);
+ break;
+ default:
+ TEST_FAIL("Unknown rebalance event: %s",
+ rd_kafka_err2name(err));
+ break;
+ }
+}
+
+/**
+ * @brief A standard rebalance callback.
+ */
+void test_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque) {
+
+ if (!strcmp(rd_kafka_rebalance_protocol(rk), "COOPERATIVE")) {
+ test_incremental_rebalance_cb(rk, err, parts, opaque);
+ return;
+ }
+
+ TEST_SAY("%s: Rebalance: %s: %d partition(s)\n", rd_kafka_name(rk),
+ rd_kafka_err2name(err), parts->cnt);
+
+ switch (err) {
+ case RD_KAFKA_RESP_ERR__ASSIGN_PARTITIONS:
+ test_consumer_assign("assign", rk, parts);
+ break;
+ case RD_KAFKA_RESP_ERR__REVOKE_PARTITIONS:
+ test_consumer_unassign("unassign", rk);
+ break;
+ default:
+ TEST_FAIL("Unknown rebalance event: %s",
+ rd_kafka_err2name(err));
+ break;
+ }
+}
+
+
+
+rd_kafka_t *test_create_consumer(
+ const char *group_id,
+ void (*rebalance_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque),
+ rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *default_topic_conf) {
+ rd_kafka_t *rk;
+ char tmp[64];
+
+ if (!conf)
+ test_conf_init(&conf, NULL, 0);
+
+ if (group_id) {
+ test_conf_set(conf, "group.id", group_id);
+
+ rd_snprintf(tmp, sizeof(tmp), "%d", test_session_timeout_ms);
+ test_conf_set(conf, "session.timeout.ms", tmp);
+
+ if (rebalance_cb)
+ rd_kafka_conf_set_rebalance_cb(conf, rebalance_cb);
+ } else {
+ TEST_ASSERT(!rebalance_cb);
+ }
+
+ if (default_topic_conf)
+ rd_kafka_conf_set_default_topic_conf(conf, default_topic_conf);
+
+ /* Create kafka instance */
+ rk = test_create_handle(RD_KAFKA_CONSUMER, conf);
+
+ if (group_id)
+ rd_kafka_poll_set_consumer(rk);
+
+ return rk;
+}
+
+rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk,
+ const char *topic) {
+ rd_kafka_topic_t *rkt;
+ rd_kafka_topic_conf_t *topic_conf;
+
+ test_conf_init(NULL, &topic_conf, 0);
+
+ rkt = rd_kafka_topic_new(rk, topic, topic_conf);
+ if (!rkt)
+ TEST_FAIL("Failed to create topic: %s\n",
+ rd_kafka_err2str(rd_kafka_last_error()));
+
+ return rkt;
+}
+
+
+void test_consumer_start(const char *what,
+ rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t start_offset) {
+
+ TEST_SAY("%s: consumer_start: %s [%" PRId32 "] at offset %" PRId64 "\n",
+ what, rd_kafka_topic_name(rkt), partition, start_offset);
+
+ if (rd_kafka_consume_start(rkt, partition, start_offset) == -1)
+ TEST_FAIL("%s: consume_start failed: %s\n", what,
+ rd_kafka_err2str(rd_kafka_last_error()));
+}
+
+void test_consumer_stop(const char *what,
+ rd_kafka_topic_t *rkt,
+ int32_t partition) {
+
+ TEST_SAY("%s: consumer_stop: %s [%" PRId32 "]\n", what,
+ rd_kafka_topic_name(rkt), partition);
+
+ if (rd_kafka_consume_stop(rkt, partition) == -1)
+ TEST_FAIL("%s: consume_stop failed: %s\n", what,
+ rd_kafka_err2str(rd_kafka_last_error()));
+}
+
+void test_consumer_seek(const char *what,
+ rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t offset) {
+ int err;
+
+ TEST_SAY("%s: consumer_seek: %s [%" PRId32 "] to offset %" PRId64 "\n",
+ what, rd_kafka_topic_name(rkt), partition, offset);
+
+ if ((err = rd_kafka_seek(rkt, partition, offset, 2000)))
+ TEST_FAIL("%s: consume_seek(%s, %" PRId32 ", %" PRId64
+ ") "
+ "failed: %s\n",
+ what, rd_kafka_topic_name(rkt), partition, offset,
+ rd_kafka_err2str(err));
+}
+
+
+
+/**
+ * Returns offset of the last message consumed
+ */
+int64_t test_consume_msgs(const char *what,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int64_t offset,
+ int exp_msg_base,
+ int exp_cnt,
+ int parse_fmt) {
+ int cnt = 0;
+ int msg_next = exp_msg_base;
+ int fails = 0;
+ int64_t offset_last = -1;
+ int64_t tot_bytes = 0;
+ test_timing_t t_first, t_all;
+
+ TEST_SAY("%s: consume_msgs: %s [%" PRId32
+ "]: expect msg #%d..%d "
+ "at offset %" PRId64 "\n",
+ what, rd_kafka_topic_name(rkt), partition, exp_msg_base,
+ exp_msg_base + exp_cnt, offset);
+
+ if (offset != TEST_NO_SEEK) {
+ rd_kafka_resp_err_t err;
+ test_timing_t t_seek;
+
+ TIMING_START(&t_seek, "SEEK");
+ if ((err = rd_kafka_seek(rkt, partition, offset, 5000)))
+ TEST_FAIL("%s: consume_msgs: %s [%" PRId32
+ "]: "
+ "seek to %" PRId64 " failed: %s\n",
+ what, rd_kafka_topic_name(rkt), partition,
+ offset, rd_kafka_err2str(err));
+ TIMING_STOP(&t_seek);
+ TEST_SAY("%s: seeked to offset %" PRId64 "\n", what, offset);
+ }
+
+ TIMING_START(&t_first, "FIRST MSG");
+ TIMING_START(&t_all, "ALL MSGS");
+
+ while (cnt < exp_cnt) {
+ rd_kafka_message_t *rkmessage;
+ int msg_id;
+
+ rkmessage =
+ rd_kafka_consume(rkt, partition, tmout_multip(5000));
+
+ if (TIMING_EVERY(&t_all, 3 * 1000000))
+ TEST_SAY(
+ "%s: "
+ "consumed %3d%%: %d/%d messages "
+ "(%d msgs/s, %d bytes/s)\n",
+ what, cnt * 100 / exp_cnt, cnt, exp_cnt,
+ (int)(cnt / (TIMING_DURATION(&t_all) / 1000000)),
+ (int)(tot_bytes /
+ (TIMING_DURATION(&t_all) / 1000000)));
+
+ if (!rkmessage)
+ TEST_FAIL("%s: consume_msgs: %s [%" PRId32
+ "]: "
+ "expected msg #%d (%d/%d): timed out\n",
+ what, rd_kafka_topic_name(rkt), partition,
+ msg_next, cnt, exp_cnt);
+
+ if (rkmessage->err)
+ TEST_FAIL("%s: consume_msgs: %s [%" PRId32
+ "]: "
+ "expected msg #%d (%d/%d): got error: %s\n",
+ what, rd_kafka_topic_name(rkt), partition,
+ msg_next, cnt, exp_cnt,
+ rd_kafka_err2str(rkmessage->err));
+
+ if (cnt == 0)
+ TIMING_STOP(&t_first);
+
+ if (parse_fmt)
+ test_msg_parse(testid, rkmessage, partition, &msg_id);
+ else
+ msg_id = 0;
+
+ if (test_level >= 3)
+ TEST_SAY("%s: consume_msgs: %s [%" PRId32
+ "]: "
+ "got msg #%d at offset %" PRId64
+ " (expect #%d at offset %" PRId64 ")\n",
+ what, rd_kafka_topic_name(rkt), partition,
+ msg_id, rkmessage->offset, msg_next,
+ offset >= 0 ? offset + cnt : -1);
+
+ if (parse_fmt && msg_id != msg_next) {
+ TEST_SAY("%s: consume_msgs: %s [%" PRId32
+ "]: "
+ "expected msg #%d (%d/%d): got msg #%d\n",
+ what, rd_kafka_topic_name(rkt), partition,
+ msg_next, cnt, exp_cnt, msg_id);
+ fails++;
+ }
+
+ cnt++;
+ tot_bytes += rkmessage->len;
+ msg_next++;
+ offset_last = rkmessage->offset;
+
+ rd_kafka_message_destroy(rkmessage);
+ }
+
+ TIMING_STOP(&t_all);
+
+ if (fails)
+ TEST_FAIL("%s: consume_msgs: %s [%" PRId32 "]: %d failures\n",
+ what, rd_kafka_topic_name(rkt), partition, fails);
+
+ TEST_SAY("%s: consume_msgs: %s [%" PRId32
+ "]: "
+ "%d/%d messages consumed succesfully\n",
+ what, rd_kafka_topic_name(rkt), partition, cnt, exp_cnt);
+ return offset_last;
+}
+
+
+/**
+ * Create high-level consumer subscribing to \p topic from BEGINNING
+ * and expects \d exp_msgcnt with matching \p testid
+ * Destroys consumer when done.
+ *
+ * @param txn If true, isolation.level is set to read_committed.
+ * @param partition If -1 the topic will be subscribed to, otherwise the
+ * single partition will be assigned immediately.
+ *
+ * If \p group_id is NULL a new unique group is generated
+ */
+void test_consume_msgs_easy_mv0(const char *group_id,
+ const char *topic,
+ rd_bool_t txn,
+ int32_t partition,
+ uint64_t testid,
+ int exp_eofcnt,
+ int exp_msgcnt,
+ rd_kafka_topic_conf_t *tconf,
+ test_msgver_t *mv) {
+ rd_kafka_t *rk;
+ char grpid0[64];
+ rd_kafka_conf_t *conf;
+
+ test_conf_init(&conf, tconf ? NULL : &tconf, 0);
+
+ if (!group_id)
+ group_id = test_str_id_generate(grpid0, sizeof(grpid0));
+
+ if (txn)
+ test_conf_set(conf, "isolation.level", "read_committed");
+
+ test_topic_conf_set(tconf, "auto.offset.reset", "smallest");
+ if (exp_eofcnt != -1)
+ test_conf_set(conf, "enable.partition.eof", "true");
+ rk = test_create_consumer(group_id, NULL, conf, tconf);
+
+ rd_kafka_poll_set_consumer(rk);
+
+ if (partition == -1) {
+ TEST_SAY(
+ "Subscribing to topic %s in group %s "
+ "(expecting %d msgs with testid %" PRIu64 ")\n",
+ topic, group_id, exp_msgcnt, testid);
+
+ test_consumer_subscribe(rk, topic);
+ } else {
+ rd_kafka_topic_partition_list_t *plist;
+
+ TEST_SAY("Assign topic %s [%" PRId32
+ "] in group %s "
+ "(expecting %d msgs with testid %" PRIu64 ")\n",
+ topic, partition, group_id, exp_msgcnt, testid);
+
+ plist = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(plist, topic, partition);
+ test_consumer_assign("consume_easy_mv", rk, plist);
+ rd_kafka_topic_partition_list_destroy(plist);
+ }
+
+ /* Consume messages */
+ test_consumer_poll("consume.easy", rk, testid, exp_eofcnt, -1,
+ exp_msgcnt, mv);
+
+ test_consumer_close(rk);
+
+ rd_kafka_destroy(rk);
+}
+
+void test_consume_msgs_easy(const char *group_id,
+ const char *topic,
+ uint64_t testid,
+ int exp_eofcnt,
+ int exp_msgcnt,
+ rd_kafka_topic_conf_t *tconf) {
+ test_msgver_t mv;
+
+ test_msgver_init(&mv, testid);
+
+ test_consume_msgs_easy_mv(group_id, topic, -1, testid, exp_eofcnt,
+ exp_msgcnt, tconf, &mv);
+
+ test_msgver_clear(&mv);
+}
+
+
+void test_consume_txn_msgs_easy(const char *group_id,
+ const char *topic,
+ uint64_t testid,
+ int exp_eofcnt,
+ int exp_msgcnt,
+ rd_kafka_topic_conf_t *tconf) {
+ test_msgver_t mv;
+
+ test_msgver_init(&mv, testid);
+
+ test_consume_msgs_easy_mv0(group_id, topic, rd_true /*txn*/, -1, testid,
+ exp_eofcnt, exp_msgcnt, tconf, &mv);
+
+ test_msgver_clear(&mv);
+}
+
+
+/**
+ * @brief Waits for up to \p timeout_ms for consumer to receive assignment.
+ * If no assignment received without the timeout the test fails.
+ *
+ * @warning This method will poll the consumer and might thus read messages.
+ * Set \p do_poll to false to use a sleep rather than poll.
+ */
+void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll) {
+ rd_kafka_topic_partition_list_t *assignment = NULL;
+ int i;
+
+ while (1) {
+ rd_kafka_resp_err_t err;
+
+ err = rd_kafka_assignment(rk, &assignment);
+ TEST_ASSERT(!err, "rd_kafka_assignment() failed: %s",
+ rd_kafka_err2str(err));
+
+ if (assignment->cnt > 0)
+ break;
+
+ rd_kafka_topic_partition_list_destroy(assignment);
+
+ if (do_poll)
+ test_consumer_poll_once(rk, NULL, 1000);
+ else
+ rd_usleep(1000 * 1000, NULL);
+ }
+
+ TEST_SAY("%s: Assignment (%d partition(s)): ", rd_kafka_name(rk),
+ assignment->cnt);
+ for (i = 0; i < assignment->cnt; i++)
+ TEST_SAY0("%s%s[%" PRId32 "]", i == 0 ? "" : ", ",
+ assignment->elems[i].topic,
+ assignment->elems[i].partition);
+ TEST_SAY0("\n");
+
+ rd_kafka_topic_partition_list_destroy(assignment);
+}
+
+
+/**
+ * @brief Verify that the consumer's assignment matches the expected assignment.
+ *
+ * The va-list is a NULL-terminated list of (const char *topic, int partition)
+ * tuples.
+ *
+ * Fails the test on mismatch, unless \p fail_immediately is false.
+ */
+void test_consumer_verify_assignment0(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ int fail_immediately,
+ ...) {
+ va_list ap;
+ int cnt = 0;
+ const char *topic;
+ rd_kafka_topic_partition_list_t *assignment;
+ rd_kafka_resp_err_t err;
+ int i;
+
+ if ((err = rd_kafka_assignment(rk, &assignment)))
+ TEST_FAIL("%s:%d: Failed to get assignment for %s: %s", func,
+ line, rd_kafka_name(rk), rd_kafka_err2str(err));
+
+ TEST_SAY("%s assignment (%d partition(s)):\n", rd_kafka_name(rk),
+ assignment->cnt);
+ for (i = 0; i < assignment->cnt; i++)
+ TEST_SAY(" %s [%" PRId32 "]\n", assignment->elems[i].topic,
+ assignment->elems[i].partition);
+
+ va_start(ap, fail_immediately);
+ while ((topic = va_arg(ap, const char *))) {
+ int partition = va_arg(ap, int);
+ cnt++;
+
+ if (!rd_kafka_topic_partition_list_find(assignment, topic,
+ partition))
+ TEST_FAIL_LATER(
+ "%s:%d: Expected %s [%d] not found in %s's "
+ "assignment (%d partition(s))",
+ func, line, topic, partition, rd_kafka_name(rk),
+ assignment->cnt);
+ }
+ va_end(ap);
+
+ if (cnt != assignment->cnt)
+ TEST_FAIL_LATER(
+ "%s:%d: "
+ "Expected %d assigned partition(s) for %s, not %d",
+ func, line, cnt, rd_kafka_name(rk), assignment->cnt);
+
+ if (fail_immediately)
+ TEST_LATER_CHECK();
+
+ rd_kafka_topic_partition_list_destroy(assignment);
+}
+
+
+
+/**
+ * @brief Start subscribing for 'topic'
+ */
+void test_consumer_subscribe(rd_kafka_t *rk, const char *topic) {
+ rd_kafka_topic_partition_list_t *topics;
+ rd_kafka_resp_err_t err;
+
+ topics = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(topics, topic, RD_KAFKA_PARTITION_UA);
+
+ err = rd_kafka_subscribe(rk, topics);
+ if (err)
+ TEST_FAIL("%s: Failed to subscribe to %s: %s\n",
+ rd_kafka_name(rk), topic, rd_kafka_err2str(err));
+
+ rd_kafka_topic_partition_list_destroy(topics);
+}
+
+
+void test_consumer_assign(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions) {
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+
+ TIMING_START(&timing, "ASSIGN.PARTITIONS");
+ err = rd_kafka_assign(rk, partitions);
+ TIMING_STOP(&timing);
+ if (err)
+ TEST_FAIL("%s: failed to assign %d partition(s): %s\n", what,
+ partitions->cnt, rd_kafka_err2str(err));
+ else
+ TEST_SAY("%s: assigned %d partition(s)\n", what,
+ partitions->cnt);
+}
+
+
+void test_consumer_incremental_assign(
+ const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions) {
+ rd_kafka_error_t *error;
+ test_timing_t timing;
+
+ TIMING_START(&timing, "INCREMENTAL.ASSIGN.PARTITIONS");
+ error = rd_kafka_incremental_assign(rk, partitions);
+ TIMING_STOP(&timing);
+ if (error) {
+ TEST_FAIL(
+ "%s: incremental assign of %d partition(s) failed: "
+ "%s",
+ what, partitions->cnt, rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ } else
+ TEST_SAY("%s: incremental assign of %d partition(s) done\n",
+ what, partitions->cnt);
+}
+
+
+void test_consumer_unassign(const char *what, rd_kafka_t *rk) {
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+
+ TIMING_START(&timing, "UNASSIGN.PARTITIONS");
+ err = rd_kafka_assign(rk, NULL);
+ TIMING_STOP(&timing);
+ if (err)
+ TEST_FAIL("%s: failed to unassign current partitions: %s\n",
+ what, rd_kafka_err2str(err));
+ else
+ TEST_SAY("%s: unassigned current partitions\n", what);
+}
+
+
+void test_consumer_incremental_unassign(
+ const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *partitions) {
+ rd_kafka_error_t *error;
+ test_timing_t timing;
+
+ TIMING_START(&timing, "INCREMENTAL.UNASSIGN.PARTITIONS");
+ error = rd_kafka_incremental_unassign(rk, partitions);
+ TIMING_STOP(&timing);
+ if (error) {
+ TEST_FAIL(
+ "%s: incremental unassign of %d partition(s) "
+ "failed: %s",
+ what, partitions->cnt, rd_kafka_error_string(error));
+ rd_kafka_error_destroy(error);
+ } else
+ TEST_SAY("%s: incremental unassign of %d partition(s) done\n",
+ what, partitions->cnt);
+}
+
+
+/**
+ * @brief Assign a single partition with an optional starting offset
+ */
+void test_consumer_assign_partition(const char *what,
+ rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int64_t offset) {
+ rd_kafka_topic_partition_list_t *part;
+
+ part = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(part, topic, partition)->offset =
+ offset;
+
+ test_consumer_assign(what, rk, part);
+
+ rd_kafka_topic_partition_list_destroy(part);
+}
+
+
+void test_consumer_pause_resume_partition(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ rd_bool_t pause) {
+ rd_kafka_topic_partition_list_t *part;
+ rd_kafka_resp_err_t err;
+
+ part = rd_kafka_topic_partition_list_new(1);
+ rd_kafka_topic_partition_list_add(part, topic, partition);
+
+ if (pause)
+ err = rd_kafka_pause_partitions(rk, part);
+ else
+ err = rd_kafka_resume_partitions(rk, part);
+
+ TEST_ASSERT(!err, "Failed to %s %s [%" PRId32 "]: %s",
+ pause ? "pause" : "resume", topic, partition,
+ rd_kafka_err2str(err));
+
+ rd_kafka_topic_partition_list_destroy(part);
+}
+
+
+/**
+ * Message verification services
+ *
+ */
+
+void test_msgver_init(test_msgver_t *mv, uint64_t testid) {
+ memset(mv, 0, sizeof(*mv));
+ mv->testid = testid;
+ /* Max warning logs before suppressing. */
+ mv->log_max = (test_level + 1) * 100;
+}
+
+void test_msgver_ignore_eof(test_msgver_t *mv) {
+ mv->ignore_eof = rd_true;
+}
+
+#define TEST_MV_WARN(mv, ...) \
+ do { \
+ if ((mv)->log_cnt++ > (mv)->log_max) \
+ (mv)->log_suppr_cnt++; \
+ else \
+ TEST_WARN(__VA_ARGS__); \
+ } while (0)
+
+
+
+static void test_mv_mvec_grow(struct test_mv_mvec *mvec, int tot_size) {
+ if (tot_size <= mvec->size)
+ return;
+ mvec->size = tot_size;
+ mvec->m = realloc(mvec->m, sizeof(*mvec->m) * mvec->size);
+}
+
+/**
+ * Make sure there is room for at least \p cnt messages, else grow mvec.
+ */
+static void test_mv_mvec_reserve(struct test_mv_mvec *mvec, int cnt) {
+ test_mv_mvec_grow(mvec, mvec->cnt + cnt);
+}
+
+void test_mv_mvec_init(struct test_mv_mvec *mvec, int exp_cnt) {
+ TEST_ASSERT(mvec->m == NULL, "mvec not cleared");
+
+ if (!exp_cnt)
+ return;
+
+ test_mv_mvec_grow(mvec, exp_cnt);
+}
+
+
+void test_mv_mvec_clear(struct test_mv_mvec *mvec) {
+ if (mvec->m)
+ free(mvec->m);
+}
+
+void test_msgver_clear(test_msgver_t *mv) {
+ int i;
+ for (i = 0; i < mv->p_cnt; i++) {
+ struct test_mv_p *p = mv->p[i];
+ free(p->topic);
+ test_mv_mvec_clear(&p->mvec);
+ free(p);
+ }
+
+ free(mv->p);
+
+ test_msgver_init(mv, mv->testid);
+}
+
+struct test_mv_p *test_msgver_p_get(test_msgver_t *mv,
+ const char *topic,
+ int32_t partition,
+ int do_create) {
+ int i;
+ struct test_mv_p *p;
+
+ for (i = 0; i < mv->p_cnt; i++) {
+ p = mv->p[i];
+ if (p->partition == partition && !strcmp(p->topic, topic))
+ return p;
+ }
+
+ if (!do_create)
+ TEST_FAIL("Topic %s [%d] not found in msgver", topic,
+ partition);
+
+ if (mv->p_cnt == mv->p_size) {
+ mv->p_size = (mv->p_size + 4) * 2;
+ mv->p = realloc(mv->p, sizeof(*mv->p) * mv->p_size);
+ }
+
+ mv->p[mv->p_cnt++] = p = calloc(1, sizeof(*p));
+
+ p->topic = rd_strdup(topic);
+ p->partition = partition;
+ p->eof_offset = RD_KAFKA_OFFSET_INVALID;
+
+ return p;
+}
+
+
+/**
+ * Add (room for) message to message vector.
+ * Resizes the vector as needed.
+ */
+static struct test_mv_m *test_mv_mvec_add(struct test_mv_mvec *mvec) {
+ if (mvec->cnt == mvec->size) {
+ test_mv_mvec_grow(mvec, (mvec->size ? mvec->size * 2 : 10000));
+ }
+
+ mvec->cnt++;
+
+ return &mvec->m[mvec->cnt - 1];
+}
+
+/**
+ * Returns message at index \p mi
+ */
+static RD_INLINE struct test_mv_m *test_mv_mvec_get(struct test_mv_mvec *mvec,
+ int mi) {
+ if (mi >= mvec->cnt)
+ return NULL;
+ return &mvec->m[mi];
+}
+
+/**
+ * @returns the message with msgid \p msgid, or NULL.
+ */
+static struct test_mv_m *test_mv_mvec_find_by_msgid(struct test_mv_mvec *mvec,
+ int msgid) {
+ int mi;
+
+ for (mi = 0; mi < mvec->cnt; mi++)
+ if (mvec->m[mi].msgid == msgid)
+ return &mvec->m[mi];
+
+ return NULL;
+}
+
+
+/**
+ * Print message list to \p fp
+ */
+static RD_UNUSED void test_mv_mvec_dump(FILE *fp,
+ const struct test_mv_mvec *mvec) {
+ int mi;
+
+ fprintf(fp, "*** Dump mvec with %d messages (capacity %d): ***\n",
+ mvec->cnt, mvec->size);
+ for (mi = 0; mi < mvec->cnt; mi++)
+ fprintf(fp, " msgid %d, offset %" PRId64 "\n",
+ mvec->m[mi].msgid, mvec->m[mi].offset);
+ fprintf(fp, "*** Done ***\n");
+}
+
+static void test_mv_mvec_sort(struct test_mv_mvec *mvec,
+ int (*cmp)(const void *, const void *)) {
+ qsort(mvec->m, mvec->cnt, sizeof(*mvec->m), cmp);
+}
+
+
+/**
+ * @brief Adds a message to the msgver service.
+ *
+ * @returns 1 if message is from the expected testid, else 0 (not added)
+ */
+int test_msgver_add_msg00(const char *func,
+ int line,
+ const char *clientname,
+ test_msgver_t *mv,
+ uint64_t testid,
+ const char *topic,
+ int32_t partition,
+ int64_t offset,
+ int64_t timestamp,
+ int32_t broker_id,
+ rd_kafka_resp_err_t err,
+ int msgnum) {
+ struct test_mv_p *p;
+ struct test_mv_m *m;
+
+ if (testid != mv->testid) {
+ TEST_SAYL(3,
+ "%s:%d: %s: mismatching testid %" PRIu64
+ " != %" PRIu64 "\n",
+ func, line, clientname, testid, mv->testid);
+ return 0; /* Ignore message */
+ }
+
+ if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF && mv->ignore_eof) {
+ TEST_SAYL(3, "%s:%d: %s: ignoring EOF for %s [%" PRId32 "]\n",
+ func, line, clientname, topic, partition);
+ return 0; /* Ignore message */
+ }
+
+ p = test_msgver_p_get(mv, topic, partition, 1);
+
+ if (err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ p->eof_offset = offset;
+ return 1;
+ }
+
+ m = test_mv_mvec_add(&p->mvec);
+
+ m->offset = offset;
+ m->msgid = msgnum;
+ m->timestamp = timestamp;
+ m->broker_id = broker_id;
+
+ if (test_level > 2) {
+ TEST_SAY(
+ "%s:%d: %s: "
+ "Recv msg %s [%" PRId32 "] offset %" PRId64
+ " msgid %d "
+ "timestamp %" PRId64 " broker %" PRId32 "\n",
+ func, line, clientname, p->topic, p->partition, m->offset,
+ m->msgid, m->timestamp, m->broker_id);
+ }
+
+ mv->msgcnt++;
+
+ return 1;
+}
+
+/**
+ * Adds a message to the msgver service.
+ *
+ * Message must be a proper message or PARTITION_EOF.
+ *
+ * @param override_topic if non-NULL, overrides the rkmessage's topic
+ * with this one.
+ *
+ * @returns 1 if message is from the expected testid, else 0 (not added).
+ */
+int test_msgver_add_msg0(const char *func,
+ int line,
+ const char *clientname,
+ test_msgver_t *mv,
+ const rd_kafka_message_t *rkmessage,
+ const char *override_topic) {
+ uint64_t in_testid;
+ int in_part;
+ int in_msgnum = -1;
+ char buf[128];
+ const void *val;
+ size_t valsize;
+
+ if (mv->fwd)
+ test_msgver_add_msg0(func, line, clientname, mv->fwd, rkmessage,
+ override_topic);
+
+ if (rd_kafka_message_status(rkmessage) ==
+ RD_KAFKA_MSG_STATUS_NOT_PERSISTED &&
+ rkmessage->err) {
+ if (rkmessage->err != RD_KAFKA_RESP_ERR__PARTITION_EOF)
+ return 0; /* Ignore error */
+
+ in_testid = mv->testid;
+
+ } else {
+
+ if (!mv->msgid_hdr) {
+ rd_snprintf(buf, sizeof(buf), "%.*s",
+ (int)rkmessage->len,
+ (char *)rkmessage->payload);
+ val = buf;
+ } else {
+ /* msgid is in message header */
+ rd_kafka_headers_t *hdrs;
+
+ if (rd_kafka_message_headers(rkmessage, &hdrs) ||
+ rd_kafka_header_get_last(hdrs, mv->msgid_hdr, &val,
+ &valsize)) {
+ TEST_SAYL(3,
+ "%s:%d: msgid expected in header %s "
+ "but %s exists for "
+ "message at offset %" PRId64
+ " has no headers\n",
+ func, line, mv->msgid_hdr,
+ hdrs ? "no such header"
+ : "no headers",
+ rkmessage->offset);
+
+ return 0;
+ }
+ }
+
+ if (sscanf(val, "testid=%" SCNu64 ", partition=%i, msg=%i\n",
+ &in_testid, &in_part, &in_msgnum) != 3)
+ TEST_FAIL(
+ "%s:%d: Incorrect format at offset %" PRId64 ": %s",
+ func, line, rkmessage->offset, (const char *)val);
+ }
+
+ return test_msgver_add_msg00(
+ func, line, clientname, mv, in_testid,
+ override_topic ? override_topic
+ : rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_timestamp(rkmessage, NULL),
+ rd_kafka_message_broker_id(rkmessage), rkmessage->err, in_msgnum);
+ return 1;
+}
+
+
+
+/**
+ * Verify that all messages were received in order.
+ *
+ * - Offsets need to occur without gaps
+ * - msgids need to be increasing: but may have gaps, e.g., using partitioner)
+ */
+static int test_mv_mvec_verify_order(test_msgver_t *mv,
+ int flags,
+ struct test_mv_p *p,
+ struct test_mv_mvec *mvec,
+ struct test_mv_vs *vs) {
+ int mi;
+ int fails = 0;
+
+ for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) {
+ struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1);
+ struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
+
+ if (((flags & TEST_MSGVER_BY_OFFSET) &&
+ prev->offset + 1 != this->offset) ||
+ ((flags & TEST_MSGVER_BY_MSGID) &&
+ prev->msgid > this->msgid)) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "] msg rcvidx #%d/%d: "
+ "out of order (prev vs this): "
+ "offset %" PRId64 " vs %" PRId64
+ ", "
+ "msgid %d vs %d\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ mi, mvec->cnt, prev->offset, this->offset,
+ prev->msgid, this->msgid);
+ fails++;
+ } else if ((flags & TEST_MSGVER_BY_BROKER_ID) &&
+ this->broker_id != vs->broker_id) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "] msg rcvidx #%d/%d: "
+ "broker id mismatch: expected %" PRId32
+ ", not %" PRId32 "\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ mi, mvec->cnt, vs->broker_id,
+ this->broker_id);
+ fails++;
+ }
+ }
+
+ return fails;
+}
+
+
+/**
+ * @brief Verify that messages correspond to 'correct' msgver.
+ */
+static int test_mv_mvec_verify_corr(test_msgver_t *mv,
+ int flags,
+ struct test_mv_p *p,
+ struct test_mv_mvec *mvec,
+ struct test_mv_vs *vs) {
+ int mi;
+ int fails = 0;
+ struct test_mv_p *corr_p = NULL;
+ struct test_mv_mvec *corr_mvec;
+ int verifycnt = 0;
+
+ TEST_ASSERT(vs->corr);
+
+ /* Get correct mvec for comparison. */
+ if (p)
+ corr_p = test_msgver_p_get(vs->corr, p->topic, p->partition, 0);
+ if (!corr_p) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "]: "
+ "no corresponding correct partition found\n",
+ p ? p->topic : "*", p ? p->partition : -1);
+ return 1;
+ }
+
+ corr_mvec = &corr_p->mvec;
+
+ for (mi = 0; mi < mvec->cnt; mi++) {
+ struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
+ const struct test_mv_m *corr;
+
+
+ if (flags & TEST_MSGVER_SUBSET)
+ corr =
+ test_mv_mvec_find_by_msgid(corr_mvec, this->msgid);
+ else
+ corr = test_mv_mvec_get(corr_mvec, mi);
+
+ if (0)
+ TEST_MV_WARN(mv,
+ "msg #%d: msgid %d, offset %" PRId64 "\n",
+ mi, this->msgid, this->offset);
+ if (!corr) {
+ if (!(flags & TEST_MSGVER_SUBSET)) {
+ TEST_MV_WARN(
+ mv,
+ " %s [%" PRId32
+ "] msg rcvidx #%d/%d: "
+ "out of range: correct mvec has "
+ "%d messages: "
+ "message offset %" PRId64 ", msgid %d\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ mi, mvec->cnt, corr_mvec->cnt, this->offset,
+ this->msgid);
+ fails++;
+ }
+ continue;
+ }
+
+ if (((flags & TEST_MSGVER_BY_OFFSET) &&
+ this->offset != corr->offset) ||
+ ((flags & TEST_MSGVER_BY_MSGID) &&
+ this->msgid != corr->msgid) ||
+ ((flags & TEST_MSGVER_BY_TIMESTAMP) &&
+ this->timestamp != corr->timestamp) ||
+ ((flags & TEST_MSGVER_BY_BROKER_ID) &&
+ this->broker_id != corr->broker_id)) {
+ TEST_MV_WARN(
+ mv,
+ " %s [%" PRId32
+ "] msg rcvidx #%d/%d: "
+ "did not match correct msg: "
+ "offset %" PRId64 " vs %" PRId64
+ ", "
+ "msgid %d vs %d, "
+ "timestamp %" PRId64 " vs %" PRId64
+ ", "
+ "broker %" PRId32 " vs %" PRId32 " (fl 0x%x)\n",
+ p ? p->topic : "*", p ? p->partition : -1, mi,
+ mvec->cnt, this->offset, corr->offset, this->msgid,
+ corr->msgid, this->timestamp, corr->timestamp,
+ this->broker_id, corr->broker_id, flags);
+ fails++;
+ } else {
+ verifycnt++;
+ }
+ }
+
+ if (verifycnt != corr_mvec->cnt && !(flags & TEST_MSGVER_SUBSET)) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "]: of %d input messages, "
+ "only %d/%d matched correct messages\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ mvec->cnt, verifycnt, corr_mvec->cnt);
+ fails++;
+ }
+
+ return fails;
+}
+
+
+
+static int test_mv_m_cmp_offset(const void *_a, const void *_b) {
+ const struct test_mv_m *a = _a, *b = _b;
+
+ return RD_CMP(a->offset, b->offset);
+}
+
+static int test_mv_m_cmp_msgid(const void *_a, const void *_b) {
+ const struct test_mv_m *a = _a, *b = _b;
+
+ return RD_CMP(a->msgid, b->msgid);
+}
+
+
+/**
+ * Verify that there are no duplicate message.
+ *
+ * - Offsets are checked
+ * - msgids are checked
+ *
+ * * NOTE: This sorts the message (.m) array, first by offset, then by msgid
+ * and leaves the message array sorted (by msgid)
+ */
+static int test_mv_mvec_verify_dup(test_msgver_t *mv,
+ int flags,
+ struct test_mv_p *p,
+ struct test_mv_mvec *mvec,
+ struct test_mv_vs *vs) {
+ int mi;
+ int fails = 0;
+ enum { _P_OFFSET, _P_MSGID } pass;
+
+ for (pass = _P_OFFSET; pass <= _P_MSGID; pass++) {
+
+ if (pass == _P_OFFSET) {
+ if (!(flags & TEST_MSGVER_BY_OFFSET))
+ continue;
+ test_mv_mvec_sort(mvec, test_mv_m_cmp_offset);
+ } else if (pass == _P_MSGID) {
+ if (!(flags & TEST_MSGVER_BY_MSGID))
+ continue;
+ test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid);
+ }
+
+ for (mi = 1 /*skip first*/; mi < mvec->cnt; mi++) {
+ struct test_mv_m *prev = test_mv_mvec_get(mvec, mi - 1);
+ struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
+ int is_dup = 0;
+
+ if (pass == _P_OFFSET)
+ is_dup = prev->offset == this->offset;
+ else if (pass == _P_MSGID)
+ is_dup = prev->msgid == this->msgid;
+
+ if (!is_dup)
+ continue;
+
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "] "
+ "duplicate msg (prev vs this): "
+ "offset %" PRId64 " vs %" PRId64
+ ", "
+ "msgid %d vs %d\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ prev->offset, this->offset, prev->msgid,
+ this->msgid);
+ fails++;
+ }
+ }
+
+ return fails;
+}
+
+/**
+ * @brief Verify that all messages are from the correct broker.
+ */
+static int test_mv_mvec_verify_broker(test_msgver_t *mv,
+ int flags,
+ struct test_mv_p *p,
+ struct test_mv_mvec *mvec,
+ struct test_mv_vs *vs) {
+ int mi;
+ int fails = 0;
+
+ /* Assume that the correct flag has been checked already. */
+
+
+ rd_assert(flags & TEST_MSGVER_BY_BROKER_ID);
+ for (mi = 0; mi < mvec->cnt; mi++) {
+ struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
+ if (this->broker_id != vs->broker_id) {
+ TEST_MV_WARN(
+ mv,
+ " %s [%" PRId32
+ "] broker_id check: "
+ "msgid #%d (at mi %d): "
+ "broker_id %" PRId32
+ " is not the expected broker_id %" PRId32 "\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ this->msgid, mi, this->broker_id, vs->broker_id);
+ fails++;
+ }
+ }
+ return fails;
+}
+
+
+/**
+ * Verify that \p mvec contains the expected range:
+ * - TEST_MSGVER_BY_MSGID: msgid within \p vs->msgid_min .. \p vs->msgid_max
+ * - TEST_MSGVER_BY_TIMESTAMP: timestamp with \p vs->timestamp_min .. _max
+ *
+ * * NOTE: TEST_MSGVER_BY_MSGID is required
+ *
+ * * NOTE: This sorts the message (.m) array by msgid
+ * and leaves the message array sorted (by msgid)
+ */
+static int test_mv_mvec_verify_range(test_msgver_t *mv,
+ int flags,
+ struct test_mv_p *p,
+ struct test_mv_mvec *mvec,
+ struct test_mv_vs *vs) {
+ int mi;
+ int fails = 0;
+ int cnt = 0;
+ int exp_cnt = vs->msgid_max - vs->msgid_min + 1;
+ int skip_cnt = 0;
+
+ if (!(flags & TEST_MSGVER_BY_MSGID))
+ return 0;
+
+ test_mv_mvec_sort(mvec, test_mv_m_cmp_msgid);
+
+ // test_mv_mvec_dump(stdout, mvec);
+
+ for (mi = 0; mi < mvec->cnt; mi++) {
+ struct test_mv_m *prev =
+ mi ? test_mv_mvec_get(mvec, mi - 1) : NULL;
+ struct test_mv_m *this = test_mv_mvec_get(mvec, mi);
+
+ if (this->msgid < vs->msgid_min) {
+ skip_cnt++;
+ continue;
+ } else if (this->msgid > vs->msgid_max)
+ break;
+
+ if (flags & TEST_MSGVER_BY_TIMESTAMP) {
+ if (this->timestamp < vs->timestamp_min ||
+ this->timestamp > vs->timestamp_max) {
+ TEST_MV_WARN(
+ mv,
+ " %s [%" PRId32
+ "] range check: "
+ "msgid #%d (at mi %d): "
+ "timestamp %" PRId64
+ " outside "
+ "expected range %" PRId64 "..%" PRId64 "\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ this->msgid, mi, this->timestamp,
+ vs->timestamp_min, vs->timestamp_max);
+ fails++;
+ }
+ }
+
+ if ((flags & TEST_MSGVER_BY_BROKER_ID) &&
+ this->broker_id != vs->broker_id) {
+ TEST_MV_WARN(
+ mv,
+ " %s [%" PRId32
+ "] range check: "
+ "msgid #%d (at mi %d): "
+ "expected broker id %" PRId32 ", not %" PRId32 "\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ this->msgid, mi, vs->broker_id, this->broker_id);
+ fails++;
+ }
+
+ if (cnt++ == 0) {
+ if (this->msgid != vs->msgid_min) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "] range check: "
+ "first message #%d (at mi %d) "
+ "is not first in "
+ "expected range %d..%d\n",
+ p ? p->topic : "*",
+ p ? p->partition : -1, this->msgid,
+ mi, vs->msgid_min, vs->msgid_max);
+ fails++;
+ }
+ } else if (cnt > exp_cnt) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "] range check: "
+ "too many messages received (%d/%d) at "
+ "msgid %d for expected range %d..%d\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ cnt, exp_cnt, this->msgid, vs->msgid_min,
+ vs->msgid_max);
+ fails++;
+ }
+
+ if (!prev) {
+ skip_cnt++;
+ continue;
+ }
+
+ if (prev->msgid + 1 != this->msgid) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "] range check: "
+ " %d message(s) missing between "
+ "msgid %d..%d in expected range %d..%d\n",
+ p ? p->topic : "*", p ? p->partition : -1,
+ this->msgid - prev->msgid - 1,
+ prev->msgid + 1, this->msgid - 1,
+ vs->msgid_min, vs->msgid_max);
+ fails++;
+ }
+ }
+
+ if (cnt != exp_cnt) {
+ TEST_MV_WARN(mv,
+ " %s [%" PRId32
+ "] range check: "
+ " wrong number of messages seen, wanted %d got %d "
+ "in expected range %d..%d (%d messages skipped)\n",
+ p ? p->topic : "*", p ? p->partition : -1, exp_cnt,
+ cnt, vs->msgid_min, vs->msgid_max, skip_cnt);
+ fails++;
+ }
+
+ return fails;
+}
+
+
+
+/**
+ * Run verifier \p f for all partitions.
+ */
+#define test_mv_p_verify_f(mv, flags, f, vs) \
+ test_mv_p_verify_f0(mv, flags, f, #f, vs)
+static int test_mv_p_verify_f0(test_msgver_t *mv,
+ int flags,
+ int (*f)(test_msgver_t *mv,
+ int flags,
+ struct test_mv_p *p,
+ struct test_mv_mvec *mvec,
+ struct test_mv_vs *vs),
+ const char *f_name,
+ struct test_mv_vs *vs) {
+ int i;
+ int fails = 0;
+
+ for (i = 0; i < mv->p_cnt; i++) {
+ TEST_SAY("Verifying %s [%" PRId32 "] %d msgs with %s\n",
+ mv->p[i]->topic, mv->p[i]->partition,
+ mv->p[i]->mvec.cnt, f_name);
+ fails += f(mv, flags, mv->p[i], &mv->p[i]->mvec, vs);
+ }
+
+ return fails;
+}
+
+
+/**
+ * Collect all messages from all topics and partitions into vs->mvec
+ */
+static void test_mv_collect_all_msgs(test_msgver_t *mv, struct test_mv_vs *vs) {
+ int i;
+
+ for (i = 0; i < mv->p_cnt; i++) {
+ struct test_mv_p *p = mv->p[i];
+ int mi;
+
+ test_mv_mvec_reserve(&vs->mvec, p->mvec.cnt);
+ for (mi = 0; mi < p->mvec.cnt; mi++) {
+ struct test_mv_m *m = test_mv_mvec_get(&p->mvec, mi);
+ struct test_mv_m *m_new = test_mv_mvec_add(&vs->mvec);
+ *m_new = *m;
+ }
+ }
+}
+
+
+/**
+ * Verify that all messages (by msgid) in range msg_base+exp_cnt were received
+ * and received only once.
+ * This works across all partitions.
+ */
+static int
+test_msgver_verify_range(test_msgver_t *mv, int flags, struct test_mv_vs *vs) {
+ int fails = 0;
+
+ /**
+ * Create temporary array to hold expected message set,
+ * then traverse all topics and partitions and move matching messages
+ * to that set. Then verify the message set.
+ */
+
+ test_mv_mvec_init(&vs->mvec, vs->exp_cnt);
+
+ /* Collect all msgs into vs mvec */
+ test_mv_collect_all_msgs(mv, vs);
+
+ fails += test_mv_mvec_verify_range(mv, TEST_MSGVER_BY_MSGID | flags,
+ NULL, &vs->mvec, vs);
+ fails += test_mv_mvec_verify_dup(mv, TEST_MSGVER_BY_MSGID | flags, NULL,
+ &vs->mvec, vs);
+
+ test_mv_mvec_clear(&vs->mvec);
+
+ return fails;
+}
+
+
+/**
+ * Verify that \p exp_cnt messages were received for \p topic and \p partition
+ * starting at msgid base \p msg_base.
+ */
+int test_msgver_verify_part0(const char *func,
+ int line,
+ const char *what,
+ test_msgver_t *mv,
+ int flags,
+ const char *topic,
+ int partition,
+ int msg_base,
+ int exp_cnt) {
+ int fails = 0;
+ struct test_mv_vs vs = {.msg_base = msg_base, .exp_cnt = exp_cnt};
+ struct test_mv_p *p;
+
+ TEST_SAY(
+ "%s:%d: %s: Verifying %d received messages (flags 0x%x) "
+ "in %s [%d]: expecting msgids %d..%d (%d)\n",
+ func, line, what, mv->msgcnt, flags, topic, partition, msg_base,
+ msg_base + exp_cnt, exp_cnt);
+
+ p = test_msgver_p_get(mv, topic, partition, 0);
+
+ /* Per-partition checks */
+ if (flags & TEST_MSGVER_ORDER)
+ fails += test_mv_mvec_verify_order(mv, flags, p, &p->mvec, &vs);
+ if (flags & TEST_MSGVER_DUP)
+ fails += test_mv_mvec_verify_dup(mv, flags, p, &p->mvec, &vs);
+
+ if (mv->msgcnt < vs.exp_cnt) {
+ TEST_MV_WARN(mv,
+ "%s:%d: "
+ "%s [%" PRId32
+ "] expected %d messages but only "
+ "%d received\n",
+ func, line, p ? p->topic : "*",
+ p ? p->partition : -1, vs.exp_cnt, mv->msgcnt);
+ fails++;
+ }
+
+
+ if (mv->log_suppr_cnt > 0)
+ TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n",
+ func, line, what, mv->log_suppr_cnt);
+
+ if (fails)
+ TEST_FAIL(
+ "%s:%d: %s: Verification of %d received messages "
+ "failed: "
+ "expected msgids %d..%d (%d): see previous errors\n",
+ func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt,
+ exp_cnt);
+ else
+ TEST_SAY(
+ "%s:%d: %s: Verification of %d received messages "
+ "succeeded: "
+ "expected msgids %d..%d (%d)\n",
+ func, line, what, mv->msgcnt, msg_base, msg_base + exp_cnt,
+ exp_cnt);
+
+ return fails;
+}
+
+/**
+ * Verify that \p exp_cnt messages were received starting at
+ * msgid base \p msg_base.
+ */
+int test_msgver_verify0(const char *func,
+ int line,
+ const char *what,
+ test_msgver_t *mv,
+ int flags,
+ struct test_mv_vs vs) {
+ int fails = 0;
+
+ TEST_SAY(
+ "%s:%d: %s: Verifying %d received messages (flags 0x%x): "
+ "expecting msgids %d..%d (%d)\n",
+ func, line, what, mv->msgcnt, flags, vs.msg_base,
+ vs.msg_base + vs.exp_cnt, vs.exp_cnt);
+ if (flags & TEST_MSGVER_BY_TIMESTAMP) {
+ assert((flags & TEST_MSGVER_BY_MSGID)); /* Required */
+ TEST_SAY(
+ "%s:%d: %s: "
+ " and expecting timestamps %" PRId64 "..%" PRId64 "\n",
+ func, line, what, vs.timestamp_min, vs.timestamp_max);
+ }
+
+ /* Per-partition checks */
+ if (flags & TEST_MSGVER_ORDER)
+ fails += test_mv_p_verify_f(mv, flags,
+ test_mv_mvec_verify_order, &vs);
+ if (flags & TEST_MSGVER_DUP)
+ fails +=
+ test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_dup, &vs);
+
+ if (flags & TEST_MSGVER_BY_BROKER_ID)
+ fails += test_mv_p_verify_f(mv, flags,
+ test_mv_mvec_verify_broker, &vs);
+
+ /* Checks across all partitions */
+ if ((flags & TEST_MSGVER_RANGE) && vs.exp_cnt > 0) {
+ vs.msgid_min = vs.msg_base;
+ vs.msgid_max = vs.msgid_min + vs.exp_cnt - 1;
+ fails += test_msgver_verify_range(mv, flags, &vs);
+ }
+
+ if (mv->log_suppr_cnt > 0)
+ TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n",
+ func, line, what, mv->log_suppr_cnt);
+
+ if (vs.exp_cnt != mv->msgcnt) {
+ if (!(flags & TEST_MSGVER_SUBSET)) {
+ TEST_WARN("%s:%d: %s: expected %d messages, got %d\n",
+ func, line, what, vs.exp_cnt, mv->msgcnt);
+ fails++;
+ }
+ }
+
+ if (fails)
+ TEST_FAIL(
+ "%s:%d: %s: Verification of %d received messages "
+ "failed: "
+ "expected msgids %d..%d (%d): see previous errors\n",
+ func, line, what, mv->msgcnt, vs.msg_base,
+ vs.msg_base + vs.exp_cnt, vs.exp_cnt);
+ else
+ TEST_SAY(
+ "%s:%d: %s: Verification of %d received messages "
+ "succeeded: "
+ "expected msgids %d..%d (%d)\n",
+ func, line, what, mv->msgcnt, vs.msg_base,
+ vs.msg_base + vs.exp_cnt, vs.exp_cnt);
+
+ return fails;
+}
+
+
+
+void test_verify_rkmessage0(const char *func,
+ int line,
+ rd_kafka_message_t *rkmessage,
+ uint64_t testid,
+ int32_t partition,
+ int msgnum) {
+ uint64_t in_testid;
+ int in_part;
+ int in_msgnum;
+ char buf[128];
+
+ rd_snprintf(buf, sizeof(buf), "%.*s", (int)rkmessage->len,
+ (char *)rkmessage->payload);
+
+ if (sscanf(buf, "testid=%" SCNu64 ", partition=%i, msg=%i\n",
+ &in_testid, &in_part, &in_msgnum) != 3)
+ TEST_FAIL("Incorrect format: %s", buf);
+
+ if (testid != in_testid || (partition != -1 && partition != in_part) ||
+ (msgnum != -1 && msgnum != in_msgnum) || in_msgnum < 0)
+ goto fail_match;
+
+ if (test_level > 2) {
+ TEST_SAY("%s:%i: Our testid %" PRIu64
+ ", part %i (%i), msg %i\n",
+ func, line, testid, (int)partition,
+ (int)rkmessage->partition, msgnum);
+ }
+
+
+ return;
+
+fail_match:
+ TEST_FAIL("%s:%i: Our testid %" PRIu64
+ ", part %i, msg %i did "
+ "not match message: \"%s\"\n",
+ func, line, testid, (int)partition, msgnum, buf);
+}
+
+
+/**
+ * @brief Verify that \p mv is identical to \p corr according to flags.
+ */
+void test_msgver_verify_compare0(const char *func,
+ int line,
+ const char *what,
+ test_msgver_t *mv,
+ test_msgver_t *corr,
+ int flags) {
+ struct test_mv_vs vs;
+ int fails = 0;
+
+ memset(&vs, 0, sizeof(vs));
+
+ TEST_SAY(
+ "%s:%d: %s: Verifying %d received messages (flags 0x%x) by "
+ "comparison to correct msgver (%d messages)\n",
+ func, line, what, mv->msgcnt, flags, corr->msgcnt);
+
+ vs.corr = corr;
+
+ /* Per-partition checks */
+ fails += test_mv_p_verify_f(mv, flags, test_mv_mvec_verify_corr, &vs);
+
+ if (mv->log_suppr_cnt > 0)
+ TEST_WARN("%s:%d: %s: %d message warning logs suppressed\n",
+ func, line, what, mv->log_suppr_cnt);
+
+ if (corr->msgcnt != mv->msgcnt) {
+ if (!(flags & TEST_MSGVER_SUBSET)) {
+ TEST_WARN("%s:%d: %s: expected %d messages, got %d\n",
+ func, line, what, corr->msgcnt, mv->msgcnt);
+ fails++;
+ }
+ }
+
+ if (fails)
+ TEST_FAIL(
+ "%s:%d: %s: Verification of %d received messages "
+ "failed: expected %d messages: see previous errors\n",
+ func, line, what, mv->msgcnt, corr->msgcnt);
+ else
+ TEST_SAY(
+ "%s:%d: %s: Verification of %d received messages "
+ "succeeded: matching %d messages from correct msgver\n",
+ func, line, what, mv->msgcnt, corr->msgcnt);
+}
+
+
+/**
+ * Consumer poll but dont expect any proper messages for \p timeout_ms.
+ */
+void test_consumer_poll_no_msgs(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int timeout_ms) {
+ int64_t tmout = test_clock() + ((int64_t)timeout_ms * 1000);
+ int cnt = 0;
+ test_timing_t t_cons;
+ test_msgver_t mv;
+
+ test_msgver_init(&mv, testid);
+
+ if (what)
+ TEST_SAY("%s: not expecting any messages for %dms\n", what,
+ timeout_ms);
+
+ TIMING_START(&t_cons, "CONSUME");
+
+ do {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consumer_poll(rk, timeout_ms);
+ if (!rkmessage)
+ continue;
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ TEST_SAY("%s [%" PRId32
+ "] reached EOF at "
+ "offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+ test_msgver_add_msg(rk, &mv, rkmessage);
+
+ } else if (rkmessage->err) {
+ TEST_FAIL(
+ "%s [%" PRId32 "] error (offset %" PRId64 "): %s",
+ rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
+ : "(no-topic)",
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+
+ } else {
+ if (test_msgver_add_msg(rk, &mv, rkmessage)) {
+ TEST_MV_WARN(
+ &mv,
+ "Received unexpected message on "
+ "%s [%" PRId32
+ "] at offset "
+ "%" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+ cnt++;
+ }
+ }
+
+ rd_kafka_message_destroy(rkmessage);
+ } while (test_clock() <= tmout);
+
+ if (what)
+ TIMING_STOP(&t_cons);
+
+ test_msgver_verify(what, &mv, TEST_MSGVER_ALL, 0, 0);
+ test_msgver_clear(&mv);
+
+ TEST_ASSERT(cnt == 0, "Expected 0 messages, got %d", cnt);
+}
+
+/**
+ * @brief Consumer poll with expectation that a \p err will be reached
+ * within \p timeout_ms.
+ */
+void test_consumer_poll_expect_err(rd_kafka_t *rk,
+ uint64_t testid,
+ int timeout_ms,
+ rd_kafka_resp_err_t err) {
+ int64_t tmout = test_clock() + ((int64_t)timeout_ms * 1000);
+
+ TEST_SAY("%s: expecting error %s within %dms\n", rd_kafka_name(rk),
+ rd_kafka_err2name(err), timeout_ms);
+
+ do {
+ rd_kafka_message_t *rkmessage;
+ rkmessage = rd_kafka_consumer_poll(rk, timeout_ms);
+ if (!rkmessage)
+ continue;
+
+ if (rkmessage->err == err) {
+ TEST_SAY("Got expected error: %s: %s\n",
+ rd_kafka_err2name(rkmessage->err),
+ rd_kafka_message_errstr(rkmessage));
+ rd_kafka_message_destroy(rkmessage);
+
+ return;
+ } else if (rkmessage->err) {
+ TEST_FAIL("%s [%" PRId32
+ "] unexpected error "
+ "(offset %" PRId64 "): %s",
+ rkmessage->rkt
+ ? rd_kafka_topic_name(rkmessage->rkt)
+ : "(no-topic)",
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_err2name(rkmessage->err));
+ }
+
+ rd_kafka_message_destroy(rkmessage);
+ } while (test_clock() <= tmout);
+ TEST_FAIL("Expected error %s not seen in %dms", rd_kafka_err2name(err),
+ timeout_ms);
+}
+
+/**
+ * Call consumer poll once and then return.
+ * Messages are handled.
+ *
+ * \p mv is optional
+ *
+ * @returns 0 on timeout, 1 if a message was received or .._PARTITION_EOF
+ * if EOF was reached.
+ * TEST_FAIL()s on all errors.
+ */
+int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage = rd_kafka_consumer_poll(rk, timeout_ms);
+ if (!rkmessage)
+ return 0;
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ TEST_SAY("%s [%" PRId32
+ "] reached EOF at "
+ "offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+ if (mv)
+ test_msgver_add_msg(rk, mv, rkmessage);
+ rd_kafka_message_destroy(rkmessage);
+ return RD_KAFKA_RESP_ERR__PARTITION_EOF;
+
+ } else if (rkmessage->err) {
+ TEST_FAIL("%s [%" PRId32 "] error (offset %" PRId64 "): %s",
+ rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
+ : "(no-topic)",
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+
+ } else {
+ if (mv)
+ test_msgver_add_msg(rk, mv, rkmessage);
+ }
+
+ rd_kafka_message_destroy(rkmessage);
+ return 1;
+}
+
+/**
+ * @param exact Require exact exp_eof_cnt (unless -1) and exp_cnt (unless -1).
+ * If false: poll until either one is reached.
+ * @param timeout_ms Each call to poll has a timeout set by this argument. The
+ * test fails if any poll times out.
+ */
+int test_consumer_poll_exact_timeout(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ rd_bool_t exact,
+ test_msgver_t *mv,
+ int timeout_ms) {
+ int eof_cnt = 0;
+ int cnt = 0;
+ test_timing_t t_cons;
+
+ TEST_SAY("%s: consume %s%d messages\n", what, exact ? "exactly " : "",
+ exp_cnt);
+
+ TIMING_START(&t_cons, "CONSUME");
+
+ while ((!exact && ((exp_eof_cnt <= 0 || eof_cnt < exp_eof_cnt) &&
+ (exp_cnt <= 0 || cnt < exp_cnt))) ||
+ (exact && (eof_cnt < exp_eof_cnt || cnt < exp_cnt))) {
+ rd_kafka_message_t *rkmessage;
+
+ rkmessage =
+ rd_kafka_consumer_poll(rk, tmout_multip(timeout_ms));
+ if (!rkmessage) /* Shouldn't take this long to get a msg */
+ TEST_FAIL(
+ "%s: consumer_poll() timeout "
+ "(%d/%d eof, %d/%d msgs)\n",
+ what, eof_cnt, exp_eof_cnt, cnt, exp_cnt);
+
+
+ if (rkmessage->err == RD_KAFKA_RESP_ERR__PARTITION_EOF) {
+ TEST_SAY("%s [%" PRId32
+ "] reached EOF at "
+ "offset %" PRId64 "\n",
+ rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset);
+ TEST_ASSERT(exp_eof_cnt != 0, "expected no EOFs");
+ if (mv)
+ test_msgver_add_msg(rk, mv, rkmessage);
+ eof_cnt++;
+
+ } else if (rkmessage->err) {
+ TEST_FAIL(
+ "%s [%" PRId32 "] error (offset %" PRId64 "): %s",
+ rkmessage->rkt ? rd_kafka_topic_name(rkmessage->rkt)
+ : "(no-topic)",
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_errstr(rkmessage));
+
+ } else {
+ TEST_SAYL(4,
+ "%s: consumed message on %s [%" PRId32
+ "] "
+ "at offset %" PRId64 " (leader epoch %" PRId32
+ ")\n",
+ what, rd_kafka_topic_name(rkmessage->rkt),
+ rkmessage->partition, rkmessage->offset,
+ rd_kafka_message_leader_epoch(rkmessage));
+
+ if (!mv || test_msgver_add_msg(rk, mv, rkmessage))
+ cnt++;
+ }
+
+ rd_kafka_message_destroy(rkmessage);
+ }
+
+ TIMING_STOP(&t_cons);
+
+ TEST_SAY("%s: consumed %d/%d messages (%d/%d EOFs)\n", what, cnt,
+ exp_cnt, eof_cnt, exp_eof_cnt);
+
+ TEST_ASSERT(!exact || ((exp_cnt == -1 || exp_cnt == cnt) &&
+ (exp_eof_cnt == -1 || exp_eof_cnt == eof_cnt)),
+ "%s: mismatch between exact expected counts and actual: "
+ "%d/%d EOFs, %d/%d msgs",
+ what, eof_cnt, exp_eof_cnt, cnt, exp_cnt);
+
+ if (exp_cnt == 0)
+ TEST_ASSERT(cnt == 0 && eof_cnt == exp_eof_cnt,
+ "%s: expected no messages and %d EOFs: "
+ "got %d messages and %d EOFs",
+ what, exp_eof_cnt, cnt, eof_cnt);
+ return cnt;
+}
+
+
+/**
+ * @param exact Require exact exp_eof_cnt (unless -1) and exp_cnt (unless -1).
+ * If false: poll until either one is reached.
+ */
+int test_consumer_poll_exact(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ rd_bool_t exact,
+ test_msgver_t *mv) {
+ return test_consumer_poll_exact_timeout(what, rk, testid, exp_eof_cnt,
+ exp_msg_base, exp_cnt, exact,
+ mv, 10 * 1000);
+}
+
+int test_consumer_poll(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ test_msgver_t *mv) {
+ return test_consumer_poll_exact(what, rk, testid, exp_eof_cnt,
+ exp_msg_base, exp_cnt,
+ rd_false /*not exact */, mv);
+}
+
+int test_consumer_poll_timeout(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ test_msgver_t *mv,
+ int timeout_ms) {
+ return test_consumer_poll_exact_timeout(
+ what, rk, testid, exp_eof_cnt, exp_msg_base, exp_cnt,
+ rd_false /*not exact */, mv, timeout_ms);
+}
+
+void test_consumer_close(rd_kafka_t *rk) {
+ rd_kafka_resp_err_t err;
+ test_timing_t timing;
+
+ TEST_SAY("Closing consumer %s\n", rd_kafka_name(rk));
+
+ TIMING_START(&timing, "CONSUMER.CLOSE");
+ err = rd_kafka_consumer_close(rk);
+ TIMING_STOP(&timing);
+ if (err)
+ TEST_FAIL("Failed to close consumer: %s\n",
+ rd_kafka_err2str(err));
+}
+
+
+void test_flush(rd_kafka_t *rk, int timeout_ms) {
+ test_timing_t timing;
+ rd_kafka_resp_err_t err;
+
+ TEST_SAY("%s: Flushing %d messages\n", rd_kafka_name(rk),
+ rd_kafka_outq_len(rk));
+ TIMING_START(&timing, "FLUSH");
+ err = rd_kafka_flush(rk, timeout_ms);
+ TIMING_STOP(&timing);
+ if (err)
+ TEST_FAIL("Failed to flush(%s, %d): %s: len() = %d\n",
+ rd_kafka_name(rk), timeout_ms, rd_kafka_err2str(err),
+ rd_kafka_outq_len(rk));
+}
+
+
+void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char errstr[512];
+ if (rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ TEST_FAIL("Failed to set config \"%s\"=\"%s\": %s\n", name, val,
+ errstr);
+}
+
+/**
+ * @brief Get configuration value for property \p name.
+ *
+ * @param conf Configuration to get value from. If NULL the test.conf (if any)
+ * configuration will be used.
+ */
+char *test_conf_get(const rd_kafka_conf_t *conf, const char *name) {
+ static RD_TLS char ret[256];
+ size_t ret_sz = sizeof(ret);
+ rd_kafka_conf_t *def_conf = NULL;
+
+ if (!conf) /* Use the current test.conf */
+ test_conf_init(&def_conf, NULL, 0);
+
+ if (rd_kafka_conf_get(conf ? conf : def_conf, name, ret, &ret_sz) !=
+ RD_KAFKA_CONF_OK)
+ TEST_FAIL("Failed to get config \"%s\": %s\n", name,
+ "unknown property");
+
+ if (def_conf)
+ rd_kafka_conf_destroy(def_conf);
+
+ return ret;
+}
+
+
+char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf,
+ const char *name) {
+ static RD_TLS char ret[256];
+ size_t ret_sz = sizeof(ret);
+ if (rd_kafka_topic_conf_get(tconf, name, ret, &ret_sz) !=
+ RD_KAFKA_CONF_OK)
+ TEST_FAIL("Failed to get topic config \"%s\": %s\n", name,
+ "unknown property");
+ return ret;
+}
+
+
+/**
+ * @brief Check if property \name matches \p val in \p conf.
+ * If \p conf is NULL the test config will be used. */
+int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val) {
+ char *real;
+ int free_conf = 0;
+
+ if (!conf) {
+ test_conf_init(&conf, NULL, 0);
+ free_conf = 1;
+ }
+
+ real = test_conf_get(conf, name);
+
+ if (free_conf)
+ rd_kafka_conf_destroy(conf);
+
+ return !strcmp(real, val);
+}
+
+
+void test_topic_conf_set(rd_kafka_topic_conf_t *tconf,
+ const char *name,
+ const char *val) {
+ char errstr[512];
+ if (rd_kafka_topic_conf_set(tconf, name, val, errstr, sizeof(errstr)) !=
+ RD_KAFKA_CONF_OK)
+ TEST_FAIL("Failed to set topic config \"%s\"=\"%s\": %s\n",
+ name, val, errstr);
+}
+
+/**
+ * @brief First attempt to set topic level property, then global.
+ */
+void test_any_conf_set(rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *tconf,
+ const char *name,
+ const char *val) {
+ rd_kafka_conf_res_t res = RD_KAFKA_CONF_UNKNOWN;
+ char errstr[512] = {"Missing conf_t"};
+
+ if (tconf)
+ res = rd_kafka_topic_conf_set(tconf, name, val, errstr,
+ sizeof(errstr));
+ if (res == RD_KAFKA_CONF_UNKNOWN && conf)
+ res =
+ rd_kafka_conf_set(conf, name, val, errstr, sizeof(errstr));
+
+ if (res != RD_KAFKA_CONF_OK)
+ TEST_FAIL("Failed to set any config \"%s\"=\"%s\": %s\n", name,
+ val, errstr);
+}
+
+
+/**
+ * @returns true if test clients need to be configured for authentication
+ * or other security measures (SSL), else false for unauthed plaintext.
+ */
+int test_needs_auth(void) {
+ rd_kafka_conf_t *conf;
+ const char *sec;
+
+ test_conf_init(&conf, NULL, 0);
+
+ sec = test_conf_get(conf, "security.protocol");
+
+ rd_kafka_conf_destroy(conf);
+
+ return strcmp(sec, "plaintext");
+}
+
+
+void test_print_partition_list(
+ const rd_kafka_topic_partition_list_t *partitions) {
+ int i;
+ for (i = 0; i < partitions->cnt; i++) {
+ TEST_SAY(" %s [%" PRId32 "] offset %" PRId64 " (epoch %" PRId32
+ ") %s%s\n",
+ partitions->elems[i].topic,
+ partitions->elems[i].partition,
+ partitions->elems[i].offset,
+ rd_kafka_topic_partition_get_leader_epoch(
+ &partitions->elems[i]),
+ partitions->elems[i].err ? ": " : "",
+ partitions->elems[i].err
+ ? rd_kafka_err2str(partitions->elems[i].err)
+ : "");
+ }
+}
+
+/**
+ * @brief Compare two lists, returning 0 if equal.
+ *
+ * @remark The lists may be sorted by this function.
+ */
+int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al,
+ rd_kafka_topic_partition_list_t *bl) {
+ int i;
+
+ if (al->cnt < bl->cnt)
+ return -1;
+ else if (al->cnt > bl->cnt)
+ return 1;
+ else if (al->cnt == 0)
+ return 0;
+
+ rd_kafka_topic_partition_list_sort(al, NULL, NULL);
+ rd_kafka_topic_partition_list_sort(bl, NULL, NULL);
+
+ for (i = 0; i < al->cnt; i++) {
+ const rd_kafka_topic_partition_t *a = &al->elems[i];
+ const rd_kafka_topic_partition_t *b = &bl->elems[i];
+ if (a->partition != b->partition || strcmp(a->topic, b->topic))
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Compare two lists and their offsets, returning 0 if equal.
+ *
+ * @remark The lists may be sorted by this function.
+ */
+int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al,
+ rd_kafka_topic_partition_list_t *bl) {
+ int i;
+
+ if (al->cnt < bl->cnt)
+ return -1;
+ else if (al->cnt > bl->cnt)
+ return 1;
+ else if (al->cnt == 0)
+ return 0;
+
+ rd_kafka_topic_partition_list_sort(al, NULL, NULL);
+ rd_kafka_topic_partition_list_sort(bl, NULL, NULL);
+
+ for (i = 0; i < al->cnt; i++) {
+ const rd_kafka_topic_partition_t *a = &al->elems[i];
+ const rd_kafka_topic_partition_t *b = &bl->elems[i];
+ if (a->partition != b->partition ||
+ strcmp(a->topic, b->topic) || a->offset != b->offset ||
+ rd_kafka_topic_partition_get_leader_epoch(a) !=
+ rd_kafka_topic_partition_get_leader_epoch(b))
+ return -1;
+ }
+
+ return 0;
+}
+
+/**
+ * @brief Execute script from the Kafka distribution bin/ path.
+ */
+void test_kafka_cmd(const char *fmt, ...) {
+#ifdef _WIN32
+ TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__);
+#else
+ char cmd[1024];
+ int r;
+ va_list ap;
+ test_timing_t t_cmd;
+ const char *kpath;
+
+ kpath = test_getenv("KAFKA_PATH", NULL);
+
+ if (!kpath)
+ TEST_FAIL("%s: KAFKA_PATH must be set", __FUNCTION__);
+
+ r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/", kpath);
+ TEST_ASSERT(r < (int)sizeof(cmd));
+
+ va_start(ap, fmt);
+ rd_vsnprintf(cmd + r, sizeof(cmd) - r, fmt, ap);
+ va_end(ap);
+
+ TEST_SAY("Executing: %s\n", cmd);
+ TIMING_START(&t_cmd, "exec");
+ r = system(cmd);
+ TIMING_STOP(&t_cmd);
+
+ if (r == -1)
+ TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno));
+ else if (WIFSIGNALED(r))
+ TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd,
+ WTERMSIG(r));
+ else if (WEXITSTATUS(r))
+ TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd,
+ WEXITSTATUS(r));
+#endif
+}
+
+/**
+ * @brief Execute kafka-topics.sh from the Kafka distribution.
+ */
+void test_kafka_topics(const char *fmt, ...) {
+#ifdef _WIN32
+ TEST_FAIL("%s not supported on Windows, yet", __FUNCTION__);
+#else
+ char cmd[1024];
+ int r, bytes_left;
+ va_list ap;
+ test_timing_t t_cmd;
+ const char *kpath, *bootstrap_env, *flag, *bootstrap_srvs;
+
+ if (test_broker_version >= TEST_BRKVER(3, 0, 0, 0)) {
+ bootstrap_env = "BROKERS";
+ flag = "--bootstrap-server";
+ } else {
+ bootstrap_env = "ZK_ADDRESS";
+ flag = "--zookeeper";
+ }
+
+ kpath = test_getenv("KAFKA_PATH", NULL);
+ bootstrap_srvs = test_getenv(bootstrap_env, NULL);
+
+ if (!kpath || !bootstrap_srvs)
+ TEST_FAIL("%s: KAFKA_PATH and %s must be set", __FUNCTION__,
+ bootstrap_env);
+
+ r = rd_snprintf(cmd, sizeof(cmd), "%s/bin/kafka-topics.sh %s %s ",
+ kpath, flag, bootstrap_srvs);
+ TEST_ASSERT(r > 0 && r < (int)sizeof(cmd));
+
+ bytes_left = sizeof(cmd) - r;
+
+ va_start(ap, fmt);
+ r = rd_vsnprintf(cmd + r, bytes_left, fmt, ap);
+ va_end(ap);
+ TEST_ASSERT(r > 0 && r < bytes_left);
+
+ TEST_SAY("Executing: %s\n", cmd);
+ TIMING_START(&t_cmd, "exec");
+ r = system(cmd);
+ TIMING_STOP(&t_cmd);
+
+ if (r == -1)
+ TEST_FAIL("system(\"%s\") failed: %s", cmd, strerror(errno));
+ else if (WIFSIGNALED(r))
+ TEST_FAIL("system(\"%s\") terminated by signal %d\n", cmd,
+ WTERMSIG(r));
+ else if (WEXITSTATUS(r))
+ TEST_FAIL("system(\"%s\") failed with exit status %d\n", cmd,
+ WEXITSTATUS(r));
+#endif
+}
+
+
+
+/**
+ * @brief Create topic using Topic Admin API
+ *
+ * @param configs is an optional key-value tuple array of
+ * topic configs (or NULL).
+ */
+void test_admin_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor,
+ const char **configs) {
+ rd_kafka_t *rk;
+ rd_kafka_NewTopic_t *newt[1];
+ const size_t newt_cnt = 1;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *rkqu;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_CreateTopics_result_t *res;
+ const rd_kafka_topic_result_t **terr;
+ int timeout_ms = tmout_multip(10000);
+ size_t res_cnt;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ test_timing_t t_create;
+
+ if (!(rk = use_rk))
+ rk = test_create_producer();
+
+ rkqu = rd_kafka_queue_new(rk);
+
+ newt[0] =
+ rd_kafka_NewTopic_new(topicname, partition_cnt, replication_factor,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(newt[0] != NULL, "%s", errstr);
+
+ if (configs) {
+ int i;
+
+ for (i = 0; configs[i] && configs[i + 1]; i += 2)
+ TEST_CALL_ERR__(rd_kafka_NewTopic_set_config(
+ newt[0], configs[i], configs[i + 1]));
+ }
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, timeout_ms, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+ TEST_SAY(
+ "Creating topic \"%s\" "
+ "(partitions=%d, replication_factor=%d, timeout=%d)\n",
+ topicname, partition_cnt, replication_factor, timeout_ms);
+
+ TIMING_START(&t_create, "CreateTopics");
+ rd_kafka_CreateTopics(rk, newt, newt_cnt, options, rkqu);
+
+ /* Wait for result */
+ rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000);
+ TEST_ASSERT(rkev, "Timed out waiting for CreateTopics result");
+
+ TIMING_STOP(&t_create);
+
+ TEST_ASSERT(!rd_kafka_event_error(rkev), "CreateTopics failed: %s",
+ rd_kafka_event_error_string(rkev));
+
+ res = rd_kafka_event_CreateTopics_result(rkev);
+ TEST_ASSERT(res, "Expected CreateTopics_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ terr = rd_kafka_CreateTopics_result_topics(res, &res_cnt);
+ TEST_ASSERT(terr, "CreateTopics_result_topics returned NULL");
+ TEST_ASSERT(res_cnt == newt_cnt,
+ "CreateTopics_result_topics returned %" PRIusz
+ " topics, "
+ "not the expected %" PRIusz,
+ res_cnt, newt_cnt);
+
+ TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]) ||
+ rd_kafka_topic_result_error(terr[0]) ==
+ RD_KAFKA_RESP_ERR_TOPIC_ALREADY_EXISTS,
+ "Topic %s result error: %s",
+ rd_kafka_topic_result_name(terr[0]),
+ rd_kafka_topic_result_error_string(terr[0]));
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_queue_destroy(rkqu);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_NewTopic_destroy(newt[0]);
+
+ if (!use_rk)
+ rd_kafka_destroy(rk);
+}
+
+
+
+/**
+ * @brief Create topic using kafka-topics.sh --create
+ */
+static void test_create_topic_sh(const char *topicname,
+ int partition_cnt,
+ int replication_factor) {
+ test_kafka_topics(
+ "--create --topic \"%s\" "
+ "--replication-factor %d --partitions %d",
+ topicname, replication_factor, partition_cnt);
+}
+
+
+/**
+ * @brief Create topic
+ */
+void test_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor) {
+ if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
+ test_create_topic_sh(topicname, partition_cnt,
+ replication_factor);
+ else
+ test_admin_create_topic(use_rk, topicname, partition_cnt,
+ replication_factor, NULL);
+}
+
+
+/**
+ * @brief Create topic using kafka-topics.sh --delete
+ */
+static void test_delete_topic_sh(const char *topicname) {
+ test_kafka_topics("--delete --topic \"%s\" ", topicname);
+}
+
+
+/**
+ * @brief Delete topic using Topic Admin API
+ */
+static void test_admin_delete_topic(rd_kafka_t *use_rk, const char *topicname) {
+ rd_kafka_t *rk;
+ rd_kafka_DeleteTopic_t *delt[1];
+ const size_t delt_cnt = 1;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *rkqu;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteTopics_result_t *res;
+ const rd_kafka_topic_result_t **terr;
+ int timeout_ms = tmout_multip(10000);
+ size_t res_cnt;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ test_timing_t t_create;
+
+ if (!(rk = use_rk))
+ rk = test_create_producer();
+
+ rkqu = rd_kafka_queue_new(rk);
+
+ delt[0] = rd_kafka_DeleteTopic_new(topicname);
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, timeout_ms, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+ TEST_SAY(
+ "Deleting topic \"%s\" "
+ "(timeout=%d)\n",
+ topicname, timeout_ms);
+
+ TIMING_START(&t_create, "DeleteTopics");
+ rd_kafka_DeleteTopics(rk, delt, delt_cnt, options, rkqu);
+
+ /* Wait for result */
+ rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000);
+ TEST_ASSERT(rkev, "Timed out waiting for DeleteTopics result");
+
+ TIMING_STOP(&t_create);
+
+ res = rd_kafka_event_DeleteTopics_result(rkev);
+ TEST_ASSERT(res, "Expected DeleteTopics_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ terr = rd_kafka_DeleteTopics_result_topics(res, &res_cnt);
+ TEST_ASSERT(terr, "DeleteTopics_result_topics returned NULL");
+ TEST_ASSERT(res_cnt == delt_cnt,
+ "DeleteTopics_result_topics returned %" PRIusz
+ " topics, "
+ "not the expected %" PRIusz,
+ res_cnt, delt_cnt);
+
+ TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]),
+ "Topic %s result error: %s",
+ rd_kafka_topic_result_name(terr[0]),
+ rd_kafka_topic_result_error_string(terr[0]));
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_queue_destroy(rkqu);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_DeleteTopic_destroy(delt[0]);
+
+ if (!use_rk)
+ rd_kafka_destroy(rk);
+}
+
+
+/**
+ * @brief Delete a topic
+ */
+void test_delete_topic(rd_kafka_t *use_rk, const char *topicname) {
+ if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
+ test_delete_topic_sh(topicname);
+ else
+ test_admin_delete_topic(use_rk, topicname);
+}
+
+
+/**
+ * @brief Create additional partitions for a topic using Admin API
+ */
+static void test_admin_create_partitions(rd_kafka_t *use_rk,
+ const char *topicname,
+ int new_partition_cnt) {
+ rd_kafka_t *rk;
+ rd_kafka_NewPartitions_t *newp[1];
+ const size_t newp_cnt = 1;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *rkqu;
+ rd_kafka_event_t *rkev;
+ const rd_kafka_CreatePartitions_result_t *res;
+ const rd_kafka_topic_result_t **terr;
+ int timeout_ms = tmout_multip(10000);
+ size_t res_cnt;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+ test_timing_t t_create;
+
+ if (!(rk = use_rk))
+ rk = test_create_producer();
+
+ rkqu = rd_kafka_queue_new(rk);
+
+ newp[0] = rd_kafka_NewPartitions_new(topicname, new_partition_cnt,
+ errstr, sizeof(errstr));
+ TEST_ASSERT(newp[0] != NULL, "%s", errstr);
+
+ options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, timeout_ms, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "%s", errstr);
+
+ TEST_SAY("Creating %d (total) partitions for topic \"%s\"\n",
+ new_partition_cnt, topicname);
+
+ TIMING_START(&t_create, "CreatePartitions");
+ rd_kafka_CreatePartitions(rk, newp, newp_cnt, options, rkqu);
+
+ /* Wait for result */
+ rkev = rd_kafka_queue_poll(rkqu, timeout_ms + 2000);
+ TEST_ASSERT(rkev, "Timed out waiting for CreatePartitions result");
+
+ TIMING_STOP(&t_create);
+
+ res = rd_kafka_event_CreatePartitions_result(rkev);
+ TEST_ASSERT(res, "Expected CreatePartitions_result, not %s",
+ rd_kafka_event_name(rkev));
+
+ terr = rd_kafka_CreatePartitions_result_topics(res, &res_cnt);
+ TEST_ASSERT(terr, "CreatePartitions_result_topics returned NULL");
+ TEST_ASSERT(res_cnt == newp_cnt,
+ "CreatePartitions_result_topics returned %" PRIusz
+ " topics, not the expected %" PRIusz,
+ res_cnt, newp_cnt);
+
+ TEST_ASSERT(!rd_kafka_topic_result_error(terr[0]),
+ "Topic %s result error: %s",
+ rd_kafka_topic_result_name(terr[0]),
+ rd_kafka_topic_result_error_string(terr[0]));
+
+ rd_kafka_event_destroy(rkev);
+
+ rd_kafka_queue_destroy(rkqu);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_NewPartitions_destroy(newp[0]);
+
+ if (!use_rk)
+ rd_kafka_destroy(rk);
+}
+
+
+/**
+ * @brief Create partitions for topic
+ */
+void test_create_partitions(rd_kafka_t *use_rk,
+ const char *topicname,
+ int new_partition_cnt) {
+ if (test_broker_version < TEST_BRKVER(0, 10, 2, 0))
+ test_kafka_topics("--alter --topic %s --partitions %d",
+ topicname, new_partition_cnt);
+ else
+ test_admin_create_partitions(use_rk, topicname,
+ new_partition_cnt);
+}
+
+
+int test_get_partition_count(rd_kafka_t *rk,
+ const char *topicname,
+ int timeout_ms) {
+ rd_kafka_t *use_rk;
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_t *rkt;
+ int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
+ int ret = -1;
+
+ if (!rk)
+ use_rk = test_create_producer();
+ else
+ use_rk = rk;
+
+ rkt = rd_kafka_topic_new(use_rk, topicname, NULL);
+
+ do {
+ const struct rd_kafka_metadata *metadata;
+
+ err = rd_kafka_metadata(use_rk, 0, rkt, &metadata,
+ tmout_multip(15000));
+ if (err)
+ TEST_WARN("metadata() for %s failed: %s\n",
+ rkt ? rd_kafka_topic_name(rkt)
+ : "(all-local)",
+ rd_kafka_err2str(err));
+ else {
+ if (metadata->topic_cnt == 1) {
+ if (metadata->topics[0].err == 0 ||
+ metadata->topics[0].partition_cnt > 0) {
+ int32_t cnt;
+ cnt = metadata->topics[0].partition_cnt;
+ rd_kafka_metadata_destroy(metadata);
+ ret = (int)cnt;
+ break;
+ }
+ TEST_SAY(
+ "metadata(%s) returned %s: retrying\n",
+ rd_kafka_topic_name(rkt),
+ rd_kafka_err2str(metadata->topics[0].err));
+ }
+ rd_kafka_metadata_destroy(metadata);
+ rd_sleep(1);
+ }
+ } while (test_clock() < abs_timeout);
+
+ rd_kafka_topic_destroy(rkt);
+
+ if (!rk)
+ rd_kafka_destroy(use_rk);
+
+ return ret;
+}
+
+/**
+ * @brief Let the broker auto-create the topic for us.
+ */
+rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ int timeout_ms) {
+ const struct rd_kafka_metadata *metadata;
+ rd_kafka_resp_err_t err;
+ test_timing_t t;
+ int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
+
+ do {
+ TIMING_START(&t, "auto_create_topic");
+ err = rd_kafka_metadata(rk, 0, rkt, &metadata,
+ tmout_multip(15000));
+ TIMING_STOP(&t);
+ if (err)
+ TEST_WARN("metadata() for %s failed: %s\n",
+ rkt ? rd_kafka_topic_name(rkt)
+ : "(all-local)",
+ rd_kafka_err2str(err));
+ else {
+ if (metadata->topic_cnt == 1) {
+ if (metadata->topics[0].err == 0 ||
+ metadata->topics[0].partition_cnt > 0) {
+ rd_kafka_metadata_destroy(metadata);
+ return 0;
+ }
+ TEST_SAY(
+ "metadata(%s) returned %s: retrying\n",
+ rd_kafka_topic_name(rkt),
+ rd_kafka_err2str(metadata->topics[0].err));
+ }
+ rd_kafka_metadata_destroy(metadata);
+ rd_sleep(1);
+ }
+ } while (test_clock() < abs_timeout);
+
+ return err;
+}
+
+rd_kafka_resp_err_t
+test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms) {
+ rd_kafka_topic_t *rkt = rd_kafka_topic_new(rk, name, NULL);
+ rd_kafka_resp_err_t err;
+ if (!rkt)
+ return rd_kafka_last_error();
+ err = test_auto_create_topic_rkt(rk, rkt, timeout_ms);
+ rd_kafka_topic_destroy(rkt);
+ return err;
+}
+
+
+/**
+ * @brief Check if topic auto creation works.
+ * @returns 1 if it does, else 0.
+ */
+int test_check_auto_create_topic(void) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf;
+ rd_kafka_resp_err_t err;
+ const char *topic = test_mk_topic_name("autocreatetest", 1);
+
+ test_conf_init(&conf, NULL, 0);
+ rk = test_create_handle(RD_KAFKA_PRODUCER, conf);
+ err = test_auto_create_topic(rk, topic, tmout_multip(5000));
+ if (err)
+ TEST_SAY("Auto topic creation of \"%s\" failed: %s\n", topic,
+ rd_kafka_err2str(err));
+ rd_kafka_destroy(rk);
+
+ return err ? 0 : 1;
+}
+
+
+/**
+ * @brief Builds and runs a Java application from the java/ directory.
+ *
+ * The application is started in the background, use
+ * test_waitpid() to await its demise.
+ *
+ * @param cls The app class to run using java/run-class.sh
+ *
+ * @returns -1 if the application could not be started, else the pid.
+ */
+int test_run_java(const char *cls, const char **argv) {
+#ifdef _WIN32
+ TEST_WARN("%s(%s) not supported Windows, yet", __FUNCTION__, cls);
+ return -1;
+#else
+ int r;
+ const char *kpath;
+ pid_t pid;
+ const char **full_argv, **p;
+ int cnt;
+ extern char **environ;
+
+ kpath = test_getenv("KAFKA_PATH", NULL);
+
+ if (!kpath) {
+ TEST_WARN("%s(%s): KAFKA_PATH must be set\n", __FUNCTION__,
+ cls);
+ return -1;
+ }
+
+ /* Build */
+ r = system("make -s java");
+
+ if (r == -1 || WIFSIGNALED(r) || WEXITSTATUS(r)) {
+ TEST_WARN("%s(%s): failed to build java class (code %d)\n",
+ __FUNCTION__, cls, r);
+ return -1;
+ }
+
+ /* For child process and run cls */
+ pid = fork();
+ if (pid == -1) {
+ TEST_WARN("%s(%s): failed to fork: %s\n", __FUNCTION__, cls,
+ strerror(errno));
+ return -1;
+ }
+
+ if (pid > 0)
+ return (int)pid; /* In parent process */
+
+ /* In child process */
+
+ /* Reconstruct argv to contain run-class.sh and the cls */
+ for (cnt = 0; argv[cnt]; cnt++)
+ ;
+
+ cnt += 3; /* run-class.sh, cls, .., NULL */
+ full_argv = malloc(sizeof(*full_argv) * cnt);
+ full_argv[0] = "java/run-class.sh";
+ full_argv[1] = (const char *)cls;
+
+ /* Copy arguments */
+ for (p = &full_argv[2]; *argv; p++, argv++)
+ *p = *argv;
+ *p = NULL;
+
+ /* Run */
+ r = execve(full_argv[0], (char *const *)full_argv, environ);
+
+ TEST_WARN("%s(%s): failed to execute run-class.sh: %s\n", __FUNCTION__,
+ cls, strerror(errno));
+ exit(2);
+
+ return -1; /* NOTREACHED */
+#endif
+}
+
+
+/**
+ * @brief Wait for child-process \p pid to exit.
+ *
+ * @returns -1 if the child process exited successfully, else -1.
+ */
+int test_waitpid(int pid) {
+#ifdef _WIN32
+ TEST_WARN("%s() not supported Windows, yet", __FUNCTION__);
+ return -1;
+#else
+ pid_t r;
+ int status = 0;
+
+ r = waitpid((pid_t)pid, &status, 0);
+
+ if (r == -1) {
+ TEST_WARN("waitpid(%d) failed: %s\n", pid, strerror(errno));
+ return -1;
+ }
+
+ if (WIFSIGNALED(status)) {
+ TEST_WARN("Process %d terminated by signal %d\n", pid,
+ WTERMSIG(status));
+ return -1;
+ } else if (WEXITSTATUS(status)) {
+ TEST_WARN("Process %d exited with status %d\n", pid,
+ WEXITSTATUS(status));
+ return -1;
+ }
+
+ return 0;
+#endif
+}
+
+
+/**
+ * @brief Check if \p feature is builtin to librdkafka.
+ * @returns returns 1 if feature is built in, else 0.
+ */
+int test_check_builtin(const char *feature) {
+ rd_kafka_conf_t *conf;
+ char errstr[128];
+ int r;
+
+ conf = rd_kafka_conf_new();
+ if (rd_kafka_conf_set(conf, "builtin.features", feature, errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK) {
+ TEST_SAY("Feature \"%s\" not built-in: %s\n", feature, errstr);
+ r = 0;
+ } else {
+ TEST_SAY("Feature \"%s\" is built-in\n", feature);
+ r = 1;
+ }
+
+ rd_kafka_conf_destroy(conf);
+ return r;
+}
+
+
+char *tsprintf(const char *fmt, ...) {
+ static RD_TLS char ret[8][512];
+ static RD_TLS int i;
+ va_list ap;
+
+
+ i = (i + 1) % 8;
+
+ va_start(ap, fmt);
+ rd_vsnprintf(ret[i], sizeof(ret[i]), fmt, ap);
+ va_end(ap);
+
+ return ret[i];
+}
+
+
+/**
+ * @brief Add a test report JSON object.
+ * These will be written as a JSON array to the test report file.
+ */
+void test_report_add(struct test *test, const char *fmt, ...) {
+ va_list ap;
+ char buf[512];
+
+ va_start(ap, fmt);
+ vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ if (test->report_cnt == test->report_size) {
+ if (test->report_size == 0)
+ test->report_size = 8;
+ else
+ test->report_size *= 2;
+
+ test->report_arr =
+ realloc(test->report_arr,
+ sizeof(*test->report_arr) * test->report_size);
+ }
+
+ test->report_arr[test->report_cnt++] = rd_strdup(buf);
+
+ TEST_SAYL(1, "Report #%d: %s\n", test->report_cnt - 1, buf);
+}
+
+/**
+ * Returns 1 if KAFKA_PATH and BROKERS (or ZK_ADDRESS) is set to se we can use
+ * the kafka-topics.sh script to manually create topics.
+ *
+ * If \p skip is set TEST_SKIP() will be called with a helpful message.
+ */
+int test_can_create_topics(int skip) {
+#ifndef _WIN32
+ const char *bootstrap;
+#endif
+
+ /* Has AdminAPI */
+ if (test_broker_version >= TEST_BRKVER(0, 10, 2, 0))
+ return 1;
+
+#ifdef _WIN32
+ if (skip)
+ TEST_SKIP("Cannot create topics on Win32\n");
+ return 0;
+#else
+
+ bootstrap = test_broker_version >= TEST_BRKVER(3, 0, 0, 0)
+ ? "BROKERS"
+ : "ZK_ADDRESS";
+
+ if (!test_getenv("KAFKA_PATH", NULL) || !test_getenv(bootstrap, NULL)) {
+ if (skip)
+ TEST_SKIP(
+ "Cannot create topics "
+ "(set KAFKA_PATH and %s)\n",
+ bootstrap);
+ return 0;
+ }
+
+
+ return 1;
+#endif
+}
+
+
+/**
+ * Wait for \p event_type, discarding all other events prior to it.
+ */
+rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq,
+ rd_kafka_event_type_t event_type,
+ int timeout_ms) {
+ test_timing_t t_w;
+ int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
+
+ TIMING_START(&t_w, "wait_event");
+ while (test_clock() < abs_timeout) {
+ rd_kafka_event_t *rkev;
+
+ rkev = rd_kafka_queue_poll(
+ eventq, (int)(abs_timeout - test_clock()) / 1000);
+
+ if (rd_kafka_event_type(rkev) == event_type) {
+ TIMING_STOP(&t_w);
+ return rkev;
+ }
+
+ if (!rkev)
+ continue;
+
+ if (rd_kafka_event_error(rkev))
+ TEST_SAY("discarding ignored event %s: %s\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+ else
+ TEST_SAY("discarding ignored event %s\n",
+ rd_kafka_event_name(rkev));
+ rd_kafka_event_destroy(rkev);
+ }
+ TIMING_STOP(&t_w);
+
+ return NULL;
+}
+
+
+void test_SAY(const char *file, int line, int level, const char *str) {
+ TEST_SAYL(level, "%s", str);
+}
+
+void test_SKIP(const char *file, int line, const char *str) {
+ TEST_WARN("SKIPPING TEST: %s", str);
+ TEST_LOCK();
+ test_curr->state = TEST_SKIPPED;
+ if (!*test_curr->failstr) {
+ rd_snprintf(test_curr->failstr, sizeof(test_curr->failstr),
+ "%s", str);
+ rtrim(test_curr->failstr);
+ }
+ TEST_UNLOCK();
+}
+
+const char *test_curr_name(void) {
+ return test_curr->name;
+}
+
+
+/**
+ * @brief Dump/print message haders
+ */
+void test_headers_dump(const char *what,
+ int lvl,
+ const rd_kafka_headers_t *hdrs) {
+ size_t idx = 0;
+ const char *name, *value;
+ size_t size;
+
+ while (!rd_kafka_header_get_all(hdrs, idx++, &name,
+ (const void **)&value, &size))
+ TEST_SAYL(lvl, "%s: Header #%" PRIusz ": %s='%s'\n", what,
+ idx - 1, name, value ? value : "(NULL)");
+}
+
+
+/**
+ * @brief Retrieve and return the list of broker ids in the cluster.
+ *
+ * @param rk Optional instance to use.
+ * @param cntp Will be updated to the number of brokers returned.
+ *
+ * @returns a malloc:ed list of int32_t broker ids.
+ */
+int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp) {
+ int32_t *ids;
+ rd_kafka_t *rk;
+ const rd_kafka_metadata_t *md;
+ rd_kafka_resp_err_t err;
+ size_t i;
+
+ if (!(rk = use_rk))
+ rk = test_create_producer();
+
+ err = rd_kafka_metadata(rk, 0, NULL, &md, tmout_multip(5000));
+ TEST_ASSERT(!err, "%s", rd_kafka_err2str(err));
+ TEST_ASSERT(md->broker_cnt > 0, "%d brokers, expected > 0",
+ md->broker_cnt);
+
+ ids = malloc(sizeof(*ids) * md->broker_cnt);
+
+ for (i = 0; i < (size_t)md->broker_cnt; i++)
+ ids[i] = md->brokers[i].id;
+
+ *cntp = md->broker_cnt;
+
+ rd_kafka_metadata_destroy(md);
+
+ if (!use_rk)
+ rd_kafka_destroy(rk);
+
+ return ids;
+}
+
+
+
+/**
+ * @brief Verify that all topics in \p topics are reported in metadata,
+ * and that none of the topics in \p not_topics are reported.
+ *
+ * @returns the number of failures (but does not FAIL).
+ */
+static int verify_topics_in_metadata(rd_kafka_t *rk,
+ rd_kafka_metadata_topic_t *topics,
+ size_t topic_cnt,
+ rd_kafka_metadata_topic_t *not_topics,
+ size_t not_topic_cnt) {
+ const rd_kafka_metadata_t *md;
+ rd_kafka_resp_err_t err;
+ int ti;
+ size_t i;
+ int fails = 0;
+
+ /* Mark topics with dummy error which is overwritten
+ * when topic is found in metadata, allowing us to check
+ * for missed topics. */
+ for (i = 0; i < topic_cnt; i++)
+ topics[i].err = 12345;
+
+ err = rd_kafka_metadata(rk, 1 /*all_topics*/, NULL, &md,
+ tmout_multip(5000));
+ TEST_ASSERT(!err, "metadata failed: %s", rd_kafka_err2str(err));
+
+ for (ti = 0; ti < md->topic_cnt; ti++) {
+ const rd_kafka_metadata_topic_t *mdt = &md->topics[ti];
+
+ for (i = 0; i < topic_cnt; i++) {
+ int pi;
+ rd_kafka_metadata_topic_t *exp_mdt;
+
+ if (strcmp(topics[i].topic, mdt->topic))
+ continue;
+
+ exp_mdt = &topics[i];
+
+ exp_mdt->err = mdt->err; /* indicate found */
+ if (mdt->err) {
+ TEST_SAY(
+ "metadata: "
+ "Topic %s has error %s\n",
+ mdt->topic, rd_kafka_err2str(mdt->err));
+ fails++;
+ }
+
+ if (exp_mdt->partition_cnt > 0 &&
+ mdt->partition_cnt != exp_mdt->partition_cnt) {
+ TEST_SAY(
+ "metadata: "
+ "Topic %s, expected %d partitions"
+ ", not %d\n",
+ mdt->topic, exp_mdt->partition_cnt,
+ mdt->partition_cnt);
+ fails++;
+ continue;
+ }
+
+ /* Verify per-partition values */
+ for (pi = 0;
+ exp_mdt->partitions && pi < exp_mdt->partition_cnt;
+ pi++) {
+ const rd_kafka_metadata_partition_t *mdp =
+ &mdt->partitions[pi];
+ const rd_kafka_metadata_partition_t *exp_mdp =
+ &exp_mdt->partitions[pi];
+
+ if (mdp->id != exp_mdp->id) {
+ TEST_SAY(
+ "metadata: "
+ "Topic %s, "
+ "partition %d, "
+ "partition list out of order,"
+ " expected %d, not %d\n",
+ mdt->topic, pi, exp_mdp->id,
+ mdp->id);
+ fails++;
+ continue;
+ }
+
+ if (exp_mdp->replicas) {
+ if (mdp->replica_cnt !=
+ exp_mdp->replica_cnt) {
+ TEST_SAY(
+ "metadata: "
+ "Topic %s, "
+ "partition %d, "
+ "expected %d replicas,"
+ " not %d\n",
+ mdt->topic, pi,
+ exp_mdp->replica_cnt,
+ mdp->replica_cnt);
+ fails++;
+ } else if (
+ memcmp(
+ mdp->replicas,
+ exp_mdp->replicas,
+ mdp->replica_cnt *
+ sizeof(*mdp->replicas))) {
+ int ri;
+
+ TEST_SAY(
+ "metadata: "
+ "Topic %s, "
+ "partition %d, "
+ "replica mismatch:\n",
+ mdt->topic, pi);
+
+ for (ri = 0;
+ ri < mdp->replica_cnt;
+ ri++) {
+ TEST_SAY(
+ " #%d: "
+ "expected "
+ "replica %d, "
+ "not %d\n",
+ ri,
+ exp_mdp
+ ->replicas[ri],
+ mdp->replicas[ri]);
+ }
+
+ fails++;
+ }
+ }
+ }
+ }
+
+ for (i = 0; i < not_topic_cnt; i++) {
+ if (strcmp(not_topics[i].topic, mdt->topic))
+ continue;
+
+ TEST_SAY(
+ "metadata: "
+ "Topic %s found in metadata, unexpected\n",
+ mdt->topic);
+ fails++;
+ }
+ }
+
+ for (i = 0; i < topic_cnt; i++) {
+ if ((int)topics[i].err == 12345) {
+ TEST_SAY(
+ "metadata: "
+ "Topic %s not seen in metadata\n",
+ topics[i].topic);
+ fails++;
+ }
+ }
+
+ if (fails > 0)
+ TEST_SAY("Metadata verification for %" PRIusz
+ " topics failed "
+ "with %d errors (see above)\n",
+ topic_cnt, fails);
+ else
+ TEST_SAY(
+ "Metadata verification succeeded: "
+ "%" PRIusz
+ " desired topics seen, "
+ "%" PRIusz " undesired topics not seen\n",
+ topic_cnt, not_topic_cnt);
+
+ rd_kafka_metadata_destroy(md);
+
+ return fails;
+}
+
+
+
+/**
+ * @brief Wait for metadata to reflect expected and not expected topics
+ */
+void test_wait_metadata_update(rd_kafka_t *rk,
+ rd_kafka_metadata_topic_t *topics,
+ size_t topic_cnt,
+ rd_kafka_metadata_topic_t *not_topics,
+ size_t not_topic_cnt,
+ int tmout) {
+ int64_t abs_timeout;
+ test_timing_t t_md;
+ rd_kafka_t *our_rk = NULL;
+
+ if (!rk)
+ rk = our_rk = test_create_handle(RD_KAFKA_PRODUCER, NULL);
+
+ abs_timeout = test_clock() + ((int64_t)tmout * 1000);
+
+ TEST_SAY("Waiting for up to %dms for metadata update\n", tmout);
+
+ TIMING_START(&t_md, "METADATA.WAIT");
+ do {
+ int md_fails;
+
+ md_fails = verify_topics_in_metadata(rk, topics, topic_cnt,
+ not_topics, not_topic_cnt);
+
+ if (!md_fails) {
+ TEST_SAY(
+ "All expected topics (not?) "
+ "seen in metadata\n");
+ abs_timeout = 0;
+ break;
+ }
+
+ rd_sleep(1);
+ } while (test_clock() < abs_timeout);
+ TIMING_STOP(&t_md);
+
+ if (our_rk)
+ rd_kafka_destroy(our_rk);
+
+ if (abs_timeout)
+ TEST_FAIL("Expected topics not seen in given time.");
+}
+
+/**
+ * @brief Wait for topic to be available in metadata
+ */
+void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout) {
+ rd_kafka_metadata_topic_t topics = {.topic = (char *)topic};
+
+ test_wait_metadata_update(rk, &topics, 1, NULL, 0, tmout);
+
+ /* Wait an additional second for the topic to propagate in
+ * the cluster. This is not perfect but a cheap workaround for
+ * the asynchronous nature of topic creations in Kafka. */
+ rd_sleep(1);
+}
+
+
+
+/**
+ * @brief Wait for up to \p tmout for any type of admin result.
+ * @returns the event
+ */
+rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
+ rd_kafka_event_type_t evtype,
+ int tmout) {
+ rd_kafka_event_t *rkev;
+
+ while (1) {
+ rkev = rd_kafka_queue_poll(q, tmout);
+ if (!rkev)
+ TEST_FAIL("Timed out waiting for admin result (%d)\n",
+ evtype);
+
+ if (rd_kafka_event_type(rkev) == evtype)
+ return rkev;
+
+
+ if (rd_kafka_event_type(rkev) == RD_KAFKA_EVENT_ERROR) {
+ TEST_WARN(
+ "Received error event while waiting for %d: "
+ "%s: ignoring",
+ evtype, rd_kafka_event_error_string(rkev));
+ continue;
+ }
+
+
+ TEST_ASSERT(rd_kafka_event_type(rkev) == evtype,
+ "Expected event type %d, got %d (%s)", evtype,
+ rd_kafka_event_type(rkev),
+ rd_kafka_event_name(rkev));
+ }
+
+ return NULL;
+}
+
+/**
+ * @brief Wait for up to \p tmout for an admin API result and return the
+ * distilled error code.
+ *
+ * Supported APIs:
+ * - AlterConfigs
+ * - CreatePartitions
+ * - CreateTopics
+ * - DeleteGroups
+ * - DeleteRecords
+ * - DeleteTopics
+ * - DeleteConsumerGroupOffsets
+ * - DescribeConfigs
+ * - CreateAcls
+ */
+rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
+ rd_kafka_event_type_t evtype,
+ rd_kafka_event_t **retevent,
+ int tmout) {
+ rd_kafka_event_t *rkev;
+ size_t i;
+ const rd_kafka_topic_result_t **terr = NULL;
+ size_t terr_cnt = 0;
+ const rd_kafka_ConfigResource_t **cres = NULL;
+ size_t cres_cnt = 0;
+ const rd_kafka_acl_result_t **aclres = NULL;
+ size_t aclres_cnt = 0;
+ int errcnt = 0;
+ rd_kafka_resp_err_t err;
+ const rd_kafka_group_result_t **gres = NULL;
+ size_t gres_cnt = 0;
+ const rd_kafka_ConsumerGroupDescription_t **gdescs = NULL;
+ size_t gdescs_cnt = 0;
+ const rd_kafka_error_t **glists_errors = NULL;
+ size_t glists_error_cnt = 0;
+ const rd_kafka_topic_partition_list_t *offsets = NULL;
+
+ rkev = test_wait_admin_result(q, evtype, tmout);
+
+ if ((err = rd_kafka_event_error(rkev))) {
+ TEST_WARN("%s failed: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+ rd_kafka_event_destroy(rkev);
+ return err;
+ }
+
+ if (evtype == RD_KAFKA_EVENT_CREATETOPICS_RESULT) {
+ const rd_kafka_CreateTopics_result_t *res;
+ if (!(res = rd_kafka_event_CreateTopics_result(rkev)))
+ TEST_FAIL("Expected a CreateTopics result, not %s",
+ rd_kafka_event_name(rkev));
+
+ terr = rd_kafka_CreateTopics_result_topics(res, &terr_cnt);
+
+ } else if (evtype == RD_KAFKA_EVENT_DELETETOPICS_RESULT) {
+ const rd_kafka_DeleteTopics_result_t *res;
+ if (!(res = rd_kafka_event_DeleteTopics_result(rkev)))
+ TEST_FAIL("Expected a DeleteTopics result, not %s",
+ rd_kafka_event_name(rkev));
+
+ terr = rd_kafka_DeleteTopics_result_topics(res, &terr_cnt);
+
+ } else if (evtype == RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT) {
+ const rd_kafka_CreatePartitions_result_t *res;
+ if (!(res = rd_kafka_event_CreatePartitions_result(rkev)))
+ TEST_FAIL("Expected a CreatePartitions result, not %s",
+ rd_kafka_event_name(rkev));
+
+ terr = rd_kafka_CreatePartitions_result_topics(res, &terr_cnt);
+
+ } else if (evtype == RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT) {
+ const rd_kafka_DescribeConfigs_result_t *res;
+
+ if (!(res = rd_kafka_event_DescribeConfigs_result(rkev)))
+ TEST_FAIL("Expected a DescribeConfigs result, not %s",
+ rd_kafka_event_name(rkev));
+
+ cres =
+ rd_kafka_DescribeConfigs_result_resources(res, &cres_cnt);
+
+ } else if (evtype == RD_KAFKA_EVENT_ALTERCONFIGS_RESULT) {
+ const rd_kafka_AlterConfigs_result_t *res;
+
+ if (!(res = rd_kafka_event_AlterConfigs_result(rkev)))
+ TEST_FAIL("Expected a AlterConfigs result, not %s",
+ rd_kafka_event_name(rkev));
+
+ cres = rd_kafka_AlterConfigs_result_resources(res, &cres_cnt);
+
+ } else if (evtype == RD_KAFKA_EVENT_CREATEACLS_RESULT) {
+ const rd_kafka_CreateAcls_result_t *res;
+
+ if (!(res = rd_kafka_event_CreateAcls_result(rkev)))
+ TEST_FAIL("Expected a CreateAcls result, not %s",
+ rd_kafka_event_name(rkev));
+
+ aclres = rd_kafka_CreateAcls_result_acls(res, &aclres_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_LISTCONSUMERGROUPS_RESULT) {
+ const rd_kafka_ListConsumerGroups_result_t *res;
+ if (!(res = rd_kafka_event_ListConsumerGroups_result(rkev)))
+ TEST_FAIL(
+ "Expected a ListConsumerGroups result, not %s",
+ rd_kafka_event_name(rkev));
+
+ glists_errors = rd_kafka_ListConsumerGroups_result_errors(
+ res, &glists_error_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_DESCRIBECONSUMERGROUPS_RESULT) {
+ const rd_kafka_DescribeConsumerGroups_result_t *res;
+ if (!(res = rd_kafka_event_DescribeConsumerGroups_result(rkev)))
+ TEST_FAIL(
+ "Expected a DescribeConsumerGroups result, not %s",
+ rd_kafka_event_name(rkev));
+
+ gdescs = rd_kafka_DescribeConsumerGroups_result_groups(
+ res, &gdescs_cnt);
+ } else if (evtype == RD_KAFKA_EVENT_DELETEGROUPS_RESULT) {
+ const rd_kafka_DeleteGroups_result_t *res;
+ if (!(res = rd_kafka_event_DeleteGroups_result(rkev)))
+ TEST_FAIL("Expected a DeleteGroups result, not %s",
+ rd_kafka_event_name(rkev));
+
+ gres = rd_kafka_DeleteGroups_result_groups(res, &gres_cnt);
+
+ } else if (evtype == RD_KAFKA_EVENT_DELETERECORDS_RESULT) {
+ const rd_kafka_DeleteRecords_result_t *res;
+ if (!(res = rd_kafka_event_DeleteRecords_result(rkev)))
+ TEST_FAIL("Expected a DeleteRecords result, not %s",
+ rd_kafka_event_name(rkev));
+
+ offsets = rd_kafka_DeleteRecords_result_offsets(res);
+
+ } else if (evtype == RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT) {
+ const rd_kafka_DeleteConsumerGroupOffsets_result_t *res;
+ if (!(res = rd_kafka_event_DeleteConsumerGroupOffsets_result(
+ rkev)))
+ TEST_FAIL(
+ "Expected a DeleteConsumerGroupOffsets "
+ "result, not %s",
+ rd_kafka_event_name(rkev));
+
+ gres = rd_kafka_DeleteConsumerGroupOffsets_result_groups(
+ rkev, &gres_cnt);
+
+ } else {
+ TEST_FAIL("Bad evtype: %d", evtype);
+ RD_NOTREACHED();
+ }
+
+ /* Check topic errors */
+ for (i = 0; i < terr_cnt; i++) {
+ if (rd_kafka_topic_result_error(terr[i])) {
+ TEST_WARN("..Topics result: %s: error: %s\n",
+ rd_kafka_topic_result_name(terr[i]),
+ rd_kafka_topic_result_error_string(terr[i]));
+ if (!(errcnt++))
+ err = rd_kafka_topic_result_error(terr[i]);
+ }
+ }
+
+ /* Check resource errors */
+ for (i = 0; i < cres_cnt; i++) {
+ if (rd_kafka_ConfigResource_error(cres[i])) {
+ TEST_WARN(
+ "ConfigResource result: %d,%s: error: %s\n",
+ rd_kafka_ConfigResource_type(cres[i]),
+ rd_kafka_ConfigResource_name(cres[i]),
+ rd_kafka_ConfigResource_error_string(cres[i]));
+ if (!(errcnt++))
+ err = rd_kafka_ConfigResource_error(cres[i]);
+ }
+ }
+
+ /* Check ACL errors */
+ for (i = 0; i < aclres_cnt; i++) {
+ const rd_kafka_error_t *error =
+ rd_kafka_acl_result_error(aclres[i]);
+ if (error) {
+ TEST_WARN("AclResult error: %s: %s\n",
+ rd_kafka_error_name(error),
+ rd_kafka_error_string(error));
+ if (!(errcnt++))
+ err = rd_kafka_error_code(error);
+ }
+ }
+
+ /* Check list groups errors */
+ for (i = 0; i < glists_error_cnt; i++) {
+ const rd_kafka_error_t *error = glists_errors[i];
+ TEST_WARN("%s error: %s\n", rd_kafka_event_name(rkev),
+ rd_kafka_error_string(error));
+ if (!(errcnt++))
+ err = rd_kafka_error_code(error);
+ }
+
+ /* Check describe groups errors */
+ for (i = 0; i < gdescs_cnt; i++) {
+ const rd_kafka_error_t *error;
+ if ((error =
+ rd_kafka_ConsumerGroupDescription_error(gdescs[i]))) {
+ TEST_WARN("%s result: %s: error: %s\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_ConsumerGroupDescription_group_id(
+ gdescs[i]),
+ rd_kafka_error_string(error));
+ if (!(errcnt++))
+ err = rd_kafka_error_code(error);
+ }
+ }
+
+ /* Check group errors */
+ for (i = 0; i < gres_cnt; i++) {
+ const rd_kafka_topic_partition_list_t *parts;
+
+ if (rd_kafka_group_result_error(gres[i])) {
+
+ TEST_WARN("%s result: %s: error: %s\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_group_result_name(gres[i]),
+ rd_kafka_error_string(
+ rd_kafka_group_result_error(gres[i])));
+ if (!(errcnt++))
+ err = rd_kafka_error_code(
+ rd_kafka_group_result_error(gres[i]));
+ }
+
+ parts = rd_kafka_group_result_partitions(gres[i]);
+ if (parts) {
+ int j;
+ for (j = 0; j < parts->cnt; i++) {
+ if (!parts->elems[j].err)
+ continue;
+
+ TEST_WARN(
+ "%s result: %s: "
+ "%s [%" PRId32 "] error: %s\n",
+ rd_kafka_event_name(rkev),
+ rd_kafka_group_result_name(gres[i]),
+ parts->elems[j].topic,
+ parts->elems[j].partition,
+ rd_kafka_err2str(parts->elems[j].err));
+ errcnt++;
+ }
+ }
+ }
+
+ /* Check offset errors */
+ for (i = 0; (offsets && i < (size_t)offsets->cnt); i++) {
+ if (offsets->elems[i].err) {
+ TEST_WARN("DeleteRecords result: %s [%d]: error: %s\n",
+ offsets->elems[i].topic,
+ offsets->elems[i].partition,
+ rd_kafka_err2str(offsets->elems[i].err));
+ if (!(errcnt++))
+ err = offsets->elems[i].err;
+ }
+ }
+
+ if (!err && retevent)
+ *retevent = rkev;
+ else
+ rd_kafka_event_destroy(rkev);
+
+ return err;
+}
+
+
+
+/**
+ * @brief Topic Admin API helpers
+ *
+ * @param useq Makes the call async and posts the response in this queue.
+ * If NULL this call will be synchronous and return the error
+ * result.
+ *
+ * @remark Fails the current test on failure.
+ */
+
+rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ char **topics,
+ size_t topic_cnt,
+ int num_partitions,
+ void *opaque) {
+ rd_kafka_NewTopic_t **new_topics;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *q;
+ size_t i;
+ const int tmout = 30 * 1000;
+ rd_kafka_resp_err_t err;
+
+ new_topics = malloc(sizeof(*new_topics) * topic_cnt);
+
+ for (i = 0; i < topic_cnt; i++) {
+ char errstr[512];
+ new_topics[i] = rd_kafka_NewTopic_new(
+ topics[i], num_partitions, 1, errstr, sizeof(errstr));
+ TEST_ASSERT(new_topics[i],
+ "Failed to NewTopic(\"%s\", %d) #%" PRIusz ": %s",
+ topics[i], num_partitions, i, errstr);
+ }
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATETOPICS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+
+ if (!useq) {
+ char errstr[512];
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, tmout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, tmout - 5000, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
+
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ TEST_SAY("Creating %" PRIusz " topics\n", topic_cnt);
+
+ rd_kafka_CreateTopics(rk, new_topics, topic_cnt, options, q);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_NewTopic_destroy_array(new_topics, topic_cnt);
+ free(new_topics);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_CREATETOPICS_RESULT, NULL, tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to create %d topic(s): %s", (int)topic_cnt,
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+
+rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ const char *topic,
+ size_t total_part_cnt,
+ void *opaque) {
+ rd_kafka_NewPartitions_t *newp[1];
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *q;
+ const int tmout = 30 * 1000;
+ rd_kafka_resp_err_t err;
+ char errstr[512];
+
+ newp[0] = rd_kafka_NewPartitions_new(topic, total_part_cnt, errstr,
+ sizeof(errstr));
+ TEST_ASSERT(newp[0], "Failed to NewPartitions(\"%s\", %" PRIusz "): %s",
+ topic, total_part_cnt, errstr);
+
+ options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEPARTITIONS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+
+ if (!useq) {
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, tmout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, tmout - 5000, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
+
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ TEST_SAY("Creating (up to) %" PRIusz " partitions for topic \"%s\"\n",
+ total_part_cnt, topic);
+
+ rd_kafka_CreatePartitions(rk, newp, 1, options, q);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_NewPartitions_destroy(newp[0]);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_CREATEPARTITIONS_RESULT, NULL, tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to create partitions: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+
+rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ char **topics,
+ size_t topic_cnt,
+ void *opaque) {
+ rd_kafka_queue_t *q;
+ rd_kafka_DeleteTopic_t **del_topics;
+ rd_kafka_AdminOptions_t *options;
+ size_t i;
+ rd_kafka_resp_err_t err;
+ const int tmout = 30 * 1000;
+
+ del_topics = malloc(sizeof(*del_topics) * topic_cnt);
+
+ for (i = 0; i < topic_cnt; i++) {
+ del_topics[i] = rd_kafka_DeleteTopic_new(topics[i]);
+ TEST_ASSERT(del_topics[i]);
+ }
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+
+ if (!useq) {
+ char errstr[512];
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, tmout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, tmout - 5000, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
+
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ TEST_SAY("Deleting %" PRIusz " topics\n", topic_cnt);
+
+ rd_kafka_DeleteTopics(rk, del_topics, topic_cnt, options, useq);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_DeleteTopic_destroy_array(del_topics, topic_cnt);
+
+ free(del_topics);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_DELETETOPICS_RESULT, NULL, tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to delete topics: %s", rd_kafka_err2str(err));
+
+ return err;
+}
+
+rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ char **groups,
+ size_t group_cnt,
+ void *opaque) {
+ rd_kafka_queue_t *q;
+ rd_kafka_DeleteGroup_t **del_groups;
+ rd_kafka_AdminOptions_t *options;
+ size_t i;
+ rd_kafka_resp_err_t err;
+ const int tmout = 30 * 1000;
+
+ del_groups = malloc(sizeof(*del_groups) * group_cnt);
+
+ for (i = 0; i < group_cnt; i++) {
+ del_groups[i] = rd_kafka_DeleteGroup_new(groups[i]);
+ TEST_ASSERT(del_groups[i]);
+ }
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETEGROUPS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+
+ if (!useq) {
+ char errstr[512];
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, tmout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
+
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ TEST_SAY("Deleting %" PRIusz " groups\n", group_cnt);
+
+ rd_kafka_DeleteGroups(rk, del_groups, group_cnt, options, q);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ rd_kafka_DeleteGroup_destroy_array(del_groups, group_cnt);
+ free(del_groups);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_DELETEGROUPS_RESULT, NULL, tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to delete groups: %s", rd_kafka_err2str(err));
+
+ return err;
+}
+
+rd_kafka_resp_err_t
+test_DeleteRecords_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ const rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_resp_err_t err;
+ rd_kafka_DeleteRecords_t *del_records =
+ rd_kafka_DeleteRecords_new(offsets);
+ const int tmout = 30 * 1000;
+
+ options =
+ rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETERECORDS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+
+ if (!useq) {
+ char errstr[512];
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, tmout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, tmout - 5000, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
+
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ TEST_SAY("Deleting offsets from %d partitions\n", offsets->cnt);
+
+ rd_kafka_DeleteRecords(rk, &del_records, 1, options, q);
+
+ rd_kafka_DeleteRecords_destroy(del_records);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_DELETERECORDS_RESULT, NULL, tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to delete records: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple(
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ const char *group_id,
+ const rd_kafka_topic_partition_list_t *offsets,
+ void *opaque) {
+ rd_kafka_queue_t *q;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_resp_err_t err;
+ const int tmout = 30 * 1000;
+ rd_kafka_DeleteConsumerGroupOffsets_t *cgoffsets;
+
+ options = rd_kafka_AdminOptions_new(
+ rk, RD_KAFKA_ADMIN_OP_DELETECONSUMERGROUPOFFSETS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+
+ if (!useq) {
+ char errstr[512];
+
+ err = rd_kafka_AdminOptions_set_request_timeout(
+ options, tmout, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_request_timeout: %s", errstr);
+ err = rd_kafka_AdminOptions_set_operation_timeout(
+ options, tmout - 5000, errstr, sizeof(errstr));
+ TEST_ASSERT(!err, "set_operation_timeout: %s", errstr);
+
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ if (offsets) {
+ TEST_SAY(
+ "Deleting committed offsets for group %s and "
+ "%d partitions\n",
+ group_id, offsets->cnt);
+
+ cgoffsets =
+ rd_kafka_DeleteConsumerGroupOffsets_new(group_id, offsets);
+ } else {
+ TEST_SAY("Provoking invalid DeleteConsumerGroupOffsets call\n");
+ cgoffsets = NULL;
+ }
+
+ rd_kafka_DeleteConsumerGroupOffsets(rk, &cgoffsets, cgoffsets ? 1 : 0,
+ options, useq);
+
+ if (cgoffsets)
+ rd_kafka_DeleteConsumerGroupOffsets_destroy(cgoffsets);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_DELETECONSUMERGROUPOFFSETS_RESULT, NULL,
+ tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to delete committed offsets: %s",
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+/**
+ * @brief Delta Alter configuration for the given resource,
+ * overwriting/setting the configs provided in \p configs.
+ * Existing configuration remains intact.
+ *
+ * @param configs 'const char *name, const char *value' tuples
+ * @param config_cnt is the number of tuples in \p configs
+ */
+rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk,
+ rd_kafka_ResourceType_t restype,
+ const char *resname,
+ const char **configs,
+ size_t config_cnt) {
+ rd_kafka_queue_t *q;
+ rd_kafka_ConfigResource_t *confres;
+ rd_kafka_event_t *rkev;
+ size_t i;
+ rd_kafka_resp_err_t err;
+ const rd_kafka_ConfigResource_t **results;
+ size_t result_cnt;
+ const rd_kafka_ConfigEntry_t **configents;
+ size_t configent_cnt;
+
+
+ q = rd_kafka_queue_new(rk);
+
+ TEST_SAY("Getting configuration for %d %s\n", restype, resname);
+
+ confres = rd_kafka_ConfigResource_new(restype, resname);
+ rd_kafka_DescribeConfigs(rk, &confres, 1, NULL, q);
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_DESCRIBECONFIGS_RESULT, &rkev, 15 * 1000);
+ if (err) {
+ rd_kafka_queue_destroy(q);
+ rd_kafka_ConfigResource_destroy(confres);
+ return err;
+ }
+
+ results = rd_kafka_DescribeConfigs_result_resources(
+ rd_kafka_event_DescribeConfigs_result(rkev), &result_cnt);
+ TEST_ASSERT(result_cnt == 1,
+ "expected 1 DescribeConfigs result, not %" PRIusz,
+ result_cnt);
+
+ configents =
+ rd_kafka_ConfigResource_configs(results[0], &configent_cnt);
+ TEST_ASSERT(configent_cnt > 0,
+ "expected > 0 ConfigEntry:s, not %" PRIusz, configent_cnt);
+
+ TEST_SAY("Altering configuration for %d %s\n", restype, resname);
+
+ /* Apply all existing configuration entries to resource object that
+ * will later be passed to AlterConfigs. */
+ for (i = 0; i < configent_cnt; i++) {
+ const char *entry_name =
+ rd_kafka_ConfigEntry_name(configents[i]);
+
+ if (test_broker_version >= TEST_BRKVER(3, 2, 0, 0)) {
+ /* Skip entries that are overwritten to
+ * avoid duplicates, that cause an error since
+ * this broker version. */
+ size_t j;
+ for (j = 0; j < config_cnt; j += 2) {
+ if (!strcmp(configs[j], entry_name)) {
+ break;
+ }
+ }
+
+ if (j < config_cnt)
+ continue;
+ }
+
+ err = rd_kafka_ConfigResource_set_config(
+ confres, entry_name,
+ rd_kafka_ConfigEntry_value(configents[i]));
+ TEST_ASSERT(!err,
+ "Failed to set read-back config %s=%s "
+ "on local resource object",
+ entry_name,
+ rd_kafka_ConfigEntry_value(configents[i]));
+ }
+
+ rd_kafka_event_destroy(rkev);
+
+ /* Then apply the configuration to change. */
+ for (i = 0; i < config_cnt; i += 2) {
+ err = rd_kafka_ConfigResource_set_config(confres, configs[i],
+ configs[i + 1]);
+ TEST_ASSERT(!err,
+ "Failed to set config %s=%s on "
+ "local resource object",
+ configs[i], configs[i + 1]);
+ }
+
+ rd_kafka_AlterConfigs(rk, &confres, 1, NULL, q);
+
+ rd_kafka_ConfigResource_destroy(confres);
+
+ err = test_wait_topic_admin_result(
+ q, RD_KAFKA_EVENT_ALTERCONFIGS_RESULT, NULL, 15 * 1000);
+
+ rd_kafka_queue_destroy(q);
+
+ return err;
+}
+
+/**
+ * @brief Topic Admin API helpers
+ *
+ * @param useq Makes the call async and posts the response in this queue.
+ * If NULL this call will be synchronous and return the error
+ * result.
+ *
+ * @remark Fails the current test on failure.
+ */
+
+rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_kafka_AclBinding_t **acls,
+ size_t acl_cnt,
+ void *opaque) {
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *q;
+ rd_kafka_resp_err_t err;
+ const int tmout = 30 * 1000;
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_CREATEACLS);
+ rd_kafka_AdminOptions_set_opaque(options, opaque);
+
+ if (!useq) {
+ q = rd_kafka_queue_new(rk);
+ } else {
+ q = useq;
+ }
+
+ TEST_SAY("Creating %" PRIusz " acls\n", acl_cnt);
+
+ rd_kafka_CreateAcls(rk, acls, acl_cnt, options, q);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ if (useq)
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+
+ err = test_wait_topic_admin_result(q, RD_KAFKA_EVENT_CREATEACLS_RESULT,
+ NULL, tmout + 5000);
+
+ rd_kafka_queue_destroy(q);
+
+ if (err)
+ TEST_FAIL("Failed to create %d acl(s): %s", (int)acl_cnt,
+ rd_kafka_err2str(err));
+
+ return err;
+}
+
+static void test_free_string_array(char **strs, size_t cnt) {
+ size_t i;
+ for (i = 0; i < cnt; i++)
+ free(strs[i]);
+ free(strs);
+}
+
+
+/**
+ * @return an array of all topics in the cluster matching our the
+ * rdkafka test prefix.
+ */
+static rd_kafka_resp_err_t
+test_get_all_test_topics(rd_kafka_t *rk, char ***topicsp, size_t *topic_cntp) {
+ size_t test_topic_prefix_len = strlen(test_topic_prefix);
+ const rd_kafka_metadata_t *md;
+ char **topics = NULL;
+ size_t topic_cnt = 0;
+ int i;
+ rd_kafka_resp_err_t err;
+
+ *topic_cntp = 0;
+ if (topicsp)
+ *topicsp = NULL;
+
+ /* Retrieve list of topics */
+ err = rd_kafka_metadata(rk, 1 /*all topics*/, NULL, &md,
+ tmout_multip(10000));
+ if (err) {
+ TEST_WARN(
+ "%s: Failed to acquire metadata: %s: "
+ "not deleting any topics\n",
+ __FUNCTION__, rd_kafka_err2str(err));
+ return err;
+ }
+
+ if (md->topic_cnt == 0) {
+ TEST_WARN("%s: No topics in cluster\n", __FUNCTION__);
+ rd_kafka_metadata_destroy(md);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ if (topicsp)
+ topics = malloc(sizeof(*topics) * md->topic_cnt);
+
+ for (i = 0; i < md->topic_cnt; i++) {
+ if (strlen(md->topics[i].topic) >= test_topic_prefix_len &&
+ !strncmp(md->topics[i].topic, test_topic_prefix,
+ test_topic_prefix_len)) {
+ if (topicsp)
+ topics[topic_cnt++] =
+ rd_strdup(md->topics[i].topic);
+ else
+ topic_cnt++;
+ }
+ }
+
+ if (topic_cnt == 0) {
+ TEST_SAY(
+ "%s: No topics (out of %d) matching our "
+ "test prefix (%s)\n",
+ __FUNCTION__, md->topic_cnt, test_topic_prefix);
+ rd_kafka_metadata_destroy(md);
+ if (topics)
+ test_free_string_array(topics, topic_cnt);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_kafka_metadata_destroy(md);
+
+ if (topicsp)
+ *topicsp = topics;
+ *topic_cntp = topic_cnt;
+
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+}
+
+/**
+ * @brief Delete all test topics using the Kafka Admin API.
+ */
+rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms) {
+ rd_kafka_t *rk;
+ char **topics;
+ size_t topic_cnt = 0;
+ rd_kafka_resp_err_t err;
+ int i;
+ rd_kafka_AdminOptions_t *options;
+ rd_kafka_queue_t *q;
+ char errstr[256];
+ int64_t abs_timeout = test_clock() + ((int64_t)timeout_ms * 1000);
+
+ rk = test_create_producer();
+
+ err = test_get_all_test_topics(rk, &topics, &topic_cnt);
+ if (err) {
+ /* Error already reported by test_get_all_test_topics() */
+ rd_kafka_destroy(rk);
+ return err;
+ }
+
+ if (topic_cnt == 0) {
+ rd_kafka_destroy(rk);
+ return RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ q = rd_kafka_queue_get_main(rk);
+
+ options = rd_kafka_AdminOptions_new(rk, RD_KAFKA_ADMIN_OP_DELETETOPICS);
+ if (rd_kafka_AdminOptions_set_operation_timeout(options, 2 * 60 * 1000,
+ errstr, sizeof(errstr)))
+ TEST_SAY(_C_YEL
+ "Failed to set DeleteTopics timeout: %s: "
+ "ignoring\n",
+ errstr);
+
+ TEST_SAY(_C_MAG
+ "====> Deleting all test topics with <===="
+ "a timeout of 2 minutes\n");
+
+ test_DeleteTopics_simple(rk, q, topics, topic_cnt, options);
+
+ rd_kafka_AdminOptions_destroy(options);
+
+ while (1) {
+ rd_kafka_event_t *rkev;
+ const rd_kafka_DeleteTopics_result_t *res;
+
+ rkev = rd_kafka_queue_poll(q, -1);
+
+ res = rd_kafka_event_DeleteTopics_result(rkev);
+ if (!res) {
+ TEST_SAY("%s: Ignoring event: %s: %s\n", __FUNCTION__,
+ rd_kafka_event_name(rkev),
+ rd_kafka_event_error_string(rkev));
+ rd_kafka_event_destroy(rkev);
+ continue;
+ }
+
+ if (rd_kafka_event_error(rkev)) {
+ TEST_WARN("%s: DeleteTopics for %" PRIusz
+ " topics "
+ "failed: %s\n",
+ __FUNCTION__, topic_cnt,
+ rd_kafka_event_error_string(rkev));
+ err = rd_kafka_event_error(rkev);
+ } else {
+ const rd_kafka_topic_result_t **terr;
+ size_t tcnt;
+ int okcnt = 0;
+
+ terr = rd_kafka_DeleteTopics_result_topics(res, &tcnt);
+
+ for (i = 0; i < (int)tcnt; i++) {
+ if (!rd_kafka_topic_result_error(terr[i])) {
+ okcnt++;
+ continue;
+ }
+
+ TEST_WARN("%s: Failed to delete topic %s: %s\n",
+ __FUNCTION__,
+ rd_kafka_topic_result_name(terr[i]),
+ rd_kafka_topic_result_error_string(
+ terr[i]));
+ }
+
+ TEST_SAY(
+ "%s: DeleteTopics "
+ "succeeded for %d/%" PRIusz " topics\n",
+ __FUNCTION__, okcnt, topic_cnt);
+ err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ }
+
+ rd_kafka_event_destroy(rkev);
+ break;
+ }
+
+ rd_kafka_queue_destroy(q);
+
+ test_free_string_array(topics, topic_cnt);
+
+ /* Wait for topics to be fully deleted */
+ while (1) {
+ err = test_get_all_test_topics(rk, NULL, &topic_cnt);
+
+ if (!err && topic_cnt == 0)
+ break;
+
+ if (abs_timeout < test_clock()) {
+ TEST_WARN(
+ "%s: Timed out waiting for "
+ "remaining %" PRIusz
+ " deleted topics "
+ "to disappear from cluster metadata\n",
+ __FUNCTION__, topic_cnt);
+ break;
+ }
+
+ TEST_SAY("Waiting for remaining %" PRIusz
+ " delete topics "
+ "to disappear from cluster metadata\n",
+ topic_cnt);
+
+ rd_sleep(1);
+ }
+
+ rd_kafka_destroy(rk);
+
+ return err;
+}
+
+
+
+void test_fail0(const char *file,
+ int line,
+ const char *function,
+ int do_lock,
+ int fail_now,
+ const char *fmt,
+ ...) {
+ char buf[512];
+ int is_thrd = 0;
+ size_t of;
+ va_list ap;
+ char *t;
+ char timestr[32];
+ time_t tnow = time(NULL);
+
+#ifdef __MINGW32__
+ strftime(timestr, sizeof(timestr), "%a %b %d %H:%M:%S %Y",
+ localtime(&tnow));
+#elif defined(_WIN32)
+ ctime_s(timestr, sizeof(timestr), &tnow);
+#else
+ ctime_r(&tnow, timestr);
+#endif
+ t = strchr(timestr, '\n');
+ if (t)
+ *t = '\0';
+
+ of = rd_snprintf(buf, sizeof(buf), "%s%s%s():%i: ", test_curr->subtest,
+ *test_curr->subtest ? ": " : "", function, line);
+ rd_assert(of < sizeof(buf));
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf + of, sizeof(buf) - of, fmt, ap);
+ va_end(ap);
+
+ /* Remove trailing newline */
+ if ((t = strchr(buf, '\n')) && !*(t + 1))
+ *t = '\0';
+
+ TEST_SAYL(0, "TEST FAILURE\n");
+ fprintf(stderr,
+ "\033[31m### Test \"%s%s%s%s\" failed at %s:%i:%s() at %s: "
+ "###\n"
+ "%s\n",
+ test_curr->name, *test_curr->subtest ? " (" : "",
+ test_curr->subtest, *test_curr->subtest ? ")" : "", file, line,
+ function, timestr, buf + of);
+ if (do_lock)
+ TEST_LOCK();
+ test_curr->state = TEST_FAILED;
+ test_curr->failcnt += 1;
+ test_curr->is_fatal_cb = NULL;
+
+ if (!*test_curr->failstr) {
+ strncpy(test_curr->failstr, buf, sizeof(test_curr->failstr));
+ test_curr->failstr[sizeof(test_curr->failstr) - 1] = '\0';
+ }
+ if (fail_now && test_curr->mainfunc) {
+ tests_running_cnt--;
+ is_thrd = 1;
+ }
+ if (do_lock)
+ TEST_UNLOCK();
+ if (!fail_now)
+ return;
+ if (test_assert_on_fail || !is_thrd)
+ assert(0);
+ else
+ thrd_exit(0);
+}
+
+
+/**
+ * @brief Destroy a mock cluster and its underlying rd_kafka_t handle
+ */
+void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster) {
+ rd_kafka_t *rk = rd_kafka_mock_cluster_handle(mcluster);
+ rd_kafka_mock_cluster_destroy(mcluster);
+ rd_kafka_destroy(rk);
+}
+
+
+
+/**
+ * @brief Create a standalone mock cluster that can be used by multiple
+ * rd_kafka_t instances.
+ */
+rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt,
+ const char **bootstraps) {
+ rd_kafka_t *rk;
+ rd_kafka_conf_t *conf = rd_kafka_conf_new();
+ rd_kafka_mock_cluster_t *mcluster;
+ char errstr[256];
+
+ test_conf_common_init(conf, 0);
+
+ test_conf_set(conf, "client.id", "MOCK");
+
+ rk = rd_kafka_new(RD_KAFKA_PRODUCER, conf, errstr, sizeof(errstr));
+ TEST_ASSERT(rk, "Failed to create mock cluster rd_kafka_t: %s", errstr);
+
+ mcluster = rd_kafka_mock_cluster_new(rk, broker_cnt);
+ TEST_ASSERT(mcluster, "Failed to acquire mock cluster");
+
+ if (bootstraps)
+ *bootstraps = rd_kafka_mock_cluster_bootstraps(mcluster);
+
+ return mcluster;
+}
+
+
+
+/**
+ * @name Sub-tests
+ */
+
+
+/**
+ * @brief Start a sub-test. \p fmt is optional and allows additional
+ * sub-test info to be displayed, e.g., test parameters.
+ *
+ * @returns 0 if sub-test should not be run, else 1.
+ */
+int test_sub_start(const char *func,
+ int line,
+ int is_quick,
+ const char *fmt,
+ ...) {
+
+ if (!is_quick && test_quick)
+ return 0;
+
+ if (fmt && *fmt) {
+ va_list ap;
+ char buf[256];
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ rd_snprintf(test_curr->subtest, sizeof(test_curr->subtest),
+ "%s:%d: %s", func, line, buf);
+ } else {
+ rd_snprintf(test_curr->subtest, sizeof(test_curr->subtest),
+ "%s:%d", func, line);
+ }
+
+ if (subtests_to_run && !strstr(test_curr->subtest, subtests_to_run)) {
+ *test_curr->subtest = '\0';
+ return 0;
+ }
+
+ test_curr->subtest_quick = is_quick;
+
+ TIMING_START(&test_curr->subtest_duration, "SUBTEST");
+
+ TEST_SAY(_C_MAG "[ %s ]\n", test_curr->subtest);
+
+ return 1;
+}
+
+
+/**
+ * @brief Reset the current subtest state.
+ */
+static void test_sub_reset(void) {
+ *test_curr->subtest = '\0';
+ test_curr->is_fatal_cb = NULL;
+ test_curr->ignore_dr_err = rd_false;
+ test_curr->exp_dr_err = RD_KAFKA_RESP_ERR_NO_ERROR;
+ /* Don't check msg status by default */
+ test_curr->exp_dr_status = (rd_kafka_msg_status_t)-1;
+ test_curr->dr_mv = NULL;
+}
+
+/**
+ * @brief Sub-test has passed.
+ */
+void test_sub_pass(void) {
+
+ TEST_ASSERT(*test_curr->subtest);
+
+ TEST_SAYL(1, _C_GRN "[ %s: PASS (%.02fs) ]\n", test_curr->subtest,
+ (float)(TIMING_DURATION(&test_curr->subtest_duration) /
+ 1000000.0f));
+
+ if (test_curr->subtest_quick && test_quick && !test_on_ci &&
+ TIMING_DURATION(&test_curr->subtest_duration) > 45 * 1000 * 1000)
+ TEST_WARN(
+ "Subtest %s marked as QUICK but took %.02fs to "
+ "finish: either fix the test or "
+ "remove the _QUICK identifier (limit is 45s)\n",
+ test_curr->subtest,
+ (float)(TIMING_DURATION(&test_curr->subtest_duration) /
+ 1000000.0f));
+
+ test_sub_reset();
+}
+
+
+/**
+ * @brief Skip sub-test (must have been started with SUB_TEST*()).
+ */
+void test_sub_skip(const char *fmt, ...) {
+ va_list ap;
+ char buf[256];
+
+ TEST_ASSERT(*test_curr->subtest);
+
+ va_start(ap, fmt);
+ rd_vsnprintf(buf, sizeof(buf), fmt, ap);
+ va_end(ap);
+
+ TEST_SAYL(1, _C_YEL "[ %s: SKIP: %s ]\n", test_curr->subtest, buf);
+
+ test_sub_reset();
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example b/fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example
new file mode 100644
index 000000000..dea4a09f6
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/test.conf.example
@@ -0,0 +1,27 @@
+# Copy this file to test.conf and set up according to your configuration.
+
+#
+# Test configuration
+#
+# For slow connections: multiply test timeouts by this much (float)
+#test.timeout.multiplier=3.5
+
+# Test topic names are constructed by:
+# <prefix>_<suffix>, where default topic prefix is "rdkafkatest".
+# suffix is specified by the tests.
+#test.topic.prefix=bib
+
+# Make topic names random:
+# <prefix>_<randomnumber>_<suffix>
+#test.topic.random=true
+
+# Write test results to sqlite3 database
+#test.sql.command=sqlite3 rdktests
+
+# Bootstrap broker(s)
+metadata.broker.list=localhost:9092
+
+# Debugging
+#debug=metadata,topic,msg,broker
+
+# Any other librdkafka configuration property.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/test.h b/fluent-bit/lib/librdkafka-2.1.0/tests/test.h
new file mode 100644
index 000000000..a431f9a25
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/test.h
@@ -0,0 +1,936 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _TEST_H_
+#define _TEST_H_
+
+#include "../src/rd.h"
+
+#include <stdio.h>
+#include <string.h>
+#include <stdlib.h>
+#ifndef _WIN32
+#include <unistd.h>
+#endif
+#include <errno.h>
+#include <assert.h>
+#include <time.h>
+#include <ctype.h>
+
+#if HAVE_GETRUSAGE
+#include <sys/time.h>
+#include <sys/resource.h>
+#endif
+
+#include "rdkafka.h"
+#include "rdkafka_mock.h"
+#include "tinycthread.h"
+#include "rdlist.h"
+
+#if WITH_SOCKEM
+#include "sockem.h"
+#endif
+
+#include "testshared.h"
+#ifdef _WIN32
+#define sscanf(...) sscanf_s(__VA_ARGS__)
+#endif
+
+/**
+ * Test output is controlled through "TEST_LEVEL=N" environemnt variable.
+ * N < 2: TEST_SAY() is quiet.
+ */
+
+extern int test_seed;
+extern char test_mode[64];
+extern RD_TLS struct test *test_curr;
+extern int test_assert_on_fail;
+extern int tests_running_cnt;
+extern int test_concurrent_max;
+extern int test_rusage;
+extern double test_rusage_cpu_calibration;
+extern double test_timeout_multiplier;
+extern int test_session_timeout_ms; /* Group session timeout */
+extern int test_flags;
+extern int test_neg_flags;
+extern int test_idempotent_producer;
+
+extern mtx_t test_mtx;
+
+#define TEST_LOCK() mtx_lock(&test_mtx)
+#define TEST_UNLOCK() mtx_unlock(&test_mtx)
+
+
+/* Forward decl */
+typedef struct test_msgver_s test_msgver_t;
+
+
+/** @struct Resource usage thresholds */
+struct rusage_thres {
+ double ucpu; /**< Max User CPU in percentage */
+ double scpu; /**< Max Sys CPU in percentage */
+ double rss; /**< Max RSS (memory) increase in MB */
+ int ctxsw; /**< Max number of voluntary context switches, i.e.
+ * syscalls. */
+};
+
+typedef enum {
+ TEST_NOT_STARTED,
+ TEST_SKIPPED,
+ TEST_RUNNING,
+ TEST_PASSED,
+ TEST_FAILED,
+} test_state_t;
+
+struct test {
+ /**
+ * Setup
+ */
+ const char *name; /**< e.g. Same as filename minus extension */
+ int (*mainfunc)(int argc, char **argv); /**< test's main func */
+ const int flags; /**< Test flags */
+#define TEST_F_LOCAL 0x1 /**< Test is local, no broker requirement */
+#define TEST_F_KNOWN_ISSUE \
+ 0x2 /**< Known issue, can fail without affecting \
+ * total test run status. */
+#define TEST_F_MANUAL \
+ 0x4 /**< Manual test, only started when specifically \
+ * stated */
+#define TEST_F_SOCKEM 0x8 /**< Test requires socket emulation. */
+ int minver; /**< Limit tests to broker version range. */
+ int maxver;
+
+ const char *extra; /**< Extra information to print in test_summary. */
+
+ const char *scenario; /**< Test scenario */
+
+ char *
+ *report_arr; /**< Test-specific reporting, JSON array of objects. */
+ int report_cnt;
+ int report_size;
+
+ rd_bool_t ignore_dr_err; /**< Ignore delivery report errors */
+ rd_kafka_resp_err_t exp_dr_err; /* Expected error in test_dr_cb */
+ rd_kafka_msg_status_t exp_dr_status; /**< Expected delivery status,
+ * or -1 for not checking. */
+ int produce_sync; /**< test_produce_sync() call in action */
+ rd_kafka_resp_err_t produce_sync_err; /**< DR error */
+ test_msgver_t *dr_mv; /**< MsgVer that delivered messages will be
+ * added to (if not NULL).
+ * Must be set and freed by test. */
+
+ /**
+ * Runtime
+ */
+ thrd_t thrd;
+ int64_t start;
+ int64_t duration;
+ FILE *stats_fp;
+ int64_t timeout;
+ test_state_t state;
+ int failcnt; /**< Number of failures, useful with FAIL_LATER */
+ char failstr[512 + 1]; /**< First test failure reason */
+ char subtest[400]; /**< Current subtest, if any */
+ test_timing_t subtest_duration; /**< Subtest duration timing */
+ rd_bool_t subtest_quick; /**< Subtest is marked as QUICK */
+
+#if WITH_SOCKEM
+ rd_list_t sockets;
+ int (*connect_cb)(struct test *test, sockem_t *skm, const char *id);
+#endif
+ int (*is_fatal_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *reason);
+
+ /**< Resource usage thresholds */
+ struct rusage_thres rusage_thres; /**< Usage thresholds */
+#if HAVE_GETRUSAGE
+ struct rusage rusage; /**< Monitored process CPU/mem usage */
+#endif
+};
+
+
+#ifdef _WIN32
+#define TEST_F_KNOWN_ISSUE_WIN32 TEST_F_KNOWN_ISSUE
+#else
+#define TEST_F_KNOWN_ISSUE_WIN32 0
+#endif
+
+#ifdef __APPLE__
+#define TEST_F_KNOWN_ISSUE_OSX TEST_F_KNOWN_ISSUE
+#else
+#define TEST_F_KNOWN_ISSUE_OSX 0
+#endif
+
+
+#define TEST_SAY0(...) fprintf(stderr, __VA_ARGS__)
+#define TEST_SAYL(LVL, ...) \
+ do { \
+ if (test_level >= LVL) { \
+ fprintf( \
+ stderr, "\033[36m[%-28s/%7.3fs] ", \
+ test_curr->name, \
+ test_curr->start \
+ ? ((float)(test_clock() - test_curr->start) / \
+ 1000000.0f) \
+ : 0); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m"); \
+ } \
+ } while (0)
+#define TEST_SAY(...) TEST_SAYL(2, __VA_ARGS__)
+
+/**
+ * Append JSON object (as string) to this tests' report array.
+ */
+#define TEST_REPORT(...) test_report_add(test_curr, __VA_ARGS__)
+
+
+
+static RD_INLINE RD_UNUSED void rtrim(char *str) {
+ size_t len = strlen(str);
+ char *s;
+
+ if (len == 0)
+ return;
+
+ s = str + len - 1;
+ while (isspace((int)*s)) {
+ *s = '\0';
+ s--;
+ }
+}
+
+/* Skip the current test. Argument is textual reason (printf format) */
+#define TEST_SKIP(...) \
+ do { \
+ TEST_WARN("SKIPPING TEST: " __VA_ARGS__); \
+ TEST_LOCK(); \
+ test_curr->state = TEST_SKIPPED; \
+ if (!*test_curr->failstr) { \
+ rd_snprintf(test_curr->failstr, \
+ sizeof(test_curr->failstr), __VA_ARGS__); \
+ rtrim(test_curr->failstr); \
+ } \
+ TEST_UNLOCK(); \
+ } while (0)
+
+
+void test_conf_init(rd_kafka_conf_t **conf,
+ rd_kafka_topic_conf_t **topic_conf,
+ int timeout);
+
+
+
+void test_msg_fmt(char *dest,
+ size_t dest_size,
+ uint64_t testid,
+ int32_t partition,
+ int msgid);
+void test_msg_parse0(const char *func,
+ int line,
+ uint64_t testid,
+ rd_kafka_message_t *rkmessage,
+ int32_t exp_partition,
+ int *msgidp);
+#define test_msg_parse(testid, rkmessage, exp_partition, msgidp) \
+ test_msg_parse0(__FUNCTION__, __LINE__, testid, rkmessage, \
+ exp_partition, msgidp)
+
+
+static RD_INLINE int jitter(int low, int high) RD_UNUSED;
+static RD_INLINE int jitter(int low, int high) {
+ return (low + (rand() % ((high - low) + 1)));
+}
+
+
+
+/******************************************************************************
+ *
+ * Helpers
+ *
+ ******************************************************************************/
+
+
+
+/****************************************************************
+ * Message verification services *
+ * *
+ * *
+ * *
+ ****************************************************************/
+
+
+/**
+ * A test_msgver_t is first fed with messages from any number of
+ * topics and partitions, it is then checked for expected messages, such as:
+ * - all messages received, based on message payload information.
+ * - messages received in order
+ * - EOF
+ */
+struct test_msgver_s {
+ struct test_mv_p **p; /* Partitions array */
+ int p_cnt; /* Partition count */
+ int p_size; /* p size */
+ int msgcnt; /* Total message count */
+ uint64_t testid; /* Only accept messages for this testid */
+ rd_bool_t ignore_eof; /* Don't end PARTITION_EOF messages */
+
+ struct test_msgver_s *fwd; /* Also forward add_msg() to this mv */
+
+ int log_cnt; /* Current number of warning logs */
+ int log_max; /* Max warning logs before suppressing. */
+ int log_suppr_cnt; /* Number of suppressed log messages. */
+
+ const char *msgid_hdr; /**< msgid string is in header by this name,
+ * rather than in the payload (default). */
+}; /* test_msgver_t; */
+
+/* Message */
+struct test_mv_m {
+ int64_t offset; /* Message offset */
+ int msgid; /* Message id */
+ int64_t timestamp; /* Message timestamp */
+ int32_t broker_id; /* Message broker id */
+};
+
+
+/* Message vector */
+struct test_mv_mvec {
+ struct test_mv_m *m;
+ int cnt;
+ int size; /* m[] size */
+};
+
+/* Partition */
+struct test_mv_p {
+ char *topic;
+ int32_t partition;
+ struct test_mv_mvec mvec;
+ int64_t eof_offset;
+};
+
+/* Verification state */
+struct test_mv_vs {
+ int msg_base;
+ int exp_cnt;
+
+ /* used by verify_range */
+ int msgid_min;
+ int msgid_max;
+ int64_t timestamp_min;
+ int64_t timestamp_max;
+
+ /* used by verify_broker_id */
+ int32_t broker_id;
+
+ struct test_mv_mvec mvec;
+
+ /* Correct msgver for comparison */
+ test_msgver_t *corr;
+};
+
+
+void test_msgver_init(test_msgver_t *mv, uint64_t testid);
+void test_msgver_clear(test_msgver_t *mv);
+void test_msgver_ignore_eof(test_msgver_t *mv);
+int test_msgver_add_msg00(const char *func,
+ int line,
+ const char *clientname,
+ test_msgver_t *mv,
+ uint64_t testid,
+ const char *topic,
+ int32_t partition,
+ int64_t offset,
+ int64_t timestamp,
+ int32_t broker_id,
+ rd_kafka_resp_err_t err,
+ int msgnum);
+int test_msgver_add_msg0(const char *func,
+ int line,
+ const char *clientname,
+ test_msgver_t *mv,
+ const rd_kafka_message_t *rkmessage,
+ const char *override_topic);
+#define test_msgver_add_msg(rk, mv, rkm) \
+ test_msgver_add_msg0(__FUNCTION__, __LINE__, rd_kafka_name(rk), mv, \
+ rkm, NULL)
+
+/**
+ * Flags to indicate what to verify.
+ */
+#define TEST_MSGVER_ORDER 0x1 /* Order */
+#define TEST_MSGVER_DUP 0x2 /* Duplicates */
+#define TEST_MSGVER_RANGE 0x4 /* Range of messages */
+
+#define TEST_MSGVER_ALL 0xf /* All verifiers */
+
+#define TEST_MSGVER_BY_MSGID 0x10000 /* Verify by msgid (unique in testid) */
+#define TEST_MSGVER_BY_OFFSET \
+ 0x20000 /* Verify by offset (unique in partition)*/
+#define TEST_MSGVER_BY_TIMESTAMP 0x40000 /* Verify by timestamp range */
+#define TEST_MSGVER_BY_BROKER_ID 0x80000 /* Verify by broker id */
+
+#define TEST_MSGVER_SUBSET \
+ 0x100000 /* verify_compare: allow correct mv to be \
+ * a subset of mv. */
+
+/* Only test per partition, not across all messages received on all partitions.
+ * This is useful when doing incremental verifications with multiple partitions
+ * and the total number of messages has not been received yet.
+ * Can't do range check here since messages may be spread out on multiple
+ * partitions and we might just have read a few partitions. */
+#define TEST_MSGVER_PER_PART \
+ ((TEST_MSGVER_ALL & ~TEST_MSGVER_RANGE) | TEST_MSGVER_BY_MSGID | \
+ TEST_MSGVER_BY_OFFSET)
+
+/* Test on all messages across all partitions.
+ * This can only be used to check with msgid, not offset since that
+ * is partition local. */
+#define TEST_MSGVER_ALL_PART (TEST_MSGVER_ALL | TEST_MSGVER_BY_MSGID)
+
+
+int test_msgver_verify_part0(const char *func,
+ int line,
+ const char *what,
+ test_msgver_t *mv,
+ int flags,
+ const char *topic,
+ int partition,
+ int msg_base,
+ int exp_cnt);
+#define test_msgver_verify_part(what, mv, flags, topic, partition, msg_base, \
+ exp_cnt) \
+ test_msgver_verify_part0(__FUNCTION__, __LINE__, what, mv, flags, \
+ topic, partition, msg_base, exp_cnt)
+
+int test_msgver_verify0(const char *func,
+ int line,
+ const char *what,
+ test_msgver_t *mv,
+ int flags,
+ struct test_mv_vs vs);
+#define test_msgver_verify(what, mv, flags, msgbase, expcnt) \
+ test_msgver_verify0( \
+ __FUNCTION__, __LINE__, what, mv, flags, \
+ (struct test_mv_vs) {.msg_base = msgbase, .exp_cnt = expcnt})
+
+
+void test_msgver_verify_compare0(const char *func,
+ int line,
+ const char *what,
+ test_msgver_t *mv,
+ test_msgver_t *corr,
+ int flags);
+#define test_msgver_verify_compare(what, mv, corr, flags) \
+ test_msgver_verify_compare0(__FUNCTION__, __LINE__, what, mv, corr, \
+ flags)
+
+rd_kafka_t *test_create_handle(int mode, rd_kafka_conf_t *conf);
+
+/**
+ * Delivery reported callback.
+ * Called for each message once to signal its delivery status.
+ */
+void test_dr_msg_cb(rd_kafka_t *rk,
+ const rd_kafka_message_t *rkmessage,
+ void *opaque);
+
+rd_kafka_t *test_create_producer(void);
+rd_kafka_topic_t *
+test_create_producer_topic(rd_kafka_t *rk, const char *topic, ...);
+void test_wait_delivery(rd_kafka_t *rk, int *msgcounterp);
+void test_produce_msgs_nowait(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size,
+ int msgrate,
+ int *msgcounterp);
+void test_produce_msgs(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size);
+void test_produce_msgs2(rd_kafka_t *rk,
+ const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size);
+void test_produce_msgs2_nowait(rd_kafka_t *rk,
+ const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size,
+ int *remainsp);
+void test_produce_msgs_rate(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ const char *payload,
+ size_t size,
+ int msgrate);
+rd_kafka_resp_err_t test_produce_sync(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition);
+
+void test_produce_msgs_easy_v(const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msg_base,
+ int cnt,
+ size_t size,
+ ...);
+void test_produce_msgs_easy_multi(uint64_t testid, ...);
+
+void test_incremental_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque);
+void test_rebalance_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *parts,
+ void *opaque);
+
+rd_kafka_t *test_create_consumer(
+ const char *group_id,
+ void (*rebalance_cb)(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ rd_kafka_topic_partition_list_t *partitions,
+ void *opaque),
+ rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *default_topic_conf);
+rd_kafka_topic_t *test_create_consumer_topic(rd_kafka_t *rk, const char *topic);
+rd_kafka_topic_t *
+test_create_topic_object(rd_kafka_t *rk, const char *topic, ...);
+void test_consumer_start(const char *what,
+ rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t start_offset);
+void test_consumer_stop(const char *what,
+ rd_kafka_topic_t *rkt,
+ int32_t partition);
+void test_consumer_seek(const char *what,
+ rd_kafka_topic_t *rkt,
+ int32_t partition,
+ int64_t offset);
+
+#define TEST_NO_SEEK -1
+int64_t test_consume_msgs(const char *what,
+ rd_kafka_topic_t *rkt,
+ uint64_t testid,
+ int32_t partition,
+ int64_t offset,
+ int exp_msg_base,
+ int exp_cnt,
+ int parse_fmt);
+
+
+void test_verify_rkmessage0(const char *func,
+ int line,
+ rd_kafka_message_t *rkmessage,
+ uint64_t testid,
+ int32_t partition,
+ int msgnum);
+#define test_verify_rkmessage(rkmessage, testid, partition, msgnum) \
+ test_verify_rkmessage0(__FUNCTION__, __LINE__, rkmessage, testid, \
+ partition, msgnum)
+
+void test_consumer_subscribe(rd_kafka_t *rk, const char *topic);
+
+void test_consume_msgs_easy_mv0(const char *group_id,
+ const char *topic,
+ rd_bool_t txn,
+ int32_t partition,
+ uint64_t testid,
+ int exp_eofcnt,
+ int exp_msgcnt,
+ rd_kafka_topic_conf_t *tconf,
+ test_msgver_t *mv);
+
+#define test_consume_msgs_easy_mv(group_id, topic, partition, testid, \
+ exp_eofcnt, exp_msgcnt, tconf, mv) \
+ test_consume_msgs_easy_mv0(group_id, topic, rd_false /*not-txn*/, \
+ partition, testid, exp_eofcnt, exp_msgcnt, \
+ tconf, mv)
+
+void test_consume_msgs_easy(const char *group_id,
+ const char *topic,
+ uint64_t testid,
+ int exp_eofcnt,
+ int exp_msgcnt,
+ rd_kafka_topic_conf_t *tconf);
+
+void test_consume_txn_msgs_easy(const char *group_id,
+ const char *topic,
+ uint64_t testid,
+ int exp_eofcnt,
+ int exp_msgcnt,
+ rd_kafka_topic_conf_t *tconf);
+
+void test_consumer_poll_no_msgs(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int timeout_ms);
+void test_consumer_poll_expect_err(rd_kafka_t *rk,
+ uint64_t testid,
+ int timeout_ms,
+ rd_kafka_resp_err_t err);
+int test_consumer_poll_once(rd_kafka_t *rk, test_msgver_t *mv, int timeout_ms);
+int test_consumer_poll_exact_timeout(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ rd_bool_t exact,
+ test_msgver_t *mv,
+ int timeout_ms);
+int test_consumer_poll_exact(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ rd_bool_t exact,
+ test_msgver_t *mv);
+int test_consumer_poll(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ test_msgver_t *mv);
+int test_consumer_poll_timeout(const char *what,
+ rd_kafka_t *rk,
+ uint64_t testid,
+ int exp_eof_cnt,
+ int exp_msg_base,
+ int exp_cnt,
+ test_msgver_t *mv,
+ int timeout_ms);
+
+void test_consumer_wait_assignment(rd_kafka_t *rk, rd_bool_t do_poll);
+void test_consumer_verify_assignment0(const char *func,
+ int line,
+ rd_kafka_t *rk,
+ int fail_immediately,
+ ...);
+#define test_consumer_verify_assignment(rk, fail_immediately, ...) \
+ test_consumer_verify_assignment0(__FUNCTION__, __LINE__, rk, \
+ fail_immediately, __VA_ARGS__)
+
+void test_consumer_assign(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *parts);
+void test_consumer_incremental_assign(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *parts);
+void test_consumer_unassign(const char *what, rd_kafka_t *rk);
+void test_consumer_incremental_unassign(const char *what,
+ rd_kafka_t *rk,
+ rd_kafka_topic_partition_list_t *parts);
+void test_consumer_assign_partition(const char *what,
+ rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ int64_t offset);
+void test_consumer_pause_resume_partition(rd_kafka_t *rk,
+ const char *topic,
+ int32_t partition,
+ rd_bool_t pause);
+
+void test_consumer_close(rd_kafka_t *rk);
+
+void test_flush(rd_kafka_t *rk, int timeout_ms);
+
+void test_conf_set(rd_kafka_conf_t *conf, const char *name, const char *val);
+char *test_topic_conf_get(const rd_kafka_topic_conf_t *tconf, const char *name);
+int test_conf_match(rd_kafka_conf_t *conf, const char *name, const char *val);
+void test_topic_conf_set(rd_kafka_topic_conf_t *tconf,
+ const char *name,
+ const char *val);
+void test_any_conf_set(rd_kafka_conf_t *conf,
+ rd_kafka_topic_conf_t *tconf,
+ const char *name,
+ const char *val);
+
+void test_print_partition_list(
+ const rd_kafka_topic_partition_list_t *partitions);
+int test_partition_list_cmp(rd_kafka_topic_partition_list_t *al,
+ rd_kafka_topic_partition_list_t *bl);
+int test_partition_list_and_offsets_cmp(rd_kafka_topic_partition_list_t *al,
+ rd_kafka_topic_partition_list_t *bl);
+
+void test_kafka_topics(const char *fmt, ...);
+void test_admin_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor,
+ const char **configs);
+void test_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor);
+rd_kafka_resp_err_t test_auto_create_topic_rkt(rd_kafka_t *rk,
+ rd_kafka_topic_t *rkt,
+ int timeout_ms);
+rd_kafka_resp_err_t
+test_auto_create_topic(rd_kafka_t *rk, const char *name, int timeout_ms);
+int test_check_auto_create_topic(void);
+
+void test_create_partitions(rd_kafka_t *use_rk,
+ const char *topicname,
+ int new_partition_cnt);
+
+int test_get_partition_count(rd_kafka_t *rk,
+ const char *topicname,
+ int timeout_ms);
+
+char *tsprintf(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
+
+void test_report_add(struct test *test, const char *fmt, ...);
+int test_can_create_topics(int skip);
+
+rd_kafka_event_t *test_wait_event(rd_kafka_queue_t *eventq,
+ rd_kafka_event_type_t event_type,
+ int timeout_ms);
+
+void test_prepare_msg(uint64_t testid,
+ int32_t partition,
+ int msg_id,
+ char *val,
+ size_t val_size,
+ char *key,
+ size_t key_size);
+
+#if WITH_SOCKEM
+void test_socket_enable(rd_kafka_conf_t *conf);
+void test_socket_close_all(struct test *test, int reinit);
+int test_socket_sockem_set_all(const char *key, int val);
+void test_socket_sockem_set(int s, const char *key, int value);
+#endif
+
+void test_headers_dump(const char *what,
+ int lvl,
+ const rd_kafka_headers_t *hdrs);
+
+int32_t *test_get_broker_ids(rd_kafka_t *use_rk, size_t *cntp);
+
+void test_wait_metadata_update(rd_kafka_t *rk,
+ rd_kafka_metadata_topic_t *topics,
+ size_t topic_cnt,
+ rd_kafka_metadata_topic_t *not_topics,
+ size_t not_topic_cnt,
+ int tmout);
+
+rd_kafka_event_t *test_wait_admin_result(rd_kafka_queue_t *q,
+ rd_kafka_event_type_t evtype,
+ int tmout);
+
+rd_kafka_resp_err_t test_wait_topic_admin_result(rd_kafka_queue_t *q,
+ rd_kafka_event_type_t evtype,
+ rd_kafka_event_t **retevent,
+ int tmout);
+
+rd_kafka_resp_err_t test_CreateTopics_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ char **topics,
+ size_t topic_cnt,
+ int num_partitions,
+ void *opaque);
+rd_kafka_resp_err_t test_CreatePartitions_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ const char *topic,
+ size_t total_part_cnt,
+ void *opaque);
+
+rd_kafka_resp_err_t test_DeleteTopics_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ char **topics,
+ size_t topic_cnt,
+ void *opaque);
+
+rd_kafka_resp_err_t test_AlterConfigs_simple(rd_kafka_t *rk,
+ rd_kafka_ResourceType_t restype,
+ const char *resname,
+ const char **configs,
+ size_t config_cnt);
+
+rd_kafka_resp_err_t test_DeleteGroups_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ char **groups,
+ size_t group_cnt,
+ void *opaque);
+
+rd_kafka_resp_err_t
+test_DeleteRecords_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ const rd_kafka_topic_partition_list_t *offsets,
+ void *opaque);
+
+rd_kafka_resp_err_t test_DeleteConsumerGroupOffsets_simple(
+ rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ const char *group_id,
+ const rd_kafka_topic_partition_list_t *offsets,
+ void *opaque);
+
+rd_kafka_resp_err_t test_CreateAcls_simple(rd_kafka_t *rk,
+ rd_kafka_queue_t *useq,
+ rd_kafka_AclBinding_t **acls,
+ size_t acl_cnt,
+ void *opaque);
+
+rd_kafka_resp_err_t test_delete_all_test_topics(int timeout_ms);
+
+void test_mock_cluster_destroy(rd_kafka_mock_cluster_t *mcluster);
+rd_kafka_mock_cluster_t *test_mock_cluster_new(int broker_cnt,
+ const char **bootstraps);
+
+
+
+int test_error_is_not_fatal_cb(rd_kafka_t *rk,
+ rd_kafka_resp_err_t err,
+ const char *reason);
+
+
+/**
+ * @brief Calls rdkafka function (with arguments)
+ * and checks its return value (must be rd_kafka_resp_err_t) for
+ * error, in which case the test fails.
+ * Also times the call.
+ *
+ * @remark The trailing __ makes calling code easier to read.
+ */
+#define TEST_CALL__(FUNC_W_ARGS) \
+ do { \
+ test_timing_t _timing; \
+ const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \
+ rd_kafka_resp_err_t _err; \
+ TIMING_START(&_timing, "%s", _desc); \
+ TEST_SAYL(3, "Begin call %s\n", _desc); \
+ _err = FUNC_W_ARGS; \
+ TIMING_STOP(&_timing); \
+ if (!_err) \
+ break; \
+ if (strstr(_desc, "errstr")) \
+ TEST_FAIL("%s failed: %s: %s\n", _desc, \
+ rd_kafka_err2name(_err), errstr); \
+ else \
+ TEST_FAIL("%s failed: %s\n", _desc, \
+ rd_kafka_err2str(_err)); \
+ } while (0)
+
+
+/**
+ * @brief Same as TEST_CALL__() but expects an rd_kafka_error_t * return type.
+ */
+#define TEST_CALL_ERROR__(FUNC_W_ARGS) \
+ do { \
+ test_timing_t _timing; \
+ const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \
+ const rd_kafka_error_t *_error; \
+ TIMING_START(&_timing, "%s", _desc); \
+ TEST_SAYL(3, "Begin call %s\n", _desc); \
+ _error = FUNC_W_ARGS; \
+ TIMING_STOP(&_timing); \
+ if (!_error) \
+ break; \
+ TEST_FAIL("%s failed: %s\n", _desc, \
+ rd_kafka_error_string(_error)); \
+ } while (0)
+
+/**
+ * @brief Same as TEST_CALL__() but expects an rd_kafka_resp_err_t return type
+ * without errstr.
+ */
+#define TEST_CALL_ERR__(FUNC_W_ARGS) \
+ do { \
+ test_timing_t _timing; \
+ const char *_desc = RD_STRINGIFY(FUNC_W_ARGS); \
+ rd_kafka_resp_err_t _err; \
+ TIMING_START(&_timing, "%s", _desc); \
+ TEST_SAYL(3, "Begin call %s\n", _desc); \
+ _err = FUNC_W_ARGS; \
+ TIMING_STOP(&_timing); \
+ if (!_err) \
+ break; \
+ TEST_FAIL("%s failed: %s\n", _desc, rd_kafka_err2str(_err)); \
+ } while (0)
+
+
+/**
+ * @brief Print a rich error_t object in all its glory. NULL is ok.
+ *
+ * @param ... Is a prefix format-string+args that is printed with TEST_SAY()
+ * prior to the error details. E.g., "commit() returned: ".
+ * A newline is automatically appended.
+ */
+#define TEST_SAY_ERROR(ERROR, ...) \
+ do { \
+ rd_kafka_error_t *_e = (ERROR); \
+ TEST_SAY(__VA_ARGS__); \
+ if (!_e) { \
+ TEST_SAY0("No error" _C_CLR "\n"); \
+ break; \
+ } \
+ if (rd_kafka_error_is_fatal(_e)) \
+ TEST_SAY0(_C_RED "FATAL "); \
+ if (rd_kafka_error_is_retriable(_e)) \
+ TEST_SAY0("Retriable "); \
+ if (rd_kafka_error_txn_requires_abort(_e)) \
+ TEST_SAY0("TxnRequiresAbort "); \
+ TEST_SAY0("Error: %s: %s" _C_CLR "\n", \
+ rd_kafka_error_name(_e), rd_kafka_error_string(_e)); \
+ } while (0)
+
+/**
+ * @name rusage.c
+ * @{
+ */
+void test_rusage_start(struct test *test);
+int test_rusage_stop(struct test *test, double duration);
+
+/**@}*/
+
+#endif /* _TEST_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp
new file mode 100644
index 000000000..e965e249f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.cpp
@@ -0,0 +1,126 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+
+#include "testcpp.h"
+
+#include <fstream>
+#include <cstring>
+
+
+namespace Test {
+
+/**
+ * @brief Read config file and populate config objects.
+ * @returns 0 on success or -1 on error
+ */
+static int read_config_file(std::string path,
+ RdKafka::Conf *conf,
+ RdKafka::Conf *topic_conf,
+ int *timeoutp) {
+ std::ifstream input(path.c_str(), std::ifstream::in);
+
+ if (!input)
+ return 0;
+
+ std::string line;
+ while (std::getline(input, line)) {
+ /* Trim string */
+ line.erase(0, line.find_first_not_of("\t "));
+ line.erase(line.find_last_not_of("\t ") + 1);
+
+ if (line.length() == 0 || line.substr(0, 1) == "#")
+ continue;
+
+ size_t f = line.find("=");
+ if (f == std::string::npos) {
+ Test::Fail(tostr() << "Conf file: malformed line: " << line);
+ return -1;
+ }
+
+ std::string n = line.substr(0, f);
+ std::string v = line.substr(f + 1);
+ std::string errstr;
+
+ if (test_set_special_conf(n.c_str(), v.c_str(), timeoutp))
+ continue;
+
+ RdKafka::Conf::ConfResult r = RdKafka::Conf::CONF_UNKNOWN;
+
+ if (n.substr(0, 6) == "topic.")
+ r = topic_conf->set(n.substr(6), v, errstr);
+ if (r == RdKafka::Conf::CONF_UNKNOWN)
+ r = conf->set(n, v, errstr);
+
+ if (r != RdKafka::Conf::CONF_OK) {
+ Test::Fail(errstr);
+ return -1;
+ }
+ }
+
+ return 0;
+}
+
+void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout) {
+ const char *tmp;
+
+ if (conf)
+ *conf = RdKafka::Conf::create(RdKafka::Conf::CONF_GLOBAL);
+ if (topic_conf)
+ *topic_conf = RdKafka::Conf::create(RdKafka::Conf::CONF_TOPIC);
+
+ read_config_file(test_conf_get_path(), conf ? *conf : NULL,
+ topic_conf ? *topic_conf : NULL, &timeout);
+
+ std::string errstr;
+ if ((*conf)->set("client.id", test_curr_name(), errstr) !=
+ RdKafka::Conf::CONF_OK)
+ Test::Fail("set client.id failed: " + errstr);
+
+ if (*conf && (tmp = test_getenv("TEST_DEBUG", NULL))) {
+ if ((*conf)->set("debug", tmp, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail("TEST_DEBUG failed: " + errstr);
+ }
+
+
+ if (timeout)
+ test_timeout_set(timeout);
+}
+
+
+void DeliveryReportCb::dr_cb(RdKafka::Message &msg) {
+ if (msg.err() != RdKafka::ERR_NO_ERROR)
+ Test::Fail(tostr() << "Delivery failed to " << msg.topic_name() << " ["
+ << msg.partition() << "]: " << msg.errstr());
+ else
+ Test::Say(3, tostr() << "Delivered to " << msg.topic_name() << " ["
+ << msg.partition() << "] @ " << msg.offset()
+ << " (timestamp " << msg.timestamp().timestamp
+ << ")\n");
+}
+}; // namespace Test
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h b/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h
new file mode 100644
index 000000000..2ecaed394
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/testcpp.h
@@ -0,0 +1,360 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _TESTCPP_H_
+#define _TESTCPP_H_
+
+#include <sstream>
+
+#include "rdkafkacpp.h"
+
+extern "C" {
+#ifdef _WIN32
+/* Win32/Visual Studio */
+#include "../src/win32_config.h"
+#include "../src/rdwin32.h"
+#else
+#include "../config.h"
+/* POSIX / UNIX based systems */
+#include "../src/rdposix.h"
+#endif
+#include "testshared.h"
+}
+
+// courtesy of
+// http://stackoverview.blogspot.se/2011/04/create-string-on-fly-just-in-one-line.html
+struct tostr {
+ std::stringstream ss;
+ template <typename T>
+ tostr &operator<<(const T &data) {
+ ss << data;
+ return *this;
+ }
+ operator std::string() {
+ return ss.str();
+ }
+};
+
+
+
+#define TestMessageVerify(testid, exp_partition, msgidp, msg) \
+ test_msg_parse00(__FUNCTION__, __LINE__, testid, exp_partition, msgidp, \
+ (msg)->topic_name().c_str(), (msg)->partition(), \
+ (msg)->offset(), (const char *)(msg)->key_pointer(), \
+ (msg)->key_len())
+
+namespace Test {
+
+/**
+ * @brief Get test config object
+ */
+
+static RD_UNUSED void Fail(std::string str) {
+ test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 1 /*now*/, "%s",
+ str.c_str());
+}
+static RD_UNUSED void FailLater(std::string str) {
+ test_fail0(__FILE__, __LINE__, "", 1 /*do-lock*/, 0 /*later*/, "%s",
+ str.c_str());
+}
+static RD_UNUSED void Skip(std::string str) {
+ test_SKIP(__FILE__, __LINE__, str.c_str());
+}
+static RD_UNUSED void Say(int level, std::string str) {
+ test_SAY(__FILE__, __LINE__, level, str.c_str());
+}
+static RD_UNUSED void Say(std::string str) {
+ Test::Say(2, str);
+}
+
+/**
+ * @brief Generate test topic name
+ */
+static RD_UNUSED std::string mk_topic_name(std::string suffix,
+ bool randomized) {
+ return test_mk_topic_name(suffix.c_str(), (int)randomized);
+}
+
+/**
+ * @brief Generate random test group name
+ */
+static RD_UNUSED std::string mk_unique_group_name(std::string suffix) {
+ return test_mk_topic_name(suffix.c_str(), 1);
+}
+
+/**
+ * @brief Create partitions
+ */
+static RD_UNUSED void create_partitions(RdKafka::Handle *use_handle,
+ const char *topicname,
+ int new_partition_cnt) {
+ rd_kafka_t *use_rk = NULL;
+ if (use_handle != NULL)
+ use_rk = use_handle->c_ptr();
+ test_create_partitions(use_rk, topicname, new_partition_cnt);
+}
+
+/**
+ * @brief Create a topic
+ */
+static RD_UNUSED void create_topic(RdKafka::Handle *use_handle,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor) {
+ rd_kafka_t *use_rk = NULL;
+ if (use_handle != NULL)
+ use_rk = use_handle->c_ptr();
+ test_create_topic(use_rk, topicname, partition_cnt, replication_factor);
+}
+
+/**
+ * @brief Delete a topic
+ */
+static RD_UNUSED void delete_topic(RdKafka::Handle *use_handle,
+ const char *topicname) {
+ rd_kafka_t *use_rk = NULL;
+ if (use_handle != NULL)
+ use_rk = use_handle->c_ptr();
+ test_delete_topic(use_rk, topicname);
+}
+
+/**
+ * @brief Get new configuration objects
+ */
+void conf_init(RdKafka::Conf **conf, RdKafka::Conf **topic_conf, int timeout);
+
+
+static RD_UNUSED void conf_set(RdKafka::Conf *conf,
+ std::string name,
+ std::string val) {
+ std::string errstr;
+ if (conf->set(name, val, errstr) != RdKafka::Conf::CONF_OK)
+ Test::Fail("Conf failed: " + errstr);
+}
+
+static RD_UNUSED void print_TopicPartitions(
+ std::string header,
+ const std::vector<RdKafka::TopicPartition *> &partitions) {
+ Test::Say(tostr() << header << ": " << partitions.size()
+ << " TopicPartition(s):\n");
+ for (unsigned int i = 0; i < partitions.size(); i++)
+ Test::Say(tostr() << " " << partitions[i]->topic() << "["
+ << partitions[i]->partition() << "] "
+ << "offset " << partitions[i]->offset() << ": "
+ << RdKafka::err2str(partitions[i]->err()) << "\n");
+}
+
+
+/* Convenience subscribe() */
+static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c,
+ const std::string &topic) {
+ Test::Say(c->name() + ": Subscribing to " + topic + "\n");
+ std::vector<std::string> topics;
+ topics.push_back(topic);
+ RdKafka::ErrorCode err;
+ if ((err = c->subscribe(topics)))
+ Test::Fail("Subscribe failed: " + RdKafka::err2str(err));
+}
+
+
+/* Convenience subscribe() to two topics */
+static RD_UNUSED void subscribe(RdKafka::KafkaConsumer *c,
+ const std::string &topic1,
+ const std::string &topic2) {
+ Test::Say(c->name() + ": Subscribing to " + topic1 + " and " + topic2 + "\n");
+ std::vector<std::string> topics;
+ topics.push_back(topic1);
+ topics.push_back(topic2);
+ RdKafka::ErrorCode err;
+ if ((err = c->subscribe(topics)))
+ Test::Fail("Subscribe failed: " + RdKafka::err2str(err));
+}
+
+/* Convenience unsubscribe() */
+static RD_UNUSED void unsubscribe(RdKafka::KafkaConsumer *c) {
+ Test::Say(c->name() + ": Unsubscribing\n");
+ RdKafka::ErrorCode err;
+ if ((err = c->unsubscribe()))
+ Test::Fail("Unsubscribe failed: " + RdKafka::err2str(err));
+}
+
+
+static RD_UNUSED void incremental_assign(
+ RdKafka::KafkaConsumer *c,
+ const std::vector<RdKafka::TopicPartition *> &parts) {
+ Test::Say(tostr() << c->name() << ": incremental assign of " << parts.size()
+ << " partition(s)\n");
+ if (test_level >= 2)
+ print_TopicPartitions("incremental_assign()", parts);
+ RdKafka::Error *error;
+ if ((error = c->incremental_assign(parts)))
+ Test::Fail(c->name() + ": Incremental assign failed: " + error->str());
+}
+
+static RD_UNUSED void incremental_unassign(
+ RdKafka::KafkaConsumer *c,
+ const std::vector<RdKafka::TopicPartition *> &parts) {
+ Test::Say(tostr() << c->name() << ": incremental unassign of " << parts.size()
+ << " partition(s)\n");
+ if (test_level >= 2)
+ print_TopicPartitions("incremental_unassign()", parts);
+ RdKafka::Error *error;
+ if ((error = c->incremental_unassign(parts)))
+ Test::Fail(c->name() + ": Incremental unassign failed: " + error->str());
+}
+
+/**
+ * @brief Wait until the current assignment size is \p partition_count.
+ * If \p topic is not NULL, then additionally, each partition in
+ * the assignment must have topic \p topic.
+ */
+static RD_UNUSED void wait_for_assignment(RdKafka::KafkaConsumer *c,
+ size_t partition_count,
+ const std::string *topic) {
+ bool done = false;
+ while (!done) {
+ RdKafka::Message *msg1 = c->consume(500);
+ delete msg1;
+
+ std::vector<RdKafka::TopicPartition *> partitions;
+ c->assignment(partitions);
+
+ if (partitions.size() == partition_count) {
+ done = true;
+ if (topic) {
+ for (size_t i = 0; i < partitions.size(); i++) {
+ if (partitions[i]->topic() != *topic) {
+ done = false;
+ break;
+ }
+ }
+ }
+ }
+
+ RdKafka::TopicPartition::destroy(partitions);
+ }
+}
+
+
+/**
+ * @brief Check current assignment has size \p partition_count
+ * If \p topic is not NULL, then additionally check that
+ * each partition in the assignment has topic \p topic.
+ */
+static RD_UNUSED void check_assignment(RdKafka::KafkaConsumer *c,
+ size_t partition_count,
+ const std::string *topic) {
+ std::vector<RdKafka::TopicPartition *> partitions;
+ c->assignment(partitions);
+ if (partition_count != partitions.size())
+ Test::Fail(tostr() << "Expecting current assignment to have size "
+ << partition_count << ", not: " << partitions.size());
+ for (size_t i = 0; i < partitions.size(); i++) {
+ if (topic != NULL) {
+ if (partitions[i]->topic() != *topic)
+ Test::Fail(tostr() << "Expecting assignment to be " << *topic
+ << ", not " << partitions[i]->topic());
+ }
+ delete partitions[i];
+ }
+}
+
+
+/**
+ * @brief Current assignment partition count. If \p topic is
+ * NULL, then the total partition count, else the number
+ * of assigned partitions from \p topic.
+ */
+static RD_UNUSED size_t assignment_partition_count(RdKafka::KafkaConsumer *c,
+ std::string *topic) {
+ std::vector<RdKafka::TopicPartition *> partitions;
+ c->assignment(partitions);
+ int cnt = 0;
+ for (size_t i = 0; i < partitions.size(); i++) {
+ if (topic == NULL || *topic == partitions[i]->topic())
+ cnt++;
+ delete partitions[i];
+ }
+ return cnt;
+}
+
+
+/**
+ * @brief Poll the consumer once, discarding the returned message
+ * or error event.
+ * @returns true if a proper event/message was seen, or false on timeout.
+ */
+static RD_UNUSED bool poll_once(RdKafka::KafkaConsumer *c, int timeout_ms) {
+ RdKafka::Message *msg = c->consume(timeout_ms);
+ bool ret = msg->err() != RdKafka::ERR__TIMED_OUT;
+ delete msg;
+ return ret;
+}
+
+
+/**
+ * @brief Produce \p msgcnt messages to \p topic \p partition.
+ */
+static RD_UNUSED void produce_msgs(RdKafka::Producer *p,
+ const std::string &topic,
+ int32_t partition,
+ int msgcnt,
+ int msgsize,
+ bool flush) {
+ char *buf = (char *)malloc(msgsize);
+
+ for (int i = 0; i < msgsize; i++)
+ buf[i] = (char)((int)'a' + (i % 26));
+
+ for (int i = 0; i < msgcnt; i++) {
+ RdKafka::ErrorCode err;
+ err = p->produce(topic, partition, RdKafka::Producer::RK_MSG_COPY,
+ (void *)buf, (size_t)msgsize, NULL, 0, 0, NULL);
+ TEST_ASSERT(!err, "produce() failed: %s", RdKafka::err2str(err).c_str());
+ p->poll(0);
+ }
+
+ free(buf);
+
+ if (flush)
+ p->flush(10 * 1000);
+}
+
+
+
+/**
+ * @brief Delivery report class
+ */
+class DeliveryReportCb : public RdKafka::DeliveryReportCb {
+ public:
+ void dr_cb(RdKafka::Message &msg);
+};
+
+static DeliveryReportCb DrCb;
+}; // namespace Test
+
+#endif /* _TESTCPP_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h b/fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h
new file mode 100644
index 000000000..efdd5d555
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/testshared.h
@@ -0,0 +1,402 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+#ifndef _TESTSHARED_H_
+#define _TESTSHARED_H_
+
+/**
+ * C variables and functions shared with C++ tests
+ */
+
+#ifndef _RDKAFKA_H_
+typedef struct rd_kafka_s rd_kafka_t;
+typedef struct rd_kafka_conf_s rd_kafka_conf_t;
+#endif
+
+/* ANSI color codes */
+#define _C_CLR "\033[0m"
+#define _C_RED "\033[31m"
+#define _C_GRN "\033[32m"
+#define _C_YEL "\033[33m"
+#define _C_BLU "\033[34m"
+#define _C_MAG "\033[35m"
+#define _C_CYA "\033[36m"
+
+
+/** Test logging level (TEST_LEVEL=.. env) */
+extern int test_level;
+
+/** Test scenario */
+extern char test_scenario[64];
+
+/** @returns the \p msecs timeout multiplied by the test timeout multiplier */
+extern int tmout_multip(int msecs);
+
+/** @brief true if tests should run in quick-mode (faster, less data) */
+extern int test_quick;
+
+/** @brief Broker version to int */
+#define TEST_BRKVER(A, B, C, D) (((A) << 24) | ((B) << 16) | ((C) << 8) | (D))
+/** @brief return single version component from int */
+#define TEST_BRKVER_X(V, I) (((V) >> (24 - ((I)*8))) & 0xff)
+
+/** @brief Topic Admin API supported by this broker version and later */
+#define TEST_BRKVER_TOPIC_ADMINAPI TEST_BRKVER(0, 10, 2, 0)
+
+extern int test_broker_version;
+extern int test_on_ci;
+
+const char *test_mk_topic_name(const char *suffix, int randomized);
+
+void test_delete_topic(rd_kafka_t *use_rk, const char *topicname);
+
+void test_create_topic(rd_kafka_t *use_rk,
+ const char *topicname,
+ int partition_cnt,
+ int replication_factor);
+
+void test_create_partitions(rd_kafka_t *use_rk,
+ const char *topicname,
+ int new_partition_cnt);
+
+void test_wait_topic_exists(rd_kafka_t *rk, const char *topic, int tmout);
+
+void test_kafka_cmd(const char *fmt, ...);
+
+uint64_t test_produce_msgs_easy_size(const char *topic,
+ uint64_t testid,
+ int32_t partition,
+ int msgcnt,
+ size_t size);
+#define test_produce_msgs_easy(topic, testid, partition, msgcnt) \
+ test_produce_msgs_easy_size(topic, testid, partition, msgcnt, 0)
+
+
+void test_fail0(const char *file,
+ int line,
+ const char *function,
+ int do_lock,
+ int fail_now,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 6, 7);
+
+
+
+void test_fail0(const char *file,
+ int line,
+ const char *function,
+ int do_lock,
+ int fail_now,
+ const char *fmt,
+ ...) RD_FORMAT(printf, 6, 7);
+
+#define TEST_FAIL0(file, line, do_lock, fail_now, ...) \
+ test_fail0(__FILE__, __LINE__, __FUNCTION__, do_lock, fail_now, \
+ __VA_ARGS__)
+
+/* Whine and abort test */
+#define TEST_FAIL(...) TEST_FAIL0(__FILE__, __LINE__, 1, 1, __VA_ARGS__)
+
+/* Whine right away, mark the test as failed, but continue the test. */
+#define TEST_FAIL_LATER(...) TEST_FAIL0(__FILE__, __LINE__, 1, 0, __VA_ARGS__)
+
+/* Whine right away, maybe mark the test as failed, but continue the test. */
+#define TEST_FAIL_LATER0(LATER, ...) \
+ TEST_FAIL0(__FILE__, __LINE__, 1, !(LATER), __VA_ARGS__)
+
+#define TEST_FAILCNT() (test_curr->failcnt)
+
+#define TEST_LATER_CHECK(...) \
+ do { \
+ if (test_curr->state == TEST_FAILED) \
+ TEST_FAIL("See previous errors. " __VA_ARGS__); \
+ } while (0)
+
+#define TEST_PERROR(call) \
+ do { \
+ if (!(call)) \
+ TEST_FAIL(#call " failed: %s", rd_strerror(errno)); \
+ } while (0)
+
+#define TEST_WARN(...) \
+ do { \
+ fprintf(stderr, \
+ "\033[33m[%-28s/%7.3fs] WARN: ", test_curr->name, \
+ test_curr->start \
+ ? ((float)(test_clock() - test_curr->start) / \
+ 1000000.0f) \
+ : 0); \
+ fprintf(stderr, __VA_ARGS__); \
+ fprintf(stderr, "\033[0m"); \
+ } while (0)
+
+/* "..." is a failure reason in printf format, include as much info as needed */
+#define TEST_ASSERT(expr, ...) \
+ do { \
+ if (!(expr)) { \
+ TEST_FAIL("Test assertion failed: \"" #expr \
+ "\": " __VA_ARGS__); \
+ } \
+ } while (0)
+
+
+/* "..." is a failure reason in printf format, include as much info as needed */
+#define TEST_ASSERT_LATER(expr, ...) \
+ do { \
+ if (!(expr)) { \
+ TEST_FAIL0(__FILE__, __LINE__, 1, 0, \
+ "Test assertion failed: \"" #expr \
+ "\": " __VA_ARGS__); \
+ } \
+ } while (0)
+
+
+void test_SAY(const char *file, int line, int level, const char *str);
+void test_SKIP(const char *file, int line, const char *str);
+
+void test_timeout_set(int timeout);
+int test_set_special_conf(const char *name, const char *val, int *timeoutp);
+char *test_conf_get(const rd_kafka_conf_t *conf, const char *name);
+const char *test_conf_get_path(void);
+const char *test_getenv(const char *env, const char *def);
+
+int test_needs_auth(void);
+
+uint64_t test_id_generate(void);
+char *test_str_id_generate(char *dest, size_t dest_size);
+const char *test_str_id_generate_tmp(void);
+
+void test_prepare_msg(uint64_t testid,
+ int32_t partition,
+ int msg_id,
+ char *val,
+ size_t val_size,
+ char *key,
+ size_t key_size);
+/**
+ * Parse a message token
+ */
+void test_msg_parse00(const char *func,
+ int line,
+ uint64_t testid,
+ int32_t exp_partition,
+ int *msgidp,
+ const char *topic,
+ int32_t partition,
+ int64_t offset,
+ const char *key,
+ size_t key_size);
+
+
+int test_check_builtin(const char *feature);
+
+/**
+ * @returns the current test's name (thread-local)
+ */
+extern const char *test_curr_name(void);
+
+#ifndef _WIN32
+#include <sys/time.h>
+#ifndef RD_UNUSED
+#define RD_UNUSED __attribute__((unused))
+#endif
+
+#else
+
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#endif
+
+#ifndef RD_UNUSED
+#define RD_UNUSED
+#endif
+
+
+/**
+ * A microsecond monotonic clock
+ */
+static RD_INLINE int64_t test_clock(void)
+#ifndef _MSC_VER
+ __attribute__((unused))
+#endif
+ ;
+static RD_INLINE int64_t test_clock(void) {
+#ifdef __APPLE__
+ /* No monotonic clock on Darwin */
+ struct timeval tv;
+ gettimeofday(&tv, NULL);
+ return ((int64_t)tv.tv_sec * 1000000LLU) + (int64_t)tv.tv_usec;
+#elif defined(_WIN32)
+ LARGE_INTEGER now;
+ static RD_TLS LARGE_INTEGER freq;
+ if (!freq.QuadPart)
+ QueryPerformanceFrequency(&freq);
+ QueryPerformanceCounter(&now);
+ return (now.QuadPart * 1000000) / freq.QuadPart;
+#else
+ struct timespec ts;
+ clock_gettime(CLOCK_MONOTONIC, &ts);
+ return ((int64_t)ts.tv_sec * 1000000LLU) +
+ ((int64_t)ts.tv_nsec / 1000LLU);
+#endif
+}
+
+
+typedef struct test_timing_s {
+ char name[450];
+ int64_t ts_start;
+ int64_t duration;
+ int64_t ts_every; /* Last every */
+} test_timing_t;
+
+/**
+ * @brief Start timing, Va-Argument is textual name (printf format)
+ */
+#define TIMING_RESTART(TIMING) \
+ do { \
+ (TIMING)->ts_start = test_clock(); \
+ (TIMING)->duration = 0; \
+ } while (0)
+
+#define TIMING_START(TIMING, ...) \
+ do { \
+ rd_snprintf((TIMING)->name, sizeof((TIMING)->name), \
+ __VA_ARGS__); \
+ TIMING_RESTART(TIMING); \
+ (TIMING)->ts_every = (TIMING)->ts_start; \
+ } while (0)
+
+#define TIMING_STOPPED(TIMING) ((TIMING)->duration != 0)
+
+#ifndef __cplusplus
+#define TIMING_STOP(TIMING) \
+ do { \
+ (TIMING)->duration = test_clock() - (TIMING)->ts_start; \
+ TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \
+ (float)(TIMING)->duration / 1000.0f); \
+ } while (0)
+#define TIMING_REPORT(TIMING) \
+ TEST_SAY("%s: duration %.3fms\n", (TIMING)->name, \
+ (float)(TIMING)->duration / 1000.0f);
+
+#else
+#define TIMING_STOP(TIMING) \
+ do { \
+ char _str[512]; \
+ (TIMING)->duration = test_clock() - (TIMING)->ts_start; \
+ rd_snprintf(_str, sizeof(_str), "%s: duration %.3fms\n", \
+ (TIMING)->name, \
+ (float)(TIMING)->duration / 1000.0f); \
+ Test::Say(_str); \
+ } while (0)
+
+#endif
+
+#define TIMING_DURATION(TIMING) \
+ ((TIMING)->duration ? (TIMING)->duration \
+ : (test_clock() - (TIMING)->ts_start))
+
+#define TIMING_ASSERT0(TIMING, DO_FAIL_LATER, TMIN_MS, TMAX_MS) \
+ do { \
+ if (!TIMING_STOPPED(TIMING)) \
+ TIMING_STOP(TIMING); \
+ int _dur_ms = (int)TIMING_DURATION(TIMING) / 1000; \
+ if (TMIN_MS <= _dur_ms && _dur_ms <= TMAX_MS) \
+ break; \
+ if (test_on_ci || strcmp(test_mode, "bare")) \
+ TEST_WARN( \
+ "%s: expected duration %d <= %d <= %d ms%s\n", \
+ (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS, \
+ ": not FAILING test on CI"); \
+ else \
+ TEST_FAIL_LATER0( \
+ DO_FAIL_LATER, \
+ "%s: expected duration %d <= %d <= %d ms", \
+ (TIMING)->name, TMIN_MS, _dur_ms, TMAX_MS); \
+ } while (0)
+
+#define TIMING_ASSERT(TIMING, TMIN_MS, TMAX_MS) \
+ TIMING_ASSERT0(TIMING, 0, TMIN_MS, TMAX_MS)
+#define TIMING_ASSERT_LATER(TIMING, TMIN_MS, TMAX_MS) \
+ TIMING_ASSERT0(TIMING, 1, TMIN_MS, TMAX_MS)
+
+/* Trigger something every US microseconds. */
+static RD_UNUSED int TIMING_EVERY(test_timing_t *timing, int us) {
+ int64_t now = test_clock();
+ if (timing->ts_every + us <= now) {
+ timing->ts_every = now;
+ return 1;
+ }
+ return 0;
+}
+
+
+/**
+ * Sub-tests
+ */
+int test_sub_start(const char *func,
+ int line,
+ int is_quick,
+ const char *fmt,
+ ...);
+void test_sub_pass(void);
+void test_sub_skip(const char *fmt, ...) RD_FORMAT(printf, 1, 2);
+
+#define SUB_TEST0(IS_QUICK, ...) \
+ do { \
+ if (!test_sub_start(__FUNCTION__, __LINE__, IS_QUICK, \
+ __VA_ARGS__)) \
+ return; \
+ } while (0)
+
+#define SUB_TEST(...) SUB_TEST0(0, "" __VA_ARGS__)
+#define SUB_TEST_QUICK(...) SUB_TEST0(1, "" __VA_ARGS__)
+#define SUB_TEST_PASS() test_sub_pass()
+#define SUB_TEST_SKIP(...) \
+ do { \
+ test_sub_skip(__VA_ARGS__); \
+ return; \
+ } while (0)
+
+
+#ifndef _WIN32
+#define rd_sleep(S) sleep(S)
+#else
+#define rd_sleep(S) Sleep((S)*1000)
+#endif
+
+/* Make sure __SANITIZE_ADDRESS__ (gcc) is defined if compiled with asan */
+#if !defined(__SANITIZE_ADDRESS__) && defined(__has_feature)
+#if __has_feature(address_sanitizer)
+#define __SANITIZE_ADDRESS__ 1
+#endif
+#endif
+
+
+int test_run_java(const char *cls, const char **argv);
+int test_waitpid(int pid);
+#endif /* _TESTSHARED_H_ */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md
new file mode 100644
index 000000000..f1ec5681b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/README.md
@@ -0,0 +1,4 @@
+# Tools
+
+Asorted librdkafka tools.
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md
new file mode 100644
index 000000000..a4ce80bd9
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/README.md
@@ -0,0 +1,21 @@
+# Stats tools
+
+These tools are suitable for parsing librdkafka's statistics
+as emitted by the `stats_cb` when `statistics.interval.ms` is set.
+
+ * [to_csv.py](to_csv.py) - selectively convert stats JSON to CSV.
+ * [graph.py](graph.py) - graph CSV files.
+ * [filter.jq](filter.jq) - basic `jq` filter.
+
+Install dependencies:
+
+ $ python3 -m pip install -r requirements.txt
+
+
+Examples:
+
+ # Extract stats json from log line (test*.csv files are created)
+ $ grep -F STATS: file.log | sed -e 's/^.*STATS: //' | ./to_csv.py test1
+
+ # Graph toppar graphs (group by partition), but skip some columns.
+ $ ./graph.py --skip '*bytes,*msg_cnt,stateage,*msgs,leader' --group-by 1partition test1_toppars.csv
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq
new file mode 100644
index 000000000..414a20697
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/filter.jq
@@ -0,0 +1,42 @@
+# Usage:
+# cat stats.json | jq -R -f filter.jq
+
+fromjson? |
+{
+ time: .time | (. - (3600*5) | strftime("%Y-%m-%d %H:%M:%S")),
+ brokers:
+ [ .brokers[] | select(.req.Produce > 0) | {
+ (.nodeid | tostring): {
+ "nodeid": .nodeid,
+ "state": .state,
+ "stateage": (.stateage/1000000.0),
+ "connects": .connects,
+ "rtt_p99": .rtt.p99,
+ "throttle": .throttle.cnt,
+ "outbuf_cnt": .outbuf_cnt,
+ "outbuf_msg_cnt": .outbuf_msg_cnt,
+ "waitresp_cnt": .waitresp_cnt,
+ "Produce": .req.Produce,
+ "Metadata": .req.Metadata,
+ "toppar_cnt": (.toppars | length)
+ }
+ }
+ ],
+
+ topics:
+ [ .topics[] | select(.batchcnt.cnt > 0) | {
+ (.topic): {
+ "batchsize_p99": .batchsize.p99,
+ "batchcnt_p99": .batchcnt.p99,
+ "toppars": (.partitions[] | {
+ (.partition | tostring): {
+ leader: .leader,
+ msgq_cnt: .msgq_cnt,
+ xmit_msgq_cnt: .xmit_msgq_cnt,
+ txmsgs: .txmsgs,
+ msgs_inflight: .msgs_inflight
+ }
+ }),
+ }
+ } ]
+} \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py
new file mode 100755
index 000000000..3eeaa1541
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/graph.py
@@ -0,0 +1,150 @@
+#!/usr/bin/env python3
+#
+# Use pandas + bokeh to create graphs/charts/plots for stats CSV (to_csv.py).
+#
+
+import os
+import pandas as pd
+from bokeh.io import curdoc
+from bokeh.models import ColumnDataSource, HoverTool
+from bokeh.plotting import figure
+from bokeh.palettes import Dark2_5 as palette
+from bokeh.models.formatters import DatetimeTickFormatter
+
+import pandas_bokeh
+import argparse
+import itertools
+from fnmatch import fnmatch
+
+datecolumn = '0time'
+
+
+if __name__ == '__main__':
+ parser = argparse.ArgumentParser(description='Graph CSV files')
+ parser.add_argument('infiles', nargs='+', type=str,
+ help='CSV files to plot.')
+ parser.add_argument('--cols', type=str,
+ help='Columns to plot (CSV list)')
+ parser.add_argument('--skip', type=str,
+ help='Columns to skip (CSV list)')
+ parser.add_argument('--group-by', type=str,
+ help='Group data series by field')
+ parser.add_argument('--chart-cols', type=int, default=3,
+ help='Number of chart columns')
+ parser.add_argument('--plot-width', type=int, default=400,
+ help='Per-plot width')
+ parser.add_argument('--plot-height', type=int, default=300,
+ help='Per-plot height')
+ parser.add_argument('--out', type=str, default='out.html',
+ help='Output file (HTML)')
+ args = parser.parse_args()
+
+ outpath = args.out
+ if args.cols is None:
+ cols = None
+ else:
+ cols = args.cols.split(',')
+ cols.append(datecolumn)
+
+ if args.skip is None:
+ assert cols is None, "--cols and --skip are mutually exclusive"
+ skip = None
+ else:
+ skip = args.skip.split(',')
+
+ group_by = args.group_by
+
+ pandas_bokeh.output_file(outpath)
+ curdoc().theme = 'dark_minimal'
+
+ figs = {}
+ plots = []
+ for infile in args.infiles:
+
+ colors = itertools.cycle(palette)
+
+ cols_to_use = cols
+
+ if skip is not None:
+ # First read available fields
+ avail_cols = list(pd.read_csv(infile, nrows=1))
+
+ cols_to_use = [c for c in avail_cols
+ if len([x for x in skip if fnmatch(c, x)]) == 0]
+
+ df = pd.read_csv(infile,
+ parse_dates=[datecolumn],
+ index_col=datecolumn,
+ usecols=cols_to_use)
+ title = os.path.basename(infile)
+ print(f"{infile}:")
+
+ if group_by is not None:
+
+ grp = df.groupby([group_by])
+
+ # Make one plot per column, skipping the index and group_by cols.
+ for col in df.keys():
+ if col in (datecolumn, group_by):
+ continue
+
+ print("col: ", col)
+
+ for _, dg in grp:
+ print(col, " dg:\n", dg.head())
+ figtitle = f"{title}: {col}"
+ p = figs.get(figtitle, None)
+ if p is None:
+ p = figure(title=f"{title}: {col}",
+ plot_width=args.plot_width,
+ plot_height=args.plot_height,
+ x_axis_type='datetime',
+ tools="hover,box_zoom,wheel_zoom," +
+ "reset,pan,poly_select,tap,save")
+ figs[figtitle] = p
+ plots.append(p)
+
+ p.add_tools(HoverTool(
+ tooltips=[
+ ("index", "$index"),
+ ("time", "@0time{%F}"),
+ ("y", "$y"),
+ ("desc", "$name"),
+ ],
+ formatters={
+ "@0time": "datetime",
+ },
+ mode='vline'))
+
+ p.xaxis.formatter = DatetimeTickFormatter(
+ minutes=['%H:%M'],
+ seconds=['%H:%M:%S'])
+
+ source = ColumnDataSource(dg)
+
+ val = dg[group_by][0]
+ for k in dg:
+ if k != col:
+ continue
+
+ p.line(x=datecolumn, y=k, source=source,
+ legend_label=f"{k}[{val}]",
+ name=f"{k}[{val}]",
+ color=next(colors))
+
+ continue
+
+ else:
+ p = df.plot_bokeh(title=title,
+ kind='line', show_figure=False)
+
+ plots.append(p)
+
+ for p in plots:
+ p.legend.click_policy = "hide"
+
+ grid = []
+ for i in range(0, len(plots), args.chart_cols):
+ grid.append(plots[i:i + args.chart_cols])
+
+ pandas_bokeh.plot_grid(grid)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt
new file mode 100644
index 000000000..1ea1d84d2
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/requirements.txt
@@ -0,0 +1,3 @@
+pandas
+pandas-bokeh
+numpy
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py
new file mode 100755
index 000000000..d5fc9b6e7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/tools/stats/to_csv.py
@@ -0,0 +1,124 @@
+#!/usr/bin/env python3
+#
+# Parse librdkafka stats JSON from stdin, one stats object per line, pick out
+# the relevant fields and emit CSV files suitable for plotting with graph.py
+#
+
+import sys
+import json
+from datetime import datetime
+from collections import OrderedDict
+
+
+def parse(linenr, string):
+ try:
+ js = json.loads(string)
+ except Exception:
+ return [], [], [], []
+
+ dt = datetime.utcfromtimestamp(js['time']).strftime('%Y-%m-%d %H:%M:%S')
+
+ top = {'0time': dt}
+ topcollect = ['msg_cnt', 'msg_size']
+ for c in topcollect:
+ top[c] = js[c]
+
+ top['msg_cnt_fill'] = (float(js['msg_cnt']) / js['msg_max']) * 100.0
+ top['msg_size_fill'] = (float(js['msg_size']) / js['msg_size_max']) * 100.0
+
+ collect = ['outbuf_cnt', 'outbuf_msg_cnt', 'tx',
+ 'waitresp_cnt', 'waitresp_msg_cnt', 'wakeups']
+
+ brokers = []
+ for b, d in js['brokers'].items():
+ if d['req']['Produce'] == 0:
+ continue
+
+ out = {'0time': dt, '1nodeid': d['nodeid']}
+ out['stateage'] = int(d['stateage'] / 1000)
+
+ for c in collect:
+ out[c] = d[c]
+
+ out['rtt_p99'] = int(d['rtt']['p99'] / 1000)
+ out['int_latency_p99'] = int(d['int_latency']['p99'] / 1000)
+ out['outbuf_latency_p99'] = int(d['outbuf_latency']['p99'] / 1000)
+ out['throttle_p99'] = d['throttle']['p99']
+ out['throttle_cnt'] = d['throttle']['cnt']
+ out['latency_p99'] = (out['int_latency_p99'] +
+ out['outbuf_latency_p99'] +
+ out['rtt_p99'])
+ out['toppars_cnt'] = len(d['toppars'])
+ out['produce_req'] = d['req']['Produce']
+
+ brokers.append(out)
+
+ tcollect = []
+ tpcollect = ['leader', 'msgq_cnt', 'msgq_bytes',
+ 'xmit_msgq_cnt', 'xmit_msgq_bytes',
+ 'txmsgs', 'txbytes', 'msgs_inflight']
+
+ topics = []
+ toppars = []
+ for t, d in js['topics'].items():
+
+ tout = {'0time': dt, '1topic': t}
+ for c in tcollect:
+ tout[c] = d[c]
+ tout['batchsize_p99'] = d['batchsize']['p99']
+ tout['batchcnt_p99'] = d['batchcnt']['p99']
+
+ for tp, d2 in d['partitions'].items():
+ if d2['txmsgs'] == 0:
+ continue
+
+ tpout = {'0time': dt, '1partition': d2['partition']}
+
+ for c in tpcollect:
+ tpout[c] = d2[c]
+
+ toppars.append(tpout)
+
+ topics.append(tout)
+
+ return [top], brokers, topics, toppars
+
+
+class CsvWriter(object):
+ def __init__(self, outpfx, name):
+ self.f = open(f"{outpfx}_{name}.csv", "w")
+ self.cnt = 0
+
+ def write(self, d):
+ od = OrderedDict(sorted(d.items()))
+ if self.cnt == 0:
+ # Write heading
+ self.f.write(','.join(od.keys()) + '\n')
+
+ self.f.write(','.join(map(str, od.values())) + '\n')
+ self.cnt += 1
+
+ def write_list(self, a_list_of_dicts):
+ for d in a_list_of_dicts:
+ self.write(d)
+
+
+out = sys.argv[1]
+
+w_top = CsvWriter(out, 'top')
+w_brokers = CsvWriter(out, 'brokers')
+w_topics = CsvWriter(out, 'topics')
+w_toppars = CsvWriter(out, 'toppars')
+
+
+for linenr, string in enumerate(sys.stdin):
+ try:
+ top, brokers, topics, toppars = parse(linenr, string)
+ except Exception as e:
+ print(f"SKIP {linenr+1}: {e}")
+ continue
+
+ w_top.write_list(top)
+ w_brokers.write_list(brokers)
+ w_topics.write_list(topics)
+ w_toppars.write_list(toppars)
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh b/fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh
new file mode 100755
index 000000000..48cbecb0c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/until-fail.sh
@@ -0,0 +1,87 @@
+#!/bin/bash
+#
+#
+# Run tests, one by one, until a failure.
+#
+# Usage:
+# ./until-fail.sh [test-runner args] [mode]
+#
+# mode := bare valgrind helgrind gdb ..
+#
+# Logs for the last test run is written to _until-fail_<PID>.log.
+#
+
+[[ -z "$DELETE_TOPICS" ]] && DELETE_TOPICS=y
+
+if [[ -z $ZK_ADDRESS ]]; then
+ ZK_ADDRESS="localhost"
+fi
+
+set -e
+set -o pipefail # to have 'run-test.sh | tee' fail if run-test.sh fails.
+
+ARGS=
+while [[ $1 == -* ]]; do
+ ARGS="$ARGS $1"
+ shift
+done
+
+modes=$*
+if [[ -z "$modes" ]]; then
+ modes="valgrind"
+fi
+
+if [[ -z "$TESTS" ]]; then
+ tests=$(echo 0???-*.c 0???-*.cpp)
+else
+ tests="$TESTS"
+fi
+
+if [[ $modes != gdb ]]; then
+ ARGS="-p1 $ARGS"
+fi
+
+LOG_FILE="_until_fail_$$.log"
+
+iter=0
+while true ; do
+ iter=$(expr $iter + 1)
+
+ for t in $tests ; do
+ # Strip everything after test number (0001-....)
+ t=$(echo $t | cut -d- -f1)
+
+ for mode in $modes ; do
+
+ echo "##################################################"
+ echo "##################################################"
+ echo "############ Test iteration $iter ################"
+ echo "############ Test $t in mode $mode ###############"
+ echo "##################################################"
+ echo "##################################################"
+
+ if [[ $t == all ]]; then
+ unset TESTS
+ else
+ export TESTS=$t
+ fi
+ (./run-test.sh $ARGS $mode 2>&1 | tee $LOG_FILE) || (echo "Failed on iteration $iter, test $t, mode $mode, logs in $LOG_FILE" ; exit 1)
+ done
+ done
+
+
+ if [[ "$DELETE_TOPICS" == "y" ]]; then
+ # Delete topics using Admin API, which is very fast
+ # leads to sub-sequent test failures because of the background
+ # deletes in Kafka still taking a long time:
+ #
+ #make delete_topics
+
+ # Delete topic-by-topic using kafka-topics for each one,
+ # very slow but topics are properly deleted before the script
+ # returns.
+ ./delete-test-topics.sh $ZK_ADDRESS || true
+ fi
+done
+
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c b/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c
new file mode 100644
index 000000000..18431ba72
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-assign_partition.c
@@ -0,0 +1,122 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2015, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include "test.h"
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafka.h" /* for Kafka driver */
+
+
+/**
+ * Consumer partition assignment test, without consumer group balancing.
+ */
+
+
+int main_0016_assign_partition(int argc, char **argv) {
+ const char *topic = test_mk_topic_name(__FUNCTION__, 1);
+ rd_kafka_t *rk_p, *rk_c;
+ rd_kafka_topic_t *rkt_p;
+ int msg_cnt = 1000;
+ int msg_base = 0;
+ int partition_cnt = 2;
+ int partition;
+ uint64_t testid;
+ rd_kafka_topic_conf_t *default_topic_conf;
+ rd_kafka_topic_partition_list_t *partitions;
+ char errstr[512];
+
+ testid = test_id_generate();
+
+ /* Produce messages */
+ rk_p = test_create_producer();
+ rkt_p = test_create_producer_topic(rk_p, topic, NULL);
+
+ for (partition = 0; partition < partition_cnt; partition++) {
+ test_produce_msgs(rk_p, rkt_p, testid, partition,
+ msg_base + (partition * msg_cnt), msg_cnt,
+ NULL, 0);
+ }
+
+ rd_kafka_topic_destroy(rkt_p);
+ rd_kafka_destroy(rk_p);
+
+
+ test_conf_init(NULL, &default_topic_conf, 0);
+ if (rd_kafka_topic_conf_set(default_topic_conf, "auto.offset.reset",
+ "smallest", errstr,
+ sizeof(errstr)) != RD_KAFKA_CONF_OK)
+ TEST_FAIL("%s\n", errstr);
+
+ rk_c =
+ test_create_consumer(topic /*group_id*/, NULL, default_topic_conf);
+
+ /* Fill in partition set */
+ partitions = rd_kafka_topic_partition_list_new(partition_cnt);
+
+ for (partition = 0; partition < partition_cnt; partition++)
+ rd_kafka_topic_partition_list_add(partitions, topic, partition);
+
+ test_consumer_assign("assign.partition", rk_c, partitions);
+
+ /* Make sure all messages are available */
+ test_consumer_poll("verify.all", rk_c, testid, partition_cnt, msg_base,
+ partition_cnt * msg_cnt);
+
+ /* Stop assignments */
+ test_consumer_unassign("unassign.partitions", rk_c);
+
+#if 0 // FIXME when get_offset() is functional
+ /* Acquire stored offsets */
+ for (partition = 0 ; partition < partition_cnt ; partition++) {
+ rd_kafka_resp_err_t err;
+ rd_kafka_topic_t *rkt_c = rd_kafka_topic_new(rk_c, topic, NULL);
+ int64_t offset;
+ test_timing_t t_offs;
+
+ TIMING_START(&t_offs, "GET.OFFSET");
+ err = rd_kafka_consumer_get_offset(rkt_c, partition,
+ &offset, 5000);
+ TIMING_STOP(&t_offs);
+ if (err)
+ TEST_FAIL("Failed to get offsets for %s [%"PRId32"]: "
+ "%s\n",
+ rd_kafka_topic_name(rkt_c), partition,
+ rd_kafka_err2str(err));
+ TEST_SAY("get_offset for %s [%"PRId32"] returned %"PRId64"\n",
+ rd_kafka_topic_name(rkt_c), partition, offset);
+
+ rd_kafka_topic_destroy(rkt_c);
+ }
+#endif
+ test_consumer_close(rk_c);
+
+ rd_kafka_destroy(rk_c);
+
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp b/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp
new file mode 100644
index 000000000..00c31bc82
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/tests/xxxx-metadata.cpp
@@ -0,0 +1,159 @@
+/*
+ * librdkafka - Apache Kafka C library
+ *
+ * Copyright (c) 2012-2014, Magnus Edenhill
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+/**
+ * - Generate unique topic name (there is a C function for that in test.h wihch
+ * you should use)
+ * - Query metadata for that topic
+ * - Wait one second
+ * - Query again, it should now have isrs and everything
+ * Note: The test require auto.create.topics.enable = true in kafka server
+ * properties.
+ */
+
+
+#define _GNU_SOURCE
+#include <sys/time.h>
+#include <time.h>
+#include <string>
+#include <sstream>
+#include <iostream>
+
+
+extern "C" {
+#include "test.h"
+}
+
+/* Typical include path would be <librdkafka/rdkafka.h>, but this program
+ * is built from within the librdkafka source tree and thus differs. */
+#include "rdkafkacpp.h" /* for Kafka driver */
+
+/**
+ * Generate unique topic name (there is a C function for that in test.h wihch
+ * you should use) Query metadata for that topic Wait one second Query again, it
+ * should now have isrs and everything
+ */
+static void test_metadata_cpp(void) {
+ RdKafka::Conf *conf = RdKafka::Conf::create(
+ RdKafka::Conf::CONF_GLOBAL); /* @TODO: Do we need to merge with C
+ test_conf_init()? */
+ RdKafka::Conf *tconf = RdKafka::Conf::create(
+ RdKafka::Conf::CONF_TOPIC); /* @TODO: Same of prev */
+
+ RdKafka::Metadata *metadata;
+ RdKafka::ErrorCode err;
+ int msgcnt = test_on_ci ? 1000 : 10000;
+ int partition_cnt = 2;
+ int i;
+ uint64_t testid;
+ int msg_base = 0;
+ std::string errstr;
+ const char *topic_str = test_mk_topic_name("0013", 1);
+ /* if(!topic){
+ TEST_FAIL()
+ }*/
+
+ // const RdKafka::Conf::ConfResult confResult =
+ // conf->set("debug","all",errstr); if(confResult != RdKafka::Conf::CONF_OK){
+ // std::stringstream errstring;
+ // errstring << "Can't set config" << errstr;
+ // TEST_FAIL(errstring.str().c_str());
+ //}
+
+ TEST_SAY("Topic %s.\n", topic_str);
+
+ const RdKafka::Conf::ConfResult confBrokerResult =
+ conf->set("metadata.broker.list", "localhost:9092", errstr);
+ if (confBrokerResult != RdKafka::Conf::CONF_OK) {
+ std::stringstream errstring;
+ errstring << "Can't set broker" << errstr;
+ TEST_FAIL(errstring.str().c_str());
+ }
+
+ /* Create a producer to fetch metadata */
+ RdKafka::Producer *producer = RdKafka::Producer::create(conf, errstr);
+ if (!producer) {
+ std::stringstream errstring;
+ errstring << "Can't create producer" << errstr;
+ TEST_FAIL(errstring.str().c_str());
+ }
+
+ /*
+ * Create topic handle.
+ */
+ RdKafka::Topic *topic = NULL;
+ topic = RdKafka::Topic::create(producer, topic_str, tconf, errstr);
+ if (!topic) {
+ std::stringstream errstring;
+ errstring << "Can't create topic" << errstr;
+ exit(1);
+ }
+
+ /* First request of metadata: It have to fail */
+ err = producer->metadata(topic != NULL, topic, &metadata, 5000);
+ if (err != RdKafka::ERR_NO_ERROR) {
+ std::stringstream errstring;
+ errstring << "Can't request first metadata: " << errstr;
+ TEST_FAIL(errstring.str().c_str());
+ }
+
+ /* It's a new topic, it should have no partitions */
+ if (metadata->topics()->at(0)->partitions()->size() != 0) {
+ TEST_FAIL("ISRS != 0");
+ }
+
+ sleep(1);
+
+ /* Second request of metadata: It have to success */
+ err = producer->metadata(topic != NULL, topic, &metadata, 5000);
+
+ /* It should have now partitions */
+ if (metadata->topics()->at(0)->partitions()->size() == 0) {
+ TEST_FAIL("ISRS == 0");
+ }
+
+
+ delete topic;
+ delete producer;
+ delete tconf;
+ delete conf;
+
+ /* Wait for everything to be cleaned up since broker destroys are
+ * handled in its own thread. */
+ test_wait_exit(10);
+
+ /* If we havent failed at this point then
+ * there were no threads leaked */
+ return;
+}
+
+int main(int argc, char **argv) {
+ test_conf_init(NULL, NULL, 20);
+ test_metadata_cpp();
+ return 0;
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/vcpkg.json b/fluent-bit/lib/librdkafka-2.1.0/vcpkg.json
new file mode 100644
index 000000000..5e446107a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/vcpkg.json
@@ -0,0 +1,23 @@
+{
+ "name": "librdkafka",
+ "version": "2.1.0",
+ "dependencies": [
+ {
+ "name": "zstd",
+ "version>=": "1.5.2"
+ },
+ {
+ "name": "zlib",
+ "version>=": "1.2.13"
+ },
+ {
+ "name": "openssl",
+ "version>=": "3.0.8"
+ },
+ {
+ "name": "curl",
+ "version>=": "7.86.0"
+ }
+ ],
+ "builtin-baseline": "56765209ec0e92c58a5fd91aa09c46a16d660026"
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/win32/.gitignore
new file mode 100644
index 000000000..6b56d66f1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/.gitignore
@@ -0,0 +1,109 @@
+## Ignore Visual Studio temporary files, build results, and
+## files generated by popular Visual Studio add-ons.
+
+# User-specific files
+*.suo
+*.user
+*.userosscache
+*.sln.docstates
+
+# User-specific files (MonoDevelop/Xamarin Studio)
+*.userprefs
+
+# Build results
+[Dd]ebug/
+[Dd]ebugPublic/
+[Rr]elease/
+[Rr]eleases/
+x64/
+x86/
+build/
+bld/
+[Bb]in/
+[Oo]bj/
+
+# Visual Studo 2015 cache/options directory
+.vs/
+*.opendb
+
+# MSTest test Results
+[Tt]est[Rr]esult*/
+[Bb]uild[Ll]og.*
+
+# NUNIT
+*.VisualState.xml
+TestResult.xml
+
+# Build Results of an ATL Project
+[Dd]ebugPS/
+[Rr]eleasePS/
+dlldata.c
+
+*_i.c
+*_p.c
+*_i.h
+*.ilk
+*.meta
+*.obj
+*.pch
+*.pdb
+*.pgc
+*.pgd
+*.rsp
+*.sbr
+*.tlb
+*.tli
+*.tlh
+*.tmp
+*.tmp_proj
+*.log
+*.vspscc
+*.vssscc
+.builds
+*.pidb
+*.svclog
+*.scc
+
+# Visual C++ cache files
+ipch/
+*.aps
+*.ncb
+*.opensdf
+*.sdf
+*.cachefile
+
+# Visual Studio profiler
+*.psess
+*.vsp
+*.vspx
+
+# NuGet
+packages/*
+!packages/repositories.config
+
+# Installshield output folder
+[Ee]xpress/
+
+# Others
+*.[Cc]ache
+ClientBin/
+[Ss]tyle[Cc]op.*
+~$*
+*~
+*.dbmdl
+*.dbproj.schemaview
+*.pfx
+*.publishsettings
+node_modules/
+bower_components/
+
+*.filters
+*.tlog
+*.db
+*.opendb
+*.idb
+*.nupkg
+intdir
+outdir
+interim
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/README.md b/fluent-bit/lib/librdkafka-2.1.0/win32/README.md
new file mode 100644
index 000000000..4c52a9ec7
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/README.md
@@ -0,0 +1,5 @@
+# Build guide for Windows
+
+* build.bat - Build for all combos of: Win32,x64,Release,Debug using the current msbuild toolset
+* package-zip.ps1 - Build zip package (using build.bat artifacts)
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/build-package.bat b/fluent-bit/lib/librdkafka-2.1.0/win32/build-package.bat
new file mode 100644
index 000000000..3a2b2a20d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/build-package.bat
@@ -0,0 +1,3 @@
+
+powershell "%CD%\package-nuget.ps1"
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/build.bat b/fluent-bit/lib/librdkafka-2.1.0/win32/build.bat
new file mode 100644
index 000000000..cb1870f7f
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/build.bat
@@ -0,0 +1,19 @@
+@echo off
+
+SET TOOLCHAIN=v140
+
+FOR %%C IN (Debug,Release) DO (
+ FOR %%P IN (Win32,x64) DO (
+ @echo Building %%C %%P
+ msbuild librdkafka.sln /p:Configuration=%%C /p:Platform=%%P /target:Clean
+ msbuild librdkafka.sln /p:Configuration=%%C /p:Platform=%%P || goto :error
+
+
+ )
+)
+
+exit /b 0
+
+:error
+echo "Build failed"
+exit /b 1
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/common.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/common.vcxproj
new file mode 100644
index 000000000..850602c34
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/common.vcxproj
@@ -0,0 +1,84 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemGroup Label="ProjectConfigurations">
+ <ProjectConfiguration Include="Debug|Win32">
+ <Configuration>Debug</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Debug|x64">
+ <Configuration>Debug</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|Win32">
+ <Configuration>Release</Configuration>
+ <Platform>Win32</Platform>
+ </ProjectConfiguration>
+ <ProjectConfiguration Include="Release|x64">
+ <Configuration>Release</Configuration>
+ <Platform>x64</Platform>
+ </ProjectConfiguration>
+ </ItemGroup>
+
+ <PropertyGroup>
+ <!-- Assume Visual Studio 2013 / 12.0 as the default -->
+ <VisualStudioVersion Condition="'$(VisualStudioVersion)' == ''">12.0</VisualStudioVersion>
+ </PropertyGroup>
+ <!-- Visual Studio 2013 (12.0) -->
+ <PropertyGroup Condition="'$(VisualStudioVersion)' == '12.0'">
+ <PlatformToolset>v120</PlatformToolset>
+ </PropertyGroup>
+ <!-- Visual Studio 2015 (14.0) -->
+ <PropertyGroup Condition="'$(VisualStudioVersion)' == '14.0'">
+ <PlatformToolset>v140</PlatformToolset>
+ </PropertyGroup>
+ <!-- Visual Studio 2017 (15.0) -->
+ <PropertyGroup Condition="'$(VisualStudioVersion)' == '15.0'">
+ <PlatformToolset>v141</PlatformToolset>
+ </PropertyGroup>
+ <!-- Visual Studio 2019 (16.0) -->
+ <PropertyGroup Condition="'$(VisualStudioVersion)' == '16.0'">
+ <PlatformToolset>v142</PlatformToolset>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)'=='Debug'" Label="Configuration">
+ <UseDebugLibraries>true</UseDebugLibraries>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)'=='Release'" Label="Configuration">
+ <UseDebugLibraries>false</UseDebugLibraries>
+ </PropertyGroup>
+ <PropertyGroup Label="Configuration">
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup>
+ <BuildOutputDir>$(SolutionDir)\outdir\$(PlatformToolSet)\$(Platform)\$(Configuration)\</BuildOutputDir>
+ <BuildIntDir>interim\$(PlatformToolSet)\$(Platform)\$(Configuration)\</BuildIntDir>
+ </PropertyGroup>
+
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.Default.props" />
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.props" />
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="PropertySheets">
+ <Import Project="$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props" Condition="exists('$(UserRootDir)\Microsoft.Cpp.$(Platform).user.props')" Label="LocalAppDataPlatform" />
+ </ImportGroup>
+
+
+ <PropertyGroup Label="Configuration">
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ </PropertyGroup>
+
+ <PropertyGroup>
+ <OutDir>$(BuildOutputDir)</OutDir>
+ <IntDir>$(BuildIntDir)</IntDir>
+ </PropertyGroup>
+
+ <PropertyGroup Condition="'$(Configuration)'=='Release'" Label="Configuration">
+ <LinkIncremental>false</LinkIncremental>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ </PropertyGroup>
+
+ <PropertyGroup Condition="'$(Configuration)'=='Debug'" Label="Configuration">
+ <LinkIncremental>true</LinkIncremental>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ </PropertyGroup>
+
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/install-openssl.ps1 b/fluent-bit/lib/librdkafka-2.1.0/win32/install-openssl.ps1
new file mode 100644
index 000000000..d4724ffe1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/install-openssl.ps1
@@ -0,0 +1,33 @@
+$OpenSSLVersion = "1_1_1k"
+$OpenSSLExe = "OpenSSL-$OpenSSLVersion.exe"
+
+if (!(Test-Path("C:\OpenSSL-Win32"))) {
+ instDir = "C:\OpenSSL-Win32"
+ $exeFull = "Win32$OpenSSLExe"
+ $exePath = "$($env:USERPROFILE)\$exeFull"
+
+ Write-Host "Downloading and installing OpenSSL v1.1 32-bit ..." -ForegroundColor Cyan
+ (New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/$exeFull', $exePath)
+
+ Write-Host "Installing to $instDir..."
+ cmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=$instDir
+ Write-Host "Installed" -ForegroundColor Green
+} else {
+ echo "OpenSSL-Win32 already exists: not downloading"
+}
+
+
+if (!(Test-Path("C:\OpenSSL-Win64"))) {
+ instDir = "C:\OpenSSL-Win64"
+ $exeFull = "Win64$OpenSSLExe"
+ $exePath = "$($env:USERPROFILE)\$exeFull"
+
+ Write-Host "Downloading and installing OpenSSL v1.1 64-bit ..." -ForegroundColor Cyan
+ (New-Object Net.WebClient).DownloadFile('https://slproweb.com/download/$exeFull', $exePath)
+
+ Write-Host "Installing to $instDir..."
+ cmd /c start /wait $exePath /silent /verysilent /sp- /suppressmsgboxes /DIR=$instDir
+ Write-Host "Installed" -ForegroundColor Green
+} else {
+ echo "OpenSSL-Win64 already exists: not downloading"
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/interceptor_test/interceptor_test.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/interceptor_test/interceptor_test.vcxproj
new file mode 100644
index 000000000..e6828b2aa
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/interceptor_test/interceptor_test.vcxproj
@@ -0,0 +1,87 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{492CF5A9-EBF5-494E-8F71-B9B262C4D220}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>interceptor_test</RootNamespace>
+ <ProjectName>interceptor_test</ProjectName>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <PropertyGroup Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ </PropertyGroup>
+ <Import Project="$(SolutionDir)common.vcxproj" />
+ <PropertyGroup Label="UserMacros" />
+ <ItemDefinitionGroup>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>librdkafka.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\tests\interceptor_test\interceptor_test.c" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.autopkg.template b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.autopkg.template
new file mode 100644
index 000000000..5ad8b1026
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.autopkg.template
@@ -0,0 +1,55 @@
+configurations {
+ Toolset {
+ key : "PlatformToolset";
+ choices: { v120, v140, v142 };
+
+ // Explicitly Not including pivot variants: "WindowsKernelModeDriver8.0", "WindowsApplicationForDrivers8.0", "WindowsUserModeDriver8.0"
+
+ // We're normalizing out the concept of the v140 platform -- Overloading the $(PlatformToolset) variable for additional pivots was a dumb idea.
+ v140.condition = "( $(PlatformToolset.ToLower().IndexOf('v140')) > -1 Or '$(PlatformToolset.ToLower())' == 'windowskernelmodedriver8.0' Or '$(PlatformToolset.ToLower())' == 'windowsapplicationfordrivers8.0' Or '$(PlatformToolset.ToLower())' == 'windowsusermodedriver8.0' )";
+ };
+ };
+
+nuget {
+ nuspec {
+ id = librdkafka;
+ // "@version" is replaced by the current Appveyor build number in the
+ // pre-deployment script.
+ version : @version;
+ title: "librdkafka";
+ authors: {Magnus Edenhill, edenhill};
+ owners: {Magnus Edenhill, edenhill};
+ licenseUrl: "https://github.com/edenhill/librdkafka/blob/master/LICENSES.txt";
+ projectUrl: "https://github.com/edenhill/librdkafka";
+ requireLicenseAcceptance: false;
+ summary: "The Apache Kafka C/C++ client library";
+ description:"The Apache Kafka C/C++ client library";
+ releaseNotes: "Release of librdkafka";
+ copyright: "Copyright 2012-2022";
+ tags: { native, kafka, librdkafka, C, C++ };
+ };
+
+ files {
+ #defines {
+ TOPDIR = ..\;
+ };
+ nestedInclude: {
+ #destination = ${d_include}librdkafka;
+ ${TOPDIR}src\rdkafka.h, ${TOPDIR}src\rdkafka_mock.h, ${TOPDIR}src-cpp\rdkafkacpp.h
+ };
+ docs: { ${TOPDIR}README.md, ${TOPDIR}CONFIGURATION.md, ${TOPDIR}LICENSES.txt };
+
+ ("v120,v140,v142", "Win32,x64", "Release,Debug") => {
+ [${0},${1},${2}] {
+ lib: { outdir\${0}\${1}\${2}\librdkafka*.lib };
+ symbols: { outdir\${0}\${1}\${2}\librdkafka*.pdb };
+ bin: { outdir\${0}\${1}\${2}\*.dll };
+ };
+ };
+
+ };
+
+ targets {
+ Defines += HAS_LIBRDKAFKA;
+ };
+}; \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.master.testing.targets b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.master.testing.targets
new file mode 100644
index 000000000..94372cef4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.master.testing.targets
@@ -0,0 +1,13 @@
+<Project ToolsVersion="4.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <ItemDefinitionGroup>
+ <Link>
+ <AdditionalDependencies>$(MSBuildThisFileDirectory)..\..\package-win\runtimes\$(Configuration)\win-$(Platform)\native\librdkafka.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ <ClCompile>
+ <AdditionalIncludeDirectories>$(MSBuildThisFileDirectory)include;%(AdditionalIncludeDirectories)</AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ReferenceCopyLocalPaths Include="$(MSBuildThisFileDirectory)..\..\package-win\runtimes\$(Configuration)\win-$(Platform)\librdkafka.dll" />
+ </ItemGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.sln b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.sln
new file mode 100644
index 000000000..614396ed4
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.sln
@@ -0,0 +1,226 @@
+Microsoft Visual Studio Solution File, Format Version 12.00
+# Visual Studio Version 16
+VisualStudioVersion = 16.0.31112.23
+MinimumVisualStudioVersion = 10.0.40219.1
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "librdkafka", "librdkafka.vcxproj", "{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}"
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "librdkafkacpp", "librdkafkacpp\librdkafkacpp.vcxproj", "{E9641737-EE62-4EC8-88C8-792D2E3CE32D}"
+ ProjectSection(ProjectDependencies) = postProject
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "tests", "tests\tests.vcxproj", "{BE4E1264-5D13-423D-8191-71F7041459E7}"
+ ProjectSection(ProjectDependencies) = postProject
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_example", "rdkafka_example\rdkafka_example.vcxproj", "{84585784-5BDC-43BE-B714-23EA2E7AEA5B}"
+ ProjectSection(ProjectDependencies) = postProject
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
+ EndProjectSection
+EndProject
+Project("{2150E333-8FDC-42A3-9474-1A3956D46DE8}") = "Solution Items", "Solution Items", "{AE17F6C0-6C4D-4E92-A04D-48214C70D1AC}"
+ ProjectSection(SolutionItems) = preProject
+ librdkafka.autopkg = librdkafka.autopkg
+ librdkafka.nuspec = librdkafka.nuspec
+ librdkafka.testing.targets = librdkafka.testing.targets
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_complex_consumer_example_cpp", "rdkafka_complex_consumer_example_cpp\rdkafka_complex_consumer_example_cpp.vcxproj", "{88B682AB-5082-49D5-A672-9904C5F43ABB}"
+ ProjectSection(ProjectDependencies) = postProject
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "rdkafka_performance", "rdkafka_performance\rdkafka_performance.vcxproj", "{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}"
+ ProjectSection(ProjectDependencies) = postProject
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "interceptor_test", "interceptor_test\interceptor_test.vcxproj", "{492CF5A9-EBF5-494E-8F71-B9B262C4D220}"
+ ProjectSection(ProjectDependencies) = postProject
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54} = {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "win_ssl_cert_store", "win_ssl_cert_store\win_ssl_cert_store.vcxproj", "{1A64A271-4840-4686-9F6F-F5AF0F7C385A}"
+ ProjectSection(ProjectDependencies) = postProject
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
+ EndProjectSection
+EndProject
+Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "openssl_engine_example", "openssl_engine_example\openssl_engine_example.vcxproj", "{A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}"
+ ProjectSection(ProjectDependencies) = postProject
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D} = {E9641737-EE62-4EC8-88C8-792D2E3CE32D}
+ EndProjectSection
+EndProject
+Global
+ GlobalSection(SolutionConfigurationPlatforms) = preSolution
+ Debug|Any CPU = Debug|Any CPU
+ Debug|Mixed Platforms = Debug|Mixed Platforms
+ Debug|Win32 = Debug|Win32
+ Debug|x64 = Debug|x64
+ Debug|x86 = Debug|x86
+ Release|Any CPU = Release|Any CPU
+ Release|Mixed Platforms = Release|Mixed Platforms
+ Release|Win32 = Release|Win32
+ Release|x64 = Release|x64
+ Release|x86 = Release|x86
+ EndGlobalSection
+ GlobalSection(ProjectConfigurationPlatforms) = postSolution
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Win32.ActiveCfg = Debug|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|Win32.Build.0 = Debug|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|x64.ActiveCfg = Debug|x64
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|x64.Build.0 = Debug|x64
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Debug|x86.ActiveCfg = Debug|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Any CPU.ActiveCfg = Release|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Win32.ActiveCfg = Release|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|Win32.Build.0 = Release|Win32
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|x64.ActiveCfg = Release|x64
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|x64.Build.0 = Release|x64
+ {4BEBB59C-477B-4F7A-8AE8-4228D0861E54}.Release|x86.ActiveCfg = Release|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Win32.ActiveCfg = Debug|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|Win32.Build.0 = Debug|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|x64.ActiveCfg = Debug|x64
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|x64.Build.0 = Debug|x64
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Debug|x86.ActiveCfg = Debug|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Any CPU.ActiveCfg = Release|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Win32.ActiveCfg = Release|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|Win32.Build.0 = Release|Win32
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|x64.ActiveCfg = Release|x64
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|x64.Build.0 = Release|x64
+ {E9641737-EE62-4EC8-88C8-792D2E3CE32D}.Release|x86.ActiveCfg = Release|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Win32.ActiveCfg = Debug|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|Win32.Build.0 = Debug|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|x64.ActiveCfg = Debug|x64
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|x64.Build.0 = Debug|x64
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Debug|x86.ActiveCfg = Debug|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Any CPU.ActiveCfg = Release|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Win32.ActiveCfg = Release|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|Win32.Build.0 = Release|Win32
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|x64.ActiveCfg = Release|x64
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|x64.Build.0 = Release|x64
+ {BE4E1264-5D13-423D-8191-71F7041459E7}.Release|x86.ActiveCfg = Release|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Win32.ActiveCfg = Debug|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|Win32.Build.0 = Debug|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|x64.ActiveCfg = Debug|x64
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|x64.Build.0 = Debug|x64
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Debug|x86.ActiveCfg = Debug|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Any CPU.ActiveCfg = Release|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Win32.ActiveCfg = Release|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|Win32.Build.0 = Release|Win32
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|x64.ActiveCfg = Release|x64
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|x64.Build.0 = Release|x64
+ {84585784-5BDC-43BE-B714-23EA2E7AEA5B}.Release|x86.ActiveCfg = Release|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Win32.ActiveCfg = Debug|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|Win32.Build.0 = Debug|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|x64.ActiveCfg = Debug|x64
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|x64.Build.0 = Debug|x64
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Debug|x86.ActiveCfg = Debug|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Any CPU.ActiveCfg = Release|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Win32.ActiveCfg = Release|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|Win32.Build.0 = Release|Win32
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|x64.ActiveCfg = Release|x64
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|x64.Build.0 = Release|x64
+ {88B682AB-5082-49D5-A672-9904C5F43ABB}.Release|x86.ActiveCfg = Release|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Win32.ActiveCfg = Debug|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|Win32.Build.0 = Debug|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x64.ActiveCfg = Debug|x64
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x64.Build.0 = Debug|x64
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x86.ActiveCfg = Debug|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Debug|x86.Build.0 = Debug|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Any CPU.ActiveCfg = Release|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Win32.ActiveCfg = Release|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|Win32.Build.0 = Release|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x64.ActiveCfg = Release|x64
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x64.Build.0 = Release|x64
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x86.ActiveCfg = Release|Win32
+ {82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}.Release|x86.Build.0 = Release|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|Win32.ActiveCfg = Debug|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|x64.ActiveCfg = Debug|x64
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Debug|x86.ActiveCfg = Debug|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Any CPU.ActiveCfg = Release|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|Win32.ActiveCfg = Release|Win32
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|x64.ActiveCfg = Release|x64
+ {492CF5A9-EBF5-494E-8F71-B9B262C4D220}.Release|x86.ActiveCfg = Release|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|Win32.ActiveCfg = Debug|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|Win32.Build.0 = Debug|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|x64.ActiveCfg = Debug|x64
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|x64.Build.0 = Debug|x64
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|x86.ActiveCfg = Debug|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Debug|x86.Build.0 = Debug|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|Any CPU.ActiveCfg = Release|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|Win32.ActiveCfg = Release|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|Win32.Build.0 = Release|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|x64.ActiveCfg = Release|x64
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|x64.Build.0 = Release|x64
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|x86.ActiveCfg = Release|Win32
+ {1A64A271-4840-4686-9F6F-F5AF0F7C385A}.Release|x86.Build.0 = Release|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Any CPU.ActiveCfg = Debug|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Mixed Platforms.ActiveCfg = Debug|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Mixed Platforms.Build.0 = Debug|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Win32.ActiveCfg = Debug|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|Win32.Build.0 = Debug|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x64.ActiveCfg = Debug|x64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x64.Build.0 = Debug|x64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|arm64.ActiveCfg = Debug|arm64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|arm64.Build.0 = Debug|arm64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x86.ActiveCfg = Debug|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Debug|x86.Build.0 = Debug|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Any CPU.ActiveCfg = Release|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Mixed Platforms.ActiveCfg = Release|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Mixed Platforms.Build.0 = Release|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Win32.ActiveCfg = Release|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|Win32.Build.0 = Release|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x64.ActiveCfg = Release|x64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x64.Build.0 = Release|x64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|arm64.ActiveCfg = Release|arm64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|arm64.Build.0 = Release|arm64
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x86.ActiveCfg = Release|Win32
+ {A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}.Release|x86.Build.0 = Release|Win32
+ EndGlobalSection
+ GlobalSection(SolutionProperties) = preSolution
+ HideSolutionNode = FALSE
+ EndGlobalSection
+ GlobalSection(ExtensibilityGlobals) = postSolution
+ SolutionGuid = {C6FC23A9-9ED2-4E8F-AC27-BF023227C588}
+ EndGlobalSection
+EndGlobal
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.vcxproj
new file mode 100644
index 000000000..2735fca9c
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafka.vcxproj
@@ -0,0 +1,258 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{4BEBB59C-477B-4F7A-8AE8-4228D0861E54}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>librdkafka</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <PropertyGroup Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ </PropertyGroup>
+ <Import Project="$(SolutionDir)common.vcxproj" />
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Platform)'=='Win32'">
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath)</IncludePath>
+ <LibraryPath>$(VC_LibraryPath_x86);$(WindowsSDK_LibraryPath_x86)</LibraryPath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Platform)'=='x64'">
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath)</IncludePath>
+ <LibraryPath>$(VC_LibraryPath_x64);$(WindowsSDK_LibraryPath_x64)</LibraryPath>
+ </PropertyGroup>
+ <PropertyGroup Label="Vcpkg">
+ <VcpkgEnableManifest>true</VcpkgEnableManifest>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <InlineFunctionExpansion>Default</InlineFunctionExpansion>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
+ <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <InlineFunctionExpansion>Default</InlineFunctionExpansion>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <FavorSizeOrSpeed>Speed</FavorSizeOrSpeed>
+ <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
+ <RuntimeLibrary>MultiThreadedDebug</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalOptions>/SAFESEH:NO</AdditionalOptions>
+ <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKA_EXPORTS;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <RuntimeLibrary>MultiThreaded</RuntimeLibrary>
+ </ClCompile>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <AdditionalDependencies>kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClInclude Include="..\src\cJSON.h" />
+ <ClInclude Include="..\src\crc32c.h" />
+ <ClInclude Include="..\src\queue.h" />
+ <ClInclude Include="..\src\rdatomic.h" />
+ <ClInclude Include="..\src\rdavg.h" />
+ <ClInclude Include="..\src\rdbuf.h" />
+ <ClInclude Include="..\src\rdendian.h" />
+ <ClInclude Include="..\src\rdfloat.h" />
+ <ClInclude Include="..\src\rdgz.h" />
+ <ClInclude Include="..\src\rdinterval.h" />
+ <ClInclude Include="..\src\rdkafka_admin.h" />
+ <ClInclude Include="..\src\rdkafka_assignor.h" />
+ <ClInclude Include="..\src\rdkafka_buf.h" />
+ <ClInclude Include="..\src\rdkafka_cgrp.h" />
+ <ClInclude Include="..\src\rdkafka_conf.h" />
+ <ClInclude Include="..\src\rdkafka_confval.h" />
+ <ClInclude Include="..\src\rdkafka_event.h" />
+ <ClInclude Include="..\src\rdkafka_feature.h" />
+ <ClInclude Include="..\src\rdhttp.h" />
+ <ClInclude Include="..\src\rdkafka_lz4.h" />
+ <ClInclude Include="..\src\rdkafka_mock.h" />
+ <ClInclude Include="..\src\rdkafka_mock_int.h" />
+ <ClInclude Include="..\src\rdkafka_error.h" />
+ <ClInclude Include="..\src\rdkafka_msgset.h" />
+ <ClInclude Include="..\src\rdkafka_op.h" />
+ <ClInclude Include="..\src\rdkafka_partition.h" />
+ <ClInclude Include="..\src\rdkafka_pattern.h" />
+ <ClInclude Include="..\src\rdkafka_queue.h" />
+ <ClInclude Include="..\src\rdkafka_request.h" />
+ <ClInclude Include="..\src\rdkafka_sasl.h" />
+ <ClInclude Include="..\src\rdkafka_sasl_int.h" />
+ <ClInclude Include="..\src\rdkafka_sasl_oauthbearer_oidc.h" />
+ <ClInclude Include="..\src\rdkafka_transport_int.h" />
+ <ClInclude Include="..\src\rdlist.h" />
+ <ClInclude Include="..\src\rdposix.h" />
+ <ClInclude Include="..\src\rd.h" />
+ <ClInclude Include="..\src\rdaddr.h" />
+ <ClInclude Include="..\src\rdcrc32.h" />
+ <ClInclude Include="..\src\rdkafka.h" />
+ <ClInclude Include="..\src\rdkafka_broker.h" />
+ <ClInclude Include="..\src\rdkafka_int.h" />
+ <ClInclude Include="..\src\rdkafka_msg.h" />
+ <ClInclude Include="..\src\rdkafka_offset.h" />
+ <ClInclude Include="..\src\rdkafka_proto.h" />
+ <ClInclude Include="..\src\rdkafka_timer.h" />
+ <ClInclude Include="..\src\rdkafka_topic.h" />
+ <ClInclude Include="..\src\rdkafka_transport.h" />
+ <ClInclude Include="..\src\rdkafka_ssl.h" />
+ <ClInclude Include="..\src\rdkafka_cert.h" />
+ <ClInclude Include="..\src\rdkafka_metadata.h" />
+ <ClInclude Include="..\src\rdkafka_interceptor.h" />
+ <ClInclude Include="..\src\rdkafka_plugin.h" />
+ <ClInclude Include="..\src\rdkafka_header.h" />
+ <ClInclude Include="..\src\rdlog.h" />
+ <ClInclude Include="..\src\rdstring.h" />
+ <ClInclude Include="..\src\rdrand.h" />
+ <ClInclude Include="..\src\rdsysqueue.h" />
+ <ClInclude Include="..\src\rdtime.h" />
+ <ClInclude Include="..\src\rdtypes.h" />
+ <ClInclude Include="..\src\rdregex.h" />
+ <ClInclude Include="..\src\rdunittest.h" />
+ <ClInclude Include="..\src\rdvarint.h" />
+ <ClInclude Include="..\src\snappy.h" />
+ <ClInclude Include="..\src\snappy_compat.h" />
+ <ClInclude Include="..\src\tinycthread.h" />
+ <ClInclude Include="..\src\tinycthread_extra.h" />
+ <ClInclude Include="..\src\rdwin32.h" />
+ <ClInclude Include="..\src\win32_config.h" />
+ <ClInclude Include="..\src\regexp.h" />
+ <ClInclude Include="..\src\rdavl.h" />
+ <ClInclude Include="..\src\rdports.h" />
+ <ClInclude Include="..\src\rddl.h" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClCompile Include="..\src\cJSON.c" />
+ <ClCompile Include="..\src\crc32c.c" />
+ <ClCompile Include="..\src\rdaddr.c" />
+ <ClCompile Include="..\src\rdbuf.c" />
+ <ClCompile Include="..\src\rdcrc32.c" />
+ <ClCompile Include="..\src\rdfnv1a.c" />
+ <ClCompile Include="..\src\rdgz.c" />
+ <ClCompile Include="..\src\rdhdrhistogram.c" />
+ <ClCompile Include="..\src\rdkafka.c" />
+ <ClCompile Include="..\src\rdkafka_assignor.c" />
+ <ClCompile Include="..\src\rdkafka_broker.c" />
+ <ClCompile Include="..\src\rdkafka_cgrp.c" />
+ <ClCompile Include="..\src\rdkafka_conf.c" />
+ <ClCompile Include="..\src\rdkafka_event.c" />
+ <ClCompile Include="..\src\rdhttp.c" />
+ <ClCompile Include="..\src\rdkafka_lz4.c" />
+ <ClCompile Include="..\src\rdkafka_msg.c" />
+ <ClCompile Include="..\src\rdkafka_msgset_reader.c" />
+ <ClCompile Include="..\src\rdkafka_msgset_writer.c" />
+ <ClCompile Include="..\src\rdkafka_offset.c" />
+ <ClCompile Include="..\src\rdkafka_op.c" />
+ <ClCompile Include="..\src\rdkafka_partition.c" />
+ <ClCompile Include="..\src\rdkafka_pattern.c" />
+ <ClCompile Include="..\src\rdkafka_queue.c" />
+ <ClCompile Include="..\src\rdkafka_range_assignor.c" />
+ <ClCompile Include="..\src\rdkafka_roundrobin_assignor.c" />
+ <ClCompile Include="..\src\rdkafka_sticky_assignor.c" />
+ <ClCompile Include="..\src\rdkafka_request.c" />
+ <ClCompile Include="..\src\rdkafka_sasl.c" />
+ <ClCompile Include="..\src\rdkafka_sasl_win32.c" />
+ <ClCompile Include="..\src\rdkafka_sasl_plain.c" />
+ <ClCompile Include="..\src\rdkafka_sasl_scram.c" />
+ <ClCompile Include="..\src\rdkafka_sasl_oauthbearer.c" />
+ <ClCompile Include="..\src\rdkafka_sasl_oauthbearer_oidc.c" />
+ <ClCompile Include="..\src\rdkafka_subscription.c" />
+ <ClCompile Include="..\src\rdkafka_assignment.c" />
+ <ClCompile Include="..\src\rdkafka_timer.c" />
+ <ClCompile Include="..\src\rdkafka_topic.c" />
+ <ClCompile Include="..\src\rdkafka_transport.c" />
+ <ClCompile Include="..\src\rdkafka_ssl.c" />
+ <ClCompile Include="..\src\rdkafka_cert.c" />
+ <ClCompile Include="..\src\rdkafka_buf.c" />
+ <ClCompile Include="..\src\rdkafka_feature.c" />
+ <ClCompile Include="..\src\rdkafka_metadata.c" />
+ <ClCompile Include="..\src\rdkafka_metadata_cache.c" />
+ <ClCompile Include="..\src\rdkafka_interceptor.c" />
+ <ClCompile Include="..\src\rdkafka_plugin.c" />
+ <ClCompile Include="..\src\rdkafka_header.c" />
+ <ClCompile Include="..\src\rdkafka_admin.c" />
+ <ClCompile Include="..\src\rdkafka_aux.c" />
+ <ClCompile Include="..\src\rdkafka_background.c" />
+ <ClCompile Include="..\src\rdkafka_idempotence.c" />
+ <ClCompile Include="..\src\rdkafka_txnmgr.c" />
+ <ClCompile Include="..\src\rdkafka_coord.c" />
+ <ClCompile Include="..\src\rdkafka_zstd.c" />
+ <ClCompile Include="..\src\rdkafka_mock.c" />
+ <ClCompile Include="..\src\rdkafka_mock_handlers.c" />
+ <ClCompile Include="..\src\rdkafka_mock_cgrp.c" />
+ <ClCompile Include="..\src\rdkafka_error.c" />
+ <ClCompile Include="..\src\rdkafka_fetcher.c" />
+ <ClCompile Include="..\src\rdlist.c" />
+ <ClCompile Include="..\src\rdlog.c" />
+ <ClCompile Include="..\src\rdmurmur2.c" />
+ <ClCompile Include="..\src\rdstring.c" />
+ <ClCompile Include="..\src\rdrand.c" />
+ <ClCompile Include="..\src\rdregex.c" />
+ <ClCompile Include="..\src\rdunittest.c" />
+ <ClCompile Include="..\src\rdvarint.c" />
+ <ClCompile Include="..\src\rdmap.c" />
+ <ClCompile Include="..\src\snappy.c" />
+ <ClCompile Include="..\src\tinycthread.c" />
+ <ClCompile Include="..\src\tinycthread_extra.c" />
+ <ClCompile Include="..\src\regexp.c" />
+ <ClCompile Include="..\src\rdports.c" />
+ <ClCompile Include="..\src\rdavl.c" />
+ <ClCompile Include="..\src\rdxxhash.c" />
+ <ClCompile Include="..\src\lz4.c" />
+ <ClCompile Include="..\src\lz4frame.c" />
+ <ClCompile Include="..\src\lz4hc.c" />
+ <ClCompile Include="..\src\rddl.c" />
+ </ItemGroup>
+ <ItemGroup>
+ <None Include="..\README.win32" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafkacpp/librdkafkacpp.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafkacpp/librdkafkacpp.vcxproj
new file mode 100644
index 000000000..ffce70182
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/librdkafkacpp/librdkafkacpp.vcxproj
@@ -0,0 +1,104 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{E9641737-EE62-4EC8-88C8-792D2E3CE32D}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>librdkafkacpp</RootNamespace>
+ <ProjectName>librdkafkacpp</ProjectName>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <PropertyGroup Label="Configuration">
+ <ConfigurationType>DynamicLibrary</ConfigurationType>
+ </PropertyGroup>
+ <Import Project="$(SolutionDir)common.vcxproj"/>
+ <PropertyGroup Label="UserMacros" />
+
+ <ItemDefinitionGroup>
+ <Link>
+ <SubSystem>Windows</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalDependencies>librdkafka.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
+ <AdditionalIncludeDirectories>
+ </AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalOptions>/J %(AdditionalOptions)</AdditionalOptions>
+ <AdditionalIncludeDirectories>
+ </AdditionalIncludeDirectories>
+ </ClCompile>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ </ClCompile>
+ <Link>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_WINDOWS;_USRDLL;LIBRDKAFKACPP_EXPORTS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\src-cpp\ConfImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\ConsumerImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\HandleImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\KafkaConsumerImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\HeadersImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\MessageImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\MetadataImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\ProducerImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\QueueImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\RdKafka.cpp" />
+ <ClCompile Include="..\..\src-cpp\TopicImpl.cpp" />
+ <ClCompile Include="..\..\src-cpp\TopicPartitionImpl.cpp" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\src-cpp\rdkafkacpp.h" />
+ <ClInclude Include="..\..\src-cpp\rdkafkacpp_int.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/msbuild.ps1 b/fluent-bit/lib/librdkafka-2.1.0/win32/msbuild.ps1
new file mode 100644
index 000000000..527d3e666
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/msbuild.ps1
@@ -0,0 +1,15 @@
+param(
+ [string]$config='Release',
+ [string]$platform='x64',
+ [string]$toolset='v142'
+)
+
+$msbuild = (& "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -prerelease -products * -requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe)
+
+echo "Using msbuild $msbuild"
+
+echo "Cleaning $config $platform $toolset"
+& $msbuild win32\librdkafka.sln /p:Configuration=$config /p:Platform=$platform /p:PlatformToolset=$toolset /target:Clean
+
+echo "Building $config $platform $toolset"
+& $msbuild win32\librdkafka.sln /p:Configuration=$config /p:Platform=$platform /p:PlatformToolset=$toolset
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/openssl_engine_example/openssl_engine_example.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/openssl_engine_example/openssl_engine_example.vcxproj
new file mode 100644
index 000000000..933d1c6af
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/openssl_engine_example/openssl_engine_example.vcxproj
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$(SolutionDir)common.vcxproj" />
+ <ItemGroup>
+ <ClCompile Include="..\..\examples\openssl_engine_example.cpp" />
+ <ClCompile Include="..\wingetopt.c" />
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{A3C4011E-F82E-4E97-9ADB-33B1ECE001A7}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>openssl_engine_example</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="Shared">
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <LinkIncremental>true</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <LinkIncremental>false</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/package-zip.ps1 b/fluent-bit/lib/librdkafka-2.1.0/win32/package-zip.ps1
new file mode 100644
index 000000000..34dd0ab1a
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/package-zip.ps1
@@ -0,0 +1,46 @@
+<#
+.SYNOPSIS
+
+ Create zip package
+
+
+.DESCRIPTION
+
+ A full build must be completed, to populate output directories, before
+
+ running this script.
+
+ Use build.bat to build
+
+#>
+
+param(
+ [string]$config='Release',
+ [string]$platform='x64',
+ [string]$toolset='v142',
+ [string]$version='0.0.0'
+)
+
+$msbuild = (& "${env:ProgramFiles(x86)}\Microsoft Visual Studio\Installer\vswhere.exe" -latest -prerelease -products * -requires Microsoft.Component.MSBuild -find MSBuild\**\Bin\MSBuild.exe)
+
+echo "Packaging $config $platform $toolset"
+
+$bindir = "build\native\bin\${toolset}\${platform}\$config"
+$libdir = "build\native\lib\${toolset}\${platform}\$config"
+$srcdir = "win32\outdir\${toolset}\${platform}\$config"
+
+New-Item -Path $bindir -ItemType Directory
+New-Item -Path $libdir -ItemType Directory
+
+$platformpart = ""
+if ("x64" -eq $platform) {
+ $platformpart = "-${platform}"
+}
+
+Copy-Item "${srcdir}\librdkafka.dll","${srcdir}\librdkafkacpp.dll",
+"${srcdir}\libcrypto-3${platformpart}.dll","${srcdir}\libssl-3${platformpart}.dll",
+"${srcdir}\zlib1.dll","${srcdir}\zstd.dll","${srcdir}\libcurl.dll" -Destination $bindir
+
+Copy-Item "${srcdir}\librdkafka.lib","${srcdir}\librdkafkacpp.lib" -Destination $libdir
+
+7z.exe a "artifacts\librdkafka.redist.zip" "build"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/packages/repositories.config b/fluent-bit/lib/librdkafka-2.1.0/win32/packages/repositories.config
new file mode 100644
index 000000000..0dec135fc
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/packages/repositories.config
@@ -0,0 +1,4 @@
+<?xml version="1.0" encoding="utf-8"?>
+<repositories>
+ <repository path="..\packages.config" />
+</repositories> \ No newline at end of file
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/push-package.bat b/fluent-bit/lib/librdkafka-2.1.0/win32/push-package.bat
new file mode 100644
index 000000000..aa6e75fc8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/push-package.bat
@@ -0,0 +1,4 @@
+set pkgversion=0.9.3-pre-wip1
+nuget push librdkafka.%pkgversion%.nupkg -Source https://www.nuget.org/api/v2/package
+nuget push librdkafka.redist.%pkgversion%.nupkg -Source https://www.nuget.org/api/v2/package
+nuget push librdkafka.symbols.%pkgversion%.nupkg -Source https://www.nuget.org/api/v2/package
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj
new file mode 100644
index 000000000..75d9449cf
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_complex_consumer_example_cpp/rdkafka_complex_consumer_example_cpp.vcxproj
@@ -0,0 +1,67 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{88B682AB-5082-49D5-A672-9904C5F43ABB}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>rdkafka_complex_consumer_example_cpp</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(SolutionDir)common.vcxproj"/>
+ <PropertyGroup Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ </PropertyGroup>
+ <PropertyGroup Label="UserMacros" />
+ <ItemDefinitionGroup>
+ <Link>
+ <SubSystem>Console</SubSystem>
+<AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+
+ <ItemDefinitionGroup>
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Enabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)..\src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ </Link>
+ </ItemDefinitionGroup>
+
+ <ItemDefinitionGroup Condition="'$(Configuration)'=='Debug'">
+ <ClCompile>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)'=='Release'">
+ <ClCompile>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ </ClCompile>
+ <Link>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\examples\rdkafka_complex_consumer_example.cpp" />
+ <ClCompile Include="..\wingetopt.c" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\wingetopt.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_example/rdkafka_example.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_example/rdkafka_example.vcxproj
new file mode 100644
index 000000000..a5e35c5c0
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_example/rdkafka_example.vcxproj
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{84585784-5BDC-43BE-B714-23EA2E7AEA5B}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>rdkafka_example</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(SolutionDir)common.vcxproj"/>
+ <PropertyGroup Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ </PropertyGroup>
+ <ItemDefinitionGroup>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <AdditionalDependencies>librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\examples\rdkafka_example.cpp" />
+ <ClCompile Include="..\wingetopt.c" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\wingetopt.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_performance/rdkafka_performance.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_performance/rdkafka_performance.vcxproj
new file mode 100644
index 000000000..f4816614b
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/rdkafka_performance/rdkafka_performance.vcxproj
@@ -0,0 +1,97 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{82A67CAA-44B5-4F7D-BAC4-D126CC81FBEC}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>rdkafka_performance</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(SolutionDir)common.vcxproj" />
+ <PropertyGroup Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ </PropertyGroup>
+ <ItemDefinitionGroup>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <AdditionalDependencies>librdkafka.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)/../src</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\examples\rdkafka_performance.c" />
+ <ClCompile Include="..\wingetopt.c" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\wingetopt.h" />
+ <ClInclude Include="..\wintime.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/setup-msys2.ps1 b/fluent-bit/lib/librdkafka-2.1.0/win32/setup-msys2.ps1
new file mode 100644
index 000000000..cf7285041
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/setup-msys2.ps1
@@ -0,0 +1,31 @@
+# Install (if necessary) and set up msys2.
+
+
+$url="https://github.com/msys2/msys2-installer/releases/download/2022-10-28/msys2-base-x86_64-20221028.sfx.exe"
+$sha256="e365b79b4b30b6f4baf34bd93f3d2a41c0a92801c7a96d79cddbfca1090a0554"
+
+
+if (!(Test-Path -Path "c:\msys64\usr\bin\bash.exe")) {
+ echo "Downloading and installing msys2 to c:\msys64"
+
+ (New-Object System.Net.WebClient).DownloadFile($url, './msys2-installer.exe')
+
+ # Verify checksum
+ (Get-FileHash -Algorithm "SHA256" .\msys2-installer.exe).hash -eq $sha256
+
+ # Install msys2
+ .\msys2-installer.exe -y -oc:\
+
+ Remove-Item msys2-installer.exe
+
+ # Set up msys2 the first time
+ echo "Setting up msys"
+ c:\msys64\usr\bin\bash -lc ' '
+
+} else {
+ echo "Using previously installed msys2"
+}
+
+# Update packages
+echo "Updating msys2 packages"
+c:\msys64\usr\bin\bash -lc "pacman --noconfirm -Syuu --overwrite '*'"
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/setup-vcpkg.ps1 b/fluent-bit/lib/librdkafka-2.1.0/win32/setup-vcpkg.ps1
new file mode 100644
index 000000000..c2bd78b84
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/setup-vcpkg.ps1
@@ -0,0 +1,13 @@
+# Set up vcpkg and install required packages.
+
+if (!(Test-Path -Path vcpkg/.git)) {
+ git clone https://github.com/Microsoft/vcpkg.git
+}
+
+cd vcpkg
+# latest version is having an issue while doing vcpkg integrate install
+git checkout 328bd79eb8340b8958f567aaf5f8ffb81056cd36
+cd ..
+
+.\vcpkg\bootstrap-vcpkg.bat
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/tests/.gitignore b/fluent-bit/lib/librdkafka-2.1.0/win32/tests/.gitignore
new file mode 100644
index 000000000..a2128016d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/tests/.gitignore
@@ -0,0 +1,3 @@
+test.conf
+*.json
+
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/tests/test.conf.example b/fluent-bit/lib/librdkafka-2.1.0/win32/tests/test.conf.example
new file mode 100644
index 000000000..ef0b5475d
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/tests/test.conf.example
@@ -0,0 +1,25 @@
+# Copy this file to test.conf and set up according to your configuration.
+
+#
+# Test configuration
+#
+# For slow connections: multiply test timeouts by this much (float)
+#test.timeout.multiplier=3.5
+
+# Test topic names are constructed by:
+# <prefix>_<suffix>, where default topic prefix is "rdkafkatest".
+# suffix is specified by the tests.
+#test.topic.prefix=bib
+
+# Make topic names random:
+# <prefix>_<randomnumber>_<suffix>
+#test.topic.random=true
+
+
+# Bootstrap broker(s)
+metadata.broker.list=localhost:9092
+
+# Debugging
+#debug=metadata,topic,msg,broker
+
+# Any other librdkafka configuration property.
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/tests/tests.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/tests/tests.vcxproj
new file mode 100644
index 000000000..de69a62d8
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/tests/tests.vcxproj
@@ -0,0 +1,237 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="12.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{BE4E1264-5D13-423D-8191-71F7041459E7}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>tests</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <Import Project="$(SolutionDir)common.vcxproj" />
+ <PropertyGroup Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ </PropertyGroup>
+ <PropertyGroup Label="UserMacros" />
+ <ItemDefinitionGroup>
+ <Link>
+ <SubSystem>Console</SubSystem>
+<AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ </Link>
+ </ItemDefinitionGroup>
+
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ <ShowIncludes>false</ShowIncludes>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ <ShowIncludes>false</ShowIncludes>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>
+ </PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;_LIB;_CRT_SECURE_NO_WARNINGS;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <AdditionalIncludeDirectories>$(SolutionDir)\..\src;$(SolutionDir)\..\src-cpp</AdditionalIncludeDirectories>
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemGroup>
+ <ClCompile Include="..\..\tests\0000-unittests.c" />
+ <ClCompile Include="..\..\tests\0001-multiobj.c" />
+ <ClCompile Include="..\..\tests\0002-unkpart.c" />
+ <ClCompile Include="..\..\tests\0003-msgmaxsize.c" />
+ <ClCompile Include="..\..\tests\0004-conf.c" />
+ <ClCompile Include="..\..\tests\0005-order.c" />
+ <ClCompile Include="..\..\tests\0006-symbols.c" />
+ <ClCompile Include="..\..\tests\0007-autotopic.c" />
+ <ClCompile Include="..\..\tests\0008-reqacks.c" />
+ <ClCompile Include="..\..\tests\0009-mock_cluster.c" />
+ <ClCompile Include="..\..\tests\0011-produce_batch.c" />
+ <ClCompile Include="..\..\tests\0012-produce_consume.c" />
+ <ClCompile Include="..\..\tests\0013-null-msgs.c" />
+ <ClCompile Include="..\..\tests\0014-reconsume-191.c" />
+ <ClCompile Include="..\..\tests\0015-offset_seeks.c" />
+ <ClCompile Include="..\..\tests\0016-client_swname.c" />
+ <ClCompile Include="..\..\tests\0017-compression.c" />
+ <ClCompile Include="..\..\tests\0018-cgrp_term.c" />
+ <ClCompile Include="..\..\tests\0019-list_groups.c" />
+ <ClCompile Include="..\..\tests\0020-destroy_hang.c" />
+ <ClCompile Include="..\..\tests\0021-rkt_destroy.c" />
+ <ClCompile Include="..\..\tests\0022-consume_batch.c" />
+ <ClCompile Include="..\..\tests\0025-timers.c" />
+ <ClCompile Include="..\..\tests\0026-consume_pause.c" />
+ <ClCompile Include="..\..\tests\0028-long_topicnames.c" />
+ <ClCompile Include="..\..\tests\0029-assign_offset.c" />
+ <ClCompile Include="..\..\tests\0030-offset_commit.c" />
+ <ClCompile Include="..\..\tests\0031-get_offsets.c" />
+ <ClCompile Include="..\..\tests\0033-regex_subscribe.c" />
+ <ClCompile Include="..\..\tests\0034-offset_reset.c" />
+ <ClCompile Include="..\..\tests\0035-api_version.c" />
+ <ClCompile Include="..\..\tests\0036-partial_fetch.c" />
+ <ClCompile Include="..\..\tests\0037-destroy_hang_local.c" />
+ <ClCompile Include="..\..\tests\0038-performance.c" />
+ <ClCompile Include="..\..\tests\0039-event.c" />
+ <ClCompile Include="..\..\tests\0040-io_event.c" />
+ <ClCompile Include="..\..\tests\0041-fetch_max_bytes.c" />
+ <ClCompile Include="..\..\tests\0042-many_topics.c" />
+ <ClCompile Include="..\..\tests\0043-no_connection.c" />
+ <ClCompile Include="..\..\tests\0044-partition_cnt.c" />
+ <ClCompile Include="..\..\tests\0045-subscribe_update.c" />
+ <ClCompile Include="..\..\tests\0046-rkt_cache.c" />
+ <ClCompile Include="..\..\tests\0047-partial_buf_tmout.c" />
+ <ClCompile Include="..\..\tests\0048-partitioner.c" />
+ <ClCompile Include="..\..\tests\0049-consume_conn_close.c" />
+ <ClCompile Include="..\..\tests\0050-subscribe_adds.c" />
+ <ClCompile Include="..\..\tests\0051-assign_adds.c" />
+ <ClCompile Include="..\..\tests\0052-msg_timestamps.c" />
+ <ClCompile Include="..\..\tests\0053-stats_cb.cpp" />
+ <ClCompile Include="..\..\tests\0054-offset_time.cpp" />
+ <ClCompile Include="..\..\tests\0055-producer_latency.c" />
+ <ClCompile Include="..\..\tests\0056-balanced_group_mt.c" />
+ <ClCompile Include="..\..\tests\0057-invalid_topic.cpp" />
+ <ClCompile Include="..\..\tests\0058-log.cpp" />
+ <ClCompile Include="..\..\tests\0059-bsearch.cpp" />
+ <ClCompile Include="..\..\tests\0060-op_prio.cpp" />
+ <ClCompile Include="..\..\tests\0061-consumer_lag.cpp" />
+ <ClCompile Include="..\..\tests\0062-stats_event.c" />
+ <ClCompile Include="..\..\tests\0063-clusterid.cpp" />
+ <ClCompile Include="..\..\tests\0064-interceptors.c" />
+ <ClCompile Include="..\..\tests\0065-yield.cpp" />
+ <ClCompile Include="..\..\tests\0066-plugins.cpp" />
+ <ClCompile Include="..\..\tests\0067-empty_topic.cpp" />
+ <ClCompile Include="..\..\tests\0068-produce_timeout.c" />
+ <ClCompile Include="..\..\tests\0069-consumer_add_parts.c" />
+ <ClCompile Include="..\..\tests\0070-null_empty.cpp" />
+ <ClCompile Include="..\..\tests\0072-headers_ut.c" />
+ <ClCompile Include="..\..\tests\0073-headers.c" />
+ <ClCompile Include="..\..\tests\0074-producev.c" />
+ <ClCompile Include="..\..\tests\0075-retry.c" />
+ <ClCompile Include="..\..\tests\0076-produce_retry.c" />
+ <ClCompile Include="..\..\tests\0077-compaction.c" />
+ <ClCompile Include="..\..\tests\0078-c_from_cpp.cpp" />
+ <ClCompile Include="..\..\tests\0079-fork.c" />
+ <ClCompile Include="..\..\tests\0080-admin_ut.c" />
+ <ClCompile Include="..\..\tests\0081-admin.c" />
+ <ClCompile Include="..\..\tests\0082-fetch_max_bytes.cpp" />
+ <ClCompile Include="..\..\tests\0083-cb_event.c" />
+ <ClCompile Include="..\..\tests\0084-destroy_flags.c" />
+ <ClCompile Include="..\..\tests\0085-headers.cpp" />
+ <ClCompile Include="..\..\tests\0086-purge.c" />
+ <ClCompile Include="..\..\tests\0088-produce_metadata_timeout.c" />
+ <ClCompile Include="..\..\tests\0089-max_poll_interval.c" />
+ <ClCompile Include="..\..\tests\0090-idempotence.c" />
+ <ClCompile Include="..\..\tests\0091-max_poll_interval_timeout.c" />
+ <ClCompile Include="..\..\tests\0092-mixed_msgver.c" />
+ <ClCompile Include="..\..\tests\0093-holb.c" />
+ <ClCompile Include="..\..\tests\0094-idempotence_msg_timeout.c" />
+ <ClCompile Include="..\..\tests\0095-all_brokers_down.cpp" />
+ <ClCompile Include="..\..\tests\0097-ssl_verify.cpp" />
+ <ClCompile Include="..\..\tests\0098-consumer-txn.cpp" />
+ <ClCompile Include="..\..\tests\0099-commit_metadata.c" />
+ <ClCompile Include="..\..\tests\0100-thread_interceptors.cpp" />
+ <ClCompile Include="..\..\tests\0101-fetch-from-follower.cpp" />
+ <ClCompile Include="..\..\tests\0102-static_group_rebalance.c" />
+ <ClCompile Include="..\..\tests\0103-transactions.c" />
+ <ClCompile Include="..\..\tests\0104-fetch_from_follower_mock.c" />
+ <ClCompile Include="..\..\tests\0105-transactions_mock.c" />
+ <ClCompile Include="..\..\tests\0106-cgrp_sess_timeout.c" />
+ <ClCompile Include="..\..\tests\0107-topic_recreate.c" />
+ <ClCompile Include="..\..\tests\0109-auto_create_topics.cpp" />
+ <ClCompile Include="..\..\tests\0110-batch_size.cpp" />
+ <ClCompile Include="..\..\tests\0111-delay_create_topics.cpp" />
+ <ClCompile Include="..\..\tests\0112-assign_unknown_part.c" />
+ <ClCompile Include="..\..\tests\0113-cooperative_rebalance.cpp" />
+ <ClCompile Include="..\..\tests\0114-sticky_partitioning.cpp" />
+ <ClCompile Include="..\..\tests\0115-producer_auth.cpp" />
+ <ClCompile Include="..\..\tests\0116-kafkaconsumer_close.cpp" />
+ <ClCompile Include="..\..\tests\0117-mock_errors.c" />
+ <ClCompile Include="..\..\tests\0118-commit_rebalance.c" />
+ <ClCompile Include="..\..\tests\0119-consumer_auth.cpp" />
+ <ClCompile Include="..\..\tests\0120-asymmetric_subscription.c" />
+ <ClCompile Include="..\..\tests\0121-clusterid.c" />
+ <ClCompile Include="..\..\tests\0122-buffer_cleaning_after_rebalance.c" />
+ <ClCompile Include="..\..\tests\0123-connections_max_idle.c" />
+ <ClCompile Include="..\..\tests\0124-openssl_invalid_engine.c" />
+ <ClCompile Include="..\..\tests\0125-immediate_flush.c" />
+ <ClCompile Include="..\..\tests\0126-oauthbearer_oidc.c" />
+ <ClCompile Include="..\..\tests\0128-sasl_callback_queue.cpp" />
+ <ClCompile Include="..\..\tests\0129-fetch_aborted_msgs.c" />
+ <ClCompile Include="..\..\tests\0130-store_offsets.c" />
+ <ClCompile Include="..\..\tests\0131-connect_timeout.c" />
+ <ClCompile Include="..\..\tests\0132-strategy_ordering.c" />
+ <ClCompile Include="..\..\tests\0133-ssl_keys.c" />
+ <ClCompile Include="..\..\tests\0134-ssl_provider.c" />
+ <ClCompile Include="..\..\tests\0135-sasl_credentials.cpp" />
+ <ClCompile Include="..\..\tests\0136-resolve_cb.c" />
+ <ClCompile Include="..\..\tests\0137-barrier_batch_consume.c" />
+ <ClCompile Include="..\..\tests\0138-admin_mock.c" />
+ <ClCompile Include="..\..\tests\8000-idle.cpp" />
+ <ClCompile Include="..\..\tests\test.c" />
+ <ClCompile Include="..\..\tests\testcpp.cpp" />
+ <ClCompile Include="..\..\tests\rusage.c" />
+ <ClCompile Include="..\..\src\tinycthread.c" />
+ <ClCompile Include="..\..\src\tinycthread_extra.c" />
+ <ClCompile Include="..\..\src\rdlist.c" />
+ </ItemGroup>
+ <ItemGroup>
+ <ClInclude Include="..\..\tests\test.h" />
+ <ClInclude Include="..\..\tests\testcpp.h" />
+ <ClInclude Include="..\..\tests\testshared.h" />
+ </ItemGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets" />
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj b/fluent-bit/lib/librdkafka-2.1.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj
new file mode 100644
index 000000000..4e741d431
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/win_ssl_cert_store/win_ssl_cert_store.vcxproj
@@ -0,0 +1,132 @@
+<?xml version="1.0" encoding="utf-8"?>
+<Project DefaultTargets="Build" ToolsVersion="14.0" xmlns="http://schemas.microsoft.com/developer/msbuild/2003">
+ <Import Project="$(SolutionDir)common.vcxproj" />
+ <ItemGroup>
+ <ClCompile Include="..\..\examples\win_ssl_cert_store.cpp" />
+ <ClCompile Include="..\wingetopt.c" />
+ </ItemGroup>
+ <PropertyGroup Label="Globals">
+ <ProjectGuid>{1A64A271-4840-4686-9F6F-F5AF0F7C385A}</ProjectGuid>
+ <Keyword>Win32Proj</Keyword>
+ <RootNamespace>win_ssl_cert_store</RootNamespace>
+ <WindowsTargetPlatformVersion>10.0</WindowsTargetPlatformVersion>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>true</UseDebugLibraries>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'" Label="Configuration">
+ <ConfigurationType>Application</ConfigurationType>
+ <UseDebugLibraries>false</UseDebugLibraries>
+ <WholeProgramOptimization>true</WholeProgramOptimization>
+ <CharacterSet>Unicode</CharacterSet>
+ </PropertyGroup>
+ <ImportGroup Label="ExtensionSettings">
+ </ImportGroup>
+ <ImportGroup Label="Shared">
+ </ImportGroup>
+ <PropertyGroup Label="UserMacros" />
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <LinkIncremental>true</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <LinkIncremental>true</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <LinkIncremental>false</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <PropertyGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <LinkIncremental>false</LinkIncremental>
+ <IncludePath>$(VC_IncludePath);$(WindowsSDK_IncludePath);$(SolutionDir)/../src-cpp</IncludePath>
+ </PropertyGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|Win32'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>WIN32;_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Debug|x64'">
+ <ClCompile>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <WarningLevel>Level3</WarningLevel>
+ <Optimization>Disabled</Optimization>
+ <PreprocessorDefinitions>_DEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|Win32'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>WIN32;NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <ItemDefinitionGroup Condition="'$(Configuration)|$(Platform)'=='Release|x64'">
+ <ClCompile>
+ <WarningLevel>Level3</WarningLevel>
+ <PrecompiledHeader>NotUsing</PrecompiledHeader>
+ <Optimization>MaxSpeed</Optimization>
+ <FunctionLevelLinking>true</FunctionLevelLinking>
+ <IntrinsicFunctions>true</IntrinsicFunctions>
+ <PreprocessorDefinitions>NDEBUG;_CONSOLE;%(PreprocessorDefinitions)</PreprocessorDefinitions>
+ <SDLCheck>true</SDLCheck>
+ <PrecompiledHeaderFile />
+ </ClCompile>
+ <Link>
+ <SubSystem>Console</SubSystem>
+ <EnableCOMDATFolding>true</EnableCOMDATFolding>
+ <OptimizeReferences>true</OptimizeReferences>
+ <GenerateDebugInformation>true</GenerateDebugInformation>
+ <AdditionalLibraryDirectories>$(BuildOutputDir)</AdditionalLibraryDirectories>
+ <AdditionalDependencies>librdkafka.lib;librdkafkacpp.lib;ws2_32.lib;kernel32.lib;user32.lib;gdi32.lib;winspool.lib;comdlg32.lib;advapi32.lib;shell32.lib;ole32.lib;oleaut32.lib;uuid.lib;odbc32.lib;odbccp32.lib;Crypt32.lib;%(AdditionalDependencies)</AdditionalDependencies>
+ </Link>
+ </ItemDefinitionGroup>
+ <Import Project="$(VCTargetsPath)\Microsoft.Cpp.targets" />
+ <ImportGroup Label="ExtensionTargets">
+ </ImportGroup>
+</Project>
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.c b/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.c
new file mode 100644
index 000000000..b20252932
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.c
@@ -0,0 +1,564 @@
+/* $OpenBSD: getopt_long.c,v 1.23 2007/10/31 12:34:57 chl Exp $ */
+/* $NetBSD: getopt_long.c,v 1.15 2002/01/31 22:43:40 tv Exp $ */
+
+/*
+ * Copyright (c) 2002 Todd C. Miller <Todd.Miller@courtesan.com>
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Sponsored in part by the Defense Advanced Research Projects
+ * Agency (DARPA) and Air Force Research Laboratory, Air Force
+ * Materiel Command, USAF, under agreement number F39502-99-1-0512.
+ */
+/*-
+ * Copyright (c) 2000 The NetBSD Foundation, Inc.
+ * All rights reserved.
+ *
+ * This code is derived from software contributed to The NetBSD Foundation
+ * by Dieter Baron and Thomas Klausner.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE NETBSD FOUNDATION, INC. AND CONTRIBUTORS
+ * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
+ * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+ * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR CONTRIBUTORS
+ * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include "wingetopt.h"
+#include <stdarg.h>
+#include <stdio.h>
+#include <windows.h>
+
+#define REPLACE_GETOPT /* use this getopt as the system getopt(3) */
+
+#ifdef REPLACE_GETOPT
+int opterr = 1; /* if error message should be printed */
+int optind = 1; /* index into parent argv vector */
+int optopt = '?'; /* character checked for validity */
+#undef optreset /* see getopt.h */
+#define optreset __mingw_optreset
+int optreset; /* reset getopt */
+char *optarg; /* argument associated with option */
+#endif
+
+#define PRINT_ERROR ((opterr) && (*options != ':'))
+
+#define FLAG_PERMUTE 0x01 /* permute non-options to the end of argv */
+#define FLAG_ALLARGS 0x02 /* treat non-options as args to option "-1" */
+#define FLAG_LONGONLY 0x04 /* operate as getopt_long_only */
+
+/* return values */
+#define BADCH (int)'?'
+#define BADARG ((*options == ':') ? (int)':' : (int)'?')
+#define INORDER (int)1
+
+#ifndef __CYGWIN__
+#define __progname __argv[0]
+#else
+extern char __declspec(dllimport) * __progname;
+#endif
+
+#ifdef __CYGWIN__
+static char EMSG[] = "";
+#else
+#define EMSG ""
+#endif
+
+static int getopt_internal(int,
+ char *const *,
+ const char *,
+ const struct option *,
+ int *,
+ int);
+static int parse_long_options(char *const *,
+ const char *,
+ const struct option *,
+ int *,
+ int);
+static int gcd(int, int);
+static void permute_args(int, int, int, char *const *);
+
+static char *place = EMSG; /* option letter processing */
+
+/* XXX: set optreset to 1 rather than these two */
+static int nonopt_start = -1; /* first non option argument (for permute) */
+static int nonopt_end = -1; /* first option after non options (for permute) */
+
+/* Error messages */
+static const char recargchar[] = "option requires an argument -- %c";
+static const char recargstring[] = "option requires an argument -- %s";
+static const char ambig[] = "ambiguous option -- %.*s";
+static const char noarg[] = "option doesn't take an argument -- %.*s";
+static const char illoptchar[] = "unknown option -- %c";
+static const char illoptstring[] = "unknown option -- %s";
+
+static void _vwarnx(const char *fmt, va_list ap) {
+ (void)fprintf(stderr, "%s: ", __progname);
+ if (fmt != NULL)
+ (void)vfprintf(stderr, fmt, ap);
+ (void)fprintf(stderr, "\n");
+}
+
+static void warnx(const char *fmt, ...) {
+ va_list ap;
+ va_start(ap, fmt);
+ _vwarnx(fmt, ap);
+ va_end(ap);
+}
+
+/*
+ * Compute the greatest common divisor of a and b.
+ */
+static int gcd(int a, int b) {
+ int c;
+
+ c = a % b;
+ while (c != 0) {
+ a = b;
+ b = c;
+ c = a % b;
+ }
+
+ return (b);
+}
+
+/*
+ * Exchange the block from nonopt_start to nonopt_end with the block
+ * from nonopt_end to opt_end (keeping the same order of arguments
+ * in each block).
+ */
+static void permute_args(int panonopt_start,
+ int panonopt_end,
+ int opt_end,
+ char *const *nargv) {
+ int cstart, cyclelen, i, j, ncycle, nnonopts, nopts, pos;
+ char *swap;
+
+ /*
+ * compute lengths of blocks and number and size of cycles
+ */
+ nnonopts = panonopt_end - panonopt_start;
+ nopts = opt_end - panonopt_end;
+ ncycle = gcd(nnonopts, nopts);
+ cyclelen = (opt_end - panonopt_start) / ncycle;
+
+ for (i = 0; i < ncycle; i++) {
+ cstart = panonopt_end + i;
+ pos = cstart;
+ for (j = 0; j < cyclelen; j++) {
+ if (pos >= panonopt_end)
+ pos -= nnonopts;
+ else
+ pos += nopts;
+ swap = nargv[pos];
+ /* LINTED const cast */
+ ((char **)nargv)[pos] = nargv[cstart];
+ /* LINTED const cast */
+ ((char **)nargv)[cstart] = swap;
+ }
+ }
+}
+
+/*
+ * parse_long_options --
+ * Parse long options in argc/argv argument vector.
+ * Returns -1 if short_too is set and the option does not match long_options.
+ */
+static int parse_long_options(char *const *nargv,
+ const char *options,
+ const struct option *long_options,
+ int *idx,
+ int short_too) {
+ char *current_argv, *has_equal;
+ size_t current_argv_len;
+ int i, ambiguous, match;
+
+#define IDENTICAL_INTERPRETATION(_x, _y) \
+ (long_options[(_x)].has_arg == long_options[(_y)].has_arg && \
+ long_options[(_x)].flag == long_options[(_y)].flag && \
+ long_options[(_x)].val == long_options[(_y)].val)
+
+ current_argv = place;
+ match = -1;
+ ambiguous = 0;
+
+ optind++;
+
+ if ((has_equal = strchr(current_argv, '=')) != NULL) {
+ /* argument found (--option=arg) */
+ current_argv_len = has_equal - current_argv;
+ has_equal++;
+ } else
+ current_argv_len = strlen(current_argv);
+
+ for (i = 0; long_options[i].name; i++) {
+ /* find matching long option */
+ if (strncmp(current_argv, long_options[i].name,
+ current_argv_len))
+ continue;
+
+ if (strlen(long_options[i].name) == current_argv_len) {
+ /* exact match */
+ match = i;
+ ambiguous = 0;
+ break;
+ }
+ /*
+ * If this is a known short option, don't allow
+ * a partial match of a single character.
+ */
+ if (short_too && current_argv_len == 1)
+ continue;
+
+ if (match == -1) /* partial match */
+ match = i;
+ else if (!IDENTICAL_INTERPRETATION(i, match))
+ ambiguous = 1;
+ }
+ if (ambiguous) {
+ /* ambiguous abbreviation */
+ if (PRINT_ERROR)
+ warnx(ambig, (int)current_argv_len, current_argv);
+ optopt = 0;
+ return (BADCH);
+ }
+ if (match != -1) { /* option found */
+ if (long_options[match].has_arg == no_argument && has_equal) {
+ if (PRINT_ERROR)
+ warnx(noarg, (int)current_argv_len,
+ current_argv);
+ /*
+ * XXX: GNU sets optopt to val regardless of flag
+ */
+ if (long_options[match].flag == NULL)
+ optopt = long_options[match].val;
+ else
+ optopt = 0;
+ return (BADARG);
+ }
+ if (long_options[match].has_arg == required_argument ||
+ long_options[match].has_arg == optional_argument) {
+ if (has_equal)
+ optarg = has_equal;
+ else if (long_options[match].has_arg ==
+ required_argument) {
+ /*
+ * optional argument doesn't use next nargv
+ */
+ optarg = nargv[optind++];
+ }
+ }
+ if ((long_options[match].has_arg == required_argument) &&
+ (optarg == NULL)) {
+ /*
+ * Missing argument; leading ':' indicates no error
+ * should be generated.
+ */
+ if (PRINT_ERROR)
+ warnx(recargstring, current_argv);
+ /*
+ * XXX: GNU sets optopt to val regardless of flag
+ */
+ if (long_options[match].flag == NULL)
+ optopt = long_options[match].val;
+ else
+ optopt = 0;
+ --optind;
+ return (BADARG);
+ }
+ } else { /* unknown option */
+ if (short_too) {
+ --optind;
+ return (-1);
+ }
+ if (PRINT_ERROR)
+ warnx(illoptstring, current_argv);
+ optopt = 0;
+ return (BADCH);
+ }
+ if (idx)
+ *idx = match;
+ if (long_options[match].flag) {
+ *long_options[match].flag = long_options[match].val;
+ return (0);
+ } else
+ return (long_options[match].val);
+#undef IDENTICAL_INTERPRETATION
+}
+
+/*
+ * getopt_internal --
+ * Parse argc/argv argument vector. Called by user level routines.
+ */
+static int getopt_internal(int nargc,
+ char *const *nargv,
+ const char *options,
+ const struct option *long_options,
+ int *idx,
+ int flags) {
+ char *oli; /* option letter list index */
+ int optchar, short_too;
+ static int posixly_correct = -1;
+
+ if (options == NULL)
+ return (-1);
+
+ /*
+ * XXX Some GNU programs (like cvs) set optind to 0 instead of
+ * XXX using optreset. Work around this braindamage.
+ */
+ if (optind == 0)
+ optind = optreset = 1;
+
+ /*
+ * Disable GNU extensions if POSIXLY_CORRECT is set or options
+ * string begins with a '+'.
+ *
+ * CV, 2009-12-14: Check POSIXLY_CORRECT anew if optind == 0 or
+ * optreset != 0 for GNU compatibility.
+ */
+#ifndef _WIN32
+ if (posixly_correct == -1 || optreset != 0)
+ posixly_correct = (getenv("POSIXLY_CORRECT") != NULL);
+#endif
+ if (*options == '-')
+ flags |= FLAG_ALLARGS;
+ else if (posixly_correct || *options == '+')
+ flags &= ~FLAG_PERMUTE;
+ if (*options == '+' || *options == '-')
+ options++;
+
+ optarg = NULL;
+ if (optreset)
+ nonopt_start = nonopt_end = -1;
+start:
+ if (optreset || !*place) { /* update scanning pointer */
+ optreset = 0;
+ if (optind >= nargc) { /* end of argument vector */
+ place = EMSG;
+ if (nonopt_end != -1) {
+ /* do permutation, if we have to */
+ permute_args(nonopt_start, nonopt_end, optind,
+ nargv);
+ optind -= nonopt_end - nonopt_start;
+ } else if (nonopt_start != -1) {
+ /*
+ * If we skipped non-options, set optind
+ * to the first of them.
+ */
+ optind = nonopt_start;
+ }
+ nonopt_start = nonopt_end = -1;
+ return (-1);
+ }
+ if (*(place = nargv[optind]) != '-' ||
+ (place[1] == '\0' && strchr(options, '-') == NULL)) {
+ place = EMSG; /* found non-option */
+ if (flags & FLAG_ALLARGS) {
+ /*
+ * GNU extension:
+ * return non-option as argument to option 1
+ */
+ optarg = nargv[optind++];
+ return (INORDER);
+ }
+ if (!(flags & FLAG_PERMUTE)) {
+ /*
+ * If no permutation wanted, stop parsing
+ * at first non-option.
+ */
+ return (-1);
+ }
+ /* do permutation */
+ if (nonopt_start == -1)
+ nonopt_start = optind;
+ else if (nonopt_end != -1) {
+ permute_args(nonopt_start, nonopt_end, optind,
+ nargv);
+ nonopt_start =
+ optind - (nonopt_end - nonopt_start);
+ nonopt_end = -1;
+ }
+ optind++;
+ /* process next argument */
+ goto start;
+ }
+ if (nonopt_start != -1 && nonopt_end == -1)
+ nonopt_end = optind;
+
+ /*
+ * If we have "-" do nothing, if "--" we are done.
+ */
+ if (place[1] != '\0' && *++place == '-' && place[1] == '\0') {
+ optind++;
+ place = EMSG;
+ /*
+ * We found an option (--), so if we skipped
+ * non-options, we have to permute.
+ */
+ if (nonopt_end != -1) {
+ permute_args(nonopt_start, nonopt_end, optind,
+ nargv);
+ optind -= nonopt_end - nonopt_start;
+ }
+ nonopt_start = nonopt_end = -1;
+ return (-1);
+ }
+ }
+
+ /*
+ * Check long options if:
+ * 1) we were passed some
+ * 2) the arg is not just "-"
+ * 3) either the arg starts with -- we are getopt_long_only()
+ */
+ if (long_options != NULL && place != nargv[optind] &&
+ (*place == '-' || (flags & FLAG_LONGONLY))) {
+ short_too = 0;
+ if (*place == '-')
+ place++; /* --foo long option */
+ else if (*place != ':' && strchr(options, *place) != NULL)
+ short_too = 1; /* could be short option too */
+
+ optchar = parse_long_options(nargv, options, long_options, idx,
+ short_too);
+ if (optchar != -1) {
+ place = EMSG;
+ return (optchar);
+ }
+ }
+
+ if ((optchar = (int)*place++) == (int)':' ||
+ (optchar == (int)'-' && *place != '\0') ||
+ (oli = strchr(options, optchar)) == NULL) {
+ /*
+ * If the user specified "-" and '-' isn't listed in
+ * options, return -1 (non-option) as per POSIX.
+ * Otherwise, it is an unknown option character (or ':').
+ */
+ if (optchar == (int)'-' && *place == '\0')
+ return (-1);
+ if (!*place)
+ ++optind;
+ if (PRINT_ERROR)
+ warnx(illoptchar, optchar);
+ optopt = optchar;
+ return (BADCH);
+ }
+ if (long_options != NULL && optchar == 'W' && oli[1] == ';') {
+ /* -W long-option */
+ if (*place) /* no space */
+ /* NOTHING */;
+ else if (++optind >= nargc) { /* no arg */
+ place = EMSG;
+ if (PRINT_ERROR)
+ warnx(recargchar, optchar);
+ optopt = optchar;
+ return (BADARG);
+ } else /* white space */
+ place = nargv[optind];
+ optchar =
+ parse_long_options(nargv, options, long_options, idx, 0);
+ place = EMSG;
+ return (optchar);
+ }
+ if (*++oli != ':') { /* doesn't take argument */
+ if (!*place)
+ ++optind;
+ } else { /* takes (optional) argument */
+ optarg = NULL;
+ if (*place) /* no white space */
+ optarg = place;
+ else if (oli[1] != ':') { /* arg not optional */
+ if (++optind >= nargc) { /* no arg */
+ place = EMSG;
+ if (PRINT_ERROR)
+ warnx(recargchar, optchar);
+ optopt = optchar;
+ return (BADARG);
+ } else
+ optarg = nargv[optind];
+ }
+ place = EMSG;
+ ++optind;
+ }
+ /* dump back option letter */
+ return (optchar);
+}
+
+#ifdef REPLACE_GETOPT
+/*
+ * getopt --
+ * Parse argc/argv argument vector.
+ *
+ * [eventually this will replace the BSD getopt]
+ */
+int getopt(int nargc, char *const *nargv, const char *options) {
+
+ /*
+ * We don't pass FLAG_PERMUTE to getopt_internal() since
+ * the BSD getopt(3) (unlike GNU) has never done this.
+ *
+ * Furthermore, since many privileged programs call getopt()
+ * before dropping privileges it makes sense to keep things
+ * as simple (and bug-free) as possible.
+ */
+ return (getopt_internal(nargc, nargv, options, NULL, NULL, 0));
+}
+#endif /* REPLACE_GETOPT */
+
+/*
+ * getopt_long --
+ * Parse argc/argv argument vector.
+ */
+int getopt_long(int nargc,
+ char *const *nargv,
+ const char *options,
+ const struct option *long_options,
+ int *idx) {
+
+ return (getopt_internal(nargc, nargv, options, long_options, idx,
+ FLAG_PERMUTE));
+}
+
+/*
+ * getopt_long_only --
+ * Parse argc/argv argument vector.
+ */
+int getopt_long_only(int nargc,
+ char *const *nargv,
+ const char *options,
+ const struct option *long_options,
+ int *idx) {
+
+ return (getopt_internal(nargc, nargv, options, long_options, idx,
+ FLAG_PERMUTE | FLAG_LONGONLY));
+}
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.h b/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.h
new file mode 100644
index 000000000..aaaa52378
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/wingetopt.h
@@ -0,0 +1,100 @@
+#ifndef __GETOPT_H__
+/**
+ * DISCLAIMER
+ * This file has no copyright assigned and is placed in the Public Domain.
+ * This file is a part of the w64 mingw-runtime package.
+ *
+ * The w64 mingw-runtime package and its code is distributed in the hope that it
+ * will be useful but WITHOUT ANY WARRANTY. ALL WARRANTIES, EXPRESSED OR
+ * IMPLIED ARE HEREBY DISCLAIMED. This includes but is not limited to
+ * warranties of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
+ */
+
+#define __GETOPT_H__
+
+/* All the headers include this file. */
+#include <crtdefs.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int optind; /* index of first non-option in argv */
+extern int optopt; /* single option character, as parsed */
+extern int opterr; /* flag to enable built-in diagnostics... */
+ /* (user may set to zero, to suppress) */
+
+extern char *optarg; /* pointer to argument of current option */
+
+extern int getopt(int nargc, char *const *nargv, const char *options);
+
+#ifdef _BSD_SOURCE
+/*
+ * BSD adds the non-standard `optreset' feature, for reinitialisation
+ * of `getopt' parsing. We support this feature, for applications which
+ * proclaim their BSD heritage, before including this header; however,
+ * to maintain portability, developers are advised to avoid it.
+ */
+#define optreset __mingw_optreset
+extern int optreset;
+#endif
+#ifdef __cplusplus
+}
+#endif
+/*
+ * POSIX requires the `getopt' API to be specified in `unistd.h';
+ * thus, `unistd.h' includes this header. However, we do not want
+ * to expose the `getopt_long' or `getopt_long_only' APIs, when
+ * included in this manner. Thus, close the standard __GETOPT_H__
+ * declarations block, and open an additional __GETOPT_LONG_H__
+ * specific block, only when *not* __UNISTD_H_SOURCED__, in which
+ * to declare the extended API.
+ */
+#endif /* !defined(__GETOPT_H__) */
+
+#if !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__)
+#define __GETOPT_LONG_H__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct option /* specification for a long form option... */
+{
+ const char *name; /* option name, without leading hyphens */
+ int has_arg; /* does it take an argument? */
+ int *flag; /* where to save its status, or NULL */
+ int val; /* its associated status value */
+};
+
+enum /* permitted values for its `has_arg' field... */
+{ no_argument = 0, /* option never takes an argument */
+ required_argument, /* option always requires an argument */
+ optional_argument /* option may take an argument */
+};
+
+extern int getopt_long(int nargc,
+ char *const *nargv,
+ const char *options,
+ const struct option *long_options,
+ int *idx);
+extern int getopt_long_only(int nargc,
+ char *const *nargv,
+ const char *options,
+ const struct option *long_options,
+ int *idx);
+/*
+ * Previous MinGW implementation had...
+ */
+#ifndef HAVE_DECL_GETOPT
+/*
+ * ...for the long form API only; keep this for compatibility.
+ */
+#define HAVE_DECL_GETOPT 1
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !defined(__UNISTD_H_SOURCED__) && !defined(__GETOPT_LONG_H__) */
diff --git a/fluent-bit/lib/librdkafka-2.1.0/win32/wintime.h b/fluent-bit/lib/librdkafka-2.1.0/win32/wintime.h
new file mode 100644
index 000000000..07f55b8b1
--- /dev/null
+++ b/fluent-bit/lib/librdkafka-2.1.0/win32/wintime.h
@@ -0,0 +1,33 @@
+/**
+ * Copyright: public domain
+ */
+#pragma once
+
+/**
+ * gettimeofday() for Win32 from
+ * http://stackoverflow.com/questions/10905892/equivalent-of-gettimeday-for-windows
+ */
+#define WIN32_LEAN_AND_MEAN
+#include <windows.h>
+#include <stdint.h> // portable: uint64_t MSVC: __int64
+
+static int gettimeofday(struct timeval *tp, struct timezone *tzp) {
+ // Note: some broken versions only have 8 trailing zero's, the correct
+ // epoch has 9 trailing zero's This magic number is the number of 100
+ // nanosecond intervals since January 1, 1601 (UTC) until 00:00:00
+ // January 1, 1970
+ static const uint64_t EPOCH = ((uint64_t)116444736000000000ULL);
+
+ SYSTEMTIME system_time;
+ FILETIME file_time;
+ uint64_t time;
+
+ GetSystemTime(&system_time);
+ SystemTimeToFileTime(&system_time, &file_time);
+ time = ((uint64_t)file_time.dwLowDateTime);
+ time += ((uint64_t)file_time.dwHighDateTime) << 32;
+
+ tp->tv_sec = (long)((time - EPOCH) / 10000000L);
+ tp->tv_usec = (long)(system_time.wMilliseconds * 1000);
+ return 0;
+}